Using ranked proactive cluster-wide capper with clever recapping
parent ef839c530d
commit b0140a8b93
2 changed files with 3 additions and 3 deletions
@@ -56,7 +56,7 @@ func main() {
 		fmt.Println(task)
 	}
 
-	scheduler := schedulers.NewProactiveClusterwideCapFCFS(tasks, *ignoreWatts)
+	scheduler := schedulers.NewProactiveClusterwideCapRanked(tasks, *ignoreWatts)
 	driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{
 		Master:    *master,
 		Framework: &mesos.FrameworkInfo{
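This first hunk swaps the FCFS capping scheduler for the ranked one at its single construction site in main(). A flag-driven selection would avoid editing this call site for each experiment; below is a hypothetical, self-contained sketch of that idea. Only the two constructor names mirror the diff; the flag, the scheduler interface, and the stub types are assumptions for illustration.

package main

import (
	"flag"
	"fmt"
)

type task struct{ name string }

// scheduler is a stand-in for whatever interface the real schedulers satisfy.
type scheduler interface{ policy() string }

type fcfsCapper struct{}

func (fcfsCapper) policy() string { return "proactive cluster-wide cap, FCFS ordering" }

type rankedCapper struct{}

func (rankedCapper) policy() string { return "proactive cluster-wide cap, ranked ordering" }

// Stub constructors mirroring schedulers.NewProactiveClusterwideCapFCFS and
// schedulers.NewProactiveClusterwideCapRanked from the diff.
func newProactiveClusterwideCapFCFS(tasks []task, ignoreWatts bool) scheduler {
	return fcfsCapper{}
}

func newProactiveClusterwideCapRanked(tasks []task, ignoreWatts bool) scheduler {
	return rankedCapper{}
}

// Hypothetical flag; the real main() hard-codes the constructor instead.
var schedPolicy = flag.String("schedPolicy", "ranked", "capping policy to use: fcfs or ranked")

func main() {
	flag.Parse()
	tasks := []task{{"t1"}, {"t2"}}
	var s scheduler
	if *schedPolicy == "fcfs" {
		s = newProactiveClusterwideCapFCFS(tasks, false)
	} else {
		s = newProactiveClusterwideCapRanked(tasks, false)
	}
	fmt.Println("using:", s.policy())
}

With such a flag, running with -schedPolicy fcfs would restore the old behavior without a code change.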
@@ -381,8 +381,8 @@ func (s *ProactiveClusterwideCapRanked) StatusUpdate(driver sched.SchedulerDriver
 		// Need to remove the task from the window
 		s.capper.taskFinished(*status.TaskId.Value)
 		// Determining the new cluster wide cap.
-		tempCap, err := s.capper.recap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
-		// tempCap, err := s.capper.cleverRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
+		//tempCap, err := s.capper.recap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
+		tempCap, err := s.capper.cleverRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
 
 		if err == nil {
 			// If new determined cap value is different from the current recap value then we need to recap.
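This second hunk flips the recapping strategy in StatusUpdate: when a finished task leaves the capping window, the new cluster-wide cap comes from cleverRecap instead of recap, and a recap is issued only if the value changed. A minimal self-contained sketch of that flow follows; only the names taskFinished and cleverRecap and the recompute-then-compare pattern come from the diff, while the types, the simplified signature, the wattage bookkeeping, and the percentage formula are assumptions.

package main

import "fmt"

type capper struct {
	window map[string]float64 // taskID -> watts attributed to the running task
}

// taskFinished drops a completed task from the capping window.
func (c *capper) taskFinished(taskID string) {
	delete(c.window, taskID)
}

// cleverRecap recomputes the cluster-wide cap from the tasks still running,
// rather than adjusting the previous cap incrementally (an assumed reading of
// "clever recapping"; the real method also takes a task monitor and task ID).
func (c *capper) cleverRecap(totalPower float64) (float64, error) {
	if totalPower <= 0 {
		return 0, fmt.Errorf("no cluster power information")
	}
	running := 0.0
	for _, watts := range c.window {
		running += watts
	}
	// Express the cap as a percentage of total cluster power.
	return running / totalPower * 100, nil
}

func main() {
	c := &capper{window: map[string]float64{"task-1": 120, "task-2": 80}}
	currentCap := 100.0

	// StatusUpdate path for a finished task, mirroring the diff: remove the
	// task from the window, recompute the cap, and recap only on a change.
	c.taskFinished("task-1")
	if tempCap, err := c.cleverRecap(400); err == nil && tempCap != currentCap {
		currentCap = tempCap
		fmt.Printf("recapping cluster to %.2f%% of total power\n", currentCap)
	}
}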