From 042c64210af5e23fbd7e3b65dcde89ff21dd3aee Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 17:44:16 -0500 Subject: [PATCH 01/36] Added PowerClasses -- classification of hosts in the cluster, based on their TDP. --- constants/constants.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/constants/constants.go b/constants/constants.go index bcd051b..efb8ed0 100644 --- a/constants/constants.go +++ b/constants/constants.go @@ -14,6 +14,24 @@ var Hosts = []string{"stratos-001.cs.binghamton.edu", "stratos-002.cs.binghamton "stratos-005.cs.binghamton.edu", "stratos-006.cs.binghamton.edu", "stratos-007.cs.binghamton.edu", "stratos-008.cs.binghamton.edu"} +// Classification of the nodes in the cluster based on their power consumption. +var PowerClasses = map[string]map[string]bool{ + "ClassA": map[string]bool{ + "stratos-005.cs.binghamton.edu": true, + "stratos-006.cs.binghamton.edu": true, + }, + "ClassB": map[string]bool{ + "stratos-007.cs.binghamton.edu": true, + "stratos-008.cs.binghamton.edu": true, + }, + "ClassC": map[string]bool{ + "stratos-001.cs.binghamton.edu": true, + "stratos-002.cs.binghamton.edu": true, + "stratos-003.cs.binghamton.edu": true, + "stratos-004.cs.binghamton.edu": true, + }, +} + // Add a new host to the slice of hosts. func AddNewHost(newHost string) bool { // Validation From 477a319688981a0ce5d22a8a81f457b5e03c79f5 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 17:46:29 -0500 Subject: [PATCH 02/36] Removed unnecessary whitespace. --- schedulers/bpswClassMapWattsProacCC.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/schedulers/bpswClassMapWattsProacCC.go b/schedulers/bpswClassMapWattsProacCC.go index 19eb393..390aeb9 100644 --- a/schedulers/bpswClassMapWattsProacCC.go +++ b/schedulers/bpswClassMapWattsProacCC.go @@ -165,7 +165,7 @@ func (s *BPSWClassMapWattsProacCC) Disconnected(sched.SchedulerDriver) { } // go routine to cap the entire cluster in regular intervals of time. -var bpswClassMapWattsProacCCCapValue = 0.0 // initial value to indicate that we haven't capped the cluster yet. +var bpswClassMapWattsProacCCCapValue = 0.0 // initial value to indicate that we haven't capped the cluster yet. var bpswClassMapWattsProacCCNewCapValue = 0.0 // newly computed cap value func (s *BPSWClassMapWattsProacCC) startCapping() { go func() { From e60488f965753f9c934d9fff82e74b812eb7a5e7 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 17:47:24 -0500 Subject: [PATCH 03/36] added OffersSorter that implements sort interface to sort offers based on CPU. 
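The diff below adds OffersSorter, a sort.Interface implementation ordered by ascending CPU availability. As a minimal, self-contained sketch of how such a sorter gets used, the following stands in a plain struct for *mesos.Offer and a local byCPU type for OffersSorter; both stand-ins are illustrative assumptions, while the call pattern mirrors the schedulers' own sort.Sort(OffersSorter(offers)).

package main

import (
	"fmt"
	"sort"
)

// offer is a stand-in for *mesos.Offer, carrying only the CPU figure sorted on.
type offer struct {
	hostname string
	cpus     float64
}

// byCPU mirrors OffersSorter: Len/Swap/Less over a slice, ordered by ascending CPU.
type byCPU []offer

func (o byCPU) Len() int           { return len(o) }
func (o byCPU) Swap(i, j int)      { o[i], o[j] = o[j], o[i] }
func (o byCPU) Less(i, j int) bool { return o[i].cpus <= o[j].cpus }

func main() {
	offers := []offer{
		{"stratos-008.cs.binghamton.edu", 8.0},
		{"stratos-001.cs.binghamton.edu", 2.0},
		{"stratos-005.cs.binghamton.edu", 4.0},
	}
	sort.Sort(byCPU(offers)) // offer with the least available CPU comes first
	for _, o := range offers {
		fmt.Printf("Offer[%s].CPU = %f\n", o.hostname, o.cpus)
	}
}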
--- schedulers/helpers.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/schedulers/helpers.go b/schedulers/helpers.go index 2c6ffd2..8eedef3 100644 --- a/schedulers/helpers.go +++ b/schedulers/helpers.go @@ -29,6 +29,24 @@ func OfferAgg(offer *mesos.Offer) (float64, float64, float64) { return cpus, mem, watts } +type OffersSorter []*mesos.Offer + +func (offersSorter OffersSorter) Len() int { + return len(offersSorter) +} + +func (offersSorter OffersSorter) Swap(i, j int) { + offersSorter[i], offersSorter[j] = offersSorter[j], offersSorter[i] +} + +func (offersSorter OffersSorter) Less(i, j int) bool { + // getting CPU resource availability of offersSorter[i] + cpu1, _, _ := OfferAgg(offersSorter[i]) + // getting CPU resource availability of offersSorter[j] + cpu2, _, _ := OfferAgg(offersSorter[j]) + return cpu1 <= cpu2 +} + func coLocated(tasks map[string]bool) { for task := range tasks { From 6b0f4e37296acf6f74d6f7ac6090676b335f5d0c Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 17:48:15 -0500 Subject: [PATCH 04/36] removed commented lines. Changed the scheduler to BinPackedSortedWattsSortedOffers. --- scheduler.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scheduler.go b/scheduler.go index 0041939..e6b736d 100644 --- a/scheduler.go +++ b/scheduler.go @@ -58,7 +58,7 @@ func main() { startTime := time.Now().Format("20060102150405") logPrefix := *pcplogPrefix + "_" + startTime - scheduler := schedulers.NewFirstFitSortedWattsReducedWAR(tasks, *ignoreWatts, logPrefix) + scheduler := schedulers.NewBinPackSortedWattsSortedOffers(tasks, *ignoreWatts, logPrefix) driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{ Master: *master, Framework: &mesos.FrameworkInfo{ @@ -72,8 +72,8 @@ func main() { return } - //go pcp.Start(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix) - go pcp.StartPCPLogAndExtremaDynamicCap(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix, *hiThreshold, *loThreshold) + go pcp.Start(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix) + //go pcp.StartPCPLogAndExtremaDynamicCap(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix, *hiThreshold, *loThreshold) time.Sleep(1 * time.Second) // Take a second between starting PCP log and continuing // Attempt to handle signint to not leave pmdumptext running @@ -96,7 +96,7 @@ func main() { // Signals we have scheduled every task we have select { case <-scheduler.Shutdown: - // case <-time.After(shutdownTimeout): + //case <-time.After(shutdownTimeout): } // All tasks have finished @@ -104,7 +104,7 @@ func main() { case <-scheduler.Done: close(scheduler.PCPLog) time.Sleep(5 * time.Second) //Wait for PCP to log a few more seconds - // case <-time.After(shutdownTimeout): + //case <-time.After(shutdownTimeout): } // Done shutting down From 581803c2f06c0a2e42d831f4ea76e7491743e4bb Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 17:52:24 -0500 Subject: [PATCH 05/36] FirstFit and BinPacking with SortedOffers based on CPU. 
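The schedulers added below differ mainly in how they fill a sorted offer: first fit launches the first task that fits and moves on, while bin packing keeps co-scheduling tasks onto the same offer until its CPU, memory, or watts headroom runs out. A compact sketch of that difference follows, under simplified stand-in types; the real code works with *mesos.Offer, def.Task, and OfferAgg, and also handles per-task Host constraints, instance counts, and the ignoreWatts flag, all omitted here.

package main

import "fmt"

// res is a simplified resource triple standing in for what OfferAgg returns.
type res struct{ cpu, ram, watts float64 }

type task struct {
	name string
	need res
}

// fits reports whether need still fits in the offer's remaining headroom.
func fits(avail, used, need res) bool {
	return avail.cpu >= used.cpu+need.cpu &&
		avail.ram >= used.ram+need.ram &&
		avail.watts >= used.watts+need.watts
}

// firstFit places at most one task on the offer: the first one that fits.
func firstFit(offer res, tasks []task) []string {
	for _, t := range tasks {
		if fits(offer, res{}, t.need) {
			return []string{t.name}
		}
	}
	return nil
}

// binPack keeps adding tasks to the same offer until nothing more fits.
func binPack(offer res, tasks []task) []string {
	var placed []string
	var used res
	for _, t := range tasks {
		if fits(offer, used, t.need) {
			placed = append(placed, t.name)
			used.cpu += t.need.cpu
			used.ram += t.need.ram
			used.watts += t.need.watts
		}
	}
	return placed
}

func main() {
	offer := res{cpu: 4, ram: 8192, watts: 100}
	tasks := []task{
		{"task-small-1", res{1, 1024, 20}},
		{"task-small-2", res{1, 1024, 20}},
		{"task-large-1", res{3, 4096, 70}},
	}
	fmt.Println("first fit:", firstFit(offer, tasks)) // one task launched
	fmt.Println("bin pack: ", binPack(offer, tasks))  // offer packed with both small tasks
}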
--- schedulers/binPackSortedWattsSortedOffers.go | 237 ++++++++++++++++++ schedulers/firstfitSortedOffers.go | 228 +++++++++++++++++ schedulers/firstfitSortedWattsSortedOffers.go | 230 +++++++++++++++++ 3 files changed, 695 insertions(+) create mode 100644 schedulers/binPackSortedWattsSortedOffers.go create mode 100644 schedulers/firstfitSortedOffers.go create mode 100644 schedulers/firstfitSortedWattsSortedOffers.go diff --git a/schedulers/binPackSortedWattsSortedOffers.go b/schedulers/binPackSortedWattsSortedOffers.go new file mode 100644 index 0000000..5d926e9 --- /dev/null +++ b/schedulers/binPackSortedWattsSortedOffers.go @@ -0,0 +1,237 @@ +package schedulers + +import ( + "bitbucket.org/sunybingcloud/electron/def" + "fmt" + "github.com/golang/protobuf/proto" + mesos "github.com/mesos/mesos-go/mesosproto" + "github.com/mesos/mesos-go/mesosutil" + sched "github.com/mesos/mesos-go/scheduler" + "log" + "os" + "sort" + "strings" + "time" +) + +// Decides if to take an offer or not +func (*BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool { + + cpus, mem, watts := OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + + if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + return true + } + + return false +} + +type BinPackSortedWattsSortedOffers struct { + base // Type embedded to inherit common functions + tasksCreated int + tasksRunning int + tasks []def.Task + metrics map[string]def.Metric + running map[string]map[string]bool + ignoreWatts bool + + // First set of PCP values are garbage values, signal to logger to start recording when we're + // about to schedule a new task + RecordPCP bool + + // This channel is closed when the program receives an interrupt, + // signalling that the program should shut down. 
+ Shutdown chan struct{} + // This channel is closed after shutdown is closed, and only when all + // outstanding tasks have been cleaned up + Done chan struct{} + + // Controls when to shutdown pcp logging + PCPLog chan struct{} + + schedTrace *log.Logger +} + +// New electron scheduler +func NewBinPackSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BinPackSortedWattsSortedOffers { + sort.Sort(def.WattsSorter(tasks)) + + logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") + if err != nil { + log.Fatal(err) + } + + s := &BinPackSortedWattsSortedOffers{ + tasks: tasks, + ignoreWatts: ignoreWatts, + Shutdown: make(chan struct{}), + Done: make(chan struct{}), + PCPLog: make(chan struct{}), + running: make(map[string]map[string]bool), + RecordPCP: false, + schedTrace: log.New(logFile, "", log.LstdFlags), + } + return s +} + +func (s *BinPackSortedWattsSortedOffers) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo { + taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) + s.tasksCreated++ + + if !s.RecordPCP { + // Turn on logging + s.RecordPCP = true + time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts + } + + // If this is our first time running into this Agent + if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok { + s.running[offer.GetSlaveId().GoString()] = make(map[string]bool) + } + + // Add task to list of tasks running on node + s.running[offer.GetSlaveId().GoString()][taskName] = true + + resources := []*mesos.Resource{ + mesosutil.NewScalarResource("cpus", task.CPU), + mesosutil.NewScalarResource("mem", task.RAM), + } + + if !s.ignoreWatts { + resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts)) + } + + return &mesos.TaskInfo{ + Name: proto.String(taskName), + TaskId: &mesos.TaskID{ + Value: proto.String("electron-" + taskName), + }, + SlaveId: offer.SlaveId, + Resources: resources, + Command: &mesos.CommandInfo{ + Value: proto.String(task.CMD), + }, + Container: &mesos.ContainerInfo{ + Type: mesos.ContainerInfo_DOCKER.Enum(), + Docker: &mesos.ContainerInfo_DockerInfo{ + Image: proto.String(task.Image), + Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated + }, + }, + } +} + +func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) { + log.Printf("Received %d resource offers", len(offers)) + + // Sorting the offers + sort.Sort(OffersSorter(offers)) + + // Printing the sorted offers and the corresponding CPU resource availability + log.Println("Sorted Offers:") + for i := 0; i < len(offers); i++ { + offer := offers[i] + offerCPU, _, _ := OfferAgg(offer) + log.Printf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU) + } + + for _, offer := range offers { + select { + case <-s.Shutdown: + log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") + driver.DeclineOffer(offer.Id, longFilter) + + log.Println("Number of tasks still running: ", s.tasksRunning) + continue + default: + } + + tasks := []*mesos.TaskInfo{} + + offer_cpu, offer_ram, offer_watts := OfferAgg(offer) + + taken := false + totalWatts := 0.0 + totalCPU := 0.0 + totalRAM := 0.0 + for i := 0; i < len(s.tasks); i++ { + task := s.tasks[i] + + // Check host if it exists + if task.Host != "" { + // Don't take offer if it doesn't match our task's host requirement + if !strings.HasPrefix(*offer.Hostname, task.Host) { + continue + } + } + + for *task.Instances > 0 { + // 
Does the task fit + if (s.ignoreWatts || offer_watts >= (totalWatts+task.Watts)) && + (offer_cpu >= (totalCPU + task.CPU)) && + (offer_ram >= (totalRAM + task.RAM)) { + + taken = true + totalWatts += task.Watts + totalCPU += task.CPU + totalRAM += task.RAM + log.Println("Co-Located with: ") + coLocated(s.running[offer.GetSlaveId().GoString()]) + taskToSchedule := s.newTask(offer, task) + tasks = append(tasks, taskToSchedule) + + fmt.Println("Inst: ", *task.Instances) + s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) + *task.Instances-- + + if *task.Instances <= 0 { + // All instances of task have been scheduled, remove it + s.tasks = append(s.tasks[:i], s.tasks[i+1:]...) + + if len(s.tasks) <= 0 { + log.Println("Done scheduling all tasks") + close(s.Shutdown) + } + } + } else { + break // Continue on to next offer + } + } + } + + if taken { + log.Printf("Starting on [%s]\n", offer.GetHostname()) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + } else { + + // If there was no match for the task + fmt.Println("There is not enough resources to launch a task:") + cpus, mem, watts := OfferAgg(offer) + + log.Printf("\n", cpus, mem, watts) + driver.DeclineOffer(offer.Id, defaultFilter) + } + } +} + +func (s *BinPackSortedWattsSortedOffers) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) { + log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) + + if *status.State == mesos.TaskState_TASK_RUNNING { + s.tasksRunning++ + } else if IsTerminal(status.State) { + delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value) + s.tasksRunning-- + if s.tasksRunning == 0 { + select { + case <-s.Shutdown: + close(s.Done) + default: + } + } + } + log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) +} + diff --git a/schedulers/firstfitSortedOffers.go b/schedulers/firstfitSortedOffers.go new file mode 100644 index 0000000..09f1d17 --- /dev/null +++ b/schedulers/firstfitSortedOffers.go @@ -0,0 +1,228 @@ +package schedulers + +import ( + "bitbucket.org/sunybingcloud/electron/def" + "fmt" + "github.com/golang/protobuf/proto" + mesos "github.com/mesos/mesos-go/mesosproto" + "github.com/mesos/mesos-go/mesosutil" + sched "github.com/mesos/mesos-go/scheduler" + "log" + "os" + "sort" + "strings" + "time" +) + +// Decides if to take an offer or not +func (s *FirstFitSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool { + + cpus, mem, watts := OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + + if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= task.Watts) { + return true + } + + return false +} + +// electronScheduler implements the Scheduler interface +type FirstFitSortedOffers struct { + base // Type embedded to inherit common functions + tasksCreated int + tasksRunning int + tasks []def.Task + metrics map[string]def.Metric + running map[string]map[string]bool + ignoreWatts bool + + // First set of PCP values are garbage values, signal to logger to start recording when we're + // about to schedule a new task + RecordPCP bool + + // This channel is closed when the program receives an interrupt, + // signalling that the program should shut down. 
+ Shutdown chan struct{} + // This channel is closed after shutdown is closed, and only when all + // outstanding tasks have been cleaned up + Done chan struct{} + + // Controls when to shutdown pcp logging + PCPLog chan struct{} + + schedTrace *log.Logger +} + +// New electron scheduler +func NewFirstFitSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *FirstFitSortedOffers { + + logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") + if err != nil { + log.Fatal(err) + } + + s := &FirstFitSortedOffers{ + tasks: tasks, + ignoreWatts: ignoreWatts, + Shutdown: make(chan struct{}), + Done: make(chan struct{}), + PCPLog: make(chan struct{}), + running: make(map[string]map[string]bool), + RecordPCP: false, + schedTrace: log.New(logFile, "", log.LstdFlags), + } + return s +} + +func (s *FirstFitSortedOffers) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo { + taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) + s.tasksCreated++ + + if !s.RecordPCP { + // Turn on logging + s.RecordPCP = true + time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts + } + + // If this is our first time running into this Agent + if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok { + s.running[offer.GetSlaveId().GoString()] = make(map[string]bool) + } + + // Add task to list of tasks running on node + s.running[offer.GetSlaveId().GoString()][taskName] = true + + resources := []*mesos.Resource{ + mesosutil.NewScalarResource("cpus", task.CPU), + mesosutil.NewScalarResource("mem", task.RAM), + } + + if !s.ignoreWatts { + resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts)) + } + + return &mesos.TaskInfo{ + Name: proto.String(taskName), + TaskId: &mesos.TaskID{ + Value: proto.String("electron-" + taskName), + }, + SlaveId: offer.SlaveId, + Resources: resources, + Command: &mesos.CommandInfo{ + Value: proto.String(task.CMD), + }, + Container: &mesos.ContainerInfo{ + Type: mesos.ContainerInfo_DOCKER.Enum(), + Docker: &mesos.ContainerInfo_DockerInfo{ + Image: proto.String(task.Image), + Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated + }, + }, + } +} + +func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) { + log.Printf("Received %d resource offers", len(offers)) + + // Sorting the offers + sort.Sort(OffersSorter(offers)) + + // Printing the sorted offers and the corresponding CPU resource availability + log.Println("Sorted Offers:") + for i := 0; i < len(offers); i++ { + offer := offers[i] + offerCPU, _, _ := OfferAgg(offer) + log.Printf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU) + } + + for _, offer := range offers { + select { + case <-s.Shutdown: + log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") + driver.DeclineOffer(offer.Id, longFilter) + + log.Println("Number of tasks still running: ", s.tasksRunning) + continue + default: + } + + tasks := []*mesos.TaskInfo{} + + // First fit strategy + + taken := false + for i := 0; i < len(s.tasks); i++ { + task := s.tasks[i] + + // Check host if it exists + if task.Host != "" { + // Don't take offer if it doesn't match our task's host requirement + if !strings.HasPrefix(*offer.Hostname, task.Host) { + continue + } + } + + // Decision to take the offer or not + if s.takeOffer(offer, task) { + + log.Println("Co-Located with: ") + coLocated(s.running[offer.GetSlaveId().GoString()]) + + taskToSchedule := s.newTask(offer, task) + 
tasks = append(tasks, taskToSchedule) + + log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + + taken = true + + fmt.Println("Inst: ", *task.Instances) + s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) + *task.Instances-- + + if *task.Instances <= 0 { + // All instances of task have been scheduled, remove it + s.tasks[i] = s.tasks[len(s.tasks)-1] + s.tasks = s.tasks[:len(s.tasks)-1] + + if len(s.tasks) <= 0 { + log.Println("Done scheduling all tasks") + close(s.Shutdown) + } + } + break // Offer taken, move on + } + } + + // If there was no match for the task + if !taken { + fmt.Println("There is not enough resources to launch a task:") + cpus, mem, watts := OfferAgg(offer) + + log.Printf("\n", cpus, mem, watts) + driver.DeclineOffer(offer.Id, defaultFilter) + } + + } +} + +func (s *FirstFitSortedOffers) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) { + log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) + + if *status.State == mesos.TaskState_TASK_RUNNING { + s.tasksRunning++ + } else if IsTerminal(status.State) { + delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value) + s.tasksRunning-- + if s.tasksRunning == 0 { + select { + case <-s.Shutdown: + close(s.Done) + default: + } + } + } + log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) +} diff --git a/schedulers/firstfitSortedWattsSortedOffers.go b/schedulers/firstfitSortedWattsSortedOffers.go new file mode 100644 index 0000000..0590585 --- /dev/null +++ b/schedulers/firstfitSortedWattsSortedOffers.go @@ -0,0 +1,230 @@ +package schedulers + +import ( + "bitbucket.org/sunybingcloud/electron/def" + "fmt" + "github.com/golang/protobuf/proto" + mesos "github.com/mesos/mesos-go/mesosproto" + "github.com/mesos/mesos-go/mesosutil" + sched "github.com/mesos/mesos-go/scheduler" + "log" + "os" + "sort" + "strings" + "time" +) + +// Decides if to take an offer or not +func (s *FirstFitSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool { + + cpus, mem, watts := OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + + if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= task.Watts) { + return true + } + + return false +} + +// electronScheduler implements the Scheduler interface +type FirstFitSortedWattsSortedOffers struct { + base // Type embedded to inherit common functions + tasksCreated int + tasksRunning int + tasks []def.Task + metrics map[string]def.Metric + running map[string]map[string]bool + ignoreWatts bool + + // First set of PCP values are garbage values, signal to logger to start recording when we're + // about to schedule a new task + RecordPCP bool + + // This channel is closed when the program receives an interrupt, + // signalling that the program should shut down. + Shutdown chan struct{} + // This channel is closed after shutdown is closed, and only when all + // outstanding tasks have been cleaned up + Done chan struct{} + + // Controls when to shutdown pcp logging + PCPLog chan struct{} + + schedTrace *log.Logger +} + +// New electron scheduler +func NewFirstFitSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *FirstFitSortedWattsSortedOffers { + + // Sorting the tasks in increasing order of watts requirement. 
+ sort.Sort(def.WattsSorter(tasks)) + + logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") + if err != nil { + log.Fatal(err) + } + + s := &FirstFitSortedWattsSortedOffers{ + tasks: tasks, + ignoreWatts: ignoreWatts, + Shutdown: make(chan struct{}), + Done: make(chan struct{}), + PCPLog: make(chan struct{}), + running: make(map[string]map[string]bool), + RecordPCP: false, + schedTrace: log.New(logFile, "", log.LstdFlags), + } + return s +} + +func (s *FirstFitSortedWattsSortedOffers) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo { + taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) + s.tasksCreated++ + + if !s.RecordPCP { + // Turn on logging + s.RecordPCP = true + time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts + } + + // If this is our first time running into this Agent + if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok { + s.running[offer.GetSlaveId().GoString()] = make(map[string]bool) + } + + // Add task to list of tasks running on node + s.running[offer.GetSlaveId().GoString()][taskName] = true + + resources := []*mesos.Resource{ + mesosutil.NewScalarResource("cpus", task.CPU), + mesosutil.NewScalarResource("mem", task.RAM), + } + + if !s.ignoreWatts { + resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts)) + } + + return &mesos.TaskInfo{ + Name: proto.String(taskName), + TaskId: &mesos.TaskID{ + Value: proto.String("electron-" + taskName), + }, + SlaveId: offer.SlaveId, + Resources: resources, + Command: &mesos.CommandInfo{ + Value: proto.String(task.CMD), + }, + Container: &mesos.ContainerInfo{ + Type: mesos.ContainerInfo_DOCKER.Enum(), + Docker: &mesos.ContainerInfo_DockerInfo{ + Image: proto.String(task.Image), + Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated + }, + }, + } +} + +func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) { + // Sorting the offers + sort.Sort(OffersSorter(offers)) + + // Printing the sorted offers and the corresponding CPU resource availability + log.Println("Sorted Offers:") + for i := 0; i < len(offers); i++ { + offer := offers[i] + offerCPU, _, _ := OfferAgg(offer) + log.Printf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU) + } + + log.Printf("Received %d resource offers", len(offers)) + + for _, offer := range offers { + select { + case <-s.Shutdown: + log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") + driver.DeclineOffer(offer.Id, longFilter) + + log.Println("Number of tasks still running: ", s.tasksRunning) + continue + default: + } + + tasks := []*mesos.TaskInfo{} + + // First fit strategy + + taken := false + for i := 0; i < len(s.tasks); i++ { + task := s.tasks[i] + + // Check host if it exists + if task.Host != "" { + // Don't take offer if it doesn't match our task's host requirement + if !strings.HasPrefix(*offer.Hostname, task.Host) { + continue + } + } + + // Decision to take the offer or not + if s.takeOffer(offer, task) { + + log.Println("Co-Located with: ") + coLocated(s.running[offer.GetSlaveId().GoString()]) + + taskToSchedule := s.newTask(offer, task) + tasks = append(tasks, taskToSchedule) + + log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + + taken = true + + fmt.Println("Inst: ", *task.Instances) + s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) + 
*task.Instances-- + + if *task.Instances <= 0 { + // All instances of task have been scheduled, remove it + s.tasks = append(s.tasks[:i], s.tasks[i+1:]...) + + if len(s.tasks) <= 0 { + log.Println("Done scheduling all tasks") + close(s.Shutdown) + } + } + break // Offer taken, move on + } + } + + // If there was no match for the task + if !taken { + fmt.Println("There is not enough resources to launch a task:") + cpus, mem, watts := OfferAgg(offer) + + log.Printf("\n", cpus, mem, watts) + driver.DeclineOffer(offer.Id, defaultFilter) + } + + } +} + +func (s *FirstFitSortedWattsSortedOffers) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) { + log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) + + if *status.State == mesos.TaskState_TASK_RUNNING { + s.tasksRunning++ + } else if IsTerminal(status.State) { + delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value) + s.tasksRunning-- + if s.tasksRunning == 0 { + select { + case <-s.Shutdown: + close(s.Done) + default: + } + } + } + log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) +} From 916581067b16086c13eca88d7eefe076aa12e42c Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 17:58:36 -0500 Subject: [PATCH 06/36] hybrid scheduler(TOP HEAVY) -- Pack Small tasks (less power intensive) using BinPacking and Spread large tasks (power intensive) using FirstFit. BOTTOM HEAVY -- Pack Large tasks (power intensive) using BinPacking and spread the small tasks (less power intensive) using FirstFit. --- schedulers/bottomHeavy.go | 338 ++++++++++++++++++++++++++++++++++++++ schedulers/topHeavy.go | 338 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 676 insertions(+) create mode 100644 schedulers/bottomHeavy.go create mode 100644 schedulers/topHeavy.go diff --git a/schedulers/bottomHeavy.go b/schedulers/bottomHeavy.go new file mode 100644 index 0000000..999d9d6 --- /dev/null +++ b/schedulers/bottomHeavy.go @@ -0,0 +1,338 @@ +package schedulers + +import ( + "bitbucket.org/sunybingcloud/electron/constants" + "bitbucket.org/sunybingcloud/electron/def" + "fmt" + "github.com/golang/protobuf/proto" + mesos "github.com/mesos/mesos-go/mesosproto" + "github.com/mesos/mesos-go/mesosutil" + sched "github.com/mesos/mesos-go/scheduler" + "log" + "math" + "os" + "sort" + "time" +) + +/* +Tasks are categorized into small and large tasks based on the watts requirement. +All the small tasks are packed into offers from agents belonging to power class C, using BinPacking. +All the large tasks are spread among the offers from agents belonging to power class A and power class B, using FirstFit. + +BinPacking has the most effect when co-scheduling of tasks is increased. Large tasks typically utilize more resources and hence, + co-scheduling them has a great impact on the total power utilization. +*/ + +// electronScheduler implements the Scheduler interface +type BottomHeavy struct { + base // Type embedded to inherit common functions + tasksCreated int + tasksRunning int + tasks []def.Task + metrics map[string]def.Metric + running map[string]map[string]bool + ignoreWatts bool + smallTasks, largeTasks []def.Task + + // First set of PCP values are garbage values, signal to logger to start recording when we're + // about to schedule a new task + RecordPCP bool + + // This channel is closed when the program receives an interrupt, + // signalling that the program should shut down. 
+ Shutdown chan struct{} + // This channel is closed after shutdown is closed, and only when all + // outstanding tasks have been cleaned up + Done chan struct{} + + // Controls when to shutdown pcp logging + PCPLog chan struct{} + + schedTrace *log.Logger +} + +// New electron scheduler +func NewPackBigSpreadSmall(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BottomHeavy { + sort.Sort(def.WattsSorter(tasks)) + + logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") + if err != nil { + log.Fatal(err) + } + + // Separating small tasks from large tasks. + // Classification done based on MMPU watts requirements. + mid := int(math.Floor((float64(len(tasks)) / 2.0) + 0.5)) + s := &BottomHeavy{ + smallTasks: tasks[:mid], + largeTasks: tasks[mid+1:], + ignoreWatts: ignoreWatts, + Shutdown: make(chan struct{}), + Done: make(chan struct{}), + PCPLog: make(chan struct{}), + running: make(map[string]map[string]bool), + RecordPCP: false, + schedTrace: log.New(logFile, "", log.LstdFlags), + } + return s +} + +func (s *BottomHeavy) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { + taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) + s.tasksCreated++ + + if !s.RecordPCP { + // Turn on logging + s.RecordPCP = true + time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts + } + + // If this is our first time running into this Agent + if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok { + s.running[offer.GetSlaveId().GoString()] = make(map[string]bool) + } + + // Add task to list of tasks running on node + s.running[offer.GetSlaveId().GoString()][taskName] = true + + resources := []*mesos.Resource{ + mesosutil.NewScalarResource("cpus", task.CPU), + mesosutil.NewScalarResource("mem", task.RAM), + } + + if !s.ignoreWatts { + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + } + + return &mesos.TaskInfo{ + Name: proto.String(taskName), + TaskId: &mesos.TaskID{ + Value: proto.String("electron-" + taskName), + }, + SlaveId: offer.SlaveId, + Resources: resources, + Command: &mesos.CommandInfo{ + Value: proto.String(task.CMD), + }, + Container: &mesos.ContainerInfo{ + Type: mesos.ContainerInfo_DOCKER.Enum(), + Docker: &mesos.ContainerInfo_DockerInfo{ + Image: proto.String(task.Image), + Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated + }, + }, + } +} + +// retrieve the power class of host in offer +func (s *BottomHeavy) getPowerClass(offer *mesos.Offer) string { + var powerClass string + for _, attr := range offer.GetAttributes() { + if attr.GetName() == "class" { + powerClass = attr.GetText().GetValue() + } + } + return powerClass +} + +// Shut down scheduler if no more tasks to schedule +func (s *BottomHeavy) shutDownIfNecessary() { + if len(s.smallTasks) <= 0 && len(s.largeTasks) <= 0 { + log.Println("Done scheduling all tasks") + close(s.Shutdown) + } +} + +// create TaskInfo and log scheduling trace +func (s *BottomHeavy) createTaskInfoAndLogSchedTrace(offer *mesos.Offer, + powerClass string, task def.Task) *mesos.TaskInfo { + log.Println("Co-Located with:") + coLocated(s.running[offer.GetSlaveId().GoString()]) + taskToSchedule := s.newTask(offer, task, powerClass) + + fmt.Println("Inst: ", *task.Instances) + s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) + *task.Instances-- + return taskToSchedule +} + +// Using BinPacking to pack small tasks into this offer. 
+func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) { + for _, offer := range offers { + select { + case <-s.Shutdown: + log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") + driver.DeclineOffer(offer.Id, longFilter) + + log.Println("Number of tasks still running: ", s.tasksRunning) + continue + default: + } + + tasks := []*mesos.TaskInfo{} + offerCPU, offerRAM, offerWatts := OfferAgg(offer) + totalWatts := 0.0 + totalCPU := 0.0 + totalRAM := 0.0 + taken := false + for i := 0; i < len(s.largeTasks); i++ { + task := s.largeTasks[i] + + for *task.Instances > 0 { + powerClass := s.getPowerClass(offer) + // Does the task fit + // OR lazy evaluation. If ignore watts is set to true, second statement won't + // be evaluated. + wattsToConsider := task.Watts + if !s.ignoreWatts { + wattsToConsider = task.ClassToWatts[powerClass] + } + if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsToConsider))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { + taken = true + totalWatts += wattsToConsider + totalCPU += task.CPU + totalRAM += task.RAM + tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task)) + + if *task.Instances <= 0 { + // All instances of task have been scheduled, remove it + s.largeTasks = append(s.largeTasks[:i], s.largeTasks[i+1:]...) + s.shutDownIfNecessary() + } + } else { + break // Continue on to next task + } + } + } + + if taken { + log.Printf("Starting on [%s]\n", offer.GetHostname()) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + } else { + // If there was no match for the task + fmt.Println("There is not enough resources to launch a task:") + cpus, mem, watts := OfferAgg(offer) + + log.Printf("\n", cpus, mem, watts) + driver.DeclineOffer(offer.Id, defaultFilter) + } + } +} + +// Using first fit to spread large tasks into these offers. +func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { + for _, offer := range offers { + select { + case <-s.Shutdown: + log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") + driver.DeclineOffer(offer.Id, longFilter) + + log.Println("Number of tasks still running: ", s.tasksRunning) + continue + default: + } + + tasks := []*mesos.TaskInfo{} + offerCPU, offerRAM, offerWatts := OfferAgg(offer) + taken := false + for i := 0; i < len(s.smallTasks); i++ { + task := s.smallTasks[i] + powerClass := s.getPowerClass(offer) + + // Decision to take the offer or not + wattsToConsider := task.Watts + if !s.ignoreWatts { + wattsToConsider = task.ClassToWatts[powerClass] + } + if (s.ignoreWatts || (offerWatts >= wattsToConsider)) && + (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + taken = true + tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task)) + log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + + if *task.Instances <= 0 { + // All instances of task have been scheduled, remove it + s.smallTasks = append(s.smallTasks[:i], s.smallTasks[i+1:]...) 
+ s.shutDownIfNecessary() + } + break // Offer taken, move on + } + } + + if !taken { + // If there was no match for the task + fmt.Println("There is not enough resources to launch a task:") + cpus, mem, watts := OfferAgg(offer) + + log.Printf("\n", cpus, mem, watts) + driver.DeclineOffer(offer.Id, defaultFilter) + } + } +} + +func (s *BottomHeavy) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) { + log.Printf("Received %d resource offers", len(offers)) + + // We need to separate the offers into + // offers from ClassA and ClassB and offers from ClassC. + // Nodes in ClassA and ClassB will be packed with the large tasks. + // Small tasks will be spread out among the nodes in ClassC. + offersClassAB := []*mesos.Offer{} + offersClassC := []*mesos.Offer{} + + for _, offer := range offers { + select { + case <-s.Shutdown: + log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") + driver.DeclineOffer(offer.Id, longFilter) + + log.Println("Number of tasks still running: ", s.tasksRunning) + continue + default: + } + + if constants.PowerClasses["ClassA"][*offer.Hostname] || + constants.PowerClasses["ClassB"][*offer.Hostname] { + offersClassAB = append(offersClassAB, offer) + } else if constants.PowerClasses["ClassC"][*offer.Hostname] { + offersClassC = append(offersClassC, offer) + } + } + + log.Println("Packing Large tasks into ClassAB offers:") + for _, o := range offersClassAB { + log.Println(*o.Hostname) + } + // Packing tasks into offersClassAB + s.pack(offersClassAB, driver) + + log.Println("Spreading Small tasks among ClassC offers:") + for _, o := range offersClassC { + log.Println(*o.Hostname) + } + // Spreading tasks among offersClassC + s.spread(offersClassC, driver) +} + +func (s *BottomHeavy) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) { + log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) + + if *status.State == mesos.TaskState_TASK_RUNNING { + s.tasksRunning++ + } else if IsTerminal(status.State) { + delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value) + s.tasksRunning-- + if s.tasksRunning == 0 { + select { + case <-s.Shutdown: + close(s.Done) + default: + } + } + } + log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) +} diff --git a/schedulers/topHeavy.go b/schedulers/topHeavy.go new file mode 100644 index 0000000..f0d0920 --- /dev/null +++ b/schedulers/topHeavy.go @@ -0,0 +1,338 @@ +package schedulers + +import ( + "bitbucket.org/sunybingcloud/electron/constants" + "bitbucket.org/sunybingcloud/electron/def" + "fmt" + "github.com/golang/protobuf/proto" + mesos "github.com/mesos/mesos-go/mesosproto" + "github.com/mesos/mesos-go/mesosutil" + sched "github.com/mesos/mesos-go/scheduler" + "log" + "math" + "os" + "sort" + "time" +) + +/* +Tasks are categorized into small and large tasks based on the watts requirement. +All the large tasks are packed into offers from agents belonging to power class A and power class B, using BinPacking. +All the small tasks are spread among the offers from agents belonging to power class C, using FirstFit. + +This was done to give a little more room for the large tasks (power intensive) for execution and reduce the possibility of +starvation of power intensive tasks. 
+*/ + +// electronScheduler implements the Scheduler interface +type TopHeavy struct { + base // Type embedded to inherit common functions + tasksCreated int + tasksRunning int + tasks []def.Task + metrics map[string]def.Metric + running map[string]map[string]bool + ignoreWatts bool + smallTasks, largeTasks []def.Task + + // First set of PCP values are garbage values, signal to logger to start recording when we're + // about to schedule a new task + RecordPCP bool + + // This channel is closed when the program receives an interrupt, + // signalling that the program should shut down. + Shutdown chan struct{} + // This channel is closed after shutdown is closed, and only when all + // outstanding tasks have been cleaned up + Done chan struct{} + + // Controls when to shutdown pcp logging + PCPLog chan struct{} + + schedTrace *log.Logger +} + +// New electron scheduler +func NewPackSmallSpreadBig(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *TopHeavy { + sort.Sort(def.WattsSorter(tasks)) + + logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") + if err != nil { + log.Fatal(err) + } + + // Separating small tasks from large tasks. + // Classification done based on MMPU watts requirements. + mid := int(math.Floor((float64(len(tasks)) / 2.0) + 0.5)) + s := &TopHeavy{ + smallTasks: tasks[:mid], + largeTasks: tasks[mid+1:], + ignoreWatts: ignoreWatts, + Shutdown: make(chan struct{}), + Done: make(chan struct{}), + PCPLog: make(chan struct{}), + running: make(map[string]map[string]bool), + RecordPCP: false, + schedTrace: log.New(logFile, "", log.LstdFlags), + } + return s +} + +func (s *TopHeavy) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { + taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) + s.tasksCreated++ + + if !s.RecordPCP { + // Turn on logging + s.RecordPCP = true + time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts + } + + // If this is our first time running into this Agent + if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok { + s.running[offer.GetSlaveId().GoString()] = make(map[string]bool) + } + + // Add task to list of tasks running on node + s.running[offer.GetSlaveId().GoString()][taskName] = true + + resources := []*mesos.Resource{ + mesosutil.NewScalarResource("cpus", task.CPU), + mesosutil.NewScalarResource("mem", task.RAM), + } + + if !s.ignoreWatts { + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + } + + return &mesos.TaskInfo{ + Name: proto.String(taskName), + TaskId: &mesos.TaskID{ + Value: proto.String("electron-" + taskName), + }, + SlaveId: offer.SlaveId, + Resources: resources, + Command: &mesos.CommandInfo{ + Value: proto.String(task.CMD), + }, + Container: &mesos.ContainerInfo{ + Type: mesos.ContainerInfo_DOCKER.Enum(), + Docker: &mesos.ContainerInfo_DockerInfo{ + Image: proto.String(task.Image), + Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated + }, + }, + } +} + +// retrieve the power class of host in offer +func (s *TopHeavy) getPowerClass(offer *mesos.Offer) string { + var powerClass string + for _, attr := range offer.GetAttributes() { + if attr.GetName() == "class" { + powerClass = attr.GetText().GetValue() + } + } + return powerClass +} + +// Shut down scheduler if no more tasks to schedule +func (s *TopHeavy) shutDownIfNecessary() { + if len(s.smallTasks) <= 0 && len(s.largeTasks) <= 0 { + log.Println("Done scheduling all tasks") + 
close(s.Shutdown) + } +} + +// create TaskInfo and log scheduling trace +func (s *TopHeavy) createTaskInfoAndLogSchedTrace(offer *mesos.Offer, + powerClass string, task def.Task) *mesos.TaskInfo { + log.Println("Co-Located with:") + coLocated(s.running[offer.GetSlaveId().GoString()]) + taskToSchedule := s.newTask(offer, task, powerClass) + + fmt.Println("Inst: ", *task.Instances) + s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) + *task.Instances-- + return taskToSchedule +} + +// Using BinPacking to pack small tasks into this offer. +func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) { + for _, offer := range offers { + select { + case <-s.Shutdown: + log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") + driver.DeclineOffer(offer.Id, longFilter) + + log.Println("Number of tasks still running: ", s.tasksRunning) + continue + default: + } + + tasks := []*mesos.TaskInfo{} + offerCPU, offerRAM, offerWatts := OfferAgg(offer) + totalWatts := 0.0 + totalCPU := 0.0 + totalRAM := 0.0 + taken := false + for i := 0; i < len(s.smallTasks); i++ { + task := s.smallTasks[i] + + for *task.Instances > 0 { + powerClass := s.getPowerClass(offer) + // Does the task fit + // OR lazy evaluation. If ignore watts is set to true, second statement won't + // be evaluated. + wattsToConsider := task.Watts + if !s.ignoreWatts { + wattsToConsider = task.ClassToWatts[powerClass] + } + if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsToConsider))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { + taken = true + totalWatts += wattsToConsider + totalCPU += task.CPU + totalRAM += task.RAM + tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task)) + + if *task.Instances <= 0 { + // All instances of task have been scheduled, remove it + s.smallTasks = append(s.smallTasks[:i], s.smallTasks[i+1:]...) + s.shutDownIfNecessary() + } + } else { + break // Continue on to next task + } + } + } + + if taken { + log.Printf("Starting on [%s]\n", offer.GetHostname()) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + } else { + // If there was no match for the task + fmt.Println("There is not enough resources to launch a task:") + cpus, mem, watts := OfferAgg(offer) + + log.Printf("\n", cpus, mem, watts) + driver.DeclineOffer(offer.Id, defaultFilter) + } + } +} + +// Using first fit to spread large tasks into these offers. 
+func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { + for _, offer := range offers { + select { + case <-s.Shutdown: + log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") + driver.DeclineOffer(offer.Id, longFilter) + + log.Println("Number of tasks still running: ", s.tasksRunning) + continue + default: + } + + tasks := []*mesos.TaskInfo{} + offerCPU, offerRAM, offerWatts := OfferAgg(offer) + taken := false + for i := 0; i < len(s.largeTasks); i++ { + task := s.largeTasks[i] + powerClass := s.getPowerClass(offer) + + // Decision to take the offer or not + wattsToConsider := task.Watts + if !s.ignoreWatts { + wattsToConsider = task.ClassToWatts[powerClass] + } + if (s.ignoreWatts || (offerWatts >= wattsToConsider)) && + (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + taken = true + tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task)) + log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + + if *task.Instances <= 0 { + // All instances of task have been scheduled, remove it + s.largeTasks = append(s.largeTasks[:i], s.largeTasks[i+1:]...) + s.shutDownIfNecessary() + } + break // Offer taken, move on + } + } + + if !taken { + // If there was no match for the task + fmt.Println("There is not enough resources to launch a task:") + cpus, mem, watts := OfferAgg(offer) + + log.Printf("\n", cpus, mem, watts) + driver.DeclineOffer(offer.Id, defaultFilter) + } + } +} + +func (s *TopHeavy) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) { + log.Printf("Received %d resource offers", len(offers)) + + // We need to separate the offers into + // offers from ClassA and ClassB and offers from ClassC. + // Offers from ClassA and ClassB would execute the large tasks. + // Offers from ClassC would execute the small tasks. 
+ offersClassAB := []*mesos.Offer{} + offersClassC := []*mesos.Offer{} + + for _, offer := range offers { + select { + case <-s.Shutdown: + log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") + driver.DeclineOffer(offer.Id, longFilter) + + log.Println("Number of tasks still running: ", s.tasksRunning) + continue + default: + } + + if constants.PowerClasses["ClassA"][*offer.Hostname] || + constants.PowerClasses["ClassB"][*offer.Hostname] { + offersClassAB = append(offersClassAB, offer) + } else if constants.PowerClasses["ClassC"][*offer.Hostname] { + offersClassC = append(offersClassC, offer) + } + } + + log.Println("ClassAB Offers:") + for _, o := range offersClassAB { + log.Println(*o.Hostname) + } + log.Println("ClassC Offers:") + for _, o := range offersClassC { + log.Println(*o.Hostname) + } + + // Packing tasks into offersClassC + s.pack(offersClassC, driver) + // Spreading tasks among offersClassAB + s.spread(offersClassAB, driver) +} + +func (s *TopHeavy) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) { + log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) + + if *status.State == mesos.TaskState_TASK_RUNNING { + s.tasksRunning++ + } else if IsTerminal(status.State) { + delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value) + s.tasksRunning-- + if s.tasksRunning == 0 { + select { + case <-s.Shutdown: + close(s.Done) + default: + } + } + } + log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) +} From 2cd77a7ba87413fea49e08fb12d9b1e3742e1ae3 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 18:29:00 -0500 Subject: [PATCH 07/36] Changed Window to ConsiderationWindow. --- constants/constants.go | 4 ++-- pcp/proactiveclusterwidecappers.go | 2 +- utilities/runAvg/runAvg.go | 32 +++++++++++++++--------------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/constants/constants.go b/constants/constants.go index efb8ed0..045c1a2 100644 --- a/constants/constants.go +++ b/constants/constants.go @@ -86,7 +86,7 @@ func UpdateCapMargin(newCapMargin float64) bool { var StarvationFactor = PowerThreshold / CapMargin // Window size for running average -var WindowSize = 20 +var ConsiderationWindowSize = 20 // Update the window size. func UpdateWindowSize(newWindowSize int) bool { @@ -94,7 +94,7 @@ func UpdateWindowSize(newWindowSize int) bool { if newWindowSize == 0 { return false } else { - WindowSize = newWindowSize + ConsiderationWindowSize = newWindowSize return true } } diff --git a/pcp/proactiveclusterwidecappers.go b/pcp/proactiveclusterwidecappers.go index ae90b61..acbe766 100644 --- a/pcp/proactiveclusterwidecappers.go +++ b/pcp/proactiveclusterwidecappers.go @@ -251,7 +251,7 @@ func (capper ClusterwideCapper) FCFSDeterminedCap(totalPower map[string]float64, return 100, errors.New("Invalid argument: totalPower") } else { // Need to calculate the running average - runningAverage := runAvg.Calc(taskWrapper{task: *newTask}, constants.WindowSize) + runningAverage := runAvg.Calc(taskWrapper{task: *newTask}, constants.ConsiderationWindowSize) // For each node, calculate the percentage of the running average to the total power. 
ratios := make(map[string]float64) for host, tpower := range totalPower { diff --git a/utilities/runAvg/runAvg.go b/utilities/runAvg/runAvg.go index 592929f..6c0d3b0 100644 --- a/utilities/runAvg/runAvg.go +++ b/utilities/runAvg/runAvg.go @@ -19,9 +19,9 @@ type Interface interface { } type runningAverageCalculator struct { - window list.List - windowSize int - currentSum float64 + considerationWindow list.List + considerationWindowSize int + currentSum float64 } // singleton instance @@ -31,14 +31,14 @@ var racSingleton *runningAverageCalculator func getInstance(curSum float64, wSize int) *runningAverageCalculator { if racSingleton == nil { racSingleton = &runningAverageCalculator{ - windowSize: wSize, + considerationWindowSize: wSize, currentSum: curSum, } return racSingleton } else { // Updating window size if a new window size is given. - if wSize != racSingleton.windowSize { - racSingleton.windowSize = wSize + if wSize != racSingleton.considerationWindowSize { + racSingleton.considerationWindowSize = wSize } return racSingleton } @@ -47,20 +47,20 @@ func getInstance(curSum float64, wSize int) *runningAverageCalculator { // Compute the running average by adding 'data' to the window. // Updating currentSum to get constant time complexity for every running average computation. func (rac *runningAverageCalculator) calculate(data Interface) float64 { - if rac.window.Len() < rac.windowSize { - rac.window.PushBack(data) + if rac.considerationWindow.Len() < rac.considerationWindowSize { + rac.considerationWindow.PushBack(data) rac.currentSum += data.Val() } else { // removing the element at the front of the window. - elementToRemove := rac.window.Front() + elementToRemove := rac.considerationWindow.Front() rac.currentSum -= elementToRemove.Value.(Interface).Val() - rac.window.Remove(elementToRemove) + rac.considerationWindow.Remove(elementToRemove) // adding new element to the window - rac.window.PushBack(data) + rac.considerationWindow.PushBack(data) rac.currentSum += data.Val() } - return rac.currentSum / float64(rac.window.Len()) + return rac.currentSum / float64(rac.considerationWindow.Len()) } /* @@ -68,9 +68,9 @@ If element with given ID present in the window, then remove it and return (remov Else, return (nil, error) */ func (rac *runningAverageCalculator) removeFromWindow(id string) (interface{}, error) { - for element := rac.window.Front(); element != nil; element = element.Next() { + for element := rac.considerationWindow.Front(); element != nil; element = element.Next() { if elementToRemove := element.Value.(Interface); elementToRemove.ID() == id { - rac.window.Remove(element) + rac.considerationWindow.Remove(element) rac.currentSum -= elementToRemove.Val() return elementToRemove, nil } @@ -102,7 +102,7 @@ func Init() { } // Setting parameters to default values. Could also set racSingleton to nil but this leads to unnecessary overhead of creating // another instance when Calc is called. - racSingleton.window.Init() - racSingleton.windowSize = 0 + racSingleton.considerationWindow.Init() + racSingleton.considerationWindowSize = 0 racSingleton.currentSum = 0.0 } From 6d4446413d81239e3e67d8afd2a091b961c19646 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 19:32:12 -0500 Subject: [PATCH 08/36] Created utilities/mesosUtils that can hold all the interactions with mesos. Moved longFilter and defaultFilter to mesosUtils. 
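The two filters this patch centralizes differ only in how long a declined offer is refused: DefaultFilter (RefuseSeconds = 1) is used when an offer simply does not fit the pending tasks, and LongFilter (RefuseSeconds = 1000) once the scheduler has entered shutdown and wants offers to stay away. A minimal sketch of that decision follows; the local filters struct, declineOffer helper, and boolean flags are illustrative stand-ins for mesos.Filters, driver.DeclineOffer, and the schedulers' Shutdown/takeOffer logic, while the RefuseSeconds values match the package introduced below.

package main

import "fmt"

// filters mirrors the RefuseSeconds knob of mesos.Filters.
type filters struct{ refuseSeconds float64 }

var (
	defaultFilter = filters{refuseSeconds: 1}    // let the offer come back quickly
	longFilter    = filters{refuseSeconds: 1000} // keep offers away after shutdown
)

// declineOffer stands in for driver.DeclineOffer(offer.Id, filter).
func declineOffer(offerID string, f filters) {
	fmt.Printf("declining %s, refusing for %.0fs\n", offerID, f.refuseSeconds)
}

func handleOffer(offerID string, shuttingDown, taskFits bool) {
	switch {
	case shuttingDown:
		// Done scheduling all tasks: decline with the long filter.
		declineOffer(offerID, longFilter)
	case !taskFits:
		// Not enough resources right now: decline with the default filter.
		declineOffer(offerID, defaultFilter)
	default:
		fmt.Printf("launching on %s\n", offerID)
	}
}

func main() {
	handleOffer("offer-1", false, true)
	handleOffer("offer-2", false, false)
	handleOffer("offer-3", true, true)
}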
--- utilities/mesosUtils/mesosUtils.go | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 utilities/mesosUtils/mesosUtils.go diff --git a/utilities/mesosUtils/mesosUtils.go b/utilities/mesosUtils/mesosUtils.go new file mode 100644 index 0000000..be3d55b --- /dev/null +++ b/utilities/mesosUtils/mesosUtils.go @@ -0,0 +1,11 @@ +package mesosUtils + +import ( + mesos "github.com/mesos/mesos-go/mesosproto" + "github.com/golang/protobuf/proto" +) + +var ( + DefaultFilter = &mesos.Filters{RefuseSeconds: proto.Float64(1)} + LongFilter = &mesos.Filters{RefuseSeconds: proto.Float64(1000)} +) From e63784b0073a3548432712667fc2a084ceae3f27 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 19:32:54 -0500 Subject: [PATCH 09/36] Created utilities/offerUtils that can can hold all the utility functions for Offers. --- utilities/offerUtils/offerUtils.go | 40 ++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 utilities/offerUtils/offerUtils.go diff --git a/utilities/offerUtils/offerUtils.go b/utilities/offerUtils/offerUtils.go new file mode 100644 index 0000000..fc930f9 --- /dev/null +++ b/utilities/offerUtils/offerUtils.go @@ -0,0 +1,40 @@ +package offerUtils + +import ( + mesos "github.com/mesos/mesos-go/mesosproto" +) + +func OfferAgg(offer *mesos.Offer) (float64, float64, float64) { + var cpus, mem, watts float64 + + for _, resource := range offer.Resources { + switch resource.GetName() { + case "cpus": + cpus += *resource.GetScalar().Value + case "mem": + mem += *resource.GetScalar().Value + case "watts": + watts += *resource.GetScalar().Value + } + } + + return cpus, mem, watts +} + +type OffersSorter []*mesos.Offer + +func (offersSorter OffersSorter) Len() int { + return len(offersSorter) +} + +func (offersSorter OffersSorter) Swap(i, j int) { + offersSorter[i], offersSorter[j] = offersSorter[j], offersSorter[i] +} + +func (offersSorter OffersSorter) Less(i, j int) bool { + // getting CPU resource availability of offersSorter[i] + cpu1, _, _ := OfferAgg(offersSorter[i]) + // getting CPU resource availability of offersSorter[j] + cpu2, _, _ := OfferAgg(offersSorter[j]) + return cpu1 <= cpu2 +} From 44ce511eb1e6dee62edcf5be8f82ceeeb89bba2d Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 19:34:28 -0500 Subject: [PATCH 10/36] Added classMapWatts, topHeavy and bottomHeavy schedulers to the list of schedulers. Also, added TODO for creating a package to hold all the source code to perform logging. --- schedulers/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/schedulers/README.md b/schedulers/README.md index 8fb8dcf..6696696 100644 --- a/schedulers/README.md +++ b/schedulers/README.md @@ -8,6 +8,7 @@ To Do: * Separate the capping strategies from the scheduling algorithms and make it possible to use any capping strategy with any scheduler. * Make newTask(...) variadic where the newTaskClass argument can either be given or not. If not give, then pick task.Watts as the watts attribute, else pick task.ClassToWatts[newTaskClass]. * Retrofit pcp/proactiveclusterwidecappers.go to include the power capping go routines and to cap only when necessary. + * Create a package that would contain routines to perform various logging and move helpers.coLocated(...) into that. 
Scheduling Algorithms: @@ -17,3 +18,6 @@ Scheduling Algorithms: * FCFS Proactive Cluster-wide Capping * Ranked Proactive Cluster-wide Capping * Piston Capping -- Works when scheduler is run with WAR + * ClassMapWatts -- Bin-packing and First Fit that now use Watts per power class. + * Top Heavy -- Hybrid scheduler that packs small tasks (less power intensive) using Bin-packing and spreads large tasks (power intensive) using First Fit. + * Bottom Heavy -- Hybrid scheduler that packs large tasks (power intensive) using Bin-packing and spreads small tasks (less power intensive) using First Fit. From 354e89cac76304f6cc588f2f90ab0ff110ec4f3a Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 19:36:13 -0500 Subject: [PATCH 11/36] moved longFilter and defaultFilter to utilities/mesosUtils and OfferAgg and OffersSorter to utilities/offerUtils --- schedulers/helpers.go | 42 ------------------------------------------ 1 file changed, 42 deletions(-) diff --git a/schedulers/helpers.go b/schedulers/helpers.go index 8eedef3..6d90c6c 100644 --- a/schedulers/helpers.go +++ b/schedulers/helpers.go @@ -2,51 +2,9 @@ package schedulers import ( "fmt" - "github.com/golang/protobuf/proto" - mesos "github.com/mesos/mesos-go/mesosproto" "log" ) -var ( - defaultFilter = &mesos.Filters{RefuseSeconds: proto.Float64(1)} - longFilter = &mesos.Filters{RefuseSeconds: proto.Float64(1000)} -) - -func OfferAgg(offer *mesos.Offer) (float64, float64, float64) { - var cpus, mem, watts float64 - - for _, resource := range offer.Resources { - switch resource.GetName() { - case "cpus": - cpus += *resource.GetScalar().Value - case "mem": - mem += *resource.GetScalar().Value - case "watts": - watts += *resource.GetScalar().Value - } - } - - return cpus, mem, watts -} - -type OffersSorter []*mesos.Offer - -func (offersSorter OffersSorter) Len() int { - return len(offersSorter) -} - -func (offersSorter OffersSorter) Swap(i, j int) { - offersSorter[i], offersSorter[j] = offersSorter[j], offersSorter[i] -} - -func (offersSorter OffersSorter) Less(i, j int) bool { - // getting CPU resource availability of offersSorter[i] - cpu1, _, _ := OfferAgg(offersSorter[i]) - // getting CPU resource availability of offersSorter[j] - cpu2, _, _ := OfferAgg(offersSorter[j]) - return cpu1 <= cpu2 -} - func coLocated(tasks map[string]bool) { for task := range tasks { From 85817494353ceaf9dea3114a923b29695fc8c01d Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 19:40:39 -0500 Subject: [PATCH 12/36] retrofitted all schedulers to call OfferAgg(...) 
and OffersSorter from utilities/offerUtils and also to use defaultFilter and longFilter from utilities/mesosUtils --- schedulers/binPackSortedWattsSortedOffers.go | 33 ++++++++++--------- schedulers/binpackedpistoncapping.go | 14 ++++---- schedulers/binpacksortedwatts.go | 14 ++++---- schedulers/bottomHeavy.go | 26 ++++++++------- schedulers/bpMaxMin.go | 14 ++++---- schedulers/bpMaxMinPistonCapping.go | 14 ++++---- schedulers/bpMaxMinProacCC.go | 16 +++++---- schedulers/bpswClassMapWatts.go | 14 ++++---- schedulers/bpswClassMapWattsPistonCapping.go | 16 +++++---- schedulers/bpswClassMapWattsProacCC.go | 18 +++++----- schedulers/firstfit.go | 12 ++++--- schedulers/firstfitSortedOffers.go | 16 +++++---- .../firstfitSortedWattsClassMapWatts.go | 12 ++++--- ...firstfitSortedWattsClassMapWattsProacCC.go | 14 ++++---- schedulers/firstfitSortedWattsSortedOffers.go | 16 +++++---- schedulers/firstfitsortedwatts.go | 12 ++++--- schedulers/firstfitwattsonly.go | 12 ++++--- schedulers/proactiveclusterwidecappingfcfs.go | 14 ++++---- .../proactiveclusterwidecappingranked.go | 14 ++++---- schedulers/topHeavy.go | 24 +++++++------- 20 files changed, 182 insertions(+), 143 deletions(-) diff --git a/schedulers/binPackSortedWattsSortedOffers.go b/schedulers/binPackSortedWattsSortedOffers.go index 5d926e9..2f70cb3 100644 --- a/schedulers/binPackSortedWattsSortedOffers.go +++ b/schedulers/binPackSortedWattsSortedOffers.go @@ -2,6 +2,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -17,7 +19,7 @@ import ( // Decides if to take an offer or not func (*BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -37,18 +39,18 @@ type BinPackSortedWattsSortedOffers struct { running map[string]map[string]bool ignoreWatts bool - // First set of PCP values are garbage values, signal to logger to start recording when we're - // about to schedule a new task + // First set of PCP values are garbage values, signal to logger to start recording when we're + // about to schedule a new task RecordPCP bool - // This channel is closed when the program receives an interrupt, - // signalling that the program should shut down. + // This channel is closed when the program receives an interrupt, + // signalling that the program should shut down. 
Shutdown chan struct{} - // This channel is closed after shutdown is closed, and only when all - // outstanding tasks have been cleaned up + // This channel is closed after shutdown is closed, and only when all + // outstanding tasks have been cleaned up Done chan struct{} - // Controls when to shutdown pcp logging + // Controls when to shutdown pcp logging PCPLog chan struct{} schedTrace *log.Logger @@ -127,13 +129,13 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr log.Printf("Received %d resource offers", len(offers)) // Sorting the offers - sort.Sort(OffersSorter(offers)) + sort.Sort(offerUtils.OffersSorter(offers)) // Printing the sorted offers and the corresponding CPU resource availability log.Println("Sorted Offers:") for i := 0; i < len(offers); i++ { offer := offers[i] - offerCPU, _, _ := OfferAgg(offer) + offerCPU, _, _ := offerUtils.OfferAgg(offer) log.Printf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU) } @@ -141,7 +143,7 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -150,7 +152,7 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr tasks := []*mesos.TaskInfo{} - offer_cpu, offer_ram, offer_watts := OfferAgg(offer) + offer_cpu, offer_ram, offer_watts := offerUtils.OfferAgg(offer) taken := false totalWatts := 0.0 @@ -203,15 +205,15 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr if taken { log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } @@ -234,4 +236,3 @@ func (s *BinPackSortedWattsSortedOffers) StatusUpdate(driver sched.SchedulerDriv } log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value) } - diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go index 2ed96f4..7cf4b9d 100644 --- a/schedulers/binpackedpistoncapping.go +++ b/schedulers/binpackedpistoncapping.go @@ -4,6 +4,8 @@ import ( "bitbucket.org/sunybingcloud/electron/constants" "bitbucket.org/sunybingcloud/electron/def" "bitbucket.org/sunybingcloud/electron/rapl" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "errors" "fmt" "github.com/golang/protobuf/proto" @@ -217,7 +219,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off // retrieving the total power for each host in the offers for _, offer := range offers { if _, ok := s.totalPower[*offer.Hostname]; !ok { - _, _, offer_watts := OfferAgg(offer) + _, _, offer_watts := offerUtils.OfferAgg(offer) s.totalPower[*offer.Hostname] = offer_watts } } @@ -238,7 +240,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off select { case 
<-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -246,7 +248,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off } fitTasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) taken := false totalWatts := 0.0 totalCPU := 0.0 @@ -309,14 +311,14 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off bpPistonCapValues[*offer.Hostname] += partialLoad bpPistonMutex.Unlock() log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, fitTasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, fitTasks, mesosUtils.DefaultFilter) } else { // If there was no match for task log.Println("There is not enough resources to launch task: ") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/binpacksortedwatts.go b/schedulers/binpacksortedwatts.go index fdcc82a..00247c7 100644 --- a/schedulers/binpacksortedwatts.go +++ b/schedulers/binpacksortedwatts.go @@ -2,6 +2,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -17,7 +19,7 @@ import ( // Decides if to take an offer or not func (*BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -130,7 +132,7 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -139,7 +141,7 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers tasks := []*mesos.TaskInfo{} - offer_cpu, offer_ram, offer_watts := OfferAgg(offer) + offer_cpu, offer_ram, offer_watts := offerUtils.OfferAgg(offer) taken := false totalWatts := 0.0 @@ -192,15 +194,15 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers if taken { log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/bottomHeavy.go b/schedulers/bottomHeavy.go index 999d9d6..4b4391b 100644 --- a/schedulers/bottomHeavy.go +++ 
b/schedulers/bottomHeavy.go @@ -3,6 +3,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/constants" "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -53,7 +55,7 @@ type BottomHeavy struct { } // New electron scheduler -func NewPackBigSpreadSmall(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BottomHeavy { +func NewBottomHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BottomHeavy { sort.Sort(def.WattsSorter(tasks)) logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") @@ -163,7 +165,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -171,7 +173,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) } tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ -210,14 +212,14 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) if taken { log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } @@ -228,7 +230,7 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -236,7 +238,7 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver } tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) taken := false for i := 0; i < len(s.smallTasks); i++ { task := s.smallTasks[i] @@ -252,7 +254,7 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver taken = true tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task)) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) if *task.Instances <= 0 { // All instances of task have been scheduled, remove it @@ -266,10 +268,10 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver if !taken { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := 
OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } @@ -288,7 +290,7 @@ func (s *BottomHeavy) ResourceOffers(driver sched.SchedulerDriver, offers []*mes select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue diff --git a/schedulers/bpMaxMin.go b/schedulers/bpMaxMin.go index 9221476..d5e791a 100644 --- a/schedulers/bpMaxMin.go +++ b/schedulers/bpMaxMin.go @@ -2,6 +2,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -17,7 +19,7 @@ import ( // Decides if to take an offer or not func (*BPMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -133,7 +135,7 @@ func (s *BPMaxMinWatts) CheckFit(i int, totalRAM *float64, totalWatts *float64) (bool, *mesos.TaskInfo) { - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) // Does the task fit if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) && @@ -175,7 +177,7 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -240,15 +242,15 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m if offerTaken { log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/bpMaxMinPistonCapping.go b/schedulers/bpMaxMinPistonCapping.go index edc27d8..b4d4e3c 100644 --- a/schedulers/bpMaxMinPistonCapping.go +++ b/schedulers/bpMaxMinPistonCapping.go @@ -4,6 +4,8 @@ import ( "bitbucket.org/sunybingcloud/electron/constants" "bitbucket.org/sunybingcloud/electron/def" "bitbucket.org/sunybingcloud/electron/rapl" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "errors" "fmt" "github.com/golang/protobuf/proto" @@ -22,7 +24,7 @@ import ( // Decides if to take an offer or not func (s *BPMaxMinPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of 
taking them as a parameter @@ -222,7 +224,7 @@ func (s *BPMaxMinPistonCapping) CheckFit(i int, totalWatts *float64, partialLoad *float64) (bool, *mesos.TaskInfo) { - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) // Does the task fit if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) && @@ -271,7 +273,7 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -343,15 +345,15 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off bpMaxMinPistonCappingCapValues[*offer.Hostname] += partialLoad bpMaxMinPistonCappingMutex.Unlock() log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/bpMaxMinProacCC.go b/schedulers/bpMaxMinProacCC.go index 39e96fc..2af372f 100644 --- a/schedulers/bpMaxMinProacCC.go +++ b/schedulers/bpMaxMinProacCC.go @@ -17,11 +17,13 @@ import ( "strings" "sync" "time" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" ) // Decides if to take an offer or not func (s *BPMaxMinProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -246,7 +248,7 @@ func (s *BPMaxMinProacCC) CheckFit(i int, totalRAM *float64, totalWatts *float64) (bool, *mesos.TaskInfo) { - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) // Does the task fit if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) && @@ -308,7 +310,7 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers [] // retrieving the available power for all the hosts in the offers. 
for _, offer := range offers { - _, _, offerWatts := OfferAgg(offer) + _, _, offerWatts := offerUtils.OfferAgg(offer) s.availablePower[*offer.Hostname] = offerWatts // setting total power if the first time if _, ok := s.totalPower[*offer.Hostname]; !ok { @@ -324,7 +326,7 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers [] select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -389,15 +391,15 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers [] if offerTaken { log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/bpswClassMapWatts.go b/schedulers/bpswClassMapWatts.go index 1196459..d35629b 100644 --- a/schedulers/bpswClassMapWatts.go +++ b/schedulers/bpswClassMapWatts.go @@ -12,12 +12,14 @@ import ( "sort" "strings" "time" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" ) // Decides if to take an offer or not func (*BPSWClassMapWatts) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -130,7 +132,7 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -139,7 +141,7 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) taken := false totalWatts := 0.0 @@ -201,15 +203,15 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers if taken { log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/bpswClassMapWattsPistonCapping.go b/schedulers/bpswClassMapWattsPistonCapping.go index cae8cc3..4ee7825 100644 --- a/schedulers/bpswClassMapWattsPistonCapping.go +++ b/schedulers/bpswClassMapWattsPistonCapping.go @@ -4,6 +4,8 @@ import ( 
"bitbucket.org/sunybingcloud/electron/constants" "bitbucket.org/sunybingcloud/electron/def" "bitbucket.org/sunybingcloud/electron/rapl" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "errors" "fmt" "github.com/golang/protobuf/proto" @@ -21,7 +23,7 @@ import ( // Decides if to take offer or not func (s *BPSWClassMapWattsPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -215,7 +217,7 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr // retrieving the total power for each host in the offers. for _, offer := range offers { if _, ok := s.totalPower[*offer.Hostname]; !ok { - _, _, offerWatts := OfferAgg(offer) + _, _, offerWatts := offerUtils.OfferAgg(offer) s.totalPower[*offer.Hostname] = offerWatts } } @@ -229,7 +231,7 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -238,7 +240,7 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) taken := false totalWatts := 0.0 @@ -312,14 +314,14 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr bpswClassMapWattsPistonCapValues[*offer.Hostname] += partialLoad bpswClassMapWattsPistonMutex.Unlock() log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { // If there was no match for task log.Println("There is not enough resources to launch task: ") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/bpswClassMapWattsProacCC.go b/schedulers/bpswClassMapWattsProacCC.go index 390aeb9..d94df90 100644 --- a/schedulers/bpswClassMapWattsProacCC.go +++ b/schedulers/bpswClassMapWattsProacCC.go @@ -5,6 +5,8 @@ import ( "bitbucket.org/sunybingcloud/electron/def" "bitbucket.org/sunybingcloud/electron/pcp" "bitbucket.org/sunybingcloud/electron/rapl" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -21,7 +23,7 @@ import ( // Decides if to take an offer or not func (*BPSWClassMapWattsProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) // TODO: Insert watts calculation here instead of taking them as parameter @@ -165,7 +167,7 @@ func (s *BPSWClassMapWattsProacCC) Disconnected(sched.SchedulerDriver) { } // go routine to cap the entire cluster in regular intervals of time. 
-var bpswClassMapWattsProacCCCapValue = 0.0 // initial value to indicate that we haven't capped the cluster yet. +var bpswClassMapWattsProacCCCapValue = 0.0 // initial value to indicate that we haven't capped the cluster yet. var bpswClassMapWattsProacCCNewCapValue = 0.0 // newly computed cap value func (s *BPSWClassMapWattsProacCC) startCapping() { go func() { @@ -251,7 +253,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, // retrieving the available power for all the hosts in the offers. for _, offer := range offers { - _, _, offerWatts := OfferAgg(offer) + _, _, offerWatts := offerUtils.OfferAgg(offer) s.availablePower[*offer.Hostname] = offerWatts // setting total power if the first time if _, ok := s.totalPower[*offer.Hostname]; !ok { @@ -267,7 +269,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -276,7 +278,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) taken := false totalWatts := 0.0 @@ -357,14 +359,14 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, if taken { log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/firstfit.go b/schedulers/firstfit.go index 4eaecdd..5469bb4 100644 --- a/schedulers/firstfit.go +++ b/schedulers/firstfit.go @@ -2,6 +2,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -16,7 +18,7 @@ import ( // Decides if to take an offer or not func (s *FirstFit) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -129,7 +131,7 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos. select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -162,7 +164,7 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos. 
tasks = append(tasks, taskToSchedule) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) taken = true @@ -187,10 +189,10 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos. // If there was no match for the task if !taken { fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } diff --git a/schedulers/firstfitSortedOffers.go b/schedulers/firstfitSortedOffers.go index 09f1d17..06ee713 100644 --- a/schedulers/firstfitSortedOffers.go +++ b/schedulers/firstfitSortedOffers.go @@ -2,6 +2,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -17,7 +19,7 @@ import ( // Decides if to take an offer or not func (s *FirstFitSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -127,13 +129,13 @@ func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offe log.Printf("Received %d resource offers", len(offers)) // Sorting the offers - sort.Sort(OffersSorter(offers)) + sort.Sort(offerUtils.OffersSorter(offers)) // Printing the sorted offers and the corresponding CPU resource availability log.Println("Sorted Offers:") for i := 0; i < len(offers); i++ { offer := offers[i] - offerCPU, _, _ := OfferAgg(offer) + offerCPU, _, _ := offerUtils.OfferAgg(offer) log.Printf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU) } @@ -141,7 +143,7 @@ func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offe select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -174,7 +176,7 @@ func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offe tasks = append(tasks, taskToSchedule) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) taken = true @@ -199,10 +201,10 @@ func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offe // If there was no match for the task if !taken { fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } diff --git a/schedulers/firstfitSortedWattsClassMapWatts.go b/schedulers/firstfitSortedWattsClassMapWatts.go index 4a03d89..3a0d1df 100644 --- a/schedulers/firstfitSortedWattsClassMapWatts.go +++ b/schedulers/firstfitSortedWattsClassMapWatts.go @@ -2,6 
+2,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -117,14 +119,14 @@ func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.Scheduler select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue default: } - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) // First fit strategy taken := false @@ -155,7 +157,7 @@ func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.Scheduler taskToSchedule := s.newTask(offer, task, nodeClass) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, mesosUtils.DefaultFilter) taken = true fmt.Println("Inst: ", *task.Instances) @@ -176,10 +178,10 @@ func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.Scheduler // If there was no match for the task if !taken { fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } diff --git a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go index 3cc9fb9..f083ffb 100644 --- a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go +++ b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go @@ -17,6 +17,8 @@ import ( "strings" "sync" "time" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" ) // electron scheduler implements the Scheduler interface @@ -239,7 +241,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc // retrieving the available power for all the hosts in the offers. 
for _, offer := range offers { - _, _, offerWatts := OfferAgg(offer) + _, _, offerWatts := offerUtils.OfferAgg(offer) s.availablePower[*offer.Hostname] = offerWatts // setting total power if the first time if _, ok := s.totalPower[*offer.Hostname]; !ok { @@ -255,14 +257,14 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue default: } - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) // First fit strategy taken := false @@ -313,7 +315,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc taskToSchedule := s.newTask(offer, task, nodeClass) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, mesosUtils.DefaultFilter) taken = true fmt.Println("Inst: ", *task.Instances) @@ -337,10 +339,10 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc // If there was no match for the task if !taken { fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/firstfitSortedWattsSortedOffers.go b/schedulers/firstfitSortedWattsSortedOffers.go index 0590585..3b4bb4e 100644 --- a/schedulers/firstfitSortedWattsSortedOffers.go +++ b/schedulers/firstfitSortedWattsSortedOffers.go @@ -2,6 +2,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -17,7 +19,7 @@ import ( // Decides if to take an offer or not func (s *FirstFitSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -128,13 +130,13 @@ func (s *FirstFitSortedWattsSortedOffers) newTask(offer *mesos.Offer, task def.T func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) { // Sorting the offers - sort.Sort(OffersSorter(offers)) + sort.Sort(offerUtils.OffersSorter(offers)) // Printing the sorted offers and the corresponding CPU resource availability log.Println("Sorted Offers:") for i := 0; i < len(offers); i++ { offer := offers[i] - offerCPU, _, _ := OfferAgg(offer) + offerCPU, _, _ := offerUtils.OfferAgg(offer) log.Printf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU) } @@ -144,7 +146,7 @@ func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerD select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - 
driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -177,7 +179,7 @@ func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerD tasks = append(tasks, taskToSchedule) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) taken = true @@ -201,10 +203,10 @@ func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerD // If there was no match for the task if !taken { fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } diff --git a/schedulers/firstfitsortedwatts.go b/schedulers/firstfitsortedwatts.go index 940ef90..ab8d9c3 100644 --- a/schedulers/firstfitsortedwatts.go +++ b/schedulers/firstfitsortedwatts.go @@ -2,6 +2,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -17,7 +19,7 @@ import ( // Decides if to take an offer or not func (s *FirstFitSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -132,7 +134,7 @@ func (s *FirstFitSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offer select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -165,7 +167,7 @@ func (s *FirstFitSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offer tasks = append(tasks, taskToSchedule) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) taken = true @@ -189,10 +191,10 @@ func (s *FirstFitSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offer // If there was no match for the task if !taken { fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } diff --git a/schedulers/firstfitwattsonly.go b/schedulers/firstfitwattsonly.go index c23727f..c24e75e 100644 --- a/schedulers/firstfitwattsonly.go +++ b/schedulers/firstfitwattsonly.go @@ -2,6 +2,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -16,7 +18,7 @@ import ( // Decides if to take an offer or not func 
(*FirstFitWattsOnly) takeOffer(offer *mesos.Offer, task def.Task) bool { - _, _, watts := OfferAgg(offer) + _, _, watts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter @@ -123,7 +125,7 @@ func (s *FirstFitWattsOnly) ResourceOffers(driver sched.SchedulerDriver, offers select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -156,7 +158,7 @@ func (s *FirstFitWattsOnly) ResourceOffers(driver sched.SchedulerDriver, offers tasks = append(tasks, taskToSchedule) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) taken = true @@ -181,10 +183,10 @@ func (s *FirstFitWattsOnly) ResourceOffers(driver sched.SchedulerDriver, offers // If there was no match for the task if !taken { fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } diff --git a/schedulers/proactiveclusterwidecappingfcfs.go b/schedulers/proactiveclusterwidecappingfcfs.go index d89390b..b7491f7 100644 --- a/schedulers/proactiveclusterwidecappingfcfs.go +++ b/schedulers/proactiveclusterwidecappingfcfs.go @@ -16,11 +16,13 @@ import ( "strings" "sync" "time" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" ) // Decides if to take an offer or not func (_ *ProactiveClusterwideCapFCFS) takeOffer(offer *mesos.Offer, task def.Task) bool { - offer_cpu, offer_mem, offer_watts := OfferAgg(offer) + offer_cpu, offer_mem, offer_watts := offerUtils.OfferAgg(offer) if offer_cpu >= task.CPU && offer_mem >= task.RAM && offer_watts >= task.Watts { return true @@ -240,7 +242,7 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive // retrieving the available power for all the hosts in the offers. for _, offer := range offers { - _, _, offer_watts := OfferAgg(offer) + _, _, offer_watts := offerUtils.OfferAgg(offer) s.availablePower[*offer.Hostname] = offer_watts // setting total power if the first time. 
if _, ok := s.totalPower[*offer.Hostname]; !ok { @@ -256,7 +258,7 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -305,7 +307,7 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive log.Printf("Starting on [%s]\n", offer.GetHostname()) taskToSchedule := s.newTask(offer, task) toSchedule := []*mesos.TaskInfo{taskToSchedule} - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, toSchedule, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, toSchedule, mesosUtils.DefaultFilter) log.Printf("Inst: %d", *task.Instances) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) *task.Instances-- @@ -331,10 +333,10 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive // If no task fit the offer, then declining the offer. if !taken { log.Printf("There is not enough resources to launch a task on Host: %s\n", offer.GetHostname()) - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/proactiveclusterwidecappingranked.go b/schedulers/proactiveclusterwidecappingranked.go index f4c3484..9a8e4c4 100644 --- a/schedulers/proactiveclusterwidecappingranked.go +++ b/schedulers/proactiveclusterwidecappingranked.go @@ -15,6 +15,8 @@ import ( "bitbucket.org/sunybingcloud/electron/def" "bitbucket.org/sunybingcloud/electron/pcp" "bitbucket.org/sunybingcloud/electron/rapl" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -31,7 +33,7 @@ import ( // Decides if to taken an offer or not func (_ *ProactiveClusterwideCapRanked) takeOffer(offer *mesos.Offer, task def.Task) bool { - offer_cpu, offer_mem, offer_watts := OfferAgg(offer) + offer_cpu, offer_mem, offer_watts := offerUtils.OfferAgg(offer) if offer_cpu >= task.CPU && offer_mem >= task.RAM && offer_watts >= task.Watts { return true @@ -251,7 +253,7 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri // retrieving the available power for all the hosts in the offers. for _, offer := range offers { - _, _, offer_watts := OfferAgg(offer) + _, _, offer_watts := offerUtils.OfferAgg(offer) s.availablePower[*offer.Hostname] = offer_watts // setting total power if the first time. 
if _, ok := s.totalPower[*offer.Hostname]; !ok { @@ -277,7 +279,7 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -328,7 +330,7 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri log.Printf("Starting on [%s]\n", offer.GetHostname()) taskToSchedule := s.newTask(offer, task) to_schedule := []*mesos.TaskInfo{taskToSchedule} - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, to_schedule, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, to_schedule, mesosUtils.DefaultFilter) log.Printf("Inst: %d", *task.Instances) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) *task.Instances-- @@ -354,10 +356,10 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri // If no tasks fit the offer, then declining the offer. if !taken { log.Printf("There is not enough resources to launch a task on Host: %s\n", offer.GetHostname()) - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } diff --git a/schedulers/topHeavy.go b/schedulers/topHeavy.go index f0d0920..9454e40 100644 --- a/schedulers/topHeavy.go +++ b/schedulers/topHeavy.go @@ -3,6 +3,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/constants" "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -163,7 +165,7 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) { select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -171,7 +173,7 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) { } tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ -210,14 +212,14 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) { if taken { log.Printf("Starting on [%s]\n", offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } @@ -228,7 +230,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - 
driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue @@ -236,7 +238,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { } tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) taken := false for i := 0; i < len(s.largeTasks); i++ { task := s.largeTasks[i] @@ -252,7 +254,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { taken = true tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task)) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) - driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter) + driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) if *task.Instances <= 0 { // All instances of task have been scheduled, remove it @@ -266,10 +268,10 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { if !taken { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") - cpus, mem, watts := OfferAgg(offer) + cpus, mem, watts := offerUtils.OfferAgg(offer) log.Printf("\n", cpus, mem, watts) - driver.DeclineOffer(offer.Id, defaultFilter) + driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter) } } } @@ -288,7 +290,7 @@ func (s *TopHeavy) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos. select { case <-s.Shutdown: log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]") - driver.DeclineOffer(offer.Id, longFilter) + driver.DeclineOffer(offer.Id, mesosUtils.LongFilter) log.Println("Number of tasks still running: ", s.tasksRunning) continue From 5a6edb802e4c29acff8884db86abf589faba94e3 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 19:43:12 -0500 Subject: [PATCH 13/36] fixed comment. --- utilities/runAvg/runAvg.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities/runAvg/runAvg.go b/utilities/runAvg/runAvg.go index 6c0d3b0..c12efa9 100644 --- a/utilities/runAvg/runAvg.go +++ b/utilities/runAvg/runAvg.go @@ -1,7 +1,7 @@ /* A utility to calculate the running average. -One should implement Val() to be able to use this utility. +One should implement Val() and ID() to use this utility. */ package runAvg From 5128c51b22c0a40d0e9db592c349a9d82e463c29 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 19:45:47 -0500 Subject: [PATCH 14/36] formatted code. 
--- schedulers/bpMaxMinProacCC.go | 4 ++-- schedulers/bpswClassMapWatts.go | 4 ++-- schedulers/firstfitSortedWattsClassMapWattsProacCC.go | 4 ++-- schedulers/proactiveclusterwidecappingfcfs.go | 4 ++-- utilities/mesosUtils/mesosUtils.go | 2 +- utilities/runAvg/runAvg.go | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/schedulers/bpMaxMinProacCC.go b/schedulers/bpMaxMinProacCC.go index 2af372f..1f834da 100644 --- a/schedulers/bpMaxMinProacCC.go +++ b/schedulers/bpMaxMinProacCC.go @@ -5,6 +5,8 @@ import ( "bitbucket.org/sunybingcloud/electron/def" "bitbucket.org/sunybingcloud/electron/pcp" "bitbucket.org/sunybingcloud/electron/rapl" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -17,8 +19,6 @@ import ( "strings" "sync" "time" - "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" - "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" ) // Decides if to take an offer or not diff --git a/schedulers/bpswClassMapWatts.go b/schedulers/bpswClassMapWatts.go index d35629b..a648e15 100644 --- a/schedulers/bpswClassMapWatts.go +++ b/schedulers/bpswClassMapWatts.go @@ -2,6 +2,8 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/def" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -12,8 +14,6 @@ import ( "sort" "strings" "time" - "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" - "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" ) // Decides if to take an offer or not diff --git a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go index f083ffb..f822b9a 100644 --- a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go +++ b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go @@ -5,6 +5,8 @@ import ( "bitbucket.org/sunybingcloud/electron/def" "bitbucket.org/sunybingcloud/electron/pcp" "bitbucket.org/sunybingcloud/electron/rapl" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -17,8 +19,6 @@ import ( "strings" "sync" "time" - "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" - "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" ) // electron scheduler implements the Scheduler interface diff --git a/schedulers/proactiveclusterwidecappingfcfs.go b/schedulers/proactiveclusterwidecappingfcfs.go index b7491f7..9fc5695 100644 --- a/schedulers/proactiveclusterwidecappingfcfs.go +++ b/schedulers/proactiveclusterwidecappingfcfs.go @@ -5,6 +5,8 @@ import ( "bitbucket.org/sunybingcloud/electron/def" "bitbucket.org/sunybingcloud/electron/pcp" "bitbucket.org/sunybingcloud/electron/rapl" + "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" "fmt" "github.com/golang/protobuf/proto" mesos "github.com/mesos/mesos-go/mesosproto" @@ -16,8 +18,6 @@ import ( "strings" "sync" "time" - "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" - "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" ) // Decides if to take an offer or not diff --git a/utilities/mesosUtils/mesosUtils.go 
b/utilities/mesosUtils/mesosUtils.go index be3d55b..dcc5e77 100644 --- a/utilities/mesosUtils/mesosUtils.go +++ b/utilities/mesosUtils/mesosUtils.go @@ -1,8 +1,8 @@ package mesosUtils import ( - mesos "github.com/mesos/mesos-go/mesosproto" "github.com/golang/protobuf/proto" + mesos "github.com/mesos/mesos-go/mesosproto" ) var ( diff --git a/utilities/runAvg/runAvg.go b/utilities/runAvg/runAvg.go index c12efa9..297dea4 100644 --- a/utilities/runAvg/runAvg.go +++ b/utilities/runAvg/runAvg.go @@ -32,7 +32,7 @@ func getInstance(curSum float64, wSize int) *runningAverageCalculator { if racSingleton == nil { racSingleton = &runningAverageCalculator{ considerationWindowSize: wSize, - currentSum: curSum, + currentSum: curSum, } return racSingleton } else { From e7166420ddbe27013d84d7392db7e79758caf758 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 21:08:11 -0500 Subject: [PATCH 15/36] add function to determine PowerClass of host in offer. This was earlier to be written in each scheduler when using classMapWatts. --- utilities/offerUtils/offerUtils.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/utilities/offerUtils/offerUtils.go b/utilities/offerUtils/offerUtils.go index fc930f9..16144dd 100644 --- a/utilities/offerUtils/offerUtils.go +++ b/utilities/offerUtils/offerUtils.go @@ -21,6 +21,17 @@ func OfferAgg(offer *mesos.Offer) (float64, float64, float64) { return cpus, mem, watts } +// Determine the power class of the host in the offer +func PowerClass(offer *mesos.Offer) string { + var powerClass string + for _, attr := range offer.GetAttributes() { + if attr.GetName() == "class" { + powerClass = attr.GetText().GetValue() + } + } + return powerClass +} + type OffersSorter []*mesos.Offer func (offersSorter OffersSorter) Len() int { From ae2e7eb3d7bd702534471dfacf657e33abea8c43 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 21:09:43 -0500 Subject: [PATCH 16/36] Added function to determine the watts value to consider for each task, depending on whether -classMapWatts was enabled and also whether the workload contained a map of power-class to the watts requirement. --- schedulers/helpers.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/schedulers/helpers.go b/schedulers/helpers.go index 6d90c6c..1891808 100644 --- a/schedulers/helpers.go +++ b/schedulers/helpers.go @@ -3,6 +3,9 @@ package schedulers import ( "fmt" "log" + "bitbucket.org/sunybingcloud/electron/def" + mesos "github.com/mesos/mesos-go/mesosproto" + "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" ) func coLocated(tasks map[string]bool) { @@ -13,3 +16,22 @@ func coLocated(tasks map[string]bool) { fmt.Println("---------------------") } + +/* + Determine the watts value to consider for each task. + + This value could either be task.Watts or task.ClassToWatts[] + If task.ClassToWatts is not present, then return task.Watts (this would be for workloads which don't have classMapWatts) +*/ +func wattsToConsider(task def.Task, classMapWatts bool, offer *mesos.Offer) float64 { + if classMapWatts { + // checking if ClassToWatts was present in the workload.
+ if task.ClassToWatts != nil { + return task.ClassToWatts[offerUtils.PowerClass(offer)] + } else { + return task.Watts + } + } else { + return task.Watts + } +} From 311972415483c4d76b2317fc1696374de8d37545 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 21:30:15 -0500 Subject: [PATCH 17/36] Added another command-line argument to indicate whether to use the Watts attribute or the ClassToWatts attribute in the workload JSON. --- scheduler.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scheduler.go b/scheduler.go index e6b736d..d0a5cf4 100644 --- a/scheduler.go +++ b/scheduler.go @@ -21,6 +21,7 @@ var ignoreWatts = flag.Bool("ignoreWatts", false, "Ignore watts in offers") var pcplogPrefix = flag.String("logPrefix", "", "Prefix for pcplog") var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for when we should start capping") var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for when we should start uncapping") +var classMapWatts = flag.Bool("classMapWatts", false, "Map Watts to power-class of host in offer") // Short hand args func init() { @@ -30,6 +31,7 @@ func init() { flag.StringVar(pcplogPrefix, "p", "", "Prefix for pcplog (shorthand)") flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for when we should start capping (shorthand)") flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for when we should start uncapping (shorthand)") + flag.BoolVar(classMapWatts, "cmw", false, "Map watts to power-class of host in offer (shorthand)") } func main() { From 4f5036bd32ab99a0f0809a165331923295622b84 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 21:31:05 -0500 Subject: [PATCH 18/36] Added TODO to retrofit schedulers to run either with classMapWatts enabled or disabled. --- schedulers/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/schedulers/README.md b/schedulers/README.md index 6696696..b4cbd34 100644 --- a/schedulers/README.md +++ b/schedulers/README.md @@ -9,6 +9,7 @@ To Do: * Make newTask(...) variadic where the newTaskClass argument can either be given or not. If not give, then pick task.Watts as the watts attribute, else pick task.ClassToWatts[newTaskClass]. * Retrofit pcp/proactiveclusterwidecappers.go to include the power capping go routines and to cap only when necessary. * Create a package that would contain routines to perform various logging and move helpers.coLocated(...) into that. + * Retrofit schedulers to be able to run either using ClassMapWatts enabled or disabled. 
Scheduling Algorithms: From ee0cada7470adfba1871078bb1d163a781b19d29 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 21:33:36 -0500 Subject: [PATCH 19/36] changed name of command-line argument 'classMapWatts' to 'powerClass' --- scheduler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scheduler.go b/scheduler.go index d0a5cf4..8a00dc7 100644 --- a/scheduler.go +++ b/scheduler.go @@ -21,7 +21,7 @@ var ignoreWatts = flag.Bool("ignoreWatts", false, "Ignore watts in offers") var pcplogPrefix = flag.String("logPrefix", "", "Prefix for pcplog") var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for when we should start capping") var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for when we should start uncapping") -var classMapWatts = flag.Bool("classMapWatts", false, "Map Watts to power-class of host in offer") +var powerClass = flag.Bool("powerClass", false, "Map Watts to power-class of host in offer") // Short hand args func init() { @@ -31,7 +31,7 @@ func init() { flag.StringVar(pcplogPrefix, "p", "", "Prefix for pcplog (shorthand)") flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for when we should start capping (shorthand)") flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for when we should start uncapping (shorthand)") - flag.BoolVar(classMapWatts, "cmw", false, "Map watts to power-class of host in offer (shorthand)") + flag.BoolVar(powerClass, "pc", false, "Map watts to power-class of host in offer (shorthand)") } func main() { From 3a7c0dc33bb3b245e17566f69001a4218ce642cd Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 21:36:33 -0500 Subject: [PATCH 20/36] changed message for command-line argument 'powerClass' --- scheduler.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/scheduler.go b/scheduler.go index 8a00dc7..3b0c252 100644 --- a/scheduler.go +++ b/scheduler.go @@ -21,7 +21,8 @@ var ignoreWatts = flag.Bool("ignoreWatts", false, "Ignore watts in offers") var pcplogPrefix = flag.String("logPrefix", "", "Prefix for pcplog") var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for when we should start capping") var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for when we should start uncapping") -var powerClass = flag.Bool("powerClass", false, "Map Watts to power-class of host in offer") +var powerClass = flag.Bool("powerClass", false, "Map Watts to power-class of host in offer. " + + "Workload needs to have 'ClassToWatts' attribute.") // Short hand args func init() { @@ -31,7 +32,8 @@ func init() { flag.StringVar(pcplogPrefix, "p", "", "Prefix for pcplog (shorthand)") flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for when we should start capping (shorthand)") flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for when we should start uncapping (shorthand)") - flag.BoolVar(powerClass, "pc", false, "Map watts to power-class of host in offer (shorthand)") + flag.BoolVar(powerClass, "pc", false, "Map watts to power-class of host in offer. 
" + + "Workload needs to have 'ClassToWatts' attribute (shorthand)") } func main() { From 92e432c312b1f4782d4ae8c2c681db6b591de3d7 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 21:37:01 -0500 Subject: [PATCH 21/36] formatted code --- scheduler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scheduler.go b/scheduler.go index 3b0c252..86b10e9 100644 --- a/scheduler.go +++ b/scheduler.go @@ -21,7 +21,7 @@ var ignoreWatts = flag.Bool("ignoreWatts", false, "Ignore watts in offers") var pcplogPrefix = flag.String("logPrefix", "", "Prefix for pcplog") var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for when we should start capping") var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for when we should start uncapping") -var powerClass = flag.Bool("powerClass", false, "Map Watts to power-class of host in offer. " + +var powerClass = flag.Bool("powerClass", false, "Map Watts to power-class of host in offer. "+ "Workload needs to have 'ClassToWatts' attribute.") // Short hand args @@ -32,7 +32,7 @@ func init() { flag.StringVar(pcplogPrefix, "p", "", "Prefix for pcplog (shorthand)") flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for when we should start capping (shorthand)") flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for when we should start uncapping (shorthand)") - flag.BoolVar(powerClass, "pc", false, "Map watts to power-class of host in offer. " + + flag.BoolVar(powerClass, "pc", false, "Map watts to power-class of host in offer. "+ "Workload needs to have 'ClassToWatts' attribute (shorthand)") } From 0413d2abd35667acaafecf2dfeb2508d3dc7445b Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 21:42:23 -0500 Subject: [PATCH 22/36] moved proactiveclusterwidecappers.go to powerCapping/ from pcp/ --- {pcp => powerCapping}/proactiveclusterwidecappers.go | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename {pcp => powerCapping}/proactiveclusterwidecappers.go (100%) diff --git a/pcp/proactiveclusterwidecappers.go b/powerCapping/proactiveclusterwidecappers.go similarity index 100% rename from pcp/proactiveclusterwidecappers.go rename to powerCapping/proactiveclusterwidecappers.go From 03a9d64a52aab151fe408585bd42ebbda6d48843 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 21:49:57 -0500 Subject: [PATCH 23/36] fixed package name from pcp to powerCapping --- powerCapping/proactiveclusterwidecappers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/powerCapping/proactiveclusterwidecappers.go b/powerCapping/proactiveclusterwidecappers.go index acbe766..422aff7 100644 --- a/powerCapping/proactiveclusterwidecappers.go +++ b/powerCapping/proactiveclusterwidecappers.go @@ -3,7 +3,7 @@ Cluster wide dynamic capping This is not a scheduler but a scheduling scheme that schedulers can use. 
*/ -package pcp +package powerCapping import ( "bitbucket.org/sunybingcloud/electron/constants" From e188aff66fd419a436800d01a8c70031e9dadca0 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 28 Jan 2017 21:56:23 -0500 Subject: [PATCH 24/36] retrofitted to use clusterwidecapper from powerCapping/ instead of from pcp/ as clusterwidecapper was moved from pcp/ to powerCapping/ --- schedulers/bpMaxMinProacCC.go | 6 +++--- schedulers/bpswClassMapWattsProacCC.go | 6 +++--- schedulers/firstfitSortedWattsClassMapWattsProacCC.go | 6 +++--- schedulers/proactiveclusterwidecappingfcfs.go | 6 +++--- schedulers/proactiveclusterwidecappingranked.go | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/schedulers/bpMaxMinProacCC.go b/schedulers/bpMaxMinProacCC.go index 1f834da..fe44f60 100644 --- a/schedulers/bpMaxMinProacCC.go +++ b/schedulers/bpMaxMinProacCC.go @@ -3,7 +3,7 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/constants" "bitbucket.org/sunybingcloud/electron/def" - "bitbucket.org/sunybingcloud/electron/pcp" + powCap "bitbucket.org/sunybingcloud/electron/powerCapping" "bitbucket.org/sunybingcloud/electron/rapl" "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" @@ -45,7 +45,7 @@ type BPMaxMinProacCC struct { availablePower map[string]float64 totalPower map[string]float64 ignoreWatts bool - capper *pcp.ClusterwideCapper + capper *powCap.ClusterwideCapper ticker *time.Ticker recapTicker *time.Ticker isCapping bool // indicate whether we are currently performing cluster-wide capping. @@ -88,7 +88,7 @@ func NewBPMaxMinProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix str availablePower: make(map[string]float64), totalPower: make(map[string]float64), RecordPCP: false, - capper: pcp.GetClusterwideCapperInstance(), + capper: powCap.GetClusterwideCapperInstance(), ticker: time.NewTicker(10 * time.Second), recapTicker: time.NewTicker(20 * time.Second), isCapping: false, diff --git a/schedulers/bpswClassMapWattsProacCC.go b/schedulers/bpswClassMapWattsProacCC.go index d94df90..9a50a69 100644 --- a/schedulers/bpswClassMapWattsProacCC.go +++ b/schedulers/bpswClassMapWattsProacCC.go @@ -3,7 +3,7 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/constants" "bitbucket.org/sunybingcloud/electron/def" - "bitbucket.org/sunybingcloud/electron/pcp" + powCap "bitbucket.org/sunybingcloud/electron/powerCapping" "bitbucket.org/sunybingcloud/electron/rapl" "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" @@ -45,7 +45,7 @@ type BPSWClassMapWattsProacCC struct { availablePower map[string]float64 totalPower map[string]float64 ignoreWatts bool - capper *pcp.ClusterwideCapper + capper *powCap.ClusterwideCapper ticker *time.Ticker recapTicker *time.Ticker isCapping bool // indicate whether we are currently performing cluster-wide capping. 
@@ -88,7 +88,7 @@ func NewBPSWClassMapWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTraceP availablePower: make(map[string]float64), totalPower: make(map[string]float64), RecordPCP: false, - capper: pcp.GetClusterwideCapperInstance(), + capper: powCap.GetClusterwideCapperInstance(), ticker: time.NewTicker(10 * time.Second), recapTicker: time.NewTicker(20 * time.Second), isCapping: false, diff --git a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go index f822b9a..f3a09d2 100644 --- a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go +++ b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go @@ -3,7 +3,7 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/constants" "bitbucket.org/sunybingcloud/electron/def" - "bitbucket.org/sunybingcloud/electron/pcp" + powCap "bitbucket.org/sunybingcloud/electron/powerCapping" "bitbucket.org/sunybingcloud/electron/rapl" "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" @@ -33,7 +33,7 @@ type FirstFitSortedWattsClassMapWattsProacCC struct { availablePower map[string]float64 totalPower map[string]float64 ignoreWatts bool - capper *pcp.ClusterwideCapper + capper *powCap.ClusterwideCapper ticker *time.Ticker recapTicker *time.Ticker isCapping bool // indicate whether we are currently performing cluster-wide capping. @@ -76,7 +76,7 @@ func NewFirstFitSortedWattsClassMapWattsProacCC(tasks []def.Task, ignoreWatts bo availablePower: make(map[string]float64), totalPower: make(map[string]float64), RecordPCP: false, - capper: pcp.GetClusterwideCapperInstance(), + capper: powCap.GetClusterwideCapperInstance(), ticker: time.NewTicker(10 * time.Second), recapTicker: time.NewTicker(20 * time.Second), isCapping: false, diff --git a/schedulers/proactiveclusterwidecappingfcfs.go b/schedulers/proactiveclusterwidecappingfcfs.go index 9fc5695..c48c9de 100644 --- a/schedulers/proactiveclusterwidecappingfcfs.go +++ b/schedulers/proactiveclusterwidecappingfcfs.go @@ -3,7 +3,7 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/constants" "bitbucket.org/sunybingcloud/electron/def" - "bitbucket.org/sunybingcloud/electron/pcp" + powCap "bitbucket.org/sunybingcloud/electron/powerCapping" "bitbucket.org/sunybingcloud/electron/rapl" "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" @@ -42,7 +42,7 @@ type ProactiveClusterwideCapFCFS struct { availablePower map[string]float64 // available power for each node in the cluster. totalPower map[string]float64 // total power for each node in the cluster. ignoreWatts bool - capper *pcp.ClusterwideCapper + capper *powCap.ClusterwideCapper ticker *time.Ticker recapTicker *time.Ticker isCapping bool // indicate whether we are currently performing cluster wide capping. 
@@ -85,7 +85,7 @@ func NewProactiveClusterwideCapFCFS(tasks []def.Task, ignoreWatts bool, schedTra availablePower: make(map[string]float64), totalPower: make(map[string]float64), RecordPCP: false, - capper: pcp.GetClusterwideCapperInstance(), + capper: powCap.GetClusterwideCapperInstance(), ticker: time.NewTicker(10 * time.Second), recapTicker: time.NewTicker(20 * time.Second), isCapping: false, diff --git a/schedulers/proactiveclusterwidecappingranked.go b/schedulers/proactiveclusterwidecappingranked.go index 9a8e4c4..c39f1c4 100644 --- a/schedulers/proactiveclusterwidecappingranked.go +++ b/schedulers/proactiveclusterwidecappingranked.go @@ -13,7 +13,7 @@ package schedulers import ( "bitbucket.org/sunybingcloud/electron/constants" "bitbucket.org/sunybingcloud/electron/def" - "bitbucket.org/sunybingcloud/electron/pcp" + powCap "bitbucket.org/sunybingcloud/electron/powerCapping" "bitbucket.org/sunybingcloud/electron/rapl" "bitbucket.org/sunybingcloud/electron/utilities/mesosUtils" "bitbucket.org/sunybingcloud/electron/utilities/offerUtils" @@ -53,7 +53,7 @@ type ProactiveClusterwideCapRanked struct { availablePower map[string]float64 // available power for each node in the cluster. totalPower map[string]float64 // total power for each node in the cluster. ignoreWatts bool - capper *pcp.ClusterwideCapper + capper *powCap.ClusterwideCapper ticker *time.Ticker recapTicker *time.Ticker isCapping bool // indicate whether we are currently performing cluster wide capping. @@ -96,7 +96,7 @@ func NewProactiveClusterwideCapRanked(tasks []def.Task, ignoreWatts bool, schedT availablePower: make(map[string]float64), totalPower: make(map[string]float64), RecordPCP: false, - capper: pcp.GetClusterwideCapperInstance(), + capper: powCap.GetClusterwideCapperInstance(), ticker: time.NewTicker(10 * time.Second), recapTicker: time.NewTicker(20 * time.Second), isCapping: false, From d5ae027e41453d10eecaf87c1fd1e5d1739d8dca Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Tue, 31 Jan 2017 14:27:57 -0500 Subject: [PATCH 25/36] fixed return value of String() by changing 'Cluster Capper' to 'Cluster-wide Capper' --- powerCapping/proactiveclusterwidecappers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/powerCapping/proactiveclusterwidecappers.go b/powerCapping/proactiveclusterwidecappers.go index 422aff7..7a7a49a 100644 --- a/powerCapping/proactiveclusterwidecappers.go +++ b/powerCapping/proactiveclusterwidecappers.go @@ -271,5 +271,5 @@ func (capper ClusterwideCapper) FCFSDeterminedCap(totalPower map[string]float64, // Stringer for an instance of clusterwideCapper func (capper ClusterwideCapper) String() string { - return "Cluster Capper -- Proactively cap the entire cluster." + return "Cluster-wide Capper -- Proactively cap the entire cluster." } From f883abd36dc4146eb498d234c96b1f148da66223 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Tue, 31 Jan 2017 15:12:43 -0500 Subject: [PATCH 26/36] Changed the description of the file. Now mentions that this is a capping strategy that can be used with different schedulers. --- powerCapping/proactiveclusterwidecappers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/powerCapping/proactiveclusterwidecappers.go b/powerCapping/proactiveclusterwidecappers.go index 7a7a49a..b3cc84a 100644 --- a/powerCapping/proactiveclusterwidecappers.go +++ b/powerCapping/proactiveclusterwidecappers.go @@ -1,7 +1,7 @@ /* Cluster wide dynamic capping -This is not a scheduler but a scheduling scheme that schedulers can use. 
+This is a capping strategy that can be used with schedulers to improve the power consumption. */ package powerCapping From 04d722d20fef06cbcfb9a6b755390fd231bb187c Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Tue, 31 Jan 2017 15:13:55 -0500 Subject: [PATCH 27/36] removed the new commandline argument for powerClass. This will be part of another patch to the code. --- scheduler.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/scheduler.go b/scheduler.go index 86b10e9..e6b736d 100644 --- a/scheduler.go +++ b/scheduler.go @@ -21,8 +21,6 @@ var ignoreWatts = flag.Bool("ignoreWatts", false, "Ignore watts in offers") var pcplogPrefix = flag.String("logPrefix", "", "Prefix for pcplog") var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for when we should start capping") var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for when we should start uncapping") -var powerClass = flag.Bool("powerClass", false, "Map Watts to power-class of host in offer. "+ - "Workload needs to have 'ClassToWatts' attribute.") // Short hand args func init() { @@ -32,8 +30,6 @@ func init() { flag.StringVar(pcplogPrefix, "p", "", "Prefix for pcplog (shorthand)") flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for when we should start capping (shorthand)") flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for when we should start uncapping (shorthand)") - flag.BoolVar(powerClass, "pc", false, "Map watts to power-class of host in offer. "+ - "Workload needs to have 'ClassToWatts' attribute (shorthand)") } func main() { From 84cdea08fc33fe6863ff03ce2ca54e95c9579976 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Tue, 31 Jan 2017 15:33:31 -0500 Subject: [PATCH 28/36] retrofitted to use offerUtils.PowerClass(...) instead of inlining the code in every scheduler. Reduced redundant code. --- schedulers/bpswClassMapWatts.go | 19 +++++++------------ schedulers/bpswClassMapWattsPistonCapping.go | 19 +++++++------------ schedulers/bpswClassMapWattsProacCC.go | 19 +++++++------------ .../firstfitSortedWattsClassMapWatts.go | 18 +++++++----------- ...firstfitSortedWattsClassMapWattsProacCC.go | 19 +++++++------------ 5 files changed, 35 insertions(+), 59 deletions(-) diff --git a/schedulers/bpswClassMapWatts.go b/schedulers/bpswClassMapWatts.go index a648e15..79f3882 100644 --- a/schedulers/bpswClassMapWatts.go +++ b/schedulers/bpswClassMapWatts.go @@ -78,7 +78,7 @@ func NewBPSWClassMapWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix s return s } -func (s *BPSWClassMapWatts) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { +func (s *BPSWClassMapWatts) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo { taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) s.tasksCreated++ @@ -102,7 +102,7 @@ func (s *BPSWClassMapWatts) newTask(offer *mesos.Offer, task def.Task, newTaskCl } if !s.ignoreWatts { - resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass])) } return &mesos.TaskInfo{ @@ -159,27 +159,22 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers } for *task.Instances > 0 { - var nodeClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - nodeClass = attr.GetText().GetValue() - } - } + powerClass := offerUtils.PowerClass(offer) // Does the task fit // OR lazy evaluation. 
If ignore watts is set to true, second statement won't // be evaluated. - if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[nodeClass]))) && + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && (offerCPU >= (totalCPU + task.CPU)) && (offerRAM >= (totalRAM + task.RAM)) { - fmt.Println("Watts being used: ", task.ClassToWatts[nodeClass]) + fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) taken = true - totalWatts += task.ClassToWatts[nodeClass] + totalWatts += task.ClassToWatts[powerClass] totalCPU += task.CPU totalRAM += task.RAM log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) - taskToSchedule := s.newTask(offer, task, nodeClass) + taskToSchedule := s.newTask(offer, task, powerClass) tasks = append(tasks, taskToSchedule) fmt.Println("Inst: ", *task.Instances) diff --git a/schedulers/bpswClassMapWattsPistonCapping.go b/schedulers/bpswClassMapWattsPistonCapping.go index 4ee7825..baec23d 100644 --- a/schedulers/bpswClassMapWattsPistonCapping.go +++ b/schedulers/bpswClassMapWattsPistonCapping.go @@ -91,7 +91,7 @@ func NewBPSWClassMapWattsPistonCapping(tasks []def.Task, ignoreWatts bool, sched return s } -func (s *BPSWClassMapWattsPistonCapping) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { +func (s *BPSWClassMapWattsPistonCapping) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo { taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) s.tasksCreated++ @@ -125,7 +125,7 @@ func (s *BPSWClassMapWattsPistonCapping) newTask(offer *mesos.Offer, task def.Ta } if !s.ignoreWatts { - resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass])) } return &mesos.TaskInfo{ @@ -260,16 +260,11 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr } for *task.Instances > 0 { - var nodeClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - nodeClass = attr.GetText().GetValue() - } - } + powerClass := offerUtils.PowerClass(offer) // Does the task fit // OR lazy evaluation. 
If ignoreWatts is set to true, second statement won't // be evaluated - if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[nodeClass]))) && + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && (offerCPU >= (totalCPU + task.CPU)) && (offerRAM >= (totalRAM + task.RAM)) { @@ -279,14 +274,14 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr s.startCapping() } - fmt.Println("Watts being used: ", task.ClassToWatts[nodeClass]) + fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) taken = true - totalWatts += task.ClassToWatts[nodeClass] + totalWatts += task.ClassToWatts[powerClass] totalCPU += task.CPU totalRAM += task.RAM log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) - taskToSchedule := s.newTask(offer, task, nodeClass) + taskToSchedule := s.newTask(offer, task, powerClass) tasks = append(tasks, taskToSchedule) fmt.Println("Inst: ", *task.Instances) diff --git a/schedulers/bpswClassMapWattsProacCC.go b/schedulers/bpswClassMapWattsProacCC.go index 9a50a69..12ddb46 100644 --- a/schedulers/bpswClassMapWattsProacCC.go +++ b/schedulers/bpswClassMapWattsProacCC.go @@ -101,7 +101,7 @@ func NewBPSWClassMapWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTraceP // mutex var bpswClassMapWattsProacCCMutex sync.Mutex -func (s *BPSWClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { +func (s *BPSWClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo { taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) s.tasksCreated++ @@ -133,7 +133,7 @@ func (s *BPSWClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, ne } if !s.ignoreWatts { - resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass])) } return &mesos.TaskInfo{ @@ -295,16 +295,11 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, } for *task.Instances > 0 { - var nodeClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - nodeClass = attr.GetText().GetValue() - } - } + powerClass := offerUtils.PowerClass(offer) // Does the task fit // OR Lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. 
- if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[nodeClass]))) && + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && (offerCPU >= (totalCPU + task.CPU)) && (offerRAM >= (totalRAM + task.RAM)) { @@ -316,7 +311,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, s.startCapping() } - fmt.Println("Watts being used: ", task.ClassToWatts[nodeClass]) + fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) tempCap, err := s.capper.FCFSDeterminedCap(s.totalPower, &task) if err == nil { bpswClassMapWattsProacCCMutex.Lock() @@ -327,12 +322,12 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, log.Println(err) } taken = true - totalWatts += task.ClassToWatts[nodeClass] + totalWatts += task.ClassToWatts[powerClass] totalCPU += task.CPU totalRAM += task.RAM log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) - taskToSchedule := s.newTask(offer, task, nodeClass) + taskToSchedule := s.newTask(offer, task, powerClass) tasks = append(tasks, taskToSchedule) fmt.Println("Inst: ", *task.Instances) diff --git a/schedulers/firstfitSortedWattsClassMapWatts.go b/schedulers/firstfitSortedWattsClassMapWatts.go index 3a0d1df..b8b51e8 100644 --- a/schedulers/firstfitSortedWattsClassMapWatts.go +++ b/schedulers/firstfitSortedWattsClassMapWatts.go @@ -65,7 +65,7 @@ func NewFirstFitSortedWattsClassMapWatts(tasks []def.Task, ignoreWatts bool, sch return s } -func (s *FirstFitSortedWattsClassMapWatts) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { +func (s *FirstFitSortedWattsClassMapWatts) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo { taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) s.tasksCreated++ @@ -89,7 +89,7 @@ func (s *FirstFitSortedWattsClassMapWatts) newTask(offer *mesos.Offer, task def. 
} if !s.ignoreWatts { - resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass])) } return &mesos.TaskInfo{ @@ -140,21 +140,17 @@ func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.Scheduler } } - // retrieving the node class from the offer - var nodeClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - nodeClass = attr.GetText().GetValue() - } - } + // retrieving the powerClass from the offer + powerClass := offerUtils.PowerClass(offer) // Decision to take the offer or not - if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[nodeClass])) && + if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) - taskToSchedule := s.newTask(offer, task, nodeClass) + taskToSchedule := s.newTask(offer, task, powerClass) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, mesosUtils.DefaultFilter) diff --git a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go index f3a09d2..0db8a05 100644 --- a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go +++ b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go @@ -89,7 +89,7 @@ func NewFirstFitSortedWattsClassMapWattsProacCC(tasks []def.Task, ignoreWatts bo // mutex var ffswClassMapWattsProacCCMutex sync.Mutex -func (s *FirstFitSortedWattsClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { +func (s *FirstFitSortedWattsClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo { taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) s.tasksCreated++ @@ -121,7 +121,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) newTask(offer *mesos.Offer, ta } if !s.ignoreWatts { - resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass])) } return &mesos.TaskInfo{ @@ -278,16 +278,11 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc } } - // Retrieving the node class from the offer - var nodeClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - nodeClass = attr.GetText().GetValue() - } - } + // retrieving the powerClass for the offer + powerClass := offerUtils.PowerClass(offer) // Decision to take the offer or not - if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[nodeClass])) && + if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { // Capping the cluster if haven't yet started @@ -298,7 +293,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc s.startCapping() } - fmt.Println("Watts being used: ", task.ClassToWatts[nodeClass]) + fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) tempCap, err := s.capper.FCFSDeterminedCap(s.totalPower, &task) if err == nil { 
ffswClassMapWattsProacCCMutex.Lock() @@ -312,7 +307,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) - taskToSchedule := s.newTask(offer, task, nodeClass) + taskToSchedule := s.newTask(offer, task, powerClass) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, mesosUtils.DefaultFilter) From 3af1d561c26555ced934f9a9afc4184ce630c7e1 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Tue, 31 Jan 2017 15:33:31 -0500 Subject: [PATCH 29/36] retrofitted to use offerUtils.PowerClass(...) instead of inlining the code in every scheduler. Reduced redundant code. Changed name of newTaskClass in newTask(...) to powerClass. --- schedulers/bpswClassMapWatts.go | 19 +++++++------------ schedulers/bpswClassMapWattsPistonCapping.go | 19 +++++++------------ schedulers/bpswClassMapWattsProacCC.go | 19 +++++++------------ .../firstfitSortedWattsClassMapWatts.go | 18 +++++++----------- ...firstfitSortedWattsClassMapWattsProacCC.go | 19 +++++++------------ 5 files changed, 35 insertions(+), 59 deletions(-) diff --git a/schedulers/bpswClassMapWatts.go b/schedulers/bpswClassMapWatts.go index a648e15..79f3882 100644 --- a/schedulers/bpswClassMapWatts.go +++ b/schedulers/bpswClassMapWatts.go @@ -78,7 +78,7 @@ func NewBPSWClassMapWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix s return s } -func (s *BPSWClassMapWatts) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { +func (s *BPSWClassMapWatts) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo { taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) s.tasksCreated++ @@ -102,7 +102,7 @@ func (s *BPSWClassMapWatts) newTask(offer *mesos.Offer, task def.Task, newTaskCl } if !s.ignoreWatts { - resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass])) } return &mesos.TaskInfo{ @@ -159,27 +159,22 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers } for *task.Instances > 0 { - var nodeClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - nodeClass = attr.GetText().GetValue() - } - } + powerClass := offerUtils.PowerClass(offer) // Does the task fit // OR lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. 
- if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[nodeClass]))) && + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && (offerCPU >= (totalCPU + task.CPU)) && (offerRAM >= (totalRAM + task.RAM)) { - fmt.Println("Watts being used: ", task.ClassToWatts[nodeClass]) + fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) taken = true - totalWatts += task.ClassToWatts[nodeClass] + totalWatts += task.ClassToWatts[powerClass] totalCPU += task.CPU totalRAM += task.RAM log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) - taskToSchedule := s.newTask(offer, task, nodeClass) + taskToSchedule := s.newTask(offer, task, powerClass) tasks = append(tasks, taskToSchedule) fmt.Println("Inst: ", *task.Instances) diff --git a/schedulers/bpswClassMapWattsPistonCapping.go b/schedulers/bpswClassMapWattsPistonCapping.go index 4ee7825..baec23d 100644 --- a/schedulers/bpswClassMapWattsPistonCapping.go +++ b/schedulers/bpswClassMapWattsPistonCapping.go @@ -91,7 +91,7 @@ func NewBPSWClassMapWattsPistonCapping(tasks []def.Task, ignoreWatts bool, sched return s } -func (s *BPSWClassMapWattsPistonCapping) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { +func (s *BPSWClassMapWattsPistonCapping) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo { taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) s.tasksCreated++ @@ -125,7 +125,7 @@ func (s *BPSWClassMapWattsPistonCapping) newTask(offer *mesos.Offer, task def.Ta } if !s.ignoreWatts { - resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass])) } return &mesos.TaskInfo{ @@ -260,16 +260,11 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr } for *task.Instances > 0 { - var nodeClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - nodeClass = attr.GetText().GetValue() - } - } + powerClass := offerUtils.PowerClass(offer) // Does the task fit // OR lazy evaluation. 
If ignoreWatts is set to true, second statement won't // be evaluated - if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[nodeClass]))) && + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && (offerCPU >= (totalCPU + task.CPU)) && (offerRAM >= (totalRAM + task.RAM)) { @@ -279,14 +274,14 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr s.startCapping() } - fmt.Println("Watts being used: ", task.ClassToWatts[nodeClass]) + fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) taken = true - totalWatts += task.ClassToWatts[nodeClass] + totalWatts += task.ClassToWatts[powerClass] totalCPU += task.CPU totalRAM += task.RAM log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) - taskToSchedule := s.newTask(offer, task, nodeClass) + taskToSchedule := s.newTask(offer, task, powerClass) tasks = append(tasks, taskToSchedule) fmt.Println("Inst: ", *task.Instances) diff --git a/schedulers/bpswClassMapWattsProacCC.go b/schedulers/bpswClassMapWattsProacCC.go index 9a50a69..12ddb46 100644 --- a/schedulers/bpswClassMapWattsProacCC.go +++ b/schedulers/bpswClassMapWattsProacCC.go @@ -101,7 +101,7 @@ func NewBPSWClassMapWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTraceP // mutex var bpswClassMapWattsProacCCMutex sync.Mutex -func (s *BPSWClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { +func (s *BPSWClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo { taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) s.tasksCreated++ @@ -133,7 +133,7 @@ func (s *BPSWClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, ne } if !s.ignoreWatts { - resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass])) } return &mesos.TaskInfo{ @@ -295,16 +295,11 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, } for *task.Instances > 0 { - var nodeClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - nodeClass = attr.GetText().GetValue() - } - } + powerClass := offerUtils.PowerClass(offer) // Does the task fit // OR Lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. 
- if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[nodeClass]))) && + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && (offerCPU >= (totalCPU + task.CPU)) && (offerRAM >= (totalRAM + task.RAM)) { @@ -316,7 +311,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, s.startCapping() } - fmt.Println("Watts being used: ", task.ClassToWatts[nodeClass]) + fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) tempCap, err := s.capper.FCFSDeterminedCap(s.totalPower, &task) if err == nil { bpswClassMapWattsProacCCMutex.Lock() @@ -327,12 +322,12 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, log.Println(err) } taken = true - totalWatts += task.ClassToWatts[nodeClass] + totalWatts += task.ClassToWatts[powerClass] totalCPU += task.CPU totalRAM += task.RAM log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) - taskToSchedule := s.newTask(offer, task, nodeClass) + taskToSchedule := s.newTask(offer, task, powerClass) tasks = append(tasks, taskToSchedule) fmt.Println("Inst: ", *task.Instances) diff --git a/schedulers/firstfitSortedWattsClassMapWatts.go b/schedulers/firstfitSortedWattsClassMapWatts.go index 3a0d1df..b8b51e8 100644 --- a/schedulers/firstfitSortedWattsClassMapWatts.go +++ b/schedulers/firstfitSortedWattsClassMapWatts.go @@ -65,7 +65,7 @@ func NewFirstFitSortedWattsClassMapWatts(tasks []def.Task, ignoreWatts bool, sch return s } -func (s *FirstFitSortedWattsClassMapWatts) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { +func (s *FirstFitSortedWattsClassMapWatts) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo { taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) s.tasksCreated++ @@ -89,7 +89,7 @@ func (s *FirstFitSortedWattsClassMapWatts) newTask(offer *mesos.Offer, task def. 
} if !s.ignoreWatts { - resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass])) } return &mesos.TaskInfo{ @@ -140,21 +140,17 @@ func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.Scheduler } } - // retrieving the node class from the offer - var nodeClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - nodeClass = attr.GetText().GetValue() - } - } + // retrieving the powerClass from the offer + powerClass := offerUtils.PowerClass(offer) // Decision to take the offer or not - if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[nodeClass])) && + if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) - taskToSchedule := s.newTask(offer, task, nodeClass) + taskToSchedule := s.newTask(offer, task, powerClass) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, mesosUtils.DefaultFilter) diff --git a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go index f3a09d2..0db8a05 100644 --- a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go +++ b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go @@ -89,7 +89,7 @@ func NewFirstFitSortedWattsClassMapWattsProacCC(tasks []def.Task, ignoreWatts bo // mutex var ffswClassMapWattsProacCCMutex sync.Mutex -func (s *FirstFitSortedWattsClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo { +func (s *FirstFitSortedWattsClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo { taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances) s.tasksCreated++ @@ -121,7 +121,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) newTask(offer *mesos.Offer, ta } if !s.ignoreWatts { - resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass])) + resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass])) } return &mesos.TaskInfo{ @@ -278,16 +278,11 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc } } - // Retrieving the node class from the offer - var nodeClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - nodeClass = attr.GetText().GetValue() - } - } + // retrieving the powerClass for the offer + powerClass := offerUtils.PowerClass(offer) // Decision to take the offer or not - if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[nodeClass])) && + if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { // Capping the cluster if haven't yet started @@ -298,7 +293,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc s.startCapping() } - fmt.Println("Watts being used: ", task.ClassToWatts[nodeClass]) + fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) tempCap, err := s.capper.FCFSDeterminedCap(s.totalPower, &task) if err == nil { 
ffswClassMapWattsProacCCMutex.Lock() @@ -312,7 +307,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) - taskToSchedule := s.newTask(offer, task, nodeClass) + taskToSchedule := s.newTask(offer, task, powerClass) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, mesosUtils.DefaultFilter) From a39cc39c8c166e7b25e20a430b203738f9ac0216 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Tue, 31 Jan 2017 16:19:58 -0500 Subject: [PATCH 30/36] Added TODO for future work -- ClassMapWatts as commandLine argument --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 95edb6b..f491fcc 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ To Do: * Log fix for declining offer -- different reason when insufficient resources as compared to when there are no longer any tasks to schedule. * Have a centralised logFile that can be filtered by identifier. All electron logs should go into this file. + * Make ClassMapWatts to commandLine arguments so Electron can be run with ClassMapWatts enabled/disabled. **Requires [Performance Co-Pilot](http://pcp.io/) tool pmdumptext to be installed on the From b815d6591077f2740117a83ff0dee8e29198074a Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Tue, 31 Jan 2017 16:29:06 -0500 Subject: [PATCH 31/36] Add another category called Capping Strategies. --- schedulers/README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/schedulers/README.md b/schedulers/README.md index b4cbd34..275798b 100644 --- a/schedulers/README.md +++ b/schedulers/README.md @@ -16,9 +16,12 @@ Scheduling Algorithms: * First Fit * First Fit with sorted watts * Bin-packing with sorted watts - * FCFS Proactive Cluster-wide Capping - * Ranked Proactive Cluster-wide Capping - * Piston Capping -- Works when scheduler is run with WAR * ClassMapWatts -- Bin-packing and First Fit that now use Watts per power class. * Top Heavy -- Hybrid scheduler that packs small tasks (less power intensive) using Bin-packing and spreads large tasks (power intensive) using First Fit. * Bottom Heavy -- Hybrid scheduler that packs large tasks (power intensive) using Bin-packing and spreads small tasks (less power intensive) using First Fit. + + Capping Strategies + + * Extrema Dynamic Capping + * Proactive Cluster-wide Capping + * Piston Capping From f4b5ffc1de4f5c50102f9b3f18a0fdc04dfa814e Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Tue, 31 Jan 2017 16:33:59 -0500 Subject: [PATCH 32/36] used offerUtils#PowerClass(...) instead of inlining the source code. 
--- schedulers/bottomHeavy.go | 15 ++------------- schedulers/topHeavy.go | 15 ++------------- 2 files changed, 4 insertions(+), 26 deletions(-) diff --git a/schedulers/bottomHeavy.go b/schedulers/bottomHeavy.go index 4b4391b..6f0cccd 100644 --- a/schedulers/bottomHeavy.go +++ b/schedulers/bottomHeavy.go @@ -127,17 +127,6 @@ func (s *BottomHeavy) newTask(offer *mesos.Offer, task def.Task, newTaskClass st } } -// retrieve the power class of host in offer -func (s *BottomHeavy) getPowerClass(offer *mesos.Offer) string { - var powerClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - powerClass = attr.GetText().GetValue() - } - } - return powerClass -} - // Shut down scheduler if no more tasks to schedule func (s *BottomHeavy) shutDownIfNecessary() { if len(s.smallTasks) <= 0 && len(s.largeTasks) <= 0 { @@ -182,7 +171,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) task := s.largeTasks[i] for *task.Instances > 0 { - powerClass := s.getPowerClass(offer) + powerClass := offerUtils.PowerClass(offer) // Does the task fit // OR lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. @@ -242,7 +231,7 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver taken := false for i := 0; i < len(s.smallTasks); i++ { task := s.smallTasks[i] - powerClass := s.getPowerClass(offer) + powerClass := offerUtils.PowerClass(offer) // Decision to take the offer or not wattsToConsider := task.Watts diff --git a/schedulers/topHeavy.go b/schedulers/topHeavy.go index 9454e40..d8bf16b 100644 --- a/schedulers/topHeavy.go +++ b/schedulers/topHeavy.go @@ -127,17 +127,6 @@ func (s *TopHeavy) newTask(offer *mesos.Offer, task def.Task, newTaskClass strin } } -// retrieve the power class of host in offer -func (s *TopHeavy) getPowerClass(offer *mesos.Offer) string { - var powerClass string - for _, attr := range offer.GetAttributes() { - if attr.GetName() == "class" { - powerClass = attr.GetText().GetValue() - } - } - return powerClass -} - // Shut down scheduler if no more tasks to schedule func (s *TopHeavy) shutDownIfNecessary() { if len(s.smallTasks) <= 0 && len(s.largeTasks) <= 0 { @@ -182,7 +171,7 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) { task := s.smallTasks[i] for *task.Instances > 0 { - powerClass := s.getPowerClass(offer) + powerClass := offerUtils.PowerClass(offer) // Does the task fit // OR lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. @@ -242,7 +231,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { taken := false for i := 0; i < len(s.largeTasks); i++ { task := s.largeTasks[i] - powerClass := s.getPowerClass(offer) + powerClass := offerUtils.PowerClass(offer) // Decision to take the offer or not wattsToConsider := task.Watts From 7fc5b5d19edae1213ad601b35e4d091c90088625 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 4 Feb 2017 16:10:32 -0500 Subject: [PATCH 33/36] Added TODO to be able to choose schedulers from the config file (the creation of which is also in TODO) too. 
--- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index f491fcc..769efaa 100644 --- a/README.md +++ b/README.md @@ -8,6 +8,7 @@ To Do: * Add ability to use constraints * Running average calculations https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average * Make parameters corresponding to each scheduler configurable (possible to have a config template for each scheduler?) + * TODO : Adding type of scheduler to be used, to be picked from a config file, along with it's configurable parameters. * Write test code for each scheduler (This should be after the design change) * Some of the constants in constants/constants.go can vary based on the environment. Possible to setup the constants at runtime based on the environment? From eea0db0b3d34bfd26d87e138b5b7774859b8e53f Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 4 Feb 2017 16:59:25 -0500 Subject: [PATCH 34/36] retrofitted schedulers by renaming 'taken' to 'offerTaken' for the boolean to indicate whether an offer has been consumed. --- scheduler.go | 6 +++--- schedulers/binPackSortedWattsSortedOffers.go | 6 +++--- schedulers/binpackedpistoncapping.go | 6 +++--- schedulers/binpacksortedwatts.go | 6 +++--- schedulers/bottomHeavy.go | 6 +++--- schedulers/bpswClassMapWatts.go | 6 +++--- schedulers/bpswClassMapWattsPistonCapping.go | 6 +++--- schedulers/bpswClassMapWattsProacCC.go | 6 +++--- schedulers/firstfit.go | 6 +++--- schedulers/firstfitSortedOffers.go | 6 +++--- schedulers/firstfitSortedWattsClassMapWatts.go | 6 +++--- schedulers/firstfitSortedWattsClassMapWattsProacCC.go | 6 +++--- schedulers/firstfitSortedWattsSortedOffers.go | 6 +++--- schedulers/firstfitsortedwatts.go | 6 +++--- schedulers/firstfitwattsonly.go | 6 +++--- schedulers/proactiveclusterwidecappingfcfs.go | 6 +++--- schedulers/proactiveclusterwidecappingranked.go | 6 +++--- schedulers/topHeavy.go | 6 +++--- 18 files changed, 54 insertions(+), 54 deletions(-) diff --git a/scheduler.go b/scheduler.go index e6b736d..a6b11de 100644 --- a/scheduler.go +++ b/scheduler.go @@ -58,7 +58,7 @@ func main() { startTime := time.Now().Format("20060102150405") logPrefix := *pcplogPrefix + "_" + startTime - scheduler := schedulers.NewBinPackSortedWattsSortedOffers(tasks, *ignoreWatts, logPrefix) + scheduler := schedulers.NewBinPackSortedWatts(tasks, *ignoreWatts, logPrefix) driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{ Master: *master, Framework: &mesos.FrameworkInfo{ @@ -72,8 +72,8 @@ func main() { return } - go pcp.Start(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix) - //go pcp.StartPCPLogAndExtremaDynamicCap(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix, *hiThreshold, *loThreshold) + //go pcp.Start(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix) + go pcp.StartPCPLogAndExtremaDynamicCap(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix, *hiThreshold, *loThreshold) time.Sleep(1 * time.Second) // Take a second between starting PCP log and continuing // Attempt to handle signint to not leave pmdumptext running diff --git a/schedulers/binPackSortedWattsSortedOffers.go b/schedulers/binPackSortedWattsSortedOffers.go index 2f70cb3..1cf2191 100644 --- a/schedulers/binPackSortedWattsSortedOffers.go +++ b/schedulers/binPackSortedWattsSortedOffers.go @@ -154,7 +154,7 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr offer_cpu, offer_ram, offer_watts := offerUtils.OfferAgg(offer) - taken := false + offerTaken := false totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ 
-175,7 +175,7 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr (offer_cpu >= (totalCPU + task.CPU)) && (offer_ram >= (totalRAM + task.RAM)) { - taken = true + offerTaken = true totalWatts += task.Watts totalCPU += task.CPU totalRAM += task.RAM @@ -203,7 +203,7 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr } } - if taken { + if offerTaken { log.Printf("Starting on [%s]\n", offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go index 7cf4b9d..7f413f9 100644 --- a/schedulers/binpackedpistoncapping.go +++ b/schedulers/binpackedpistoncapping.go @@ -249,7 +249,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off fitTasks := []*mesos.TaskInfo{} offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - taken := false + offerTaken := false totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ -276,7 +276,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off s.startCapping() } - taken = true + offerTaken = true totalWatts += task.Watts totalCPU += task.CPU totalRAM += task.RAM @@ -305,7 +305,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off } } - if taken { + if offerTaken { // Updating the cap value for offer.Hostname bpPistonMutex.Lock() bpPistonCapValues[*offer.Hostname] += partialLoad diff --git a/schedulers/binpacksortedwatts.go b/schedulers/binpacksortedwatts.go index 00247c7..cf8162f 100644 --- a/schedulers/binpacksortedwatts.go +++ b/schedulers/binpacksortedwatts.go @@ -143,7 +143,7 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers offer_cpu, offer_ram, offer_watts := offerUtils.OfferAgg(offer) - taken := false + offerTaken := false totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ -164,7 +164,7 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers (offer_cpu >= (totalCPU + task.CPU)) && (offer_ram >= (totalRAM + task.RAM)) { - taken = true + offerTaken = true totalWatts += task.Watts totalCPU += task.CPU totalRAM += task.RAM @@ -192,7 +192,7 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers } } - if taken { + if offerTaken { log.Printf("Starting on [%s]\n", offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { diff --git a/schedulers/bottomHeavy.go b/schedulers/bottomHeavy.go index 6f0cccd..b108827 100644 --- a/schedulers/bottomHeavy.go +++ b/schedulers/bottomHeavy.go @@ -166,7 +166,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 - taken := false + offerTaken := false for i := 0; i < len(s.largeTasks); i++ { task := s.largeTasks[i] @@ -182,7 +182,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsToConsider))) && (offerCPU >= (totalCPU + task.CPU)) && (offerRAM >= (totalRAM + task.RAM)) { - taken = true + offerTaken = true totalWatts += wattsToConsider totalCPU += task.CPU totalRAM += task.RAM @@ -199,7 +199,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) } } - if taken { + if offerTaken { log.Printf("Starting on [%s]\n", offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, 
mesosUtils.DefaultFilter) } else { diff --git a/schedulers/bpswClassMapWatts.go b/schedulers/bpswClassMapWatts.go index 79f3882..1464df8 100644 --- a/schedulers/bpswClassMapWatts.go +++ b/schedulers/bpswClassMapWatts.go @@ -143,7 +143,7 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - taken := false + offerTaken := false totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ -168,7 +168,7 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers (offerRAM >= (totalRAM + task.RAM)) { fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) - taken = true + offerTaken = true totalWatts += task.ClassToWatts[powerClass] totalCPU += task.CPU totalRAM += task.RAM @@ -196,7 +196,7 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers } } - if taken { + if offerTaken { log.Printf("Starting on [%s]\n", offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { diff --git a/schedulers/bpswClassMapWattsPistonCapping.go b/schedulers/bpswClassMapWattsPistonCapping.go index baec23d..a80c599 100644 --- a/schedulers/bpswClassMapWattsPistonCapping.go +++ b/schedulers/bpswClassMapWattsPistonCapping.go @@ -242,7 +242,7 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - taken := false + offerTaken := false totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ -275,7 +275,7 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr } fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) - taken = true + offerTaken = true totalWatts += task.ClassToWatts[powerClass] totalCPU += task.CPU totalRAM += task.RAM @@ -303,7 +303,7 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr } } - if taken { + if offerTaken { // Updating the cap value for offer.Hostname bpswClassMapWattsPistonMutex.Lock() bpswClassMapWattsPistonCapValues[*offer.Hostname] += partialLoad diff --git a/schedulers/bpswClassMapWattsProacCC.go b/schedulers/bpswClassMapWattsProacCC.go index 12ddb46..b250e67 100644 --- a/schedulers/bpswClassMapWattsProacCC.go +++ b/schedulers/bpswClassMapWattsProacCC.go @@ -280,7 +280,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - taken := false + offerTaken := false totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ -321,7 +321,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, log.Println("Failed to determine new cluster-wide cap:") log.Println(err) } - taken = true + offerTaken = true totalWatts += task.ClassToWatts[powerClass] totalCPU += task.CPU totalRAM += task.RAM @@ -352,7 +352,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, } } - if taken { + if offerTaken { log.Printf("Starting on [%s]\n", offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) } else { diff --git a/schedulers/firstfit.go b/schedulers/firstfit.go index 5469bb4..4317a91 100644 --- a/schedulers/firstfit.go +++ b/schedulers/firstfit.go @@ -142,7 +142,7 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos. 
// First fit strategy - taken := false + offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] @@ -166,7 +166,7 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos. log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) - taken = true + offerTaken = true fmt.Println("Inst: ", *task.Instances) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) @@ -187,7 +187,7 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos. } // If there was no match for the task - if !taken { + if !offerTaken { fmt.Println("There is not enough resources to launch a task:") cpus, mem, watts := offerUtils.OfferAgg(offer) diff --git a/schedulers/firstfitSortedOffers.go b/schedulers/firstfitSortedOffers.go index 06ee713..0611581 100644 --- a/schedulers/firstfitSortedOffers.go +++ b/schedulers/firstfitSortedOffers.go @@ -154,7 +154,7 @@ func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offe // First fit strategy - taken := false + offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] @@ -178,7 +178,7 @@ func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offe log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) - taken = true + offerTaken = true fmt.Println("Inst: ", *task.Instances) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) @@ -199,7 +199,7 @@ func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offe } // If there was no match for the task - if !taken { + if !offerTaken { fmt.Println("There is not enough resources to launch a task:") cpus, mem, watts := offerUtils.OfferAgg(offer) diff --git a/schedulers/firstfitSortedWattsClassMapWatts.go b/schedulers/firstfitSortedWattsClassMapWatts.go index b8b51e8..a7f5448 100644 --- a/schedulers/firstfitSortedWattsClassMapWatts.go +++ b/schedulers/firstfitSortedWattsClassMapWatts.go @@ -129,7 +129,7 @@ func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.Scheduler offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) // First fit strategy - taken := false + offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] // Check host if it exists @@ -155,7 +155,7 @@ func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.Scheduler log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, mesosUtils.DefaultFilter) - taken = true + offerTaken = true fmt.Println("Inst: ", *task.Instances) *task.Instances-- if *task.Instances <= 0 { @@ -172,7 +172,7 @@ func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.Scheduler } // If there was no match for the task - if !taken { + if !offerTaken { fmt.Println("There is not enough resources to launch a task:") cpus, mem, watts := offerUtils.OfferAgg(offer) diff --git a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go index 0db8a05..a896468 100644 --- a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go +++ b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go @@ -267,7 +267,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc offerCPU, offerRAM, offerWatts := 
offerUtils.OfferAgg(offer) // First fit strategy - taken := false + offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] // Check host if it exists @@ -312,7 +312,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, mesosUtils.DefaultFilter) - taken = true + offerTaken = true fmt.Println("Inst: ", *task.Instances) *task.Instances-- if *task.Instances <= 0 { @@ -332,7 +332,7 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc } // If there was no match for the task - if !taken { + if !offerTaken { fmt.Println("There is not enough resources to launch a task:") cpus, mem, watts := offerUtils.OfferAgg(offer) diff --git a/schedulers/firstfitSortedWattsSortedOffers.go b/schedulers/firstfitSortedWattsSortedOffers.go index 3b4bb4e..9ceb095 100644 --- a/schedulers/firstfitSortedWattsSortedOffers.go +++ b/schedulers/firstfitSortedWattsSortedOffers.go @@ -157,7 +157,7 @@ func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerD // First fit strategy - taken := false + offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] @@ -181,7 +181,7 @@ func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerD log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) - taken = true + offerTaken = true fmt.Println("Inst: ", *task.Instances) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) @@ -201,7 +201,7 @@ func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerD } // If there was no match for the task - if !taken { + if !offerTaken { fmt.Println("There is not enough resources to launch a task:") cpus, mem, watts := offerUtils.OfferAgg(offer) diff --git a/schedulers/firstfitsortedwatts.go b/schedulers/firstfitsortedwatts.go index ab8d9c3..b62d5b3 100644 --- a/schedulers/firstfitsortedwatts.go +++ b/schedulers/firstfitsortedwatts.go @@ -145,7 +145,7 @@ func (s *FirstFitSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offer // First fit strategy - taken := false + offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] @@ -169,7 +169,7 @@ func (s *FirstFitSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offer log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) - taken = true + offerTaken = true fmt.Println("Inst: ", *task.Instances) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) @@ -189,7 +189,7 @@ func (s *FirstFitSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offer } // If there was no match for the task - if !taken { + if !offerTaken { fmt.Println("There is not enough resources to launch a task:") cpus, mem, watts := offerUtils.OfferAgg(offer) diff --git a/schedulers/firstfitwattsonly.go b/schedulers/firstfitwattsonly.go index c24e75e..12f6331 100644 --- a/schedulers/firstfitwattsonly.go +++ b/schedulers/firstfitwattsonly.go @@ -136,7 +136,7 @@ func (s *FirstFitWattsOnly) ResourceOffers(driver sched.SchedulerDriver, offers // First fit strategy - taken := false + offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] @@ -160,7 +160,7 @@ func (s *FirstFitWattsOnly) 
ResourceOffers(driver sched.SchedulerDriver, offers log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) - taken = true + offerTaken = true fmt.Println("Inst: ", *task.Instances) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) @@ -181,7 +181,7 @@ func (s *FirstFitWattsOnly) ResourceOffers(driver sched.SchedulerDriver, offers } // If there was no match for the task - if !taken { + if !offerTaken { fmt.Println("There is not enough resources to launch a task:") cpus, mem, watts := offerUtils.OfferAgg(offer) diff --git a/schedulers/proactiveclusterwidecappingfcfs.go b/schedulers/proactiveclusterwidecappingfcfs.go index c48c9de..b7d7c8c 100644 --- a/schedulers/proactiveclusterwidecappingfcfs.go +++ b/schedulers/proactiveclusterwidecappingfcfs.go @@ -275,7 +275,7 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive Cluster wide capping is currently performed at regular intervals of time. */ - taken := false + offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] @@ -293,7 +293,7 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive fcfsMutex.Unlock() s.startCapping() } - taken = true + offerTaken = true tempCap, err := s.capper.FCFSDeterminedCap(s.totalPower, &task) if err == nil { @@ -331,7 +331,7 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive } // If no task fit the offer, then declining the offer. - if !taken { + if !offerTaken { log.Printf("There is not enough resources to launch a task on Host: %s\n", offer.GetHostname()) cpus, mem, watts := offerUtils.OfferAgg(offer) diff --git a/schedulers/proactiveclusterwidecappingranked.go b/schedulers/proactiveclusterwidecappingranked.go index c39f1c4..52118db 100644 --- a/schedulers/proactiveclusterwidecappingranked.go +++ b/schedulers/proactiveclusterwidecappingranked.go @@ -299,7 +299,7 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri Cluster wide capping is currently performed at regular intervals of time. */ - taken := false + offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] @@ -317,7 +317,7 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri rankedMutex.Unlock() s.startCapping() } - taken = true + offerTaken = true tempCap, err := s.capper.FCFSDeterminedCap(s.totalPower, &task) if err == nil { @@ -354,7 +354,7 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri } // If no tasks fit the offer, then declining the offer. 
- if !taken { + if !offerTaken { log.Printf("There is not enough resources to launch a task on Host: %s\n", offer.GetHostname()) cpus, mem, watts := offerUtils.OfferAgg(offer) diff --git a/schedulers/topHeavy.go b/schedulers/topHeavy.go index d8bf16b..ab4fdd6 100644 --- a/schedulers/topHeavy.go +++ b/schedulers/topHeavy.go @@ -228,7 +228,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { tasks := []*mesos.TaskInfo{} offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - taken := false + offerTaken := false for i := 0; i < len(s.largeTasks); i++ { task := s.largeTasks[i] powerClass := offerUtils.PowerClass(offer) @@ -240,7 +240,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { } if (s.ignoreWatts || (offerWatts >= wattsToConsider)) && (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { - taken = true + offerTaken = true tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task)) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter) @@ -254,7 +254,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { } } - if !taken { + if !offerTaken { // If there was no match for the task fmt.Println("There is not enough resources to launch a task:") cpus, mem, watts := offerUtils.OfferAgg(offer) From aabdd716ddb027920a87e61cd189623d52266986 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Thu, 9 Feb 2017 20:41:54 -0500 Subject: [PATCH 35/36] Added TODO to clean up constants.go and use Mesos attributes instead. --- constants/constants.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/constants/constants.go b/constants/constants.go index 045c1a2..01cfc8a 100644 --- a/constants/constants.go +++ b/constants/constants.go @@ -6,6 +6,8 @@ Constants that are used across scripts 5. window_size = number of tasks to consider for computation of the dynamic cap. Also, exposing functions to update or initialize some of the constants. + +TODO: Clean this up and use Mesos Attributes instead. */ package constants From 6c62b5326f8d505d32341ec08e5f12a9bfcd05d2 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Thu, 9 Feb 2017 22:48:34 -0500 Subject: [PATCH 36/36] Added a HostMismatch(...) in offerUtils that checks whether a task's host requirement matches the host corresponding to the offer. Made sure all schedulers call takeOffer(...) that is defined in each scheduler, to maintain consistency. 
--- schedulers/binPackSortedWattsSortedOffers.go | 28 ++++++-------- schedulers/binpackedpistoncapping.go | 10 +---- schedulers/binpacksortedwatts.go | 27 +++++--------- schedulers/bottomHeavy.go | 19 ++++++++-- schedulers/bpMaxMin.go | 36 ++++++------------ schedulers/bpMaxMinPistonCapping.go | 37 +++++++------------ schedulers/bpMaxMinProacCC.go | 36 +++++++----------- schedulers/bpswClassMapWatts.go | 28 +++++--------- schedulers/bpswClassMapWattsPistonCapping.go | 29 ++++++--------- schedulers/bpswClassMapWattsProacCC.go | 29 ++++++--------- schedulers/firstfit.go | 10 ++--- schedulers/firstfitSortedOffers.go | 10 ++--- .../firstfitSortedWattsClassMapWatts.go | 28 ++++++++------ ...firstfitSortedWattsClassMapWattsProacCC.go | 28 ++++++++------ schedulers/firstfitSortedWattsSortedOffers.go | 10 ++--- schedulers/firstfitsortedwatts.go | 10 ++--- schedulers/firstfitwattsonly.go | 10 ++--- schedulers/proactiveclusterwidecappingfcfs.go | 9 ++--- .../proactiveclusterwidecappingranked.go | 9 ++--- utilities/offerUtils/offerUtils.go | 11 ++++++ 20 files changed, 175 insertions(+), 239 deletions(-) diff --git a/schedulers/binPackSortedWattsSortedOffers.go b/schedulers/binPackSortedWattsSortedOffers.go index 1cf2191..7f73bb3 100644 --- a/schedulers/binPackSortedWattsSortedOffers.go +++ b/schedulers/binPackSortedWattsSortedOffers.go @@ -12,21 +12,22 @@ import ( "log" "os" "sort" - "strings" "time" ) // Decides if to take an offer or not -func (*BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool { +func (s *BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, + totalWatts float64, task def.Task) bool { - cpus, mem, watts := offerUtils.OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + // Does the task fit + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.Watts))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -152,8 +153,6 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr tasks := []*mesos.TaskInfo{} - offer_cpu, offer_ram, offer_watts := offerUtils.OfferAgg(offer) - offerTaken := false totalWatts := 0.0 totalCPU := 0.0 @@ -161,19 +160,14 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { // Does the task fit - if (s.ignoreWatts || offer_watts >= (totalWatts+task.Watts)) && - (offer_cpu >= (totalCPU + task.CPU)) && - (offer_ram >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, task) { offerTaken = true totalWatts += task.Watts diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go index 7f413f9..2fce0b9 100644 --- a/schedulers/binpackedpistoncapping.go +++ b/schedulers/binpackedpistoncapping.go @@ -15,7 +15,6 @@ import ( "log" "math" "os" - "strings" "sync" "time" ) @@ -258,13 +257,8 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver 
sched.SchedulerDriver, off partialLoad := 0.0 for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doens't match our task's host requirement. - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) {continue} for *task.Instances > 0 { // Does the task fit diff --git a/schedulers/binpacksortedwatts.go b/schedulers/binpacksortedwatts.go index cf8162f..87ee69b 100644 --- a/schedulers/binpacksortedwatts.go +++ b/schedulers/binpacksortedwatts.go @@ -12,21 +12,19 @@ import ( "log" "os" "sort" - "strings" "time" ) // Decides if to take an offer or not -func (*BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool { - - cpus, mem, watts := offerUtils.OfferAgg(offer) +func (s *BinPackSortedWatts) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.Watts))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -141,8 +139,6 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers tasks := []*mesos.TaskInfo{} - offer_cpu, offer_ram, offer_watts := offerUtils.OfferAgg(offer) - offerTaken := false totalWatts := 0.0 totalCPU := 0.0 @@ -150,19 +146,14 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { // Does the task fit - if (s.ignoreWatts || offer_watts >= (totalWatts+task.Watts)) && - (offer_cpu >= (totalCPU + task.CPU)) && - (offer_ram >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, task) { offerTaken = true totalWatts += task.Watts diff --git a/schedulers/bottomHeavy.go b/schedulers/bottomHeavy.go index b108827..2379725 100644 --- a/schedulers/bottomHeavy.go +++ b/schedulers/bottomHeavy.go @@ -26,6 +26,20 @@ BinPacking has the most effect when co-scheduling of tasks is increased. Large t co-scheduling them has a great impact on the total power utilization. 
*/ +func (s *BottomHeavy) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts, + wattsToConsider float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsToConsider))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { + return true + } + return false + +} + // electronScheduler implements the Scheduler interface type BottomHeavy struct { base // Type embedded to inherit common functions @@ -162,7 +176,6 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) } tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ -179,9 +192,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) if !s.ignoreWatts { wattsToConsider = task.ClassToWatts[powerClass] } - if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsToConsider))) && - (offerCPU >= (totalCPU + task.CPU)) && - (offerRAM >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, wattsToConsider, task) { offerTaken = true totalWatts += wattsToConsider totalCPU += task.CPU diff --git a/schedulers/bpMaxMin.go b/schedulers/bpMaxMin.go index d5e791a..6daa6a6 100644 --- a/schedulers/bpMaxMin.go +++ b/schedulers/bpMaxMin.go @@ -12,21 +12,19 @@ import ( "log" "os" "sort" - "strings" "time" ) // Decides if to take an offer or not -func (*BPMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool { - - cpus, mem, watts := offerUtils.OfferAgg(offer) +func (s *BPMaxMinWatts) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.Watts))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -135,12 +133,8 @@ func (s *BPMaxMinWatts) CheckFit(i int, totalRAM *float64, totalWatts *float64) (bool, *mesos.TaskInfo) { - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - // Does the task fit - if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) && - (offerCPU >= (*totalCPU + task.CPU)) && - (offerRAM >= (*totalRAM + task.RAM)) { + if s.takeOffer(offer, *totalCPU, *totalRAM, *totalWatts, task) { *totalWatts += task.Watts *totalCPU += task.CPU @@ -198,12 +192,9 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m for i := len(s.tasks) - 1; i >= 0; i-- { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // TODO: Fix this so index doesn't need to be passed @@ -219,12 +210,9 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m // Pack the rest of the offer with the smallest tasks for i, task := range s.tasks { - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if 
!strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { diff --git a/schedulers/bpMaxMinPistonCapping.go b/schedulers/bpMaxMinPistonCapping.go index b4d4e3c..9562751 100644 --- a/schedulers/bpMaxMinPistonCapping.go +++ b/schedulers/bpMaxMinPistonCapping.go @@ -16,22 +16,21 @@ import ( "math" "os" "sort" - "strings" "sync" "time" ) // Decides if to take an offer or not -func (s *BPMaxMinPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) bool { - - cpus, mem, watts := offerUtils.OfferAgg(offer) +func (s *BPMaxMinPistonCapping) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + // Does the task fit + if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) && + (offerCPU >= (*totalCPU + task.CPU)) && + (offerRAM >= (*totalRAM + task.RAM)) { return true } - return false } @@ -224,12 +223,8 @@ func (s *BPMaxMinPistonCapping) CheckFit(i int, totalWatts *float64, partialLoad *float64) (bool, *mesos.TaskInfo) { - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - // Does the task fit - if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) && - (offerCPU >= (*totalCPU + task.CPU)) && - (offerRAM >= (*totalRAM + task.RAM)) { + if s.takeOffer(offer, *totalCPU, *totalRAM, *totalWatts, task) { // Start piston capping if haven't started yet if !s.isCapping { @@ -297,12 +292,9 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off for i := len(s.tasks) - 1; i >= 0; i-- { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // TODO: Fix this so index doesn't need to be passed @@ -318,12 +310,9 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off // Pack the rest of the offer with the smallest tasks for i, task := range s.tasks { - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { diff --git a/schedulers/bpMaxMinProacCC.go b/schedulers/bpMaxMinProacCC.go index fe44f60..96c27ee 100644 --- a/schedulers/bpMaxMinProacCC.go +++ b/schedulers/bpMaxMinProacCC.go @@ -16,21 +16,21 @@ import ( "math" "os" "sort" - "strings" "sync" "time" ) // Decides if to take an offer or not -func (s *BPMaxMinProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := offerUtils.OfferAgg(offer) +func (s *BPMaxMinProacCC) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts 
>= task.Watts { + // Does the task fit + if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) && + (offerCPU >= (*totalCPU + task.CPU)) && + (offerRAM >= (*totalRAM + task.RAM)) { return true } - return false } @@ -248,12 +248,8 @@ func (s *BPMaxMinProacCC) CheckFit(i int, totalRAM *float64, totalWatts *float64) (bool, *mesos.TaskInfo) { - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - // Does the task fit - if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) && - (offerCPU >= (*totalCPU + task.CPU)) && - (offerRAM >= (*totalRAM + task.RAM)) { + if s.takeOffer(offer, *totalCPU, *totalRAM, *totalWatts, task) { // Capping the cluster if haven't yet started if !s.isCapping { @@ -347,12 +343,9 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers [] for i := len(s.tasks) - 1; i >= 0; i-- { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // TODO: Fix this so index doesn't need to be passed @@ -368,12 +361,9 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers [] // Pack the rest of the offer with the smallest tasks for i, task := range s.tasks { - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { diff --git a/schedulers/bpswClassMapWatts.go b/schedulers/bpswClassMapWatts.go index 1464df8..b6c3bc6 100644 --- a/schedulers/bpswClassMapWatts.go +++ b/schedulers/bpswClassMapWatts.go @@ -12,21 +12,20 @@ import ( "log" "os" "sort" - "strings" "time" ) // Decides if to take an offer or not -func (*BPSWClassMapWatts) takeOffer(offer *mesos.Offer, task def.Task) bool { - - cpus, mem, watts := offerUtils.OfferAgg(offer) +func (s *BPSWClassMapWatts) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, + totalWatts float64, powerClass string, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -141,8 +140,6 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - offerTaken := false totalWatts := 0.0 totalCPU := 0.0 @@ -150,12 +147,9 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { @@ 
-163,9 +157,7 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers // Does the task fit // OR lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. - if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && - (offerCPU >= (totalCPU + task.CPU)) && - (offerRAM >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, powerClass, task) { fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) offerTaken = true diff --git a/schedulers/bpswClassMapWattsPistonCapping.go b/schedulers/bpswClassMapWattsPistonCapping.go index a80c599..412ace6 100644 --- a/schedulers/bpswClassMapWattsPistonCapping.go +++ b/schedulers/bpswClassMapWattsPistonCapping.go @@ -16,21 +16,21 @@ import ( "math" "os" "sort" - "strings" "sync" "time" ) -// Decides if to take offer or not -func (s *BPSWClassMapWattsPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := offerUtils.OfferAgg(offer) +// Decides if to take an offer or not +func (s *BPSWClassMapWattsPistonCapping) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, + totalWatts float64, powerClass string, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -240,8 +240,6 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - offerTaken := false totalWatts := 0.0 totalCPU := 0.0 @@ -251,12 +249,9 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr partialLoad := 0.0 for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { @@ -264,9 +259,7 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr // Does the task fit // OR lazy evaluation. 
If ignoreWatts is set to true, second statement won't // be evaluated - if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && - (offerCPU >= (totalCPU + task.CPU)) && - (offerRAM >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, powerClass, task) { // Start piston capping if haven't started yet if !s.isCapping { diff --git a/schedulers/bpswClassMapWattsProacCC.go b/schedulers/bpswClassMapWattsProacCC.go index b250e67..3d9f14d 100644 --- a/schedulers/bpswClassMapWattsProacCC.go +++ b/schedulers/bpswClassMapWattsProacCC.go @@ -16,21 +16,21 @@ import ( "math" "os" "sort" - "strings" "sync" "time" ) // Decides if to take an offer or not -func (*BPSWClassMapWattsProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := offerUtils.OfferAgg(offer) +func (s *BPSWClassMapWattsProacCC) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, + totalWatts float64, powerClass string, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - // TODO: Insert watts calculation here instead of taking them as parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + //TODO: Insert watts calculation here instead of taking them as a parameter + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -278,20 +278,15 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - offerTaken := false totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer it it doesn't match our task's host requirement. - if strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { @@ -299,9 +294,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, // Does the task fit // OR Lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. - if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && - (offerCPU >= (totalCPU + task.CPU)) && - (offerRAM >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, powerClass, task) { // Capping the cluster if haven't yet started if !s.isCapping { diff --git a/schedulers/firstfit.go b/schedulers/firstfit.go index 4317a91..3f6f4fc 100644 --- a/schedulers/firstfit.go +++ b/schedulers/firstfit.go @@ -11,7 +11,6 @@ import ( sched "github.com/mesos/mesos-go/scheduler" "log" "os" - "strings" "time" ) @@ -146,12 +145,9 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos. 
for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // Decision to take the offer or not diff --git a/schedulers/firstfitSortedOffers.go b/schedulers/firstfitSortedOffers.go index 0611581..8db4147 100644 --- a/schedulers/firstfitSortedOffers.go +++ b/schedulers/firstfitSortedOffers.go @@ -12,7 +12,6 @@ import ( "log" "os" "sort" - "strings" "time" ) @@ -158,12 +157,9 @@ func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offe for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // Decision to take the offer or not diff --git a/schedulers/firstfitSortedWattsClassMapWatts.go b/schedulers/firstfitSortedWattsClassMapWatts.go index a7f5448..e2559ea 100644 --- a/schedulers/firstfitSortedWattsClassMapWatts.go +++ b/schedulers/firstfitSortedWattsClassMapWatts.go @@ -12,10 +12,22 @@ import ( "log" "os" "sort" - "strings" "time" ) +// Decides if to take an offer or not +func (s *FirstFitSortedWattsClassMapWatts) takeOffer(offer *mesos.Offer, powerClass string, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + // Decision to take the offer or not + if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && + (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + return true + } + return false +} + // electron scheduler implements the Scheduler interface type FirstFitSortedWattsClassMapWatts struct { base // Type embedded to inherit common features. @@ -126,26 +138,20 @@ func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.Scheduler default: } - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - // First fit strategy offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doens't match our task's host requirement. 
- if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // retrieving the powerClass from the offer powerClass := offerUtils.PowerClass(offer) // Decision to take the offer or not - if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && - (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + if s.takeOffer(offer, powerClass, task) { fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) diff --git a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go index a896468..35c3d3b 100644 --- a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go +++ b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go @@ -16,11 +16,23 @@ import ( "math" "os" "sort" - "strings" "sync" "time" ) +// Decides if to take an offer or not +func (s *FirstFitSortedWattsClassMapWattsProacCC) takeOffer(offer *mesos.Offer, powerClass string, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + // Decision to take the offer or not + if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && + (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + return true + } + return false +} + // electron scheduler implements the Scheduler interface type FirstFitSortedWattsClassMapWattsProacCC struct { base // Type embedded to inherit common features. @@ -264,26 +276,20 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc default: } - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - // First fit strategy offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doens't match our task's host requirement. 
- if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // retrieving the powerClass for the offer powerClass := offerUtils.PowerClass(offer) // Decision to take the offer or not - if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && - (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + if s.takeOffer(offer, powerClass, task) { // Capping the cluster if haven't yet started if !s.isCapping { diff --git a/schedulers/firstfitSortedWattsSortedOffers.go b/schedulers/firstfitSortedWattsSortedOffers.go index 9ceb095..8dd22d2 100644 --- a/schedulers/firstfitSortedWattsSortedOffers.go +++ b/schedulers/firstfitSortedWattsSortedOffers.go @@ -12,7 +12,6 @@ import ( "log" "os" "sort" - "strings" "time" ) @@ -161,12 +160,9 @@ func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerD for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // Decision to take the offer or not diff --git a/schedulers/firstfitsortedwatts.go b/schedulers/firstfitsortedwatts.go index b62d5b3..4553bfc 100644 --- a/schedulers/firstfitsortedwatts.go +++ b/schedulers/firstfitsortedwatts.go @@ -12,7 +12,6 @@ import ( "log" "os" "sort" - "strings" "time" ) @@ -149,12 +148,9 @@ func (s *FirstFitSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offer for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // Decision to take the offer or not diff --git a/schedulers/firstfitwattsonly.go b/schedulers/firstfitwattsonly.go index 12f6331..2413dcf 100644 --- a/schedulers/firstfitwattsonly.go +++ b/schedulers/firstfitwattsonly.go @@ -11,7 +11,6 @@ import ( sched "github.com/mesos/mesos-go/scheduler" "log" "os" - "strings" "time" ) @@ -140,12 +139,9 @@ func (s *FirstFitWattsOnly) ResourceOffers(driver sched.SchedulerDriver, offers for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // Decision to take the offer or not diff --git a/schedulers/proactiveclusterwidecappingfcfs.go b/schedulers/proactiveclusterwidecappingfcfs.go index b7d7c8c..2643335 100644 --- a/schedulers/proactiveclusterwidecappingfcfs.go +++ b/schedulers/proactiveclusterwidecappingfcfs.go @@ -15,16 +15,15 @@ import ( "log" "math" "os" - "strings" "sync" "time" ) // Decides if to take an offer or not -func (_ *ProactiveClusterwideCapFCFS) takeOffer(offer *mesos.Offer, task def.Task) bool { +func (s *ProactiveClusterwideCapFCFS) takeOffer(offer *mesos.Offer, task def.Task) bool { offer_cpu, 
offer_mem, offer_watts := offerUtils.OfferAgg(offer) - if offer_cpu >= task.CPU && offer_mem >= task.RAM && offer_watts >= task.Watts { + if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || (offer_watts >= task.Watts)) { return true } return false @@ -279,8 +278,8 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Don't take offer if it doesn't match our task's host requirement. - if !strings.HasPrefix(*offer.Hostname, task.Host) { + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { continue } diff --git a/schedulers/proactiveclusterwidecappingranked.go b/schedulers/proactiveclusterwidecappingranked.go index 52118db..786b1ff 100644 --- a/schedulers/proactiveclusterwidecappingranked.go +++ b/schedulers/proactiveclusterwidecappingranked.go @@ -26,16 +26,15 @@ import ( "math" "os" "sort" - "strings" "sync" "time" ) // Decides if to taken an offer or not -func (_ *ProactiveClusterwideCapRanked) takeOffer(offer *mesos.Offer, task def.Task) bool { +func (s *ProactiveClusterwideCapRanked) takeOffer(offer *mesos.Offer, task def.Task) bool { offer_cpu, offer_mem, offer_watts := offerUtils.OfferAgg(offer) - if offer_cpu >= task.CPU && offer_mem >= task.RAM && offer_watts >= task.Watts { + if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || (offer_watts >= task.Watts)) { return true } return false @@ -303,8 +302,8 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Don't take offer if it doesn't match our task's host requirement. - if !strings.HasPrefix(*offer.Hostname, task.Host) { + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { continue } diff --git a/utilities/offerUtils/offerUtils.go b/utilities/offerUtils/offerUtils.go index 16144dd..6f5dc81 100644 --- a/utilities/offerUtils/offerUtils.go +++ b/utilities/offerUtils/offerUtils.go @@ -2,6 +2,7 @@ package offerUtils import ( mesos "github.com/mesos/mesos-go/mesosproto" + "strings" ) func OfferAgg(offer *mesos.Offer) (float64, float64, float64) { @@ -32,6 +33,8 @@ func PowerClass(offer *mesos.Offer) string { return powerClass } +// Implements the sort.Sort interface to sort Offers based on CPU. +// TODO: Have a generic sorter that sorts based on a defined requirement (CPU, RAM, DISK or Watts) type OffersSorter []*mesos.Offer func (offersSorter OffersSorter) Len() int { @@ -49,3 +52,11 @@ func (offersSorter OffersSorter) Less(i, j int) bool { cpu2, _, _ := OfferAgg(offersSorter[j]) return cpu1 <= cpu2 } + +// Is there a mismatch between the task's host requirement and the host corresponding to the offer. +func HostMismatch(offerHost string, taskHost string) bool { + if taskHost != "" && !strings.HasPrefix(offerHost, taskHost) { + return true + } + return false +}
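
The final patch above centralizes the host-requirement check in offerUtils.HostMismatch and routes each scheduler's fit decision through its own takeOffer(...). The standalone sketch below (not part of the patch series) mirrors the HostMismatch semantics to show the intended behaviour; the local hostMismatch copy and the hostnames are illustrative assumptions, not code or hosts taken from the repository.

package main

import (
	"fmt"
	"strings"
)

// hostMismatch mirrors the semantics of the offerUtils.HostMismatch helper
// added in the last patch: an empty task host means "no host requirement",
// otherwise the offer's hostname must start with the task's host string.
func hostMismatch(offerHost string, taskHost string) bool {
	if taskHost != "" && !strings.HasPrefix(offerHost, taskHost) {
		return true
	}
	return false
}

func main() {
	// Hypothetical hostnames, used only for illustration.
	fmt.Println(hostMismatch("node-001.example.com", ""))         // false: task has no host requirement
	fmt.Println(hostMismatch("node-001.example.com", "node-001")) // false: offer host matches the required prefix
	fmt.Println(hostMismatch("node-001.example.com", "node-002")) // true: requirement not met, offer should be skipped
}

Funneling every scheduler's decision through takeOffer(...) keeps this host check and the watts/CPU/RAM comparison in one place per scheduler, so the ResourceOffers loops no longer repeat the resource arithmetic inline.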