From aabdd716ddb027920a87e61cd189623d52266986 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Thu, 9 Feb 2017 20:41:54 -0500 Subject: [PATCH 01/11] Added TODO to clean up constants.go and use Mesos attributes instead. --- constants/constants.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/constants/constants.go b/constants/constants.go index 045c1a2..01cfc8a 100644 --- a/constants/constants.go +++ b/constants/constants.go @@ -6,6 +6,8 @@ Constants that are used across scripts 5. window_size = number of tasks to consider for computation of the dynamic cap. Also, exposing functions to update or initialize some of the constants. + +TODO: Clean this up and use Mesos Attributes instead. */ package constants From 6c62b5326f8d505d32341ec08e5f12a9bfcd05d2 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Thu, 9 Feb 2017 22:48:34 -0500 Subject: [PATCH 02/11] Added a HostMismatch(...) in offerUtils that checks whether a task's host requirement matches the host corresponding to the offer. Made sure all schedulers call takeOffer(...) that is defined in each scheduler, to maintain consistency. --- schedulers/binPackSortedWattsSortedOffers.go | 28 ++++++-------- schedulers/binpackedpistoncapping.go | 10 +---- schedulers/binpacksortedwatts.go | 27 +++++--------- schedulers/bottomHeavy.go | 19 ++++++++-- schedulers/bpMaxMin.go | 36 ++++++------------ schedulers/bpMaxMinPistonCapping.go | 37 +++++++------------ schedulers/bpMaxMinProacCC.go | 36 +++++++----------- schedulers/bpswClassMapWatts.go | 28 +++++--------- schedulers/bpswClassMapWattsPistonCapping.go | 29 ++++++--------- schedulers/bpswClassMapWattsProacCC.go | 29 ++++++--------- schedulers/firstfit.go | 10 ++--- schedulers/firstfitSortedOffers.go | 10 ++--- .../firstfitSortedWattsClassMapWatts.go | 28 ++++++++------ ...firstfitSortedWattsClassMapWattsProacCC.go | 28 ++++++++------ schedulers/firstfitSortedWattsSortedOffers.go | 10 ++--- schedulers/firstfitsortedwatts.go | 10 ++--- schedulers/firstfitwattsonly.go | 10 ++--- schedulers/proactiveclusterwidecappingfcfs.go | 9 ++--- .../proactiveclusterwidecappingranked.go | 9 ++--- utilities/offerUtils/offerUtils.go | 11 ++++++ 20 files changed, 175 insertions(+), 239 deletions(-) diff --git a/schedulers/binPackSortedWattsSortedOffers.go b/schedulers/binPackSortedWattsSortedOffers.go index 1cf2191..7f73bb3 100644 --- a/schedulers/binPackSortedWattsSortedOffers.go +++ b/schedulers/binPackSortedWattsSortedOffers.go @@ -12,21 +12,22 @@ import ( "log" "os" "sort" - "strings" "time" ) // Decides if to take an offer or not -func (*BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool { +func (s *BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, + totalWatts float64, task def.Task) bool { - cpus, mem, watts := offerUtils.OfferAgg(offer) + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + // Does the task fit + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.Watts))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -152,8 +153,6 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr tasks := []*mesos.TaskInfo{} - offer_cpu, offer_ram, offer_watts := offerUtils.OfferAgg(offer) - offerTaken := false totalWatts := 0.0 totalCPU := 0.0 @@ -161,19 +160,14 @@ func (s 
*BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { // Does the task fit - if (s.ignoreWatts || offer_watts >= (totalWatts+task.Watts)) && - (offer_cpu >= (totalCPU + task.CPU)) && - (offer_ram >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, task) { offerTaken = true totalWatts += task.Watts diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go index 7f413f9..2fce0b9 100644 --- a/schedulers/binpackedpistoncapping.go +++ b/schedulers/binpackedpistoncapping.go @@ -15,7 +15,6 @@ import ( "log" "math" "os" - "strings" "sync" "time" ) @@ -258,13 +257,8 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off partialLoad := 0.0 for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doens't match our task's host requirement. - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) {continue} for *task.Instances > 0 { // Does the task fit diff --git a/schedulers/binpacksortedwatts.go b/schedulers/binpacksortedwatts.go index cf8162f..87ee69b 100644 --- a/schedulers/binpacksortedwatts.go +++ b/schedulers/binpacksortedwatts.go @@ -12,21 +12,19 @@ import ( "log" "os" "sort" - "strings" "time" ) // Decides if to take an offer or not -func (*BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool { - - cpus, mem, watts := offerUtils.OfferAgg(offer) +func (s *BinPackSortedWatts) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.Watts))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -141,8 +139,6 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers tasks := []*mesos.TaskInfo{} - offer_cpu, offer_ram, offer_watts := offerUtils.OfferAgg(offer) - offerTaken := false totalWatts := 0.0 totalCPU := 0.0 @@ -150,19 +146,14 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { // Does the task fit - if (s.ignoreWatts || offer_watts >= (totalWatts+task.Watts)) && - (offer_cpu >= (totalCPU + task.CPU)) && - (offer_ram >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, task) { offerTaken = 
true totalWatts += task.Watts diff --git a/schedulers/bottomHeavy.go b/schedulers/bottomHeavy.go index b108827..2379725 100644 --- a/schedulers/bottomHeavy.go +++ b/schedulers/bottomHeavy.go @@ -26,6 +26,20 @@ BinPacking has the most effect when co-scheduling of tasks is increased. Large t co-scheduling them has a great impact on the total power utilization. */ +func (s *BottomHeavy) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts, + wattsToConsider float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsToConsider))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { + return true + } + return false + +} + // electronScheduler implements the Scheduler interface type BottomHeavy struct { base // Type embedded to inherit common functions @@ -162,7 +176,6 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) } tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ -179,9 +192,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) if !s.ignoreWatts { wattsToConsider = task.ClassToWatts[powerClass] } - if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsToConsider))) && - (offerCPU >= (totalCPU + task.CPU)) && - (offerRAM >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, wattsToConsider, task) { offerTaken = true totalWatts += wattsToConsider totalCPU += task.CPU diff --git a/schedulers/bpMaxMin.go b/schedulers/bpMaxMin.go index d5e791a..6daa6a6 100644 --- a/schedulers/bpMaxMin.go +++ b/schedulers/bpMaxMin.go @@ -12,21 +12,19 @@ import ( "log" "os" "sort" - "strings" "time" ) // Decides if to take an offer or not -func (*BPMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool { - - cpus, mem, watts := offerUtils.OfferAgg(offer) +func (s *BPMaxMinWatts) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.Watts))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -135,12 +133,8 @@ func (s *BPMaxMinWatts) CheckFit(i int, totalRAM *float64, totalWatts *float64) (bool, *mesos.TaskInfo) { - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - // Does the task fit - if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) && - (offerCPU >= (*totalCPU + task.CPU)) && - (offerRAM >= (*totalRAM + task.RAM)) { + if s.takeOffer(offer, *totalCPU, *totalRAM, *totalWatts, task) { *totalWatts += task.Watts *totalCPU += task.CPU @@ -198,12 +192,9 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m for i := len(s.tasks) - 1; i >= 0; i-- { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // 
TODO: Fix this so index doesn't need to be passed
@@ -219,12 +210,9 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m
 	// Pack the rest of the offer with the smallest tasks
 	for i, task := range s.tasks {
-		// Check host if it exists
-		if task.Host != "" {
-			// Don't take offer if it doesn't match our task's host requirement
-			if !strings.HasPrefix(*offer.Hostname, task.Host) {
-				continue
-			}
+		// Don't take offer if it doesn't match our task's host requirement
+		if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
+			continue
 		}
 
 		for *task.Instances > 0 {
diff --git a/schedulers/bpMaxMinPistonCapping.go b/schedulers/bpMaxMinPistonCapping.go
index b4d4e3c..9562751 100644
--- a/schedulers/bpMaxMinPistonCapping.go
+++ b/schedulers/bpMaxMinPistonCapping.go
@@ -16,22 +16,21 @@ import (
 	"math"
 	"os"
 	"sort"
-	"strings"
 	"sync"
 	"time"
 )
 
 // Decides if to take an offer or not
-func (s *BPMaxMinPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) bool {
-
-	cpus, mem, watts := offerUtils.OfferAgg(offer)
+func (s *BPMaxMinPistonCapping) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts float64, task def.Task) bool {
+	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
-
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
+	// Does the task fit
+	if (s.ignoreWatts || (offerWatts >= (totalWatts + task.Watts))) &&
+		(offerCPU >= (totalCPU + task.CPU)) &&
+		(offerRAM >= (totalRAM + task.RAM)) {
 		return true
 	}
-
 	return false
 }
 
@@ -224,12 +223,8 @@ func (s *BPMaxMinPistonCapping) CheckFit(i int,
 	totalWatts *float64,
 	partialLoad *float64) (bool, *mesos.TaskInfo) {
 
-	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
-
 	// Does the task fit
-	if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) &&
-		(offerCPU >= (*totalCPU + task.CPU)) &&
-		(offerRAM >= (*totalRAM + task.RAM)) {
+	if s.takeOffer(offer, *totalCPU, *totalRAM, *totalWatts, task) {
 
 		// Start piston capping if haven't started yet
 		if !s.isCapping {
@@ -297,12 +292,9 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off
 		for i := len(s.tasks) - 1; i >= 0; i-- {
 			task := s.tasks[i]
-			// Check host if it exists
-			if task.Host != "" {
-				// Don't take offer if it doesn't match our task's host requirement
-				if !strings.HasPrefix(*offer.Hostname, task.Host) {
-					continue
-				}
+			// Don't take offer if it doesn't match our task's host requirement
+			if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
+				continue
 			}
 
 			// TODO: Fix this so index doesn't need to be passed
@@ -318,12 +310,9 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off
 		// Pack the rest of the offer with the smallest tasks
 		for i, task := range s.tasks {
-			// Check host if it exists
-			if task.Host != "" {
-				// Don't take offer if it doesn't match our task's host requirement
-				if !strings.HasPrefix(*offer.Hostname, task.Host) {
-					continue
-				}
+			// Don't take offer if it doesn't match our task's host requirement
+			if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
+				continue
 			}
 
 			for *task.Instances > 0 {
diff --git a/schedulers/bpMaxMinProacCC.go b/schedulers/bpMaxMinProacCC.go
index fe44f60..96c27ee 100644
--- a/schedulers/bpMaxMinProacCC.go
+++ b/schedulers/bpMaxMinProacCC.go
@@ -16,21 +16,21 @@ import (
 	"math"
 	"os"
 	"sort"
-	"strings"
 	"sync"
 	"time"
 )
 
 // Decides if to take an offer or not
-func (s *BPMaxMinProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
-	cpus, mem, watts := offerUtils.OfferAgg(offer)
+func (s *BPMaxMinProacCC) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts float64, task def.Task) bool {
+	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
-
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
+	// Does the task fit
+	if (s.ignoreWatts || (offerWatts >= (totalWatts + task.Watts))) &&
+		(offerCPU >= (totalCPU + task.CPU)) &&
+		(offerRAM >= (totalRAM + task.RAM)) {
 		return true
 	}
-
 	return false
 }
 
@@ -248,12 +248,8 @@ func (s *BPMaxMinProacCC) CheckFit(i int,
 	totalRAM *float64,
 	totalWatts *float64) (bool, *mesos.TaskInfo) {
 
-	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
-
 	// Does the task fit
-	if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) &&
-		(offerCPU >= (*totalCPU + task.CPU)) &&
-		(offerRAM >= (*totalRAM + task.RAM)) {
+	if s.takeOffer(offer, *totalCPU, *totalRAM, *totalWatts, task) {
 
 		// Capping the cluster if haven't yet started
 		if !s.isCapping {
@@ -347,12 +343,9 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []
 		for i := len(s.tasks) - 1; i >= 0; i-- {
 			task := s.tasks[i]
-			// Check host if it exists
-			if task.Host != "" {
-				// Don't take offer if it doesn't match our task's host requirement
-				if !strings.HasPrefix(*offer.Hostname, task.Host) {
-					continue
-				}
+			// Don't take offer if it doesn't match our task's host requirement
+			if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
+				continue
 			}
 
 			// TODO: Fix this so index doesn't need to be passed
@@ -368,12 +361,9 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []
 		// Pack the rest of the offer with the smallest tasks
 		for i, task := range s.tasks {
-			// Check host if it exists
-			if task.Host != "" {
-				// Don't take offer if it doesn't match our task's host requirement
-				if !strings.HasPrefix(*offer.Hostname, task.Host) {
-					continue
-				}
+			// Don't take offer if it doesn't match our task's host requirement
+			if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
+				continue
 			}
 
 			for *task.Instances > 0 {
diff --git a/schedulers/bpswClassMapWatts.go b/schedulers/bpswClassMapWatts.go
index 1464df8..b6c3bc6 100644
--- a/schedulers/bpswClassMapWatts.go
+++ b/schedulers/bpswClassMapWatts.go
@@ -12,21 +12,20 @@ import (
 	"log"
 	"os"
 	"sort"
-	"strings"
 	"time"
 )
 
 // Decides if to take an offer or not
-func (*BPSWClassMapWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
-
-	cpus, mem, watts := offerUtils.OfferAgg(offer)
+func (s *BPSWClassMapWatts) takeOffer(offer *mesos.Offer, totalCPU, totalRAM,
+	totalWatts float64, powerClass string, task def.Task) bool {
+	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
-
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
+	if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) &&
+		(offerCPU >= (totalCPU + task.CPU)) &&
+		(offerRAM >= (totalRAM + task.RAM)) {
 		return true
 	}
-
 	return false
 }
 
@@ -141,8 +140,6 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers
 
 		tasks := []*mesos.TaskInfo{}
 
-		offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
-
 		offerTaken := false
 		totalWatts := 0.0
 		totalCPU := 0.0
 		totalRAM := 0.0
 		for i := 0; i < len(s.tasks); i++ {
 			task := 
s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { @@ -163,9 +157,7 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers // Does the task fit // OR lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. - if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && - (offerCPU >= (totalCPU + task.CPU)) && - (offerRAM >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, powerClass, task) { fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) offerTaken = true diff --git a/schedulers/bpswClassMapWattsPistonCapping.go b/schedulers/bpswClassMapWattsPistonCapping.go index a80c599..412ace6 100644 --- a/schedulers/bpswClassMapWattsPistonCapping.go +++ b/schedulers/bpswClassMapWattsPistonCapping.go @@ -16,21 +16,21 @@ import ( "math" "os" "sort" - "strings" "sync" "time" ) -// Decides if to take offer or not -func (s *BPSWClassMapWattsPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := offerUtils.OfferAgg(offer) +// Decides if to take an offer or not +func (s *BPSWClassMapWattsPistonCapping) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, + totalWatts float64, powerClass string, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) //TODO: Insert watts calculation here instead of taking them as a parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -240,8 +240,6 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - offerTaken := false totalWatts := 0.0 totalCPU := 0.0 @@ -251,12 +249,9 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr partialLoad := 0.0 for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { @@ -264,9 +259,7 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr // Does the task fit // OR lazy evaluation. 
If ignoreWatts is set to true, second statement won't // be evaluated - if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && - (offerCPU >= (totalCPU + task.CPU)) && - (offerRAM >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, powerClass, task) { // Start piston capping if haven't started yet if !s.isCapping { diff --git a/schedulers/bpswClassMapWattsProacCC.go b/schedulers/bpswClassMapWattsProacCC.go index b250e67..3d9f14d 100644 --- a/schedulers/bpswClassMapWattsProacCC.go +++ b/schedulers/bpswClassMapWattsProacCC.go @@ -16,21 +16,21 @@ import ( "math" "os" "sort" - "strings" "sync" "time" ) // Decides if to take an offer or not -func (*BPSWClassMapWattsProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool { - cpus, mem, watts := offerUtils.OfferAgg(offer) +func (s *BPSWClassMapWattsProacCC) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, + totalWatts float64, powerClass string, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - // TODO: Insert watts calculation here instead of taking them as parameter - - if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts { + //TODO: Insert watts calculation here instead of taking them as a parameter + if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { return true } - return false } @@ -278,20 +278,15 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - offerTaken := false totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer it it doesn't match our task's host requirement. - if strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } for *task.Instances > 0 { @@ -299,9 +294,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, // Does the task fit // OR Lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. - if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) && - (offerCPU >= (totalCPU + task.CPU)) && - (offerRAM >= (totalRAM + task.RAM)) { + if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, powerClass, task) { // Capping the cluster if haven't yet started if !s.isCapping { diff --git a/schedulers/firstfit.go b/schedulers/firstfit.go index 4317a91..3f6f4fc 100644 --- a/schedulers/firstfit.go +++ b/schedulers/firstfit.go @@ -11,7 +11,6 @@ import ( sched "github.com/mesos/mesos-go/scheduler" "log" "os" - "strings" "time" ) @@ -146,12 +145,9 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos. 
for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // Decision to take the offer or not diff --git a/schedulers/firstfitSortedOffers.go b/schedulers/firstfitSortedOffers.go index 0611581..8db4147 100644 --- a/schedulers/firstfitSortedOffers.go +++ b/schedulers/firstfitSortedOffers.go @@ -12,7 +12,6 @@ import ( "log" "os" "sort" - "strings" "time" ) @@ -158,12 +157,9 @@ func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offe for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // Decision to take the offer or not diff --git a/schedulers/firstfitSortedWattsClassMapWatts.go b/schedulers/firstfitSortedWattsClassMapWatts.go index a7f5448..e2559ea 100644 --- a/schedulers/firstfitSortedWattsClassMapWatts.go +++ b/schedulers/firstfitSortedWattsClassMapWatts.go @@ -12,10 +12,22 @@ import ( "log" "os" "sort" - "strings" "time" ) +// Decides if to take an offer or not +func (s *FirstFitSortedWattsClassMapWatts) takeOffer(offer *mesos.Offer, powerClass string, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + // Decision to take the offer or not + if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && + (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + return true + } + return false +} + // electron scheduler implements the Scheduler interface type FirstFitSortedWattsClassMapWatts struct { base // Type embedded to inherit common features. @@ -126,26 +138,20 @@ func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.Scheduler default: } - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - // First fit strategy offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doens't match our task's host requirement. 
- if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // retrieving the powerClass from the offer powerClass := offerUtils.PowerClass(offer) // Decision to take the offer or not - if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && - (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + if s.takeOffer(offer, powerClass, task) { fmt.Println("Watts being used: ", task.ClassToWatts[powerClass]) log.Println("Co-Located with: ") coLocated(s.running[offer.GetSlaveId().GoString()]) diff --git a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go index a896468..35c3d3b 100644 --- a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go +++ b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go @@ -16,11 +16,23 @@ import ( "math" "os" "sort" - "strings" "sync" "time" ) +// Decides if to take an offer or not +func (s *FirstFitSortedWattsClassMapWattsProacCC) takeOffer(offer *mesos.Offer, powerClass string, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + // Decision to take the offer or not + if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && + (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + return true + } + return false +} + // electron scheduler implements the Scheduler interface type FirstFitSortedWattsClassMapWattsProacCC struct { base // Type embedded to inherit common features. @@ -264,26 +276,20 @@ func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.Sc default: } - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) - // First fit strategy offerTaken := false for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doens't match our task's host requirement. 
- if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // retrieving the powerClass for the offer powerClass := offerUtils.PowerClass(offer) // Decision to take the offer or not - if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) && - (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + if s.takeOffer(offer, powerClass, task) { // Capping the cluster if haven't yet started if !s.isCapping { diff --git a/schedulers/firstfitSortedWattsSortedOffers.go b/schedulers/firstfitSortedWattsSortedOffers.go index 9ceb095..8dd22d2 100644 --- a/schedulers/firstfitSortedWattsSortedOffers.go +++ b/schedulers/firstfitSortedWattsSortedOffers.go @@ -12,7 +12,6 @@ import ( "log" "os" "sort" - "strings" "time" ) @@ -161,12 +160,9 @@ func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerD for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // Decision to take the offer or not diff --git a/schedulers/firstfitsortedwatts.go b/schedulers/firstfitsortedwatts.go index b62d5b3..4553bfc 100644 --- a/schedulers/firstfitsortedwatts.go +++ b/schedulers/firstfitsortedwatts.go @@ -12,7 +12,6 @@ import ( "log" "os" "sort" - "strings" "time" ) @@ -149,12 +148,9 @@ func (s *FirstFitSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offer for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // Decision to take the offer or not diff --git a/schedulers/firstfitwattsonly.go b/schedulers/firstfitwattsonly.go index 12f6331..2413dcf 100644 --- a/schedulers/firstfitwattsonly.go +++ b/schedulers/firstfitwattsonly.go @@ -11,7 +11,6 @@ import ( sched "github.com/mesos/mesos-go/scheduler" "log" "os" - "strings" "time" ) @@ -140,12 +139,9 @@ func (s *FirstFitWattsOnly) ResourceOffers(driver sched.SchedulerDriver, offers for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Check host if it exists - if task.Host != "" { - // Don't take offer if it doesn't match our task's host requirement - if !strings.HasPrefix(*offer.Hostname, task.Host) { - continue - } + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue } // Decision to take the offer or not diff --git a/schedulers/proactiveclusterwidecappingfcfs.go b/schedulers/proactiveclusterwidecappingfcfs.go index b7d7c8c..2643335 100644 --- a/schedulers/proactiveclusterwidecappingfcfs.go +++ b/schedulers/proactiveclusterwidecappingfcfs.go @@ -15,16 +15,15 @@ import ( "log" "math" "os" - "strings" "sync" "time" ) // Decides if to take an offer or not -func (_ *ProactiveClusterwideCapFCFS) takeOffer(offer *mesos.Offer, task def.Task) bool { +func (s *ProactiveClusterwideCapFCFS) takeOffer(offer *mesos.Offer, task def.Task) bool { offer_cpu, 
offer_mem, offer_watts := offerUtils.OfferAgg(offer) - if offer_cpu >= task.CPU && offer_mem >= task.RAM && offer_watts >= task.Watts { + if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || (offer_watts >= task.Watts)) { return true } return false @@ -279,8 +278,8 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Don't take offer if it doesn't match our task's host requirement. - if !strings.HasPrefix(*offer.Hostname, task.Host) { + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { continue } diff --git a/schedulers/proactiveclusterwidecappingranked.go b/schedulers/proactiveclusterwidecappingranked.go index 52118db..786b1ff 100644 --- a/schedulers/proactiveclusterwidecappingranked.go +++ b/schedulers/proactiveclusterwidecappingranked.go @@ -26,16 +26,15 @@ import ( "math" "os" "sort" - "strings" "sync" "time" ) // Decides if to taken an offer or not -func (_ *ProactiveClusterwideCapRanked) takeOffer(offer *mesos.Offer, task def.Task) bool { +func (s *ProactiveClusterwideCapRanked) takeOffer(offer *mesos.Offer, task def.Task) bool { offer_cpu, offer_mem, offer_watts := offerUtils.OfferAgg(offer) - if offer_cpu >= task.CPU && offer_mem >= task.RAM && offer_watts >= task.Watts { + if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || (offer_watts >= task.Watts)) { return true } return false @@ -303,8 +302,8 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri for i := 0; i < len(s.tasks); i++ { task := s.tasks[i] - // Don't take offer if it doesn't match our task's host requirement. - if !strings.HasPrefix(*offer.Hostname, task.Host) { + // Don't take offer if it doesn't match our task's host requirement + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { continue } diff --git a/utilities/offerUtils/offerUtils.go b/utilities/offerUtils/offerUtils.go index 16144dd..6f5dc81 100644 --- a/utilities/offerUtils/offerUtils.go +++ b/utilities/offerUtils/offerUtils.go @@ -2,6 +2,7 @@ package offerUtils import ( mesos "github.com/mesos/mesos-go/mesosproto" + "strings" ) func OfferAgg(offer *mesos.Offer) (float64, float64, float64) { @@ -32,6 +33,8 @@ func PowerClass(offer *mesos.Offer) string { return powerClass } +// Implements the sort.Sort interface to sort Offers based on CPU. +// TODO: Have a generic sorter that sorts based on a defined requirement (CPU, RAM, DISK or Watts) type OffersSorter []*mesos.Offer func (offersSorter OffersSorter) Len() int { @@ -49,3 +52,11 @@ func (offersSorter OffersSorter) Less(i, j int) bool { cpu2, _, _ := OfferAgg(offersSorter[j]) return cpu1 <= cpu2 } + +// Is there a mismatch between the task's host requirement and the host corresponding to the offer. 
+func HostMismatch(offerHost string, taskHost string) bool { + if taskHost != "" && !strings.HasPrefix(offerHost, taskHost) { + return true + } + return false +} From 814d16b54d2bfb4441d9f20d847057caccad0625 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Fri, 10 Feb 2017 15:46:20 -0500 Subject: [PATCH 03/11] added hostmismatch function to be called by all schedulers --- utilities/offerUtils/offerUtils.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/utilities/offerUtils/offerUtils.go b/utilities/offerUtils/offerUtils.go index 16144dd..e1f6817 100644 --- a/utilities/offerUtils/offerUtils.go +++ b/utilities/offerUtils/offerUtils.go @@ -2,6 +2,7 @@ package offerUtils import ( mesos "github.com/mesos/mesos-go/mesosproto" + "strings" ) func OfferAgg(offer *mesos.Offer) (float64, float64, float64) { @@ -49,3 +50,11 @@ func (offersSorter OffersSorter) Less(i, j int) bool { cpu2, _, _ := OfferAgg(offersSorter[j]) return cpu1 <= cpu2 } + +// Is there a mismatch between the task's host requirement and the host corresponding to the offer. +func HostMismatch(offerHost string, taskHost string) bool { + if taskHost != "" && !strings.HasPrefix(offerHost, taskHost) { + return true + } + return false +} From f5ddc56f2752023add9a7ba47258ce4b144797c2 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Fri, 10 Feb 2017 16:39:13 -0500 Subject: [PATCH 04/11] changed the name of takeOffer(...) to takeOfferBinPack(...) and then created another function called takeOfferFirstFit(...). Made sure that these functions are called instead of inlining code. --- schedulers/bottomHeavy.go | 22 ++++++++++++++-------- schedulers/topHeavy.go | 33 ++++++++++++++++++++++++++------- 2 files changed, 40 insertions(+), 15 deletions(-) diff --git a/schedulers/bottomHeavy.go b/schedulers/bottomHeavy.go index 6ec542d..38d6b66 100644 --- a/schedulers/bottomHeavy.go +++ b/schedulers/bottomHeavy.go @@ -26,7 +26,7 @@ BinPacking has the most effect when co-scheduling of tasks is increased. Large t co-scheduling them has a great impact on the total power utilization. */ -func (s *BottomHeavy) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts, +func (s *BottomHeavy) takeOfferBinPack(offer *mesos.Offer, totalCPU, totalRAM, totalWatts, wattsToConsider float64, task def.Task) bool { offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) @@ -40,6 +40,17 @@ func (s *BottomHeavy) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWat } +func (s *BottomHeavy) takeOfferFirstFit(offer *mesos.Offer, wattsConsideration float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) && + (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + return true + } + return false +} + // electronScheduler implements the Scheduler interface type BottomHeavy struct { base // Type embedded to inherit common functions @@ -199,7 +210,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) // Does the task fit // OR lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. 
- if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, wattsConsideration, task) { + if s.takeOfferBinPack(offer, totalCPU, totalRAM, totalWatts, wattsConsideration, task) { offerTaken = true totalWatts += wattsConsideration totalCPU += task.CPU @@ -245,7 +256,6 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver } tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) taken := false for i := 0; i < len(s.smallTasks); i++ { task := s.smallTasks[i] @@ -253,14 +263,10 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver if err != nil { // Error in determining wattsConsideration log.Fatal(err) - } else { - // Logging the watts consideration - log.Printf("Watts Considered for host[%s], task[%s] = %f\n", *offer.Hostname, task.Name, wattsConsideration) } // Decision to take the offer or not - if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) && - (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + if s.takeOfferFirstFit(offer, wattsConsideration, task) { taken = true tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task)) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) diff --git a/schedulers/topHeavy.go b/schedulers/topHeavy.go index 39ffe03..e42b527 100644 --- a/schedulers/topHeavy.go +++ b/schedulers/topHeavy.go @@ -26,6 +26,30 @@ This was done to give a little more room for the large tasks (power intensive) f starvation of power intensive tasks. */ +func (s *TopHeavy) takeOfferBinPack(offer *mesos.Offer, totalCPU, totalRAM, totalWatts, + wattsToConsider float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + if (!s.wattsAsAResource || (offerWatts >= (totalWatts + wattsToConsider))) && + (offerCPU >= (totalCPU + task.CPU)) && + (offerRAM >= (totalRAM + task.RAM)) { + return true + } + return false +} + +func (s *TopHeavy) takeOfferFirstFit(offer *mesos.Offer, wattsConsideration float64, task def.Task) bool { + offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) + + //TODO: Insert watts calculation here instead of taking them as a parameter + if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) && + (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + return true + } + return false +} + // electronScheduler implements the Scheduler interface type TopHeavy struct { base // Type embedded to inherit common functions @@ -169,7 +193,6 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) { } tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) totalWatts := 0.0 totalCPU := 0.0 totalRAM := 0.0 @@ -186,9 +209,7 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) { // Does the task fit // OR lazy evaluation. If ignore watts is set to true, second statement won't // be evaluated. 
- if (!s.wattsAsAResource || (offerWatts >= (totalWatts + wattsConsideration))) && - (offerCPU >= (totalCPU + task.CPU)) && - (offerRAM >= (totalRAM + task.RAM)) { + if s.takeOfferBinPack(offer, totalCPU, totalRAM, totalWatts, wattsConsideration, task) { taken = true totalWatts += wattsConsideration totalCPU += task.CPU @@ -234,7 +255,6 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { } tasks := []*mesos.TaskInfo{} - offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) offerTaken := false for i := 0; i < len(s.largeTasks); i++ { task := s.largeTasks[i] @@ -245,8 +265,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) { } // Decision to take the offer or not - if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) && - (offerCPU >= task.CPU) && (offerRAM >= task.RAM) { + if s.takeOfferFirstFit(offer, wattsConsideration, task) { offerTaken = true tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task)) log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname()) From 05f92bcfd29ffbe7c3fc420e117f9700ab19babc Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Fri, 10 Feb 2017 18:02:06 -0500 Subject: [PATCH 05/11] resolved merge conflict --- constants/constants.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/constants/constants.go b/constants/constants.go index e1045d5..8447e6c 100644 --- a/constants/constants.go +++ b/constants/constants.go @@ -1,18 +1,9 @@ /* Constants that are used across scripts 1. The available hosts = stratos-00x (x varies from 1 to 8) -<<<<<<< HEAD 2. CapMargin = percentage of the requested power to allocate 3. ConsiderationWindowSize = number of tasks to consider for computation of the dynamic cap. -======= -2. cap_margin = percentage of the requested power to allocate -3. power_threshold = overloading factor -5. window_size = number of tasks to consider for computation of the dynamic cap. - -Also, exposing functions to update or initialize some of the constants. - TODO: Clean this up and use Mesos Attributes instead. ->>>>>>> a0a3e78041067e5e2f9dc9b5d1e7b6dd001ce1e9 */ package constants From ae55790c7655e4e1a8eda98f6668e66c4c3f1919 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Fri, 10 Feb 2017 18:11:52 -0500 Subject: [PATCH 06/11] removed TODO for adding the possibility of enabling and disabling classMapWatts from command-line --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 96226e8..1948b49 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,6 @@ To Do: * Log fix for declining offer -- different reason when insufficient resources as compared to when there are no longer any tasks to schedule. * Have a centralised logFile that can be filtered by identifier. All electron logs should go into this file. - * Make ClassMapWatts to commandLine arguments so Electron can be run with ClassMapWatts enabled/disabled. * Make def.Task an interface for further modularization and flexibility. **Requires [Performance Co-Pilot](http://pcp.io/) tool pmdumptext to be installed on the From ad925dfc8ffc057f3e2b72f92cd268db1d3ce76a Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Fri, 10 Feb 2017 20:28:06 -0500 Subject: [PATCH 07/11] Added TODO for making WattsToConsider(...) a receiver of def.Task and changing its name to Watts(...) 
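
As a rough, hypothetical sketch of the receiver form this TODO points toward (the current signature of def#WattsToConsider is not shown in this series, and a method literally named Watts() would collide with the existing Watts field on def.Task, so one of the two names would need to change):

	// Sketch only; assumes the Watts and ClassToWatts fields of def.Task
	// as used elsewhere in this series (needs the standard "errors" import).
	func (t Task) WattsToConsider(classMapWatts bool, powerClass string) (float64, error) {
		if classMapWatts {
			if watts, ok := t.ClassToWatts[powerClass]; ok {
				return watts, nil
			}
			return 0, errors.New("no watts mapping for powerClass " + powerClass)
		}
		// Fall back to the task's static watts estimate.
		return t.Watts, nil
	}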
--- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 1948b49..64d6fcb 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ To Do: longer any tasks to schedule. * Have a centralised logFile that can be filtered by identifier. All electron logs should go into this file. * Make def.Task an interface for further modularization and flexibility. + * Convert def#WattsToConsider(...) to be a receiver of def.Task and change the name of it to Watts(...). **Requires [Performance Co-Pilot](http://pcp.io/) tool pmdumptext to be installed on the machine on which electron is launched for logging to work and PCP collector agents installed From ec7848006773fa694bb037c3baa3794c12863d0d Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Fri, 10 Feb 2017 20:53:18 -0500 Subject: [PATCH 08/11] renamed constants.CapMargin to constants.Tolerance for better semantics --- constants/constants.go | 6 +++--- powerCapping/proactiveclusterwidecappers.go | 6 +++--- schedulers/binpackedpistoncapping.go | 4 ++-- schedulers/bpswMaxMinPistonCapping.go | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/constants/constants.go b/constants/constants.go index 8447e6c..16225a5 100644 --- a/constants/constants.go +++ b/constants/constants.go @@ -1,7 +1,7 @@ /* Constants that are used across scripts 1. The available hosts = stratos-00x (x varies from 1 to 8) -2. CapMargin = percentage of the requested power to allocate +2. Tolerance = tolerance for a task that when exceeded would starve the task. 3. ConsiderationWindowSize = number of tasks to consider for computation of the dynamic cap. TODO: Clean this up and use Mesos Attributes instead. */ @@ -32,10 +32,10 @@ var PowerClasses = map[string]map[string]bool{ /* Margin with respect to the required power for a job. - So, if power required = 10W, the node would be capped to CapMargin * 10W. + So, if power required = 10W, the node would be capped to Tolerance * 10W. This value can be changed upon convenience. 
*/ -var CapMargin = 0.70 +var Tolerance = 0.70 // Window size for running average var ConsiderationWindowSize = 20 diff --git a/powerCapping/proactiveclusterwidecappers.go b/powerCapping/proactiveclusterwidecappers.go index d1f8009..fb3a3e3 100644 --- a/powerCapping/proactiveclusterwidecappers.go +++ b/powerCapping/proactiveclusterwidecappers.go @@ -23,7 +23,7 @@ type taskWrapper struct { } func (tw taskWrapper) Val() float64 { - return tw.task.Watts * constants.CapMargin + return tw.task.Watts * constants.Tolerance } func (tw taskWrapper) ID() string { @@ -121,7 +121,7 @@ func (capper ClusterwideCapper) CleverRecap(totalPower map[string]float64, // Not considering this task for the computation of totalAllocatedPower and totalRunningTasks continue } - wattsUsages[host] = append(wattsUsages[host], float64(task.Watts)*constants.CapMargin) + wattsUsages[host] = append(wattsUsages[host], float64(task.Watts)*constants.Tolerance) } } @@ -202,7 +202,7 @@ func (capper ClusterwideCapper) NaiveRecap(totalPower map[string]float64, // Not considering this task for the computation of totalAllocatedPower and totalRunningTasks continue } - totalAllocatedPower += (float64(task.Watts) * constants.CapMargin) + totalAllocatedPower += (float64(task.Watts) * constants.Tolerance) totalRunningTasks++ } } diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go index 2b24b8a..68f8448 100644 --- a/schedulers/binpackedpistoncapping.go +++ b/schedulers/binpackedpistoncapping.go @@ -305,7 +305,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) *task.Instances-- // updating the cap value for offer.Hostname - partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100 + partialLoad += ((wattsConsideration * constants.Tolerance) / s.totalPower[*offer.Hostname]) * 100 if *task.Instances <= 0 { // All instances of task have been scheduled. Remove it @@ -395,7 +395,7 @@ func (s *BinPackedPistonCapper) StatusUpdate(driver sched.SchedulerDriver, statu } // Need to update the cap values for host of the finishedTask bpPistonMutex.Lock() - bpPistonCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100 + bpPistonCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.Tolerance) / s.totalPower[hostOfFinishedTask]) * 100 // Checking to see if the cap value has become 0, in which case we uncap the host. 
if int(math.Floor(bpPistonCapValues[hostOfFinishedTask]+0.5)) == 0 { bpPistonCapValues[hostOfFinishedTask] = 100 diff --git a/schedulers/bpswMaxMinPistonCapping.go b/schedulers/bpswMaxMinPistonCapping.go index 3bf48df..1789a8d 100644 --- a/schedulers/bpswMaxMinPistonCapping.go +++ b/schedulers/bpswMaxMinPistonCapping.go @@ -257,7 +257,7 @@ func (s *BPSWMaxMinPistonCapping) CheckFit(i int, fmt.Println("Inst: ", *task.Instances) s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue()) *task.Instances-- - *partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100 + *partialLoad += ((wattsConsideration * constants.Tolerance) / s.totalPower[*offer.Hostname]) * 100 if *task.Instances <= 0 { // All instances of task have been scheduled, remove it @@ -431,7 +431,7 @@ func (s *BPSWMaxMinPistonCapping) StatusUpdate(driver sched.SchedulerDriver, sta } // Need to update the cap values for host of the finishedTask bpMaxMinPistonCappingMutex.Lock() - bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100 + bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.Tolerance) / s.totalPower[hostOfFinishedTask]) * 100 // Checking to see if the cap value has become 0, in which case we uncap the host. if int(math.Floor(bpMaxMinPistonCappingCapValues[hostOfFinishedTask]+0.5)) == 0 { bpMaxMinPistonCappingCapValues[hostOfFinishedTask] = 100 From 13479e03a462fa9260e4287f0116f5311b226894 Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 11 Feb 2017 00:05:42 -0500 Subject: [PATCH 09/11] formatted files --- schedulers/binpackedpistoncapping.go | 4 +++- schedulers/bpswMaxMin.go | 34 ++++++++++++++-------------- schedulers/helpers.go | 3 +-- utilities/utils.go | 1 - 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go index 68f8448..e58f674 100644 --- a/schedulers/binpackedpistoncapping.go +++ b/schedulers/binpackedpistoncapping.go @@ -279,7 +279,9 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off } // Don't take offer if it doesn't match our task's host requirement - if offerUtils.HostMismatch(*offer.Hostname, task.Host) {continue} + if offerUtils.HostMismatch(*offer.Hostname, task.Host) { + continue + } for *task.Instances > 0 { // Does the task fit diff --git a/schedulers/bpswMaxMin.go b/schedulers/bpswMaxMin.go index bdb8f45..b98ab54 100644 --- a/schedulers/bpswMaxMin.go +++ b/schedulers/bpswMaxMin.go @@ -34,14 +34,14 @@ func (s *BPSWMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool { } type BPSWMaxMinWatts struct { - base //Type embedding to inherit common functions - tasksCreated int - tasksRunning int - tasks []def.Task - metrics map[string]def.Metric - running map[string]map[string]bool - wattsAsAResource bool - classMapWatts bool + base //Type embedding to inherit common functions + tasksCreated int + tasksRunning int + tasks []def.Task + metrics map[string]def.Metric + running map[string]map[string]bool + wattsAsAResource bool + classMapWatts bool // First set of PCP values are garbage values, signal to logger to start recording when we're // about to schedule a new task @@ -70,15 +70,15 @@ func NewBPMaxMinWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefix } s := &BPSWMaxMinWatts{ - tasks: tasks, - wattsAsAResource: wattsAsAResource, - classMapWatts: classMapWatts, - 
Shutdown: make(chan struct{}), - Done: make(chan struct{}), - PCPLog: make(chan struct{}), - running: make(map[string]map[string]bool), - RecordPCP: false, - schedTrace: log.New(logFile, "", log.LstdFlags), + tasks: tasks, + wattsAsAResource: wattsAsAResource, + classMapWatts: classMapWatts, + Shutdown: make(chan struct{}), + Done: make(chan struct{}), + PCPLog: make(chan struct{}), + running: make(map[string]map[string]bool), + RecordPCP: false, + schedTrace: log.New(logFile, "", log.LstdFlags), } return s } diff --git a/schedulers/helpers.go b/schedulers/helpers.go index 23d1441..e6ba7fb 100644 --- a/schedulers/helpers.go +++ b/schedulers/helpers.go @@ -1,9 +1,9 @@ package schedulers import ( + "bitbucket.org/sunybingcloud/electron/constants" "fmt" "log" - "bitbucket.org/sunybingcloud/electron/constants" ) func coLocated(tasks map[string]bool) { @@ -24,4 +24,3 @@ func hostToPowerClass(hostName string) string { } return "" } - diff --git a/utilities/utils.go b/utilities/utils.go index 6662c59..18b2400 100644 --- a/utilities/utils.go +++ b/utilities/utils.go @@ -45,4 +45,3 @@ func OrderedKeys(plist PairList) ([]string, error) { } return orderedKeys, nil } - From 6ac1b388837e11a96a9ef2928bd77c1986bbde9f Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 11 Feb 2017 01:14:02 -0500 Subject: [PATCH 10/11] Added comment to explain the classification of nodes in the cluster into power-classes. --- constants/constants.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/constants/constants.go b/constants/constants.go index 16225a5..bbacf42 100644 --- a/constants/constants.go +++ b/constants/constants.go @@ -12,7 +12,11 @@ var Hosts = []string{"stratos-001.cs.binghamton.edu", "stratos-002.cs.binghamton "stratos-005.cs.binghamton.edu", "stratos-006.cs.binghamton.edu", "stratos-007.cs.binghamton.edu", "stratos-008.cs.binghamton.edu"} -// Classification of the nodes in the cluster based on their power consumption. +/* + Classification of the nodes in the cluster based on their Thermal Design Power (TDP). + The power classes are labelled in the decreasing order of the corresponding TDP, with class A nodes + having the highest TDP and class C nodes having the lowest TDP. +*/ var PowerClasses = map[string]map[string]bool{ "A": map[string]bool{ "stratos-005.cs.binghamton.edu": true, From ceff625d322082f2cf6758a82eaff4f0792259fd Mon Sep 17 00:00:00 2001 From: Pradyumna Kaushik Date: Sat, 11 Feb 2017 01:23:07 -0500 Subject: [PATCH 11/11] fixed formatting of function arguments for CheckFit(...) --- schedulers/bpswMaxMin.go | 3 ++- schedulers/bpswMaxMinPistonCapping.go | 3 ++- schedulers/bpswMaxMinProacCC.go | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/schedulers/bpswMaxMin.go b/schedulers/bpswMaxMin.go index b98ab54..41356e2 100644 --- a/schedulers/bpswMaxMin.go +++ b/schedulers/bpswMaxMin.go @@ -139,7 +139,8 @@ func (s *BPSWMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.Task // Determine if the remaining space inside of the offer is enough for this // the task we need to create. If it is, create a TaskInfo and return it. 
-func (s *BPSWMaxMinWatts) CheckFit(i int, +func (s *BPSWMaxMinWatts) CheckFit( + i int, task def.Task, wattsConsideration float64, offer *mesos.Offer, diff --git a/schedulers/bpswMaxMinPistonCapping.go b/schedulers/bpswMaxMinPistonCapping.go index 1789a8d..925cbdc 100644 --- a/schedulers/bpswMaxMinPistonCapping.go +++ b/schedulers/bpswMaxMinPistonCapping.go @@ -228,7 +228,8 @@ func (s *BPSWMaxMinPistonCapping) stopCapping() { // Determine if the remaining sapce inside of the offer is enough for // the task we need to create. If it is, create a TaskInfo and return it. -func (s *BPSWMaxMinPistonCapping) CheckFit(i int, +func (s *BPSWMaxMinPistonCapping) CheckFit( + i int, task def.Task, wattsConsideration float64, offer *mesos.Offer, diff --git a/schedulers/bpswMaxMinProacCC.go b/schedulers/bpswMaxMinProacCC.go index dc6912a..8c7b880 100644 --- a/schedulers/bpswMaxMinProacCC.go +++ b/schedulers/bpswMaxMinProacCC.go @@ -252,7 +252,8 @@ func (s *BPSWMaxMinProacCC) stopRecapping() { // Determine if the remaining space inside of the offer is enough for // the task we need to create. If it is, create TaskInfo and return it. -func (s *BPSWMaxMinProacCC) CheckFit(i int, +func (s *BPSWMaxMinProacCC) CheckFit( + i int, task def.Task, wattsConsideration float64, offer *mesos.Offer,
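
Taken together, the series converges every scheduler's ResourceOffers path on the same two-step shape: filter each offer against the task's host requirement via offerUtils.HostMismatch, then delegate the resource-fit decision to the scheduler's own takeOffer. A condensed, hypothetical sketch of that shape (helper and field names taken from the diffs above; packOffer itself is illustrative, and bookkeeping such as logging, launch calls, and removal of exhausted tasks is omitted):

	// Illustrative only: the shared bin-packing loop shape after this series.
	func (s *BinPackSortedWatts) packOffer(offer *mesos.Offer) []*mesos.TaskInfo {
		tasks := []*mesos.TaskInfo{}
		totalCPU, totalRAM, totalWatts := 0.0, 0.0, 0.0
		for i := 0; i < len(s.tasks); i++ {
			task := s.tasks[i]
			// Skip this task if the offer's host doesn't satisfy its host requirement.
			if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
				continue
			}
			for *task.Instances > 0 {
				// Fit check against the totals already packed onto this offer.
				if !s.takeOffer(offer, totalCPU, totalRAM, totalWatts, task) {
					break
				}
				totalCPU += task.CPU
				totalRAM += task.RAM
				totalWatts += task.Watts
				tasks = append(tasks, s.newTask(offer, task))
				*task.Instances--
			}
		}
		return tasks
	}

For the piston-capping variants, the constants.Tolerance arithmetic introduced in PATCH 08 works out as follows: scheduling a task raises its host's cap share by ((wattsConsideration * constants.Tolerance) / s.totalPower[host]) * 100, so a task considered at 10 W on a host with 100 W of measured total power and Tolerance = 0.70 contributes 7 percentage points, and StatusUpdate subtracts the same 7 points when that task finishes.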