diff --git a/constants/constants.go b/constants/constants.go
index 8447e6c..16225a5 100644
--- a/constants/constants.go
+++ b/constants/constants.go
@@ -1,7 +1,7 @@
 /*
 Constants that are used across scripts
 1. The available hosts = stratos-00x (x varies from 1 to 8)
-2. CapMargin = percentage of the requested power to allocate
+2. Tolerance = tolerance for a task that, when exceeded, would starve the task.
 3. ConsiderationWindowSize = number of tasks to consider for computation of the dynamic cap.
 TODO: Clean this up and use Mesos Attributes instead.
 */
@@ -32,10 +32,10 @@ var PowerClasses = map[string]map[string]bool{
 
 /*
 Margin with respect to the required power for a job.
-So, if power required = 10W, the node would be capped to CapMargin * 10W.
+So, if power required = 10W, the node would be capped to Tolerance * 10W.
 This value can be changed upon convenience.
 */
-var CapMargin = 0.70
+var Tolerance = 0.70
 
 // Window size for running average
 var ConsiderationWindowSize = 20
diff --git a/powerCapping/proactiveclusterwidecappers.go b/powerCapping/proactiveclusterwidecappers.go
index d1f8009..fb3a3e3 100644
--- a/powerCapping/proactiveclusterwidecappers.go
+++ b/powerCapping/proactiveclusterwidecappers.go
@@ -23,7 +23,7 @@ type taskWrapper struct {
 }
 
 func (tw taskWrapper) Val() float64 {
-	return tw.task.Watts * constants.CapMargin
+	return tw.task.Watts * constants.Tolerance
 }
 
 func (tw taskWrapper) ID() string {
@@ -121,7 +121,7 @@ func (capper ClusterwideCapper) CleverRecap(totalPower map[string]float64,
 				// Not considering this task for the computation of totalAllocatedPower and totalRunningTasks
 				continue
 			}
-			wattsUsages[host] = append(wattsUsages[host], float64(task.Watts)*constants.CapMargin)
+			wattsUsages[host] = append(wattsUsages[host], float64(task.Watts)*constants.Tolerance)
 		}
 	}
@@ -202,7 +202,7 @@ func (capper ClusterwideCapper) NaiveRecap(totalPower map[string]float64,
 				// Not considering this task for the computation of totalAllocatedPower and totalRunningTasks
 				continue
 			}
-			totalAllocatedPower += (float64(task.Watts) * constants.CapMargin)
+			totalAllocatedPower += (float64(task.Watts) * constants.Tolerance)
 			totalRunningTasks++
 		}
 	}
diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go
index 2b24b8a..68f8448 100644
--- a/schedulers/binpackedpistoncapping.go
+++ b/schedulers/binpackedpistoncapping.go
@@ -305,7 +305,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 			s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
 			*task.Instances--
 			// updating the cap value for offer.Hostname
-			partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
+			partialLoad += ((wattsConsideration * constants.Tolerance) / s.totalPower[*offer.Hostname]) * 100
 
 			if *task.Instances <= 0 {
 				// All instances of task have been scheduled. Remove it
@@ -395,7 +395,7 @@ func (s *BinPackedPistonCapper) StatusUpdate(driver sched.SchedulerDriver, statu
 		}
 		// Need to update the cap values for host of the finishedTask
 		bpPistonMutex.Lock()
-		bpPistonCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
+		bpPistonCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.Tolerance) / s.totalPower[hostOfFinishedTask]) * 100
 		// Checking to see if the cap value has become 0, in which case we uncap the host.
 		if int(math.Floor(bpPistonCapValues[hostOfFinishedTask]+0.5)) == 0 {
 			bpPistonCapValues[hostOfFinishedTask] = 100
diff --git a/schedulers/bpswMaxMinPistonCapping.go b/schedulers/bpswMaxMinPistonCapping.go
index 3bf48df..1789a8d 100644
--- a/schedulers/bpswMaxMinPistonCapping.go
+++ b/schedulers/bpswMaxMinPistonCapping.go
@@ -257,7 +257,7 @@ func (s *BPSWMaxMinPistonCapping) CheckFit(i int,
 			fmt.Println("Inst: ", *task.Instances)
 			s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
 			*task.Instances--
-			*partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
+			*partialLoad += ((wattsConsideration * constants.Tolerance) / s.totalPower[*offer.Hostname]) * 100
 
 			if *task.Instances <= 0 {
 				// All instances of task have been scheduled, remove it
@@ -431,7 +431,7 @@ func (s *BPSWMaxMinPistonCapping) StatusUpdate(driver sched.SchedulerDriver, sta
 		}
 		// Need to update the cap values for host of the finishedTask
 		bpMaxMinPistonCappingMutex.Lock()
-		bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
+		bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.Tolerance) / s.totalPower[hostOfFinishedTask]) * 100
 		// Checking to see if the cap value has become 0, in which case we uncap the host.
 		if int(math.Floor(bpMaxMinPistonCappingCapValues[hostOfFinishedTask]+0.5)) == 0 {
 			bpMaxMinPistonCappingCapValues[hostOfFinishedTask] = 100
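As a quick sanity check on the arithmetic these hunks touch, here is a minimal standalone Go sketch, not part of the patch, of how a task's share of a host's power budget is derived from the renamed Tolerance constant. The helper name capDelta and the 10 W / 200 W figures are illustrative only.

package main

import "fmt"

// Tolerance mirrors constants.Tolerance from this patch.
const Tolerance = 0.70

// capDelta is a hypothetical helper showing the piston-cap arithmetic used in
// the schedulers above: the percentage of a host's total power that a task is
// expected to draw once the Tolerance margin is applied. The schedulers add
// this delta to the host's cap value when a task is launched and subtract it
// when the task finishes (see the StatusUpdate hunks).
func capDelta(taskWatts, hostTotalPower float64) float64 {
	return ((taskWatts * Tolerance) / hostTotalPower) * 100
}

func main() {
	// Hypothetical numbers: a 10 W task on a host with 200 W of total power.
	fmt.Printf("cap delta = %.2f%%\n", capDelta(10.0, 200.0)) // prints 3.50%
}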