renamed constants.CapMargin to constants.Tolerance for better semantics

Pradyumna Kaushik 2017-02-10 20:53:18 -05:00
parent ad925dfc8f
commit ec78480067
4 changed files with 10 additions and 10 deletions


@@ -1,7 +1,7 @@
/*
Constants that are used across scripts
1. The available hosts = stratos-00x (x varies from 1 to 8)
-2. CapMargin = percentage of the requested power to allocate
+2. Tolerance = tolerance for a task that when exceeded would starve the task.
3. ConsiderationWindowSize = number of tasks to consider for computation of the dynamic cap.
TODO: Clean this up and use Mesos Attributes instead.
*/
@@ -32,10 +32,10 @@ var PowerClasses = map[string]map[string]bool{
/*
Margin with respect to the required power for a job.
-So, if power required = 10W, the node would be capped to CapMargin * 10W.
+So, if power required = 10W, the node would be capped to Tolerance * 10W.
This value can be changed upon convenience.
*/
-var CapMargin = 0.70
+var Tolerance = 0.70
// Window size for running average
var ConsiderationWindowSize = 20
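
For context on what the renamed constant does, here is a minimal sketch of how a Tolerance-style multiplier is applied; capFor and the surrounding scaffolding are illustrative, not from this repository:

package main

import "fmt"

// Tolerance mirrors constants.Tolerance: the fraction of a task's
// requested power that its node is capped to.
const Tolerance = 0.70

// capFor returns the cap for a task that requested the given watts.
// With Tolerance = 0.70, a 10 W request caps the node at 7 W.
func capFor(requestedWatts float64) float64 {
	return requestedWatts * Tolerance
}

func main() {
	fmt.Printf("cap for a 10 W request: %.1f W\n", capFor(10))
}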


@@ -23,7 +23,7 @@ type taskWrapper struct {
}
func (tw taskWrapper) Val() float64 {
-return tw.task.Watts * constants.CapMargin
+return tw.task.Watts * constants.Tolerance
}
func (tw taskWrapper) ID() string {
@@ -121,7 +121,7 @@ func (capper ClusterwideCapper) CleverRecap(totalPower map[string]float64,
// Not considering this task for the computation of totalAllocatedPower and totalRunningTasks
continue
}
-wattsUsages[host] = append(wattsUsages[host], float64(task.Watts)*constants.CapMargin)
+wattsUsages[host] = append(wattsUsages[host], float64(task.Watts)*constants.Tolerance)
}
}
@@ -202,7 +202,7 @@ func (capper ClusterwideCapper) NaiveRecap(totalPower map[string]float64,
// Not considering this task for the computation of totalAllocatedPower and totalRunningTasks
continue
}
-totalAllocatedPower += (float64(task.Watts) * constants.CapMargin)
+totalAllocatedPower += (float64(task.Watts) * constants.Tolerance)
totalRunningTasks++
}
}
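
Both recap hunks fold each running task's tolerated draw (Watts * Tolerance) into an aggregate. A hedged sketch of the NaiveRecap-style accumulation follows; the helper name and signature are assumptions, and the real method also works with per-host total power:

package main

import "fmt"

// naiveRecapAverage mirrors the accumulation in the hunk above: sum each
// running task's tolerated power (Watts * Tolerance), then average over
// the number of running tasks.
func naiveRecapAverage(taskWatts []float64, tolerance float64) float64 {
	totalAllocatedPower := 0.0
	totalRunningTasks := 0
	for _, w := range taskWatts {
		totalAllocatedPower += w * tolerance
		totalRunningTasks++
	}
	if totalRunningTasks == 0 {
		return 0
	}
	return totalAllocatedPower / float64(totalRunningTasks)
}

func main() {
	// Three running tasks at 10 W each, Tolerance 0.70 -> average of about 7 W.
	fmt.Println(naiveRecapAverage([]float64{10, 10, 10}, 0.70))
}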


@@ -305,7 +305,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
*task.Instances--
// updating the cap value for offer.Hostname
-partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
+partialLoad += ((wattsConsideration * constants.Tolerance) / s.totalPower[*offer.Hostname]) * 100
if *task.Instances <= 0 {
// All instances of task have been scheduled. Remove it
@@ -395,7 +395,7 @@ func (s *BinPackedPistonCapper) StatusUpdate(driver sched.SchedulerDriver, statu
}
// Need to update the cap values for host of the finishedTask
bpPistonMutex.Lock()
-bpPistonCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
+bpPistonCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.Tolerance) / s.totalPower[hostOfFinishedTask]) * 100
// Checking to see if the cap value has become 0, in which case we uncap the host.
if int(math.Floor(bpPistonCapValues[hostOfFinishedTask]+0.5)) == 0 {
bpPistonCapValues[hostOfFinishedTask] = 100
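
The piston cappers account for load as a percentage of each host's total power. A minimal sketch of that bookkeeping, assuming only the formula visible in the hunks (the type and method names here are illustrative stand-ins for the bpPistonCapValues map):

package main

import (
	"fmt"
	"math"
)

// hostLoad stands in for the per-host cap bookkeeping.
type hostLoad struct {
	totalPower float64 // host power budget in watts
	capPercent float64 // current cap as a percentage of totalPower
}

// schedule mirrors the ResourceOffers hunk: a scheduled task adds
// (watts * Tolerance / totalPower) * 100 to the host's cap value.
func (h *hostLoad) schedule(watts, tolerance float64) {
	h.capPercent += (watts * tolerance / h.totalPower) * 100
}

// finish mirrors the StatusUpdate hunk: the finished task's share is
// subtracted, and a value that rounds to zero uncaps the host (100%).
func (h *hostLoad) finish(watts, tolerance float64) {
	h.capPercent -= (watts * tolerance / h.totalPower) * 100
	if int(math.Floor(h.capPercent+0.5)) == 0 {
		h.capPercent = 100
	}
}

func main() {
	h := &hostLoad{totalPower: 400}
	h.schedule(100, 0.70)
	fmt.Printf("cap after scheduling: %.1f%%\n", h.capPercent) // 17.5%
	h.finish(100, 0.70)
	fmt.Printf("cap after finish: %.0f%%\n", h.capPercent) // 100% (uncapped)
}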


@@ -257,7 +257,7 @@ func (s *BPSWMaxMinPistonCapping) CheckFit(i int,
fmt.Println("Inst: ", *task.Instances)
s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
*task.Instances--
-*partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
+*partialLoad += ((wattsConsideration * constants.Tolerance) / s.totalPower[*offer.Hostname]) * 100
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
@@ -431,7 +431,7 @@ func (s *BPSWMaxMinPistonCapping) StatusUpdate(driver sched.SchedulerDriver, sta
}
// Need to update the cap values for host of the finishedTask
bpMaxMinPistonCappingMutex.Lock()
-bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
+bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.Tolerance) / s.totalPower[hostOfFinishedTask]) * 100
// Checking to see if the cap value has become 0, in which case we uncap the host.
if int(math.Floor(bpMaxMinPistonCappingCapValues[hostOfFinishedTask]+0.5)) == 0 {
bpMaxMinPistonCappingCapValues[hostOfFinishedTask] = 100
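
BPSWMaxMinPistonCapping applies the same share formula as the bin-packed piston capper above. Worked through with the example numbers from the sketch: a task requesting 100 W with Tolerance = 0.70 on a host with a 400 W budget contributes (100 * 0.70 / 400) * 100 = 17.5 percentage points to that host's cap value while it runs, and the StatusUpdate path subtracts the same 17.5 points when the task finishes.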