Changed ignoreWatts to wattsAsAResource. Since the new flag carries the opposite meaning, the condition checks that use it have been flipped accordingly.
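For illustration, a minimal standalone sketch of the relationship between the two flags (not taken from this diff; offerWatts and wattsConsideration are hypothetical stand-ins for the scheduler's actual offer and task values):

    package main

    import "fmt"

    func main() {
    	// Old flag: true meant "do not treat watts as a resource".
    	ignoreWatts := false
    	// New flag: the logical negation of the old one.
    	wattsAsAResource := !ignoreWatts

    	// Hypothetical offer/task values, just to exercise the guards.
    	offerWatts, wattsConsideration := 150.0, 100.0

    	// Old guard:  s.ignoreWatts      || offerWatts >= wattsConsideration
    	// New guard: !s.wattsAsAResource || offerWatts >= wattsConsideration
    	oldGuard := ignoreWatts || offerWatts >= wattsConsideration
    	newGuard := !wattsAsAResource || offerWatts >= wattsConsideration
    	fmt.Println(oldGuard == newGuard) // true: the rewrite preserves behaviour
    }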

Pradyumna Kaushik 2017-02-09 18:41:40 -05:00
parent fdcb401447
commit 57512ac2dd
16 changed files with 349 additions and 349 deletions


@@ -40,7 +40,7 @@ func (s *FirstFitSortedWattsProacCC) takeOffer(offer *mesos.Offer, task def.Task
// Error in determining wattsToConsider
log.Fatal(err)
}
- if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || offer_watts >= wattsConsideration) {
+ if offer_cpu >= task.CPU && offer_mem >= task.RAM && (!s.wattsAsAResource || offer_watts >= wattsConsideration) {
return true
}
return false
@@ -48,22 +48,22 @@ func (s *FirstFitSortedWattsProacCC) takeOffer(offer *mesos.Offer, task def.Task
// electronScheduler implements the Scheduler interface
type FirstFitSortedWattsProacCC struct {
- base           // Type embedded to inherit common functions
- tasksCreated   int
- tasksRunning   int
- tasks          []def.Task
- metrics        map[string]def.Metric
- running        map[string]map[string]bool
- taskMonitor    map[string][]def.Task // store tasks that are currently running.
- availablePower map[string]float64    // available power for each node in the cluster.
- totalPower     map[string]float64    // total power for each node in the cluster.
- ignoreWatts    bool
- classMapWatts  bool
- capper         *powCap.ClusterwideCapper
- ticker         *time.Ticker
- recapTicker    *time.Ticker
- isCapping      bool // indicate whether we are currently performing cluster wide capping.
- isRecapping    bool // indicate whether we are currently performing cluster wide re-capping.
+ base             // Type embedded to inherit common functions
+ tasksCreated     int
+ tasksRunning     int
+ tasks            []def.Task
+ metrics          map[string]def.Metric
+ running          map[string]map[string]bool
+ taskMonitor      map[string][]def.Task // store tasks that are currently running.
+ availablePower   map[string]float64    // available power for each node in the cluster.
+ totalPower       map[string]float64    // total power for each node in the cluster.
+ wattsAsAResource bool
+ classMapWatts    bool
+ capper           *powCap.ClusterwideCapper
+ ticker           *time.Ticker
+ recapTicker      *time.Ticker
+ isCapping        bool // indicate whether we are currently performing cluster wide capping.
+ isRecapping      bool // indicate whether we are currently performing cluster wide re-capping.
// First set of PCP values are garbage values, signal to logger to start recording when we're
// about to schedule the new task.
@@ -84,7 +84,7 @@ type FirstFitSortedWattsProacCC struct {
}
// New electron scheduler.
- func NewFirstFitSortedWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+ func NewFirstFitSortedWattsProacCC(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
classMapWatts bool) *FirstFitSortedWattsProacCC {
// Sorting tasks in ascending order of watts
@@ -96,23 +96,23 @@ func NewFirstFitSortedWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTrac
}
s := &FirstFitSortedWattsProacCC{
- tasks:          tasks,
- ignoreWatts:    ignoreWatts,
- classMapWatts:  classMapWatts,
- Shutdown:       make(chan struct{}),
- Done:           make(chan struct{}),
- PCPLog:         make(chan struct{}),
- running:        make(map[string]map[string]bool),
- taskMonitor:    make(map[string][]def.Task),
- availablePower: make(map[string]float64),
- totalPower:     make(map[string]float64),
- RecordPCP:      false,
- capper:         powCap.GetClusterwideCapperInstance(),
- ticker:         time.NewTicker(10 * time.Second),
- recapTicker:    time.NewTicker(20 * time.Second),
- isCapping:      false,
- isRecapping:    false,
- schedTrace:     log.New(logFile, "", log.LstdFlags),
+ tasks:            tasks,
+ wattsAsAResource: wattsAsAResource,
+ classMapWatts:    classMapWatts,
+ Shutdown:         make(chan struct{}),
+ Done:             make(chan struct{}),
+ PCPLog:           make(chan struct{}),
+ running:          make(map[string]map[string]bool),
+ taskMonitor:      make(map[string][]def.Task),
+ availablePower:   make(map[string]float64),
+ totalPower:       make(map[string]float64),
+ RecordPCP:        false,
+ capper:           powCap.GetClusterwideCapperInstance(),
+ ticker:           time.NewTicker(10 * time.Second),
+ recapTicker:      time.NewTicker(20 * time.Second),
+ isCapping:        false,
+ isRecapping:      false,
+ schedTrace:       log.New(logFile, "", log.LstdFlags),
}
return s
}
@@ -151,7 +151,7 @@ func (s *FirstFitSortedWattsProacCC) newTask(offer *mesos.Offer, task def.Task)
mesosutil.NewScalarResource("mem", task.RAM),
}
- if !s.ignoreWatts {
+ if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
} else {
@@ -202,11 +202,11 @@ func (s *FirstFitSortedWattsProacCC) startCapping() {
if rankedCurrentCapValue > 0.0 {
for _, host := range constants.Hosts {
// Rounding currentCapValue to the nearest int.
- if err := rapl.Cap(host, "rapl", int(math.Floor(rankedCurrentCapValue + 0.5))); err != nil {
+ if err := rapl.Cap(host, "rapl", int(math.Floor(rankedCurrentCapValue+0.5))); err != nil {
log.Println(err)
}
}
log.Printf("Capped the cluster to %d", int(math.Floor(rankedCurrentCapValue + 0.5)))
log.Printf("Capped the cluster to %d", int(math.Floor(rankedCurrentCapValue+0.5)))
}
rankedMutex.Unlock()
}
@@ -226,11 +226,11 @@ func (s *FirstFitSortedWattsProacCC) startRecapping() {
if s.isRecapping && rankedRecapValue > 0.0 {
for _, host := range constants.Hosts {
// Rounding currentCapValue to the nearest int.
- if err := rapl.Cap(host, "rapl", int(math.Floor(rankedRecapValue + 0.5))); err != nil {
+ if err := rapl.Cap(host, "rapl", int(math.Floor(rankedRecapValue+0.5))); err != nil {
log.Println(err)
}
}
log.Printf("Recapped the cluster to %d", int(math.Floor(rankedRecapValue + 0.5)))
log.Printf("Recapped the cluster to %d", int(math.Floor(rankedRecapValue+0.5)))
}
// setting recapping to false
s.isRecapping = false