Changed ignoreWatts to wattsAsAResource. This resulted in flipping the condition checks that use these variables.

This commit is contained in:
Pradyumna Kaushik 2017-02-09 18:41:40 -05:00
parent fdcb401447
commit 57512ac2dd
16 changed files with 349 additions and 349 deletions

View file

@ -17,7 +17,7 @@ import (
var master = flag.String("master", "xavier:5050", "Location of leading Mesos master") var master = flag.String("master", "xavier:5050", "Location of leading Mesos master")
var tasksFile = flag.String("workload", "", "JSON file containing task definitions") var tasksFile = flag.String("workload", "", "JSON file containing task definitions")
var ignoreWatts = flag.Bool("ignoreWatts", false, "Ignore watts in offers") var wattsAsAResource = flag.Bool("wattsAsAResource", false, "Enable Watts as a Resource")
var pcplogPrefix = flag.String("logPrefix", "", "Prefix for pcplog") var pcplogPrefix = flag.String("logPrefix", "", "Prefix for pcplog")
var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for when we should start capping") var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for when we should start capping")
var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for when we should start uncapping") var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for when we should start uncapping")
@ -27,7 +27,7 @@ var classMapWatts = flag.Bool("classMapWatts", false, "Enable mapping of watts t
func init() { func init() {
flag.StringVar(master, "m", "xavier:5050", "Location of leading Mesos master (shorthand)") flag.StringVar(master, "m", "xavier:5050", "Location of leading Mesos master (shorthand)")
flag.StringVar(tasksFile, "w", "", "JSON file containing task definitions (shorthand)") flag.StringVar(tasksFile, "w", "", "JSON file containing task definitions (shorthand)")
flag.BoolVar(ignoreWatts, "i", false, "Ignore watts in offers (shorthand)") flag.BoolVar(wattsAsAResource, "waar", false, "Enable Watts as a Resource")
flag.StringVar(pcplogPrefix, "p", "", "Prefix for pcplog (shorthand)") flag.StringVar(pcplogPrefix, "p", "", "Prefix for pcplog (shorthand)")
flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for when we should start capping (shorthand)") flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for when we should start capping (shorthand)")
flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for when we should start uncapping (shorthand)") flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for when we should start uncapping (shorthand)")
@ -60,7 +60,7 @@ func main() {
startTime := time.Now().Format("20060102150405") startTime := time.Now().Format("20060102150405")
logPrefix := *pcplogPrefix + "_" + startTime logPrefix := *pcplogPrefix + "_" + startTime
scheduler := schedulers.NewBinPackedPistonCapper(tasks, *ignoreWatts, logPrefix, *classMapWatts) scheduler := schedulers.NewBinPackedPistonCapper(tasks, *wattsAsAResource, logPrefix, *classMapWatts)
driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{ driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{
Master: *master, Master: *master,
Framework: &mesos.FrameworkInfo{ Framework: &mesos.FrameworkInfo{

View file

@ -28,7 +28,7 @@ func (s *BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.
// Error in determining wattsConsideration // Error in determining wattsConsideration
log.Fatal(err) log.Fatal(err)
} }
if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) { if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || (watts >= wattsConsideration)) {
return true return true
} }
@ -42,7 +42,7 @@ type BinPackSortedWattsSortedOffers struct {
tasks []def.Task tasks []def.Task
metrics map[string]def.Metric metrics map[string]def.Metric
running map[string]map[string]bool running map[string]map[string]bool
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
// First set of PCP values are garbage values, signal to logger to start recording when we're // First set of PCP values are garbage values, signal to logger to start recording when we're
@ -63,7 +63,7 @@ type BinPackSortedWattsSortedOffers struct {
} }
// New electron scheduler // New electron scheduler
func NewBinPackSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, func NewBinPackSortedWattsSortedOffers(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
classMapWatts bool) *BinPackSortedWattsSortedOffers { classMapWatts bool) *BinPackSortedWattsSortedOffers {
sort.Sort(def.WattsSorter(tasks)) sort.Sort(def.WattsSorter(tasks))
@ -74,7 +74,7 @@ func NewBinPackSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, sched
s := &BinPackSortedWattsSortedOffers{ s := &BinPackSortedWattsSortedOffers{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -109,7 +109,7 @@ func (s *BinPackSortedWattsSortedOffers) newTask(offer *mesos.Offer, task def.Ta
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@ -190,7 +190,7 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr
for *task.Instances > 0 { for *task.Instances > 0 {
// Does the task fit // Does the task fit
if (s.ignoreWatts || (offer_watts >= (totalWatts + wattsConsideration))) && if (!s.wattsAsAResource || (offer_watts >= (totalWatts + wattsConsideration))) &&
(offer_cpu >= (totalCPU + task.CPU)) && (offer_cpu >= (totalCPU + task.CPU)) &&
(offer_ram >= (totalRAM + task.RAM)) { (offer_ram >= (totalRAM + task.RAM)) {

View file

@ -35,7 +35,7 @@ type BinPackedPistonCapper struct {
running map[string]map[string]bool running map[string]map[string]bool
taskMonitor map[string][]def.Task taskMonitor map[string][]def.Task
totalPower map[string]float64 totalPower map[string]float64
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
ticker *time.Ticker ticker *time.Ticker
isCapping bool isCapping bool
@ -59,7 +59,7 @@ type BinPackedPistonCapper struct {
} }
// New electron scheduler. // New electron scheduler.
func NewBinPackedPistonCapper(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, func NewBinPackedPistonCapper(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
classMapWatts bool) *BinPackedPistonCapper { classMapWatts bool) *BinPackedPistonCapper {
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@ -69,7 +69,7 @@ func NewBinPackedPistonCapper(tasks []def.Task, ignoreWatts bool, schedTracePref
s := &BinPackedPistonCapper{ s := &BinPackedPistonCapper{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -93,7 +93,7 @@ func (s *BinPackedPistonCapper) takeOffer(offer *mesos.Offer, offerWatts float64
// Error in determining wattsToConsider // Error in determining wattsToConsider
log.Fatal(err) log.Fatal(err)
} }
if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsConsideration))) && if (!s.wattsAsAResource || (offerWatts >= (totalWatts + wattsConsideration))) &&
(offerCPU >= (totalCPU + task.CPU)) && (offerCPU >= (totalCPU + task.CPU)) &&
(offerRAM >= (totalRAM + task.RAM)) { (offerRAM >= (totalRAM + task.RAM)) {
return true return true
@ -137,7 +137,7 @@ func (s *BinPackedPistonCapper) newTask(offer *mesos.Offer, task def.Task) *meso
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))

View file

@ -28,7 +28,7 @@ func (s *BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
// Error in determining wattsConsideration // Error in determining wattsConsideration
log.Fatal(err) log.Fatal(err)
} }
if cpus >= task.CPU && mem >= task.RAM && watts >= wattsConsideration { if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || (watts >= wattsConsideration)) {
return true return true
} }
@ -42,7 +42,7 @@ type BinPackSortedWatts struct {
tasks []def.Task tasks []def.Task
metrics map[string]def.Metric metrics map[string]def.Metric
running map[string]map[string]bool running map[string]map[string]bool
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
// First set of PCP values are garbage values, signal to logger to start recording when we're // First set of PCP values are garbage values, signal to logger to start recording when we're
@ -63,7 +63,7 @@ type BinPackSortedWatts struct {
} }
// New electron scheduler // New electron scheduler
func NewBinPackSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BinPackSortedWatts { func NewBinPackSortedWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BinPackSortedWatts {
sort.Sort(def.WattsSorter(tasks)) sort.Sort(def.WattsSorter(tasks))
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@ -73,7 +73,7 @@ func NewBinPackSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix
s := &BinPackSortedWatts{ s := &BinPackSortedWatts{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -108,7 +108,7 @@ func (s *BinPackSortedWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.T
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@ -178,7 +178,7 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers
for *task.Instances > 0 { for *task.Instances > 0 {
// Does the task fit // Does the task fit
if (s.ignoreWatts || (offer_watts >= (totalWatts + wattsConsideration))) && if (!s.wattsAsAResource || (offer_watts >= (totalWatts + wattsConsideration))) &&
(offer_cpu >= (totalCPU + task.CPU)) && (offer_cpu >= (totalCPU + task.CPU)) &&
(offer_ram >= (totalRAM + task.RAM)) { (offer_ram >= (totalRAM + task.RAM)) {

View file

@ -34,7 +34,7 @@ type BottomHeavy struct {
tasks []def.Task tasks []def.Task
metrics map[string]def.Metric metrics map[string]def.Metric
running map[string]map[string]bool running map[string]map[string]bool
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
smallTasks, largeTasks []def.Task smallTasks, largeTasks []def.Task
@ -56,7 +56,7 @@ type BottomHeavy struct {
} }
// New electron scheduler // New electron scheduler
func NewBottomHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BottomHeavy { func NewBottomHeavy(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BottomHeavy {
sort.Sort(def.WattsSorter(tasks)) sort.Sort(def.WattsSorter(tasks))
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@ -70,7 +70,7 @@ func NewBottomHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
s := &BottomHeavy{ s := &BottomHeavy{
smallTasks: tasks[:mid], smallTasks: tasks[:mid],
largeTasks: tasks[mid+1:], largeTasks: tasks[mid+1:],
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -105,7 +105,7 @@ func (s *BottomHeavy) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@ -186,7 +186,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver)
// Does the task fit // Does the task fit
// OR lazy evaluation. If ignore watts is set to true, second statement won't // OR lazy evaluation. If ignore watts is set to true, second statement won't
// be evaluated. // be evaluated.
if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsConsideration))) && if (!s.wattsAsAResource || (offerWatts >= (totalWatts + wattsConsideration))) &&
(offerCPU >= (totalCPU + task.CPU)) && (offerCPU >= (totalCPU + task.CPU)) &&
(offerRAM >= (totalRAM + task.RAM)) { (offerRAM >= (totalRAM + task.RAM)) {
offerTaken = true offerTaken = true
@ -248,7 +248,7 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver
} }
// Decision to take the offer or not // Decision to take the offer or not
if (s.ignoreWatts || (offerWatts >= wattsConsideration)) && if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) &&
(offerCPU >= task.CPU) && (offerRAM >= task.RAM) { (offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
taken = true taken = true
tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task)) tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task))

View file

@ -28,7 +28,7 @@ func (s *BPSWMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
// Error in determining wattsConsideration // Error in determining wattsConsideration
log.Fatal(err) log.Fatal(err)
} }
if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) { if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || (watts >= wattsConsideration)) {
return true return true
} }
@ -42,7 +42,7 @@ type BPSWMaxMinWatts struct {
tasks []def.Task tasks []def.Task
metrics map[string]def.Metric metrics map[string]def.Metric
running map[string]map[string]bool running map[string]map[string]bool
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
// First set of PCP values are garbage values, signal to logger to start recording when we're // First set of PCP values are garbage values, signal to logger to start recording when we're
@ -63,7 +63,7 @@ type BPSWMaxMinWatts struct {
} }
// New electron scheduler // New electron scheduler
func NewBPMaxMinWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinWatts { func NewBPMaxMinWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinWatts {
sort.Sort(def.WattsSorter(tasks)) sort.Sort(def.WattsSorter(tasks))
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@ -73,7 +73,7 @@ func NewBPMaxMinWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix strin
s := &BPSWMaxMinWatts{ s := &BPSWMaxMinWatts{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -109,7 +109,7 @@ func (s *BPSWMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.Task
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@ -152,7 +152,7 @@ func (s *BPSWMaxMinWatts) CheckFit(i int,
offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
// Does the task fit // Does the task fit
if (s.ignoreWatts || (offerWatts >= (*totalWatts + wattsConsideration))) && if (!s.wattsAsAResource || (offerWatts >= (*totalWatts + wattsConsideration))) &&
(offerCPU >= (*totalCPU + task.CPU)) && (offerCPU >= (*totalCPU + task.CPU)) &&
(offerRAM >= (*totalRAM + task.RAM)) { (offerRAM >= (*totalRAM + task.RAM)) {

View file

@ -33,7 +33,7 @@ func (s *BPSWMaxMinPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) b
// Error in determining wattsConsideration // Error in determining wattsConsideration
log.Fatal(err) log.Fatal(err)
} }
if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) { if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || (watts >= wattsConsideration)) {
return true return true
} }
@ -49,7 +49,7 @@ type BPSWMaxMinPistonCapping struct {
running map[string]map[string]bool running map[string]map[string]bool
taskMonitor map[string][]def.Task taskMonitor map[string][]def.Task
totalPower map[string]float64 totalPower map[string]float64
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
ticker *time.Ticker ticker *time.Ticker
isCapping bool isCapping bool
@ -72,7 +72,7 @@ type BPSWMaxMinPistonCapping struct {
} }
// New electron scheduler // New electron scheduler
func NewBPSWMaxMinPistonCapping(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, func NewBPSWMaxMinPistonCapping(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
classMapWatts bool) *BPSWMaxMinPistonCapping { classMapWatts bool) *BPSWMaxMinPistonCapping {
sort.Sort(def.WattsSorter(tasks)) sort.Sort(def.WattsSorter(tasks))
@ -83,7 +83,7 @@ func NewBPSWMaxMinPistonCapping(tasks []def.Task, ignoreWatts bool, schedTracePr
s := &BPSWMaxMinPistonCapping{ s := &BPSWMaxMinPistonCapping{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -134,7 +134,7 @@ func (s *BPSWMaxMinPistonCapping) newTask(offer *mesos.Offer, task def.Task) *me
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@ -242,7 +242,7 @@ func (s *BPSWMaxMinPistonCapping) CheckFit(i int,
offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
// Does the task fit // Does the task fit
if (s.ignoreWatts || (offerWatts >= (*totalWatts + wattsConsideration))) && if (!s.wattsAsAResource || (offerWatts >= (*totalWatts + wattsConsideration))) &&
(offerCPU >= (*totalCPU + task.CPU)) && (offerCPU >= (*totalCPU + task.CPU)) &&
(offerRAM >= (*totalRAM + task.RAM)) { (offerRAM >= (*totalRAM + task.RAM)) {

View file

@ -32,7 +32,7 @@ func (s *BPSWMaxMinProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
// Error in determining wattsConsideration // Error in determining wattsConsideration
log.Fatal(err) log.Fatal(err)
} }
if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) { if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || (watts >= wattsConsideration)) {
return true return true
} }
@ -49,7 +49,7 @@ type BPSWMaxMinProacCC struct {
taskMonitor map[string][]def.Task taskMonitor map[string][]def.Task
availablePower map[string]float64 availablePower map[string]float64
totalPower map[string]float64 totalPower map[string]float64
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
capper *powCap.ClusterwideCapper capper *powCap.ClusterwideCapper
ticker *time.Ticker ticker *time.Ticker
@ -75,7 +75,7 @@ type BPSWMaxMinProacCC struct {
} }
// New electron scheduler // New electron scheduler
func NewBPSWMaxMinProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinProacCC { func NewBPSWMaxMinProacCC(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinProacCC {
sort.Sort(def.WattsSorter(tasks)) sort.Sort(def.WattsSorter(tasks))
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@ -85,7 +85,7 @@ func NewBPSWMaxMinProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix s
s := &BPSWMaxMinProacCC{ s := &BPSWMaxMinProacCC{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -139,7 +139,7 @@ func (s *BPSWMaxMinProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.Ta
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@ -265,7 +265,7 @@ func (s *BPSWMaxMinProacCC) CheckFit(i int,
offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer) offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
// Does the task fit // Does the task fit
if (s.ignoreWatts || (offerWatts >= (*totalWatts + wattsConsideration))) && if (!s.wattsAsAResource || (offerWatts >= (*totalWatts + wattsConsideration))) &&
(offerCPU >= (*totalCPU + task.CPU)) && (offerCPU >= (*totalCPU + task.CPU)) &&
(offerRAM >= (*totalRAM + task.RAM)) { (offerRAM >= (*totalRAM + task.RAM)) {

View file

@ -27,7 +27,7 @@ func (s *FirstFit) takeOffer(offer *mesos.Offer, task def.Task) bool {
// Error in determining wattsConsideration // Error in determining wattsConsideration
log.Fatal(err) log.Fatal(err)
} }
if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) { if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || watts >= wattsConsideration) {
return true return true
} }
@ -42,7 +42,7 @@ type FirstFit struct {
tasks []def.Task tasks []def.Task
metrics map[string]def.Metric metrics map[string]def.Metric
running map[string]map[string]bool running map[string]map[string]bool
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
// First set of PCP values are garbage values, signal to logger to start recording when we're // First set of PCP values are garbage values, signal to logger to start recording when we're
@ -63,7 +63,7 @@ type FirstFit struct {
} }
// New electron scheduler // New electron scheduler
func NewFirstFit(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFit { func NewFirstFit(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *FirstFit {
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
if err != nil { if err != nil {
@ -72,7 +72,7 @@ func NewFirstFit(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, cl
s := &FirstFit{ s := &FirstFit{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -107,7 +107,7 @@ func (s *FirstFit) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))

View file

@ -29,7 +29,7 @@ func (s *FirstFitProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
// Error in determining wattsConsideration // Error in determining wattsConsideration
log.Fatal(err) log.Fatal(err)
} }
if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || (offer_watts >= wattsConsideration)) { if offer_cpu >= task.CPU && offer_mem >= task.RAM && (!s.wattsAsAResource || (offer_watts >= wattsConsideration)) {
return true return true
} }
return false return false
@ -46,7 +46,7 @@ type FirstFitProacCC struct {
taskMonitor map[string][]def.Task // store tasks that are currently running. taskMonitor map[string][]def.Task // store tasks that are currently running.
availablePower map[string]float64 // available power for each node in the cluster. availablePower map[string]float64 // available power for each node in the cluster.
totalPower map[string]float64 // total power for each node in the cluster. totalPower map[string]float64 // total power for each node in the cluster.
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
capper *powCap.ClusterwideCapper capper *powCap.ClusterwideCapper
ticker *time.Ticker ticker *time.Ticker
@ -73,7 +73,7 @@ type FirstFitProacCC struct {
} }
// New electron scheduler. // New electron scheduler.
func NewFirstFitProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, func NewFirstFitProacCC(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
classMapWatts bool) *FirstFitProacCC { classMapWatts bool) *FirstFitProacCC {
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@ -83,7 +83,7 @@ func NewFirstFitProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix str
s := &FirstFitProacCC{ s := &FirstFitProacCC{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -137,7 +137,7 @@ func (s *FirstFitProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.Task
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))

View file

@ -28,7 +28,7 @@ func (s *FirstFitSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool
// Error in determining wattsConsideration // Error in determining wattsConsideration
log.Fatal(err) log.Fatal(err)
} }
if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) { if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || watts >= wattsConsideration) {
return true return true
} }
@ -43,7 +43,7 @@ type FirstFitSortedOffers struct {
tasks []def.Task tasks []def.Task
metrics map[string]def.Metric metrics map[string]def.Metric
running map[string]map[string]bool running map[string]map[string]bool
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
// First set of PCP values are garbage values, signal to logger to start recording when we're // First set of PCP values are garbage values, signal to logger to start recording when we're
@ -64,7 +64,7 @@ type FirstFitSortedOffers struct {
} }
// New electron scheduler // New electron scheduler
func NewFirstFitSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFitSortedOffers { func NewFirstFitSortedOffers(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *FirstFitSortedOffers {
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
if err != nil { if err != nil {
@ -73,7 +73,7 @@ func NewFirstFitSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefi
s := &FirstFitSortedOffers{ s := &FirstFitSortedOffers{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -108,7 +108,7 @@ func (s *FirstFitSortedOffers) newTask(offer *mesos.Offer, task def.Task) *mesos
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))

View file

@ -40,7 +40,7 @@ func (s *FirstFitSortedWattsProacCC) takeOffer(offer *mesos.Offer, task def.Task
// Error in determining wattsToConsider // Error in determining wattsToConsider
log.Fatal(err) log.Fatal(err)
} }
if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || offer_watts >= wattsConsideration) { if offer_cpu >= task.CPU && offer_mem >= task.RAM && (!s.wattsAsAResource || offer_watts >= wattsConsideration) {
return true return true
} }
return false return false
@ -57,7 +57,7 @@ type FirstFitSortedWattsProacCC struct {
taskMonitor map[string][]def.Task // store tasks that are currently running. taskMonitor map[string][]def.Task // store tasks that are currently running.
availablePower map[string]float64 // available power for each node in the cluster. availablePower map[string]float64 // available power for each node in the cluster.
totalPower map[string]float64 // total power for each node in the cluster. totalPower map[string]float64 // total power for each node in the cluster.
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
capper *powCap.ClusterwideCapper capper *powCap.ClusterwideCapper
ticker *time.Ticker ticker *time.Ticker
@ -84,7 +84,7 @@ type FirstFitSortedWattsProacCC struct {
} }
// New electron scheduler. // New electron scheduler.
func NewFirstFitSortedWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, func NewFirstFitSortedWattsProacCC(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
classMapWatts bool) *FirstFitSortedWattsProacCC { classMapWatts bool) *FirstFitSortedWattsProacCC {
// Sorting tasks in ascending order of watts // Sorting tasks in ascending order of watts
@ -97,7 +97,7 @@ func NewFirstFitSortedWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTrac
s := &FirstFitSortedWattsProacCC{ s := &FirstFitSortedWattsProacCC{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -151,7 +151,7 @@ func (s *FirstFitSortedWattsProacCC) newTask(offer *mesos.Offer, task def.Task)
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
} else { } else {

View file

@ -28,7 +28,7 @@ func (s *FirstFitSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def
// Error in determining wattsConsideration // Error in determining wattsConsideration
log.Fatal(err) log.Fatal(err)
} }
if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) { if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || watts >= wattsConsideration) {
return true return true
} }
@ -43,7 +43,7 @@ type FirstFitSortedWattsSortedOffers struct {
tasks []def.Task tasks []def.Task
metrics map[string]def.Metric metrics map[string]def.Metric
running map[string]map[string]bool running map[string]map[string]bool
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
// First set of PCP values are garbage values, signal to logger to start recording when we're // First set of PCP values are garbage values, signal to logger to start recording when we're
@ -64,7 +64,7 @@ type FirstFitSortedWattsSortedOffers struct {
} }
// New electron scheduler // New electron scheduler
func NewFirstFitSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, func NewFirstFitSortedWattsSortedOffers(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
classMapWatts bool) *FirstFitSortedWattsSortedOffers { classMapWatts bool) *FirstFitSortedWattsSortedOffers {
// Sorting the tasks in increasing order of watts requirement. // Sorting the tasks in increasing order of watts requirement.
@ -77,7 +77,7 @@ func NewFirstFitSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, sche
s := &FirstFitSortedWattsSortedOffers{ s := &FirstFitSortedWattsSortedOffers{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -112,7 +112,7 @@ func (s *FirstFitSortedWattsSortedOffers) newTask(offer *mesos.Offer, task def.T
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))

View file

@ -28,7 +28,7 @@ func (s *FirstFitSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool
// Error in determining wattsConsideration // Error in determining wattsConsideration
log.Fatal(err) log.Fatal(err)
} }
if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) { if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || watts >= wattsConsideration) {
return true return true
} }
@ -43,7 +43,7 @@ type FirstFitSortedWatts struct {
tasks []def.Task tasks []def.Task
metrics map[string]def.Metric metrics map[string]def.Metric
running map[string]map[string]bool running map[string]map[string]bool
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
// First set of PCP values are garbage values, signal to logger to start recording when we're // First set of PCP values are garbage values, signal to logger to start recording when we're
@ -64,7 +64,7 @@ type FirstFitSortedWatts struct {
} }
// New electron scheduler // New electron scheduler
func NewFirstFitSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFitSortedWatts { func NewFirstFitSortedWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *FirstFitSortedWatts {
sort.Sort(def.WattsSorter(tasks)) sort.Sort(def.WattsSorter(tasks))
@ -75,7 +75,7 @@ func NewFirstFitSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix
s := &FirstFitSortedWatts{ s := &FirstFitSortedWatts{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -110,7 +110,7 @@ func (s *FirstFitSortedWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))

View file

@ -41,7 +41,7 @@ type FirstFitWattsOnly struct {
tasks []def.Task tasks []def.Task
metrics map[string]def.Metric metrics map[string]def.Metric
running map[string]map[string]bool running map[string]map[string]bool
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
// First set of PCP values are garbage values, signal to logger to start recording when we're // First set of PCP values are garbage values, signal to logger to start recording when we're
@ -62,7 +62,7 @@ type FirstFitWattsOnly struct {
} }
// New electron scheduler // New electron scheduler
func NewFirstFitWattsOnly(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFitWattsOnly { func NewFirstFitWattsOnly(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *FirstFitWattsOnly {
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
if err != nil { if err != nil {
@ -71,7 +71,7 @@ func NewFirstFitWattsOnly(tasks []def.Task, ignoreWatts bool, schedTracePrefix s
s := &FirstFitWattsOnly{ s := &FirstFitWattsOnly{
tasks: tasks, tasks: tasks,
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),

View file

@ -34,7 +34,7 @@ type TopHeavy struct {
tasks []def.Task tasks []def.Task
metrics map[string]def.Metric metrics map[string]def.Metric
running map[string]map[string]bool running map[string]map[string]bool
ignoreWatts bool wattsAsAResource bool
classMapWatts bool classMapWatts bool
smallTasks, largeTasks []def.Task smallTasks, largeTasks []def.Task
@ -56,7 +56,7 @@ type TopHeavy struct {
} }
// New electron scheduler // New electron scheduler
func NewTopHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *TopHeavy { func NewTopHeavy(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *TopHeavy {
sort.Sort(def.WattsSorter(tasks)) sort.Sort(def.WattsSorter(tasks))
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log") logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@ -70,7 +70,7 @@ func NewTopHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, cl
s := &TopHeavy{ s := &TopHeavy{
smallTasks: tasks[:mid], smallTasks: tasks[:mid],
largeTasks: tasks[mid+1:], largeTasks: tasks[mid+1:],
ignoreWatts: ignoreWatts, wattsAsAResource: wattsAsAResource,
classMapWatts: classMapWatts, classMapWatts: classMapWatts,
Shutdown: make(chan struct{}), Shutdown: make(chan struct{}),
Done: make(chan struct{}), Done: make(chan struct{}),
@ -105,7 +105,7 @@ func (s *TopHeavy) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
mesosutil.NewScalarResource("mem", task.RAM), mesosutil.NewScalarResource("mem", task.RAM),
} }
if !s.ignoreWatts { if s.wattsAsAResource {
if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil { if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider) log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider)) resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@ -186,7 +186,7 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) {
// Does the task fit // Does the task fit
// OR lazy evaluation. If ignore watts is set to true, second statement won't // OR lazy evaluation. If ignore watts is set to true, second statement won't
// be evaluated. // be evaluated.
if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsConsideration))) && if (!s.wattsAsAResource || (offerWatts >= (totalWatts + wattsConsideration))) &&
(offerCPU >= (totalCPU + task.CPU)) && (offerCPU >= (totalCPU + task.CPU)) &&
(offerRAM >= (totalRAM + task.RAM)) { (offerRAM >= (totalRAM + task.RAM)) {
taken = true taken = true
@ -245,7 +245,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) {
} }
// Decision to take the offer or not // Decision to take the offer or not
if (s.ignoreWatts || (offerWatts >= wattsConsideration)) && if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) &&
(offerCPU >= task.CPU) && (offerRAM >= task.RAM) { (offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
offerTaken = true offerTaken = true
tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task)) tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task))