Made classMapWatts a command-line option that enables or disables mapping of watts to power classes when accepting offers from Mesos.

- Removed the schedulers that were created solely for the classMapWatts feature.
- Retrofitted all schedulers to use the power-class-mapped watts attribute for a task when classMapWatts is enabled.
- Removed unnecessary functions and variables from constants.go.
- Removed unnecessary functions from utilities/utils.go.
- Fixed an operator precedence issue with takeOffer(...) in some of the schedulers (see the short illustration after the takeOffer hunk below).
- Added a TODO to decouple capping strategies from the schedulers completely.
- Added a TODO to move all the common struct attributes in the schedulers into base.go.

Pradyumna Kaushik 2017-02-09 18:05:38 -05:00
parent a2b50dd313
commit fdcb401447
28 changed files with 686 additions and 2094 deletions
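Since classMapWatts is now a command-line option, the wiring presumably looks something like the sketch below; the flag name, usage string, and plumbing are assumptions, not taken from this commit:

```go
package main

import (
	"flag"
	"fmt"
)

// Hypothetical flag definition; the real name and usage string in
// electron's main.go may differ.
var classMapWatts = flag.Bool("classMapWatts", false,
	"enable mapping of watts to power classes when accepting Mesos offers")

func main() {
	flag.Parse()
	fmt.Println("classMapWatts enabled:", *classMapWatts)
	// The parsed value is then threaded into every scheduler constructor,
	// e.g. NewBinPackedPistonCapper(tasks, ignoreWatts, tracePrefix, *classMapWatts).
}
```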


@@ -27,17 +27,18 @@ import (
 corresponding to the load on that node.
 */
 type BinPackedPistonCapper struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	taskMonitor  map[string][]def.Task
-	totalPower   map[string]float64
-	ignoreWatts  bool
-	ticker       *time.Ticker
-	isCapping    bool
+	base          // Type embedded to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	taskMonitor   map[string][]def.Task
+	totalPower    map[string]float64
+	ignoreWatts   bool
+	classMapWatts bool
+	ticker        *time.Ticker
+	isCapping     bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule the new task.
@@ -58,7 +59,8 @@ type BinPackedPistonCapper struct {
 }
 
 // New electron scheduler.
-func NewBinPackedPistonCapper(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BinPackedPistonCapper {
+func NewBinPackedPistonCapper(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+	classMapWatts bool) *BinPackedPistonCapper {
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
 	if err != nil {
@@ -66,26 +68,32 @@ func NewBinPackedPistonCapper(tasks []def.Task, ignoreWatts bool, schedTracePref
 	}
 
 	s := &BinPackedPistonCapper{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		taskMonitor: make(map[string][]def.Task),
-		totalPower:  make(map[string]float64),
-		RecordPCP:   false,
-		ticker:      time.NewTicker(5 * time.Second),
-		isCapping:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		taskMonitor:   make(map[string][]def.Task),
+		totalPower:    make(map[string]float64),
+		RecordPCP:     false,
+		ticker:        time.NewTicker(5 * time.Second),
+		isCapping:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
 
 // check whether task fits the offer or not.
-func (s *BinPackedPistonCapper) takeOffer(offerWatts float64, offerCPU float64, offerRAM float64,
+func (s *BinPackedPistonCapper) takeOffer(offer *mesos.Offer, offerWatts float64, offerCPU float64, offerRAM float64,
 	totalWatts float64, totalCPU float64, totalRAM float64, task def.Task) bool {
 
-	if (s.ignoreWatts || (offerWatts >= (totalWatts + task.Watts))) &&
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsToConsider
+		log.Fatal(err)
+	}
+	if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsConsideration))) &&
 		(offerCPU >= (totalCPU + task.CPU)) &&
 		(offerRAM >= (totalRAM + task.RAM)) {
 		return true
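The commit message also mentions fixing an operator precedence issue in takeOffer(...). In Go, && binds tighter than ||, so dropping the parentheses around the watts check silently changes the condition. A small illustrative example (not the exact expression that was fixed):

```go
package main

import "fmt"

func main() {
	ignoreWatts, wattsOK, cpuOK := true, false, false

	// Without parentheses this parses as ignoreWatts || (wattsOK && cpuOK),
	// so the CPU check is skipped whenever ignoreWatts is true.
	buggy := ignoreWatts || wattsOK && cpuOK

	// The intended grouping: the CPU check must pass either way.
	fixed := (ignoreWatts || wattsOK) && cpuOK

	fmt.Println(buggy, fixed) // prints: true false
}
```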
@@ -130,7 +138,13 @@ func (s *BinPackedPistonCapper) newTask(offer *mesos.Offer, task def.Task) *meso
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
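Both takeOffer and newTask now route the watts decision through def.WattsToConsider. Its implementation isn't shown in this file, but from the call sites and the fallback logic in StatusUpdate further down, a minimal sketch might look like the following; the "class" attribute name, the powerClassFromOffer helper, and the error message are all assumptions:

```go
package def

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/mesosproto"
)

// Sketch of WattsToConsider, inferred from its call sites: when classMapWatts
// is enabled, pick the task's watts figure for the offering node's power
// class; otherwise fall back to the task's flat Watts value.
func WattsToConsider(task Task, classMapWatts bool, offer *mesos.Offer) (float64, error) {
	if !classMapWatts {
		return task.Watts, nil
	}
	powerClass := powerClassFromOffer(offer)
	if watts, ok := task.ClassToWatts[powerClass]; ok {
		return watts, nil
	}
	return 0, fmt.Errorf("no watts mapping for power class %q in task %s", powerClass, task.Name)
}

// Assumed helper: read the node's power class from a text attribute on the
// offer (the attribute name "class" is a guess).
func powerClassFromOffer(offer *mesos.Offer) string {
	for _, attr := range offer.GetAttributes() {
		if attr.GetName() == "class" {
			return attr.GetText().GetValue()
		}
	}
	return ""
}
```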
@@ -183,7 +197,8 @@ func (s *BinPackedPistonCapper) startCapping() {
 	if err := rapl.Cap(host, "rapl", roundedCapValue); err != nil {
 		log.Println(err)
 	} else {
-		log.Printf("Capped [%s] at %d", host, int(math.Floor(capValue+0.5)))
+		log.Printf("Capped [%s] at %d", host,
+			int(math.Floor(capValue+0.5)))
 	}
 	bpPistonPreviousRoundedCapValues[host] = roundedCapValue
 }
@@ -219,8 +234,8 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 	// retrieving the total power for each host in the offers
 	for _, offer := range offers {
 		if _, ok := s.totalPower[*offer.Hostname]; !ok {
-			_, _, offer_watts := offerUtils.OfferAgg(offer)
-			s.totalPower[*offer.Hostname] = offer_watts
+			_, _, offerWatts := offerUtils.OfferAgg(offer)
+			s.totalPower[*offer.Hostname] = offerWatts
 		}
 	}
@@ -258,6 +273,12 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 	partialLoad := 0.0
 	for i := 0; i < len(s.tasks); i++ {
 		task := s.tasks[i]
+		wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+		if err != nil {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
+
 		// Check host if it exists
 		if task.Host != "" {
 			// Don't take offer if it doesn't match our task's host requirement.
@@ -268,7 +289,8 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 		for *task.Instances > 0 {
 			// Does the task fit
-			if s.takeOffer(offerWatts, offerCPU, offerRAM, totalWatts, totalCPU, totalRAM, task) {
+			if s.takeOffer(offer, offerWatts, offerCPU, offerRAM,
+				totalWatts, totalCPU, totalRAM, task) {
 
 				// Start piston capping if haven't started yet
 				if !s.isCapping {
@@ -277,7 +299,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 				}
 
 				offerTaken = true
-				totalWatts += task.Watts
+				totalWatts += wattsConsideration
 				totalCPU += task.CPU
 				totalRAM += task.RAM
 				log.Println("Co-Located with: ")
@@ -289,7 +311,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 				s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
 				*task.Instances--
 				// updating the cap value for offer.Hostname
-				partialLoad += ((task.Watts * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
+				partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
 
 				if *task.Instances <= 0 {
 					// All instances of task have been scheduled. Remove it
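With classMapWatts enabled, the piston cap now grows by the power-class-mapped watts share rather than the flat task watts. A quick worked example with made-up numbers (CapMargin's real value lives in constants.go):

```go
package main

import "fmt"

func main() {
	// Hypothetical figures: the task's watts consideration is 50 W, the
	// host's offers advertise 200 W total, and constants.CapMargin is 0.7.
	wattsConsideration, capMargin, totalPower := 50.0, 0.7, 200.0

	// Same formula as in ResourceOffers above: (50 * 0.7 / 200) * 100.
	partialLoad := ((wattsConsideration * capMargin) / totalPower) * 100
	fmt.Printf("cap for this host rises by %.1f percentage points\n", partialLoad) // 17.5
}
```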
@@ -370,9 +392,16 @@ func (s *BinPackedPistonCapper) StatusUpdate(driver sched.SchedulerDriver, statu
 			log.Println(err)
 		}
 
+		// Need to determine the watts consideration for the finishedTask
+		var wattsConsideration float64
+		if s.classMapWatts {
+			wattsConsideration = finishedTask.ClassToWatts[hostToPowerClass(hostOfFinishedTask)]
+		} else {
+			wattsConsideration = finishedTask.Watts
+		}
 		// Need to update the cap values for host of the finishedTask
 		bpPistonMutex.Lock()
-		bpPistonCapValues[hostOfFinishedTask] -= ((finishedTask.Watts * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
+		bpPistonCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
 
 		// Checking to see if the cap value has become 0, in which case we uncap the host.
 		if int(math.Floor(bpPistonCapValues[hostOfFinishedTask]+0.5)) == 0 {
 			bpPistonCapValues[hostOfFinishedTask] = 100
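In StatusUpdate the original offer is gone, so the power class has to be recovered from the hostname via hostToPowerClass. That helper isn't part of this hunk; one plausible shape, assuming a hostname-to-class lookup maintained elsewhere (both the map and the hostname below are hypothetical):

```go
package main

import "fmt"

// Hypothetical lookup table, populated as offers arrive; the real
// hostToPowerClass may derive the class differently (e.g. from constants).
var hostPowerClass = map[string]string{}

func hostToPowerClass(host string) string {
	if class, ok := hostPowerClass[host]; ok {
		return class
	}
	return "" // unknown host; a ClassToWatts lookup with "" will simply miss
}

func main() {
	hostPowerClass["node-042"] = "B"
	fmt.Println(hostToPowerClass("node-042")) // B
}
```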