Merged in bpswClassToWattsCapping (pull request #6)

Commit 0c6d7f670e
8 changed files with 914 additions and 22 deletions
@@ -99,7 +99,7 @@ func (capper ClusterwideCapper) CleverRecap(totalPower map[string]float64,
 
 	// determining the Recap value by calling the regular Recap(...)
 	toggle := false
-	RecapValue, err := capper.Recap(totalPower, taskMonitor, finishedTaskId)
+	RecapValue, err := capper.NaiveRecap(totalPower, taskMonitor, finishedTaskId)
 	if err == nil {
 		toggle = true
 	}
@@ -181,7 +181,7 @@ Recapping the entire cluster.
 
 This needs to be called whenever a task finishes execution.
 */
-func (capper ClusterwideCapper) Recap(totalPower map[string]float64,
+func (capper ClusterwideCapper) NaiveRecap(totalPower map[string]float64,
 	taskMonitor map[string][]def.Task, finishedTaskId string) (float64, error) {
 	// Validation
 	if totalPower == nil || taskMonitor == nil {
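The rename above makes the naming symmetric: the capper now exposes NaiveRecap and CleverRecap with identical signatures, so schedulers choose a recap strategy by swapping a single call. A minimal illustration of the swap (taken from the calling pattern later in this commit; the surrounding variables belong to the callers):

	// Inside a scheduler's StatusUpdate, either strategy can be plugged in:
	tempCap, err := s.capper.NaiveRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
	//tempCap, err := s.capper.CleverRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)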
@@ -7,6 +7,7 @@ To Do:
 * Fix the race condition on 'tasksRunning' in proactiveclusterwidecappingfcfs.go and proactiveclusterwidecappingranked.go
 * Separate the capping strategies from the scheduling algorithms and make it possible to use any capping strategy with any scheduler.
 * Make newTask(...) variadic where the newTaskClass argument can either be given or not. If not given, pick task.Watts as the watts attribute; otherwise pick task.ClassToWatts[newTaskClass]. (A sketch of this idea follows below.)
+* Retrofit pcp/proactiveclusterwidecappers.go to include the power capping go routines and to cap only when necessary.
 
 Scheduling Algorithms:
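Since the variadic newTask(...) item above is a concrete how-to, here is a hedged sketch of what the watts selection could look like (the helper name wattsFor and the variadic wrapper are hypothetical; def.Task, task.Watts, and task.ClassToWatts come from this repository):

	// wattsFor is a hypothetical helper, not code from this commit: it picks
	// the watts attribute the way the To Do item describes, using a variadic
	// argument so the task class can be omitted.
	func wattsFor(task def.Task, newTaskClass ...string) float64 {
		if len(newTaskClass) == 0 {
			return task.Watts // no class given: fall back to the task-level watts attribute
		}
		return task.ClassToWatts[newTaskClass[0]] // class given: use the per-class watts
	}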
schedulers/bpMaxMinPistonCapping.go (new file, 427 lines)

@@ -0,0 +1,427 @@
package schedulers

import (
	"bitbucket.org/sunybingcloud/electron/constants"
	"bitbucket.org/sunybingcloud/electron/def"
	"bitbucket.org/sunybingcloud/electron/rapl"
	"errors"
	"fmt"
	"github.com/golang/protobuf/proto"
	mesos "github.com/mesos/mesos-go/mesosproto"
	"github.com/mesos/mesos-go/mesosutil"
	sched "github.com/mesos/mesos-go/scheduler"
	"log"
	"math"
	"os"
	"sort"
	"strings"
	"sync"
	"time"
)

// Decides whether to take an offer or not
func (s *BPMaxMinPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) bool {

	cpus, mem, watts := OfferAgg(offer)

	//TODO: Insert watts calculation here instead of taking them as a parameter

	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
		return true
	}

	return false
}

type BPMaxMinPistonCapping struct {
	base         // Type embedding to inherit common functions
	tasksCreated int
	tasksRunning int
	tasks        []def.Task
	metrics      map[string]def.Metric
	running      map[string]map[string]bool
	taskMonitor  map[string][]def.Task
	totalPower   map[string]float64
	ignoreWatts  bool
	ticker       *time.Ticker
	isCapping    bool

	// First set of PCP values are garbage values, signal to logger to start recording when we're
	// about to schedule a new task
	RecordPCP bool

	// This channel is closed when the program receives an interrupt,
	// signalling that the program should shut down.
	Shutdown chan struct{}

	// This channel is closed after shutdown is closed, and only when all
	// outstanding tasks have been cleaned up
	Done chan struct{}

	// Controls when to shutdown pcp logging
	PCPLog chan struct{}

	schedTrace *log.Logger
}

// New electron scheduler
func NewBPMaxMinPistonCapping(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BPMaxMinPistonCapping {
	sort.Sort(def.WattsSorter(tasks))

	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
	if err != nil {
		log.Fatal(err)
	}

	s := &BPMaxMinPistonCapping{
		tasks:       tasks,
		ignoreWatts: ignoreWatts,
		Shutdown:    make(chan struct{}),
		Done:        make(chan struct{}),
		PCPLog:      make(chan struct{}),
		running:     make(map[string]map[string]bool),
		taskMonitor: make(map[string][]def.Task),
		totalPower:  make(map[string]float64),
		RecordPCP:   false,
		ticker:      time.NewTicker(5 * time.Second),
		isCapping:   false,
		schedTrace:  log.New(logFile, "", log.LstdFlags),
	}
	return s
}
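A hedged usage sketch of the constructor above (only the NewBPMaxMinPistonCapping signature comes from this file; where the task list is parsed and how the mesos-go driver is wired up live elsewhere in electron):

	// Hypothetical wiring, not code from this commit.
	var tasks []def.Task // parsed from the workload definition elsewhere in electron
	s := NewBPMaxMinPistonCapping(tasks, false /* ignoreWatts */, "bpMaxMinPistonCapping")
	_ = s // registration with the mesos-go SchedulerDriver happens in electron's main package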

func (s *BPMaxMinPistonCapping) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
	s.tasksCreated++

	// Start recording only when we're creating the first task
	if !s.RecordPCP {
		// Turn on logging
		s.RecordPCP = true
		time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
	}

	// If this is our first time running into this Agent
	if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
		s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
	}

	// Add task to list of tasks running on node
	s.running[offer.GetSlaveId().GoString()][taskName] = true

	// Setting the task ID to the task. This is done so that we can consider each task to be different
	// even though they have the same parameters.
	task.SetTaskID(*proto.String("electron-" + taskName))
	// Add task to the taskMonitor for this node, so StatusUpdate can find it when it finishes
	if len(s.taskMonitor[*offer.Hostname]) == 0 {
		s.taskMonitor[*offer.Hostname] = []def.Task{task}
	} else {
		s.taskMonitor[*offer.Hostname] = append(s.taskMonitor[*offer.Hostname], task)
	}

	resources := []*mesos.Resource{
		mesosutil.NewScalarResource("cpus", task.CPU),
		mesosutil.NewScalarResource("mem", task.RAM),
	}

	if !s.ignoreWatts {
		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
	}

	return &mesos.TaskInfo{
		Name: proto.String(taskName),
		TaskId: &mesos.TaskID{
			Value: proto.String("electron-" + taskName),
		},
		SlaveId:   offer.SlaveId,
		Resources: resources,
		Command: &mesos.CommandInfo{
			Value: proto.String(task.CMD),
		},
		Container: &mesos.ContainerInfo{
			Type: mesos.ContainerInfo_DOCKER.Enum(),
			Docker: &mesos.ContainerInfo_DockerInfo{
				Image:   proto.String(task.Image),
				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
			},
		},
	}
}

func (s *BPMaxMinPistonCapping) Disconnected(sched.SchedulerDriver) {
	// Need to stop the capping process
	s.ticker.Stop()
	bpMaxMinPistonCappingMutex.Lock()
	s.isCapping = false
	bpMaxMinPistonCappingMutex.Unlock()
	log.Println("Framework disconnected with master")
}

// mutex
var bpMaxMinPistonCappingMutex sync.Mutex

// Cap values for the go routine that caps each node in the cluster at regular intervals of time
var bpMaxMinPistonCappingCapValues = make(map[string]float64)

// Storing the previous cap value for each host so as to not repeatedly cap the nodes to the same value. (reduces overhead)
var bpMaxMinPistonCappingPreviousRoundedCapValues = make(map[string]int)

func (s *BPMaxMinPistonCapping) startCapping() {
	go func() {
		for {
			select {
			case <-s.ticker.C:
				// Need to cap each node
				bpMaxMinPistonCappingMutex.Lock()
				for host, capValue := range bpMaxMinPistonCappingCapValues {
					roundedCapValue := int(math.Floor(capValue + 0.5))
					// has the cap value changed
					if previousRoundedCap, ok := bpMaxMinPistonCappingPreviousRoundedCapValues[host]; ok {
						if previousRoundedCap != roundedCapValue {
							if err := rapl.Cap(host, "rapl", roundedCapValue); err != nil {
								log.Println(err)
							} else {
								log.Printf("Capped [%s] at %d", host, roundedCapValue)
							}
							bpMaxMinPistonCappingPreviousRoundedCapValues[host] = roundedCapValue
						}
					} else {
						if err := rapl.Cap(host, "rapl", roundedCapValue); err != nil {
							log.Println(err)
						} else {
							log.Printf("Capped [%s] at %d", host, roundedCapValue)
						}
						bpMaxMinPistonCappingPreviousRoundedCapValues[host] = roundedCapValue
					}
				}
				bpMaxMinPistonCappingMutex.Unlock()
			}
		}
	}()
}
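Two idioms in startCapping above deserve a note: int(math.Floor(x+0.5)) rounds a non-negative float to the nearest integer, and the previous-rounded-value map skips redundant rapl.Cap calls when the rounded cap has not moved. A standalone demonstration with hypothetical numbers:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		previous := 72 // last cap actually applied to the host
		for _, capValue := range []float64{72.4, 72.6} {
			rounded := int(math.Floor(capValue + 0.5)) // 72.4 -> 72, 72.6 -> 73
			if rounded != previous {
				fmt.Printf("would re-cap host at %d\n", rounded) // fires only for 72.6
				previous = rounded
			}
		}
	}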

// Stop the capping
func (s *BPMaxMinPistonCapping) stopCapping() {
	if s.isCapping {
		log.Println("Stopping the capping.")
		s.ticker.Stop()
		bpMaxMinPistonCappingMutex.Lock()
		s.isCapping = false
		bpMaxMinPistonCappingMutex.Unlock()
	}
}

// Determine if the remaining space inside of the offer is enough for
// the task we need to create. If it is, create a TaskInfo and return it.
func (s *BPMaxMinPistonCapping) CheckFit(i int,
	task def.Task,
	offer *mesos.Offer,
	totalCPU *float64,
	totalRAM *float64,
	totalWatts *float64,
	partialLoad *float64) (bool, *mesos.TaskInfo) {

	offerCPU, offerRAM, offerWatts := OfferAgg(offer)

	// Does the task fit
	if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) &&
		(offerCPU >= (*totalCPU + task.CPU)) &&
		(offerRAM >= (*totalRAM + task.RAM)) {

		// Start piston capping if haven't started yet
		if !s.isCapping {
			s.isCapping = true
			s.startCapping()
		}

		*totalWatts += task.Watts
		*totalCPU += task.CPU
		*totalRAM += task.RAM
		log.Println("Co-Located with: ")
		coLocated(s.running[offer.GetSlaveId().GoString()])

		taskToSchedule := s.newTask(offer, task)

		fmt.Println("Inst: ", *task.Instances)
		s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
		*task.Instances--
		*partialLoad += ((task.Watts * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100

		if *task.Instances <= 0 {
			// All instances of task have been scheduled, remove it
			s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)

			if len(s.tasks) <= 0 {
				log.Println("Done scheduling all tasks")
				close(s.Shutdown)
			}
		}

		return true, taskToSchedule
	}

	return false, nil
}
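To make the partialLoad arithmetic in CheckFit concrete, a worked example with hypothetical numbers (the real CapMargin lives in electron's constants package; 0.5 here is made up):

	package main

	import "fmt"

	func main() {
		taskWatts, capMargin, hostTotalPower := 50.0, 0.5, 250.0
		partialLoad := ((taskWatts * capMargin) / hostTotalPower) * 100
		// Prints 10: this task adds ten percentage points to the host's piston
		// cap, which ResourceOffers later adds to bpMaxMinPistonCappingCapValues[host].
		fmt.Println(partialLoad)
	}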

func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	log.Printf("Received %d resource offers", len(offers))

	for _, offer := range offers {
		select {
		case <-s.Shutdown:
			log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
			driver.DeclineOffer(offer.Id, longFilter)

			log.Println("Number of tasks still running: ", s.tasksRunning)
			continue
		default:
		}

		tasks := []*mesos.TaskInfo{}

		offerTaken := false
		totalWatts := 0.0
		totalCPU := 0.0
		totalRAM := 0.0
		// Store the partialLoad for the host corresponding to this offer.
		// Once we can't fit any more tasks, we update the capValue for this host using partialLoad and then launch the fit tasks.
		partialLoad := 0.0

		// Assumes s.tasks is ordered in non-decreasing median max peak order

		// Attempt to schedule a single instance of the heaviest workload available first
		// Start from the back until one fits
		for i := len(s.tasks) - 1; i >= 0; i-- {

			task := s.tasks[i]
			// Check host if it exists
			if task.Host != "" {
				// Don't take offer if it doesn't match our task's host requirement
				if !strings.HasPrefix(*offer.Hostname, task.Host) {
					continue
				}
			}

			// TODO: Fix this so index doesn't need to be passed
			taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts, &partialLoad)

			if taken {
				offerTaken = true
				tasks = append(tasks, taskToSchedule)
				break
			}
		}

		// Pack the rest of the offer with the smallest tasks
		for i, task := range s.tasks {

			// Check host if it exists
			if task.Host != "" {
				// Don't take offer if it doesn't match our task's host requirement
				if !strings.HasPrefix(*offer.Hostname, task.Host) {
					continue
				}
			}

			for *task.Instances > 0 {
				// TODO: Fix this so index doesn't need to be passed
				taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts, &partialLoad)

				if taken {
					offerTaken = true
					tasks = append(tasks, taskToSchedule)
				} else {
					break // Continue on to next task
				}
			}
		}

		if offerTaken {
			// Updating the cap value for offer.Hostname
			bpMaxMinPistonCappingMutex.Lock()
			bpMaxMinPistonCappingCapValues[*offer.Hostname] += partialLoad
			bpMaxMinPistonCappingMutex.Unlock()
			log.Printf("Starting on [%s]\n", offer.GetHostname())
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter)
		} else {

			// If there was no match for the task
			fmt.Println("There are not enough resources to launch a task:")
			cpus, mem, watts := OfferAgg(offer)

			log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
			driver.DeclineOffer(offer.Id, defaultFilter)
		}
	}
}

// Remove finished task from the taskMonitor
func (s *BPMaxMinPistonCapping) deleteFromTaskMonitor(finishedTaskID string) (def.Task, string, error) {
	hostOfFinishedTask := ""
	indexOfFinishedTask := -1
	found := false
	var finishedTask def.Task

	for host, tasks := range s.taskMonitor {
		for i, task := range tasks {
			if task.TaskID == finishedTaskID {
				hostOfFinishedTask = host
				indexOfFinishedTask = i
				found = true
			}
		}
		if found {
			break
		}
	}

	if hostOfFinishedTask != "" && indexOfFinishedTask != -1 {
		finishedTask = s.taskMonitor[hostOfFinishedTask][indexOfFinishedTask]
		log.Printf("Removing task with TaskID [%s] from the list of running tasks\n",
			s.taskMonitor[hostOfFinishedTask][indexOfFinishedTask].TaskID)
		s.taskMonitor[hostOfFinishedTask] = append(s.taskMonitor[hostOfFinishedTask][:indexOfFinishedTask],
			s.taskMonitor[hostOfFinishedTask][indexOfFinishedTask+1:]...)
	} else {
		return finishedTask, hostOfFinishedTask, errors.New("Finished Task not present in TaskMonitor")
	}
	return finishedTask, hostOfFinishedTask, nil
}

func (s *BPMaxMinPistonCapping) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)

	if *status.State == mesos.TaskState_TASK_RUNNING {
		bpMaxMinPistonCappingMutex.Lock()
		s.tasksRunning++
		bpMaxMinPistonCappingMutex.Unlock()
	} else if IsTerminal(status.State) {
		delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
		// Deleting the task from the taskMonitor
		finishedTask, hostOfFinishedTask, err := s.deleteFromTaskMonitor(*status.TaskId.Value)
		if err != nil {
			log.Println(err)
		}

		// Need to update the cap values for host of the finishedTask
		bpMaxMinPistonCappingMutex.Lock()
		bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((finishedTask.Watts * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
		// Checking to see if the cap value has become 0, in which case we uncap the host.
		if int(math.Floor(bpMaxMinPistonCappingCapValues[hostOfFinishedTask]+0.5)) == 0 {
			bpMaxMinPistonCappingCapValues[hostOfFinishedTask] = 100
		}
		s.tasksRunning--
		bpMaxMinPistonCappingMutex.Unlock()

		if s.tasksRunning == 0 {
			select {
			case <-s.Shutdown:
				s.stopCapping()
				close(s.Done)
			default:
			}
		}
	}
	log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
}

schedulers/bpMaxMinProacCC.go (new file, 447 lines)

@@ -0,0 +1,447 @@
package schedulers

import (
	"bitbucket.org/sunybingcloud/electron/constants"
	"bitbucket.org/sunybingcloud/electron/def"
	"bitbucket.org/sunybingcloud/electron/pcp"
	"bitbucket.org/sunybingcloud/electron/rapl"
	"fmt"
	"github.com/golang/protobuf/proto"
	mesos "github.com/mesos/mesos-go/mesosproto"
	"github.com/mesos/mesos-go/mesosutil"
	sched "github.com/mesos/mesos-go/scheduler"
	"log"
	"math"
	"os"
	"sort"
	"strings"
	"sync"
	"time"
)

// Decides whether to take an offer or not
func (s *BPMaxMinProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
	cpus, mem, watts := OfferAgg(offer)

	//TODO: Insert watts calculation here instead of taking them as a parameter

	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
		return true
	}

	return false
}

type BPMaxMinProacCC struct {
	base           // Type embedding to inherit common functions
	tasksCreated   int
	tasksRunning   int
	tasks          []def.Task
	metrics        map[string]def.Metric
	running        map[string]map[string]bool
	taskMonitor    map[string][]def.Task
	availablePower map[string]float64
	totalPower     map[string]float64
	ignoreWatts    bool
	capper         *pcp.ClusterwideCapper
	ticker         *time.Ticker
	recapTicker    *time.Ticker
	isCapping      bool // indicate whether we are currently performing cluster-wide capping.
	isRecapping    bool // indicate whether we are currently performing cluster-wide recapping.

	// First set of PCP values are garbage values, signal to logger to start recording when we're
	// about to schedule a new task
	RecordPCP bool

	// This channel is closed when the program receives an interrupt,
	// signalling that the program should shut down
	Shutdown chan struct{}

	// This channel is closed after shutdown is closed, and only when all
	// outstanding tasks have been cleaned up
	Done chan struct{}

	// Controls when to shutdown pcp logging
	PCPLog chan struct{}

	schedTrace *log.Logger
}

// New electron scheduler
func NewBPMaxMinProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BPMaxMinProacCC {
	sort.Sort(def.WattsSorter(tasks))

	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
	if err != nil {
		log.Fatal(err)
	}

	s := &BPMaxMinProacCC{
		tasks:          tasks,
		ignoreWatts:    ignoreWatts,
		Shutdown:       make(chan struct{}),
		Done:           make(chan struct{}),
		PCPLog:         make(chan struct{}),
		running:        make(map[string]map[string]bool),
		taskMonitor:    make(map[string][]def.Task),
		availablePower: make(map[string]float64),
		totalPower:     make(map[string]float64),
		RecordPCP:      false,
		capper:         pcp.GetClusterwideCapperInstance(),
		ticker:         time.NewTicker(10 * time.Second),
		recapTicker:    time.NewTicker(20 * time.Second),
		isCapping:      false,
		isRecapping:    false,
		schedTrace:     log.New(logFile, "", log.LstdFlags),
	}
	return s
}

// mutex
var bpMaxMinProacCCMutex sync.Mutex

func (s *BPMaxMinProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
	s.tasksCreated++

	if !s.RecordPCP {
		// Turn on logging.
		s.RecordPCP = true
		time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
	}

	// If this is our first time running into this Agent
	if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
		s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
	}

	// Setting the task ID to the task. This is done so that we can consider each task to be different,
	// even though they have the same parameters.
	task.SetTaskID(*proto.String("electron-" + taskName))
	// Add task to the list of tasks running on the node.
	s.running[offer.GetSlaveId().GoString()][taskName] = true
	if len(s.taskMonitor[*offer.Hostname]) == 0 {
		s.taskMonitor[*offer.Hostname] = []def.Task{task}
	} else {
		s.taskMonitor[*offer.Hostname] = append(s.taskMonitor[*offer.Hostname], task)
	}

	resources := []*mesos.Resource{
		mesosutil.NewScalarResource("cpus", task.CPU),
		mesosutil.NewScalarResource("mem", task.RAM),
	}

	if !s.ignoreWatts {
		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
	}

	return &mesos.TaskInfo{
		Name: proto.String(taskName),
		TaskId: &mesos.TaskID{
			Value: proto.String("electron-" + taskName),
		},
		SlaveId:   offer.SlaveId,
		Resources: resources,
		Command: &mesos.CommandInfo{
			Value: proto.String(task.CMD),
		},
		Container: &mesos.ContainerInfo{
			Type: mesos.ContainerInfo_DOCKER.Enum(),
			Docker: &mesos.ContainerInfo_DockerInfo{
				Image:   proto.String(task.Image),
				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
			},
		},
	}
}

// Cap values for the go routine that caps the entire cluster at regular intervals of time.
var bpMaxMinProacCCCapValue = 0.0    // initial value to indicate that we haven't capped the cluster yet.
var bpMaxMinProacCCNewCapValue = 0.0 // newly computed cap value

func (s *BPMaxMinProacCC) startCapping() {
	go func() {
		for {
			select {
			case <-s.ticker.C:
				// Need to cap the cluster only if the new cap value is different from the old cap value.
				// This way we don't unnecessarily cap the cluster.
				bpMaxMinProacCCMutex.Lock()
				if s.isCapping {
					if int(math.Floor(bpMaxMinProacCCNewCapValue+0.5)) != int(math.Floor(bpMaxMinProacCCCapValue+0.5)) {
						// updating cap value
						bpMaxMinProacCCCapValue = bpMaxMinProacCCNewCapValue
						if bpMaxMinProacCCCapValue > 0.0 {
							for _, host := range constants.Hosts {
								// Rounding cap value to nearest int
								if err := rapl.Cap(host, "rapl", int(math.Floor(bpMaxMinProacCCCapValue+0.5))); err != nil {
									log.Println(err)
								}
							}
							log.Printf("Capped the cluster to %d", int(math.Floor(bpMaxMinProacCCCapValue+0.5)))
						}
					}
				}
				bpMaxMinProacCCMutex.Unlock()
			}
		}
	}()
}

// The cluster-wide cap value to apply when recapping.
var bpMaxMinProacCCRecapValue = 0.0

func (s *BPMaxMinProacCC) startRecapping() {
	go func() {
		for {
			select {
			case <-s.recapTicker.C:
				bpMaxMinProacCCMutex.Lock()
				// If we have stopped performing cluster-wide capping, then we need to recap.
				if s.isRecapping && bpMaxMinProacCCRecapValue > 0.0 {
					for _, host := range constants.Hosts {
						// Rounding the recap value to the nearest int
						if err := rapl.Cap(host, "rapl", int(math.Floor(bpMaxMinProacCCRecapValue+0.5))); err != nil {
							log.Println(err)
						}
					}
					log.Printf("Recapped the cluster to %d", int(math.Floor(bpMaxMinProacCCRecapValue+0.5)))
				}
				// Setting the recapping to false
				s.isRecapping = false
				bpMaxMinProacCCMutex.Unlock()
			}
		}
	}()
}

// Stop cluster-wide capping
func (s *BPMaxMinProacCC) stopCapping() {
	if s.isCapping {
		log.Println("Stopping the cluster-wide capping.")
		s.ticker.Stop()
		bpMaxMinProacCCMutex.Lock()
		s.isCapping = false
		s.isRecapping = true
		bpMaxMinProacCCMutex.Unlock()
	}
}

// Stop the cluster-wide recapping
func (s *BPMaxMinProacCC) stopRecapping() {
	// If not capping, then definitely recapping.
	if !s.isCapping && s.isRecapping {
		log.Println("Stopping the cluster-wide re-capping.")
		s.recapTicker.Stop()
		bpMaxMinProacCCMutex.Lock()
		s.isRecapping = false
		bpMaxMinProacCCMutex.Unlock()
	}
}

// Determine if the remaining space inside of the offer is enough for
// the task we need to create. If it is, create a TaskInfo and return it.
func (s *BPMaxMinProacCC) CheckFit(i int,
	task def.Task,
	offer *mesos.Offer,
	totalCPU *float64,
	totalRAM *float64,
	totalWatts *float64) (bool, *mesos.TaskInfo) {

	offerCPU, offerRAM, offerWatts := OfferAgg(offer)

	// Does the task fit
	if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) &&
		(offerCPU >= (*totalCPU + task.CPU)) &&
		(offerRAM >= (*totalRAM + task.RAM)) {

		// Start cluster-wide capping if we haven't yet
		if !s.isCapping {
			bpMaxMinProacCCMutex.Lock()
			s.isCapping = true
			bpMaxMinProacCCMutex.Unlock()
			s.startCapping()
		}

		tempCap, err := s.capper.FCFSDeterminedCap(s.totalPower, &task)
		if err == nil {
			bpMaxMinProacCCMutex.Lock()
			bpMaxMinProacCCNewCapValue = tempCap
			bpMaxMinProacCCMutex.Unlock()
		} else {
			log.Println("Failed to determine new cluster-wide cap:")
			log.Println(err)
		}

		*totalWatts += task.Watts
		*totalCPU += task.CPU
		*totalRAM += task.RAM
		log.Println("Co-Located with: ")
		coLocated(s.running[offer.GetSlaveId().GoString()])

		taskToSchedule := s.newTask(offer, task)

		fmt.Println("Inst: ", *task.Instances)
		s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
		*task.Instances--

		if *task.Instances <= 0 {
			// All instances of task have been scheduled, remove it
			s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)

			if len(s.tasks) <= 0 {
				log.Println("Done scheduling all tasks")
				// Need to stop the cluster-wide capping
				s.stopCapping()
				s.startRecapping() // Load changes after every task finishes and hence, we need to change the capping of the cluster.
				close(s.Shutdown)
			}
		}

		return true, taskToSchedule
	}

	return false, nil
}

func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	log.Printf("Received %d resource offers", len(offers))

	// retrieving the available power for all the hosts in the offers.
	for _, offer := range offers {
		_, _, offerWatts := OfferAgg(offer)
		s.availablePower[*offer.Hostname] = offerWatts
		// setting the total power if this is the first time we see the host
		if _, ok := s.totalPower[*offer.Hostname]; !ok {
			s.totalPower[*offer.Hostname] = offerWatts
		}
	}

	for host, tpower := range s.totalPower {
		log.Printf("TotalPower[%s] = %f", host, tpower)
	}

	for _, offer := range offers {
		select {
		case <-s.Shutdown:
			log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
			driver.DeclineOffer(offer.Id, longFilter)

			log.Println("Number of tasks still running: ", s.tasksRunning)
			continue
		default:
		}

		tasks := []*mesos.TaskInfo{}

		offerTaken := false
		totalWatts := 0.0
		totalCPU := 0.0
		totalRAM := 0.0

		// Assumes s.tasks is ordered in non-decreasing median max peak order

		// Attempt to schedule a single instance of the heaviest workload available first
		// Start from the back until one fits
		for i := len(s.tasks) - 1; i >= 0; i-- {

			task := s.tasks[i]
			// Check host if it exists
			if task.Host != "" {
				// Don't take offer if it doesn't match our task's host requirement
				if !strings.HasPrefix(*offer.Hostname, task.Host) {
					continue
				}
			}

			// TODO: Fix this so index doesn't need to be passed
			taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts)

			if taken {
				offerTaken = true
				tasks = append(tasks, taskToSchedule)
				break
			}
		}

		// Pack the rest of the offer with the smallest tasks
		for i, task := range s.tasks {

			// Check host if it exists
			if task.Host != "" {
				// Don't take offer if it doesn't match our task's host requirement
				if !strings.HasPrefix(*offer.Hostname, task.Host) {
					continue
				}
			}

			for *task.Instances > 0 {
				// TODO: Fix this so index doesn't need to be passed
				taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts)

				if taken {
					offerTaken = true
					tasks = append(tasks, taskToSchedule)
				} else {
					break // Continue on to next task
				}
			}
		}

		if offerTaken {
			log.Printf("Starting on [%s]\n", offer.GetHostname())
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter)
		} else {

			// If there was no match for the task
			fmt.Println("There are not enough resources to launch a task:")
			cpus, mem, watts := OfferAgg(offer)

			log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
			driver.DeclineOffer(offer.Id, defaultFilter)
		}
	}
}

func (s *BPMaxMinProacCC) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)

	if *status.State == mesos.TaskState_TASK_RUNNING {
		s.tasksRunning++
	} else if IsTerminal(status.State) {
		delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
		// Need to remove the task from the window
		s.capper.TaskFinished(*status.TaskId.Value)
		// Determining the new cluster-wide recap value (CleverRecap is the commented-out alternative)
		tempCap, err := s.capper.NaiveRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
		//tempCap, err := s.capper.CleverRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
		if err == nil {
			// If the newly determined recap value is different from the current recap value, then we need to recap.
			if int(math.Floor(tempCap+0.5)) != int(math.Floor(bpMaxMinProacCCRecapValue+0.5)) {
				bpMaxMinProacCCRecapValue = tempCap
				bpMaxMinProacCCMutex.Lock()
				s.isRecapping = true
				bpMaxMinProacCCMutex.Unlock()
				log.Printf("Determined re-cap value: %f\n", bpMaxMinProacCCRecapValue)
			} else {
				bpMaxMinProacCCMutex.Lock()
				s.isRecapping = false
				bpMaxMinProacCCMutex.Unlock()
			}
		} else {
			log.Println(err)
		}

		s.tasksRunning--
		if s.tasksRunning == 0 {
			select {
			case <-s.Shutdown:
				// Need to stop the cluster-wide recapping
				s.stopRecapping()
				close(s.Done)
			default:
			}
		}
	}
	log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
}

@@ -104,6 +104,9 @@ func (s *BPSWClassMapWattsPistonCapping) newTask(offer *mesos.Offer, task def.Ta
 		s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
 	}
 
+	// Add task to list of tasks running on node
+	s.running[offer.GetSlaveId().GoString()][taskName] = true
+
 	// Setting the task ID to the task. This is done so that we can consider each task to be different
 	// even though they have the same parameters.
 	task.SetTaskID(*proto.String("electron-" + taskName))
@@ -155,7 +158,7 @@ func (s *BPSWClassMapWattsPistonCapping) Disconnected(sched.SchedulerDriver) {
 // mutex
 var bpswClassMapWattsPistonMutex sync.Mutex
 
-// go routine to cap eahc node in the cluster at regular intervals of time
+// go routine to cap each node in the cluster at regular intervals of time
 var bpswClassMapWattsPistonCapValues = make(map[string]float64)
 
 // Storing the previous cap value for each host so as to not repeatedly cap the nodes to the same value. (reduces overhead)
@@ -268,6 +271,12 @@ func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDr
 			(offerCPU >= (totalCPU + task.CPU)) &&
 			(offerRAM >= (totalRAM + task.RAM)) {
 
+			// Start piston capping if haven't started yet
+			if !s.isCapping {
+				s.isCapping = true
+				s.startCapping()
+			}
+
 			fmt.Println("Watts being used: ", task.ClassToWatts[nodeClass])
 			taken = true
 			totalWatts += task.ClassToWatts[nodeClass]
@@ -165,22 +165,30 @@ func (s *BPSWClassMapWattsProacCC) Disconnected(sched.SchedulerDriver) {
 }
 
 // go routine to cap the entire cluster in regular intervals of time.
-var bpswClassMapWattsCapValue = 0.0 // initial value to indicate that we haven't capped the cluster yet.
+var bpswClassMapWattsProacCCCapValue = 0.0    // initial value to indicate that we haven't capped the cluster yet.
+var bpswClassMapWattsProacCCNewCapValue = 0.0 // newly computed cap value
 func (s *BPSWClassMapWattsProacCC) startCapping() {
 	go func() {
 		for {
 			select {
 			case <-s.ticker.C:
-				// Need to cap the cluster to the bpswClassMapWattsCapValue.
+				// Need to cap the cluster only if new cap value different from old cap value.
+				// This way we don't unnecessarily cap the cluster.
 				bpswClassMapWattsProacCCMutex.Lock()
-				if bpswClassMapWattsCapValue > 0.0 {
-					for _, host := range constants.Hosts {
-						// Rounding capValue to nearest int.
-						if err := rapl.Cap(host, "rapl", int(math.Floor(bpswClassMapWattsCapValue+0.5))); err != nil {
-							log.Println(err)
+				if s.isCapping {
+					if int(math.Floor(bpswClassMapWattsProacCCNewCapValue+0.5)) != int(math.Floor(bpswClassMapWattsProacCCCapValue+0.5)) {
+						// updating cap value
+						bpswClassMapWattsProacCCCapValue = bpswClassMapWattsProacCCNewCapValue
+						if bpswClassMapWattsProacCCCapValue > 0.0 {
+							for _, host := range constants.Hosts {
+								// Rounding cap value to nearest int
+								if err := rapl.Cap(host, "rapl", int(math.Floor(bpswClassMapWattsProacCCCapValue+0.5))); err != nil {
+									log.Println(err)
+								}
+							}
+							log.Printf("Capped the cluster to %d", int(math.Floor(bpswClassMapWattsProacCCCapValue+0.5)))
 						}
 					}
 				}
-				log.Printf("Capped the cluster to %d", int(math.Floor(bpswClassMapWattsCapValue+0.5)))
 				bpswClassMapWattsProacCCMutex.Unlock()
 			}
 		}
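The renames in this hunk and the ones below follow a single pattern: package-level cap variables gain the scheduler's full name as a prefix (bpswClassMapWattsCapValue becomes bpswClassMapWattsProacCCCapValue). All of these schedulers share the one schedulers package, and Go forbids two files in a package from declaring the same package-level identifier, so the longer names keep each scheduler's state distinct; the motivation is our inference, the redeclaration rule is standard Go:

	// These can coexist in package schedulers...
	var bpswClassMapWattsProacCCCapValue = 0.0
	var bpMaxMinProacCCCapValue = 0.0
	// ...whereas two files both declaring `var capValue = 0.0` at package
	// level would fail to compile with a "capValue redeclared" error.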
@@ -189,7 +197,7 @@ func (s *BPSWClassMapWattsProacCC) startCapping() {
 }
 
 // go routine to recap the entire cluster in regular intervals of time.
-var bpswClassMapWattsRecapValue = 0.0 // The cluster-wide cap value when recapping
+var bpswClassMapWattsProacCCRecapValue = 0.0 // The cluster-wide cap value when recapping
 func (s *BPSWClassMapWattsProacCC) startRecapping() {
 	go func() {
 		for {
@@ -197,14 +205,14 @@ func (s *BPSWClassMapWattsProacCC) startRecapping() {
 			case <-s.recapTicker.C:
 				bpswClassMapWattsProacCCMutex.Lock()
 				// If stopped performing cluster wide capping, then we need to recap
-				if s.isRecapping && bpswClassMapWattsRecapValue > 0.0 {
+				if s.isRecapping && bpswClassMapWattsProacCCRecapValue > 0.0 {
 					for _, host := range constants.Hosts {
 						// Rounding capValue to the nearest int
-						if err := rapl.Cap(host, "rapl", int(math.Floor(bpswClassMapWattsRecapValue+0.5))); err != nil {
+						if err := rapl.Cap(host, "rapl", int(math.Floor(bpswClassMapWattsProacCCRecapValue+0.5))); err != nil {
 							log.Println(err)
 						}
 					}
-					log.Printf("Recapping the cluster to %d", int(math.Floor(bpswClassMapWattsRecapValue+0.5)))
+					log.Printf("Recapping the cluster to %d", int(math.Floor(bpswClassMapWattsProacCCRecapValue+0.5)))
 				}
 				// Setting recapping to false
 				s.isRecapping = false
@@ -309,7 +317,7 @@ func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver,
 			tempCap, err := s.capper.FCFSDeterminedCap(s.totalPower, &task)
 			if err == nil {
 				bpswClassMapWattsProacCCMutex.Lock()
-				bpswClassMapWattsCapValue = tempCap
+				bpswClassMapWattsProacCCNewCapValue = tempCap
 				bpswClassMapWattsProacCCMutex.Unlock()
 			} else {
 				log.Println("Failed to determine new cluster-wide cap:")
@@ -370,16 +378,16 @@ func (s *BPSWClassMapWattsProacCC) StatusUpdate(driver sched.SchedulerDriver, st
 		// Need to remove the task from the window
 		s.capper.TaskFinished(*status.TaskId.Value)
 		// Determining the new cluster wide recap value
+		//tempCap, err := s.capper.NaiveRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
 		tempCap, err := s.capper.CleverRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
-		//tempCap, err := s.capper.CleverRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
 		if err == nil {
 			// If new determined cap value is different from the current recap value, then we need to recap
-			if int(math.Floor(tempCap+0.5)) != int(math.Floor(bpswClassMapWattsRecapValue+0.5)) {
-				bpswClassMapWattsRecapValue = tempCap
+			if int(math.Floor(tempCap+0.5)) != int(math.Floor(bpswClassMapWattsProacCCRecapValue+0.5)) {
+				bpswClassMapWattsProacCCRecapValue = tempCap
 				bpswClassMapWattsProacCCMutex.Lock()
 				s.isRecapping = true
 				bpswClassMapWattsProacCCMutex.Unlock()
-				log.Printf("Determined re-cap value: %f\n", bpswClassMapWattsRecapValue)
+				log.Printf("Determined re-cap value: %f\n", bpswClassMapWattsProacCCRecapValue)
 			} else {
 				bpswClassMapWattsProacCCMutex.Lock()
 				s.isRecapping = false
@@ -348,7 +348,7 @@ func (s *ProactiveClusterwideCapFCFS) StatusUpdate(driver sched.SchedulerDriver,
 			// Need to remove the task from the window of tasks.
 			s.capper.TaskFinished(*status.TaskId.Value)
 			// Determining the new cluster wide cap.
-			//tempCap, err := s.capper.Recap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
+			//tempCap, err := s.capper.NaiveRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
 			tempCap, err := s.capper.CleverRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
 			if err == nil {
 				// if new determined cap value is different from the current recap value then we need to recap.
@@ -383,7 +383,7 @@ func (s *ProactiveClusterwideCapRanked) StatusUpdate(driver sched.SchedulerDrive
 		// Need to remove the task from the window
 		s.capper.TaskFinished(*status.TaskId.Value)
 		// Determining the new cluster wide cap.
-		//tempCap, err := s.capper.Recap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
+		//tempCap, err := s.capper.NaiveRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
 		tempCap, err := s.capper.CleverRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
 		if err == nil {