Experimentation/schedPolicySwitcher

1. Initial commit for consolidated loggers using the observer pattern.
2. Class factory for schedulers.
3. Using the scheduling policy class factory in schedulers/store.go and the scheduler builder helpers in schedulers/helpers.go, added the ability to plug in a scheduling policy of your choice from the command line (right now only first-fit and bin-packing are possible). Will be updating the class factory to include other scheduling policies as well.
4. Removed TODO for using generic task sorters. Modified TODO for a config file input to run electron.
5. Added other schedulers to the factory.
6. Partially retrofitted the other scheduling policies to use the logging library.
7. Retrofitted extrema and progressive to use the consolidated logging library. Fixed parameter issue with s.base.Disconnected(). Formatted project.
8. Moved statusUpdate(...) into base.go to remove redundant code.
9. Converted the baseScheduler into a state machine where the state is a scheduling policy that defines an approach to consume resource offers.
10. Added another command line argument to enable switching of scheduling policies. Retrofitted scheduling policies to switch only if that feature has been enabled. Changed the argument to coLocated(...) to take the base type rather than the ElectronScheduler type. Also prepended the prefix to the directory of the logs so that it is easier to tell what the files in a directory correspond to without viewing their contents. Defined methods in ElectronScheduler; each corresponds to a type of log that an ElectronScheduler would make, and each needs to be implemented by the scheduling policy. Electron has only one scheduler that implements the Mesos scheduler interface; the scheduling policies are just different ways to consume Mesos resource offers. Retrofitted scheduling policies to now embed SchedPolicyState instead of baseScheduler (a sketch of this arrangement follows below).

Approved-by: Pradyumna Kaushik <pkaushi1@binghamton.edu>
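Items 9 and 10 above describe the state-machine arrangement used by the file below: a single Mesos scheduler (baseScheduler) whose state is the current scheduling policy. A minimal sketch of that arrangement, using names that appear in this file (baseScheduler, SchedPolicyContext, SchedPolicyState, ConsumeOffers, SwitchSchedPol); the interface bodies, the ResourceOffers delegation, and the curSchedPolicy field are assumptions for illustration, not copied from base.go:

// Sketch only. A scheduling policy is a state that knows how to consume offers.
type SchedPolicyState interface {
	ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver, offers []*mesos.Offer)
}

// The context lets a policy hand control over to another policy.
type SchedPolicyContext interface {
	SwitchSchedPol(s SchedPolicyState)
}

// baseScheduler implements the mesos-go scheduler interface once and delegates
// offer consumption to whichever policy state it currently holds.
func (s *baseScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	// curSchedPolicy is a hypothetical field name for the current state.
	s.curSchedPolicy.ConsumeOffers(s, driver, offers)
}
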
package schedulers

import (
	"bitbucket.org/sunybingcloud/elektron/def"
	"bitbucket.org/sunybingcloud/elektron/utilities/mesosUtils"
	"bitbucket.org/sunybingcloud/elektron/utilities/offerUtils"
	"fmt"
	mesos "github.com/mesos/mesos-go/mesosproto"
	sched "github.com/mesos/mesos-go/scheduler"
	"log"
	"math/rand"
)

// Decides whether to take an offer or not.
func (s *MaxMin) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.Task,
	totalCPU, totalRAM, totalWatts float64) bool {
	baseSchedRef := spc.(*baseScheduler)
	cpus, mem, watts := offerUtils.OfferAgg(offer)

	// TODO: Insert watts calculation here instead of taking them as a parameter.

	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
	if err != nil {
		// Error in determining wattsConsideration.
		log.Fatal(err)
	}
	// The offer must have enough CPU and RAM left for the task and, if watts is
	// being treated as a resource, enough watts as well.
	if (cpus >= (totalCPU + task.CPU)) && (mem >= (totalRAM + task.RAM)) &&
		(!baseSchedRef.wattsAsAResource || (watts >= (totalWatts + wattsConsideration))) {
		return true
	}
	return false
}
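
// MaxMin is a scheduling policy state. It consumes resource offers by
// alternating between one instance of the largest and one instance of the
// smallest pending task (by watts), for as long as the offer has room.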
type MaxMin struct {
	SchedPolicyState
}

// Determine if the remaining space inside of the offer is enough for this
// task that we need to create. If it is, create a TaskInfo and return it.
// i is the index of task in baseSchedRef.tasks; it is used to remove the task
// from the queue once all of its instances have been scheduled.
func (s *MaxMin) CheckFit(
	spc SchedPolicyContext,
	i int,
	task def.Task,
	wattsConsideration float64,
	offer *mesos.Offer,
	totalCPU *float64,
	totalRAM *float64,
	totalWatts *float64) (bool, *mesos.TaskInfo) {

	baseSchedRef := spc.(*baseScheduler)
	// Does the task fit?
	if s.takeOffer(spc, offer, task, *totalCPU, *totalRAM, *totalWatts) {

		*totalWatts += wattsConsideration
		*totalCPU += task.CPU
		*totalRAM += task.RAM
		baseSchedRef.LogCoLocatedTasks(offer.GetSlaveId().GoString())

		taskToSchedule := baseSchedRef.newTask(offer, task)

		baseSchedRef.LogSchedTrace(taskToSchedule, offer)
		*task.Instances--

		if *task.Instances <= 0 {
			// All instances of task have been scheduled; remove it.
			baseSchedRef.tasks = append(baseSchedRef.tasks[:i], baseSchedRef.tasks[i+1:]...)

			if len(baseSchedRef.tasks) <= 0 {
				baseSchedRef.LogTerminateScheduler()
				close(baseSchedRef.Shutdown)
			}
		}

		return true, taskToSchedule
	}
	return false, nil
}
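
// ConsumeOffers is this policy's implementation of offer consumption for the
// scheduler state machine described in the commit message above.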
func (s *MaxMin) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver, offers []*mesos.Offer) {
	fmt.Println("Max-Min scheduling...")
	baseSchedRef := spc.(*baseScheduler)
	def.SortTasks(baseSchedRef.tasks, def.SortByWatts)
	baseSchedRef.LogOffersReceived(offers)

	for _, offer := range offers {
		offerUtils.UpdateEnvironment(offer)
		select {
		case <-baseSchedRef.Shutdown:
			baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
			driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
			baseSchedRef.LogNumberOfRunningTasks()
			continue
		default:
		}

		tasks := []*mesos.TaskInfo{}

		offerTaken := false
		totalWatts := 0.0
		totalCPU := 0.0
		totalRAM := 0.0

		// Assumes baseSchedRef.tasks is ordered in non-decreasing order of the
		// watts requirement (see the def.SortTasks call above).

		// Attempt to schedule a single instance of the heaviest workload available first.
		// Start from the back until one fits.

		direction := false // True = Min Max, False = Max Min
		var index int
		start := true // If false then index has changed and need to keep it that way
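
		// For example, with pending tasks sorted by watts as t1 <= t2 <= ... <= t5,
		// the loop below tries to place one instance of t5 (the heaviest), then one
		// of t1 (the lightest), then t5 again, and so on, flipping direction after
		// every successful placement. If the current candidate does not fit, the
		// index walks inward (towards lighter tasks from the heavy end, towards
		// heavier tasks from the light end) looking for one that does.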
		for i := 0; i < len(baseSchedRef.tasks); i++ {
			// We need to pick a min task or a max task
			// depending on the value of direction.
			if direction && start {
				index = 0
			} else if start {
				index = len(baseSchedRef.tasks) - i - 1
			}
			task := baseSchedRef.tasks[index]

			wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
			if err != nil {
				// Error in determining wattsConsideration.
				log.Fatal(err)
			}

			// Don't take offer if it doesn't match our task's host requirement.
			if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
				continue
			}

			// TODO: Fix this so index doesn't need to be passed.
			taken, taskToSchedule := s.CheckFit(spc, index, task, wattsConsideration, offer,
				&totalCPU, &totalRAM, &totalWatts)

			if taken {
				offerTaken = true
				tasks = append(tasks, taskToSchedule)
				// Need to change direction and set start to true.
				// Setting start to true would ensure that index be set accurately again.
				direction = !direction
				start = true
				// Counteract the loop's i++ so that the next iteration re-evaluates
				// with the flipped direction instead of skipping a position.
				i--
			} else {
				// Need to move index depending on the value of direction.
				if direction {
					index++
				} else {
					index--
				}
				start = false
			}
		}

		if offerTaken {
			baseSchedRef.LogTaskStarting(nil, offer)
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
		} else {
			// There was no match for any task in this offer.
			cpus, mem, watts := offerUtils.OfferAgg(offer)
			baseSchedRef.LogInsufficientResourcesDeclineOffer(offer, cpus, mem, watts)
			driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
		}
	}

	// Switch scheduling policy only if feature enabled from CLI.
	if baseSchedRef.schedPolSwitchEnabled {
		// Switching to a random scheduling policy.
		// TODO: Switch based on some criteria.
		// Pick the policy at a random position in the SchedPolicies map by
		// walking the map (iteration order is unspecified) and counting down.
		index := rand.Intn(len(SchedPolicies))
		for _, v := range SchedPolicies {
			if index == 0 {
				spc.SwitchSchedPol(v)
				break
			}
			index--
		}
	}
}
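
The commit message above mentions a class factory for scheduling policies in schedulers/store.go, which the switching code draws from via the SchedPolicies map. A minimal sketch of how a policy such as MaxMin might be registered there, assuming SchedPolicies maps a policy name to its SchedPolicyState (the map type and the registration shown are assumptions, not copied from store.go):

// Hypothetical sketch of the registration in schedulers/store.go.
var SchedPolicies = map[string]SchedPolicyState{
	"max-min": &MaxMin{},
	// first-fit, bin-packing, and the other policies would be registered here as well.
}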