sched: Make baseScheduler publicly accessible

Commit: 6f0f3788b9 (parent: 9599588fb5)
Author: Akash Kothawale, 2018-01-30 14:12:37 -05:00
Committed by: Pradyumna Kaushik
7 changed files with 57 additions and 58 deletions
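The rename works because Go derives visibility from capitalization: identifiers that begin with a lower-case letter are private to their package, while upper-case identifiers are exported. Renaming baseScheduler to BaseScheduler is therefore all that is needed to make the scheduler type reachable from other packages. A minimal sketch of the rule follows; the package name schedulers is an assumption, since the diff below does not show the package clause.

// Hypothetical file illustrating Go's export rule; not code from this commit.
package schedulers

// baseScheduler is unexported: only files inside package schedulers can name it.
type baseScheduler struct{}

// BaseScheduler is exported: any package that imports schedulers can write
// schedulers.BaseScheduler, for example in a type assertion or a constructor call.
type BaseScheduler struct{}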

File 1 of 7: MaxGreedyMins policy

@@ -14,7 +14,7 @@ import (
// Decides if to take an offer or not
func (s *MaxGreedyMins) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.Task,
totalCPU, totalRAM, totalWatts float64) bool {
- baseSchedRef := spc.(*baseScheduler)
+ baseSchedRef := spc.(*BaseScheduler)
cpus, mem, watts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
@@ -47,7 +47,7 @@ func (s *MaxGreedyMins) CheckFit(
totalRAM *float64,
totalWatts *float64) (bool, *mesos.TaskInfo) {
- baseSchedRef := spc.(*baseScheduler)
+ baseSchedRef := spc.(*BaseScheduler)
// Does the task fit
if s.takeOffer(spc, offer, task, *totalCPU, *totalRAM, *totalWatts) {
@@ -79,7 +79,7 @@ func (s *MaxGreedyMins) CheckFit(
func (s *MaxGreedyMins) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver, offers []*mesos.Offer) {
fmt.Println("Max-GreedyMins scheduling...")
- baseSchedRef := spc.(*baseScheduler)
+ baseSchedRef := spc.(*BaseScheduler)
def.SortTasks(baseSchedRef.tasks, def.SortByWatts)
baseSchedRef.LogOffersReceived(offers)
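Each scheduling policy receives the scheduler through the SchedPolicyContext interface and recovers the concrete, now-exported type with a type assertion, as in the hunks above. A small sketch of that pattern; the helper name baseFrom is invented for illustration and does not exist in the repository.

// Hypothetical helper mirroring the inline assertions used by the policies.
func baseFrom(spc SchedPolicyContext) *BaseScheduler {
	// Panics if spc does not wrap a *BaseScheduler, exactly like the
	// assertions in takeOffer, CheckFit and ConsumeOffers above.
	return spc.(*BaseScheduler)
}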

File 2 of 7: MaxMin policy

@@ -14,7 +14,7 @@ import (
// Decides if to take an offer or not
func (s *MaxMin) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.Task,
totalCPU, totalRAM, totalWatts float64) bool {
- baseSchedRef := spc.(*baseScheduler)
+ baseSchedRef := spc.(*BaseScheduler)
cpus, mem, watts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
@@ -47,7 +47,7 @@ func (s *MaxMin) CheckFit(
totalRAM *float64,
totalWatts *float64) (bool, *mesos.TaskInfo) {
- baseSchedRef := spc.(*baseScheduler)
+ baseSchedRef := spc.(*BaseScheduler)
// Does the task fit.
if s.takeOffer(spc, offer, task, *totalCPU, *totalRAM, *totalWatts) {
@@ -78,7 +78,7 @@ func (s *MaxMin) CheckFit(
func (s *MaxMin) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver, offers []*mesos.Offer) {
fmt.Println("Max-Min scheduling...")
- baseSchedRef := spc.(*baseScheduler)
+ baseSchedRef := spc.(*BaseScheduler)
def.SortTasks(baseSchedRef.tasks, def.SortByWatts)
baseSchedRef.LogOffersReceived(offers)

File 3 of 7: base scheduler (BaseScheduler)

@@ -15,7 +15,7 @@ import (
"time"
)
- type baseScheduler struct {
+ type BaseScheduler struct {
ElectronScheduler
SchedPolicyContext
// Current scheduling policy used for resource offer consumption.
@@ -57,7 +57,7 @@ type baseScheduler struct {
schedPolSwitchEnabled bool
}
- func (s *baseScheduler) init(opts ...schedPolicyOption) {
+ func (s *BaseScheduler) init(opts ...schedPolicyOption) {
for _, opt := range opts {
// applying options
if err := opt(s); err != nil {
@@ -68,11 +68,11 @@ func (s *baseScheduler) init(opts ...schedPolicyOption) {
s.mutex = sync.Mutex{}
}
- func (s *baseScheduler) SwitchSchedPol(newSchedPol SchedPolicyState) {
+ func (s *BaseScheduler) SwitchSchedPol(newSchedPol SchedPolicyState) {
s.curSchedPolicy = newSchedPol
}
- func (s *baseScheduler) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
+ func (s *BaseScheduler) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
s.tasksCreated++
@@ -125,22 +125,22 @@ func (s *baseScheduler) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskIn
}
}
- func (s *baseScheduler) OfferRescinded(_ sched.SchedulerDriver, offerID *mesos.OfferID) {
+ func (s *BaseScheduler) OfferRescinded(_ sched.SchedulerDriver, offerID *mesos.OfferID) {
s.LogOfferRescinded(offerID)
}
- func (s *baseScheduler) SlaveLost(_ sched.SchedulerDriver, slaveID *mesos.SlaveID) {
+ func (s *BaseScheduler) SlaveLost(_ sched.SchedulerDriver, slaveID *mesos.SlaveID) {
s.LogSlaveLost(slaveID)
}
- func (s *baseScheduler) ExecutorLost(_ sched.SchedulerDriver, executorID *mesos.ExecutorID,
+ func (s *BaseScheduler) ExecutorLost(_ sched.SchedulerDriver, executorID *mesos.ExecutorID,
slaveID *mesos.SlaveID, status int) {
s.LogExecutorLost(executorID, slaveID)
}
- func (s *baseScheduler) Error(_ sched.SchedulerDriver, err string) {
+ func (s *BaseScheduler) Error(_ sched.SchedulerDriver, err string) {
s.LogMesosError(err)
}
- func (s *baseScheduler) FrameworkMessage(
+ func (s *BaseScheduler) FrameworkMessage(
driver sched.SchedulerDriver,
executorID *mesos.ExecutorID,
slaveID *mesos.SlaveID,
@@ -148,27 +148,26 @@ func (s *baseScheduler) FrameworkMessage(
s.LogFrameworkMessage(executorID, slaveID, message)
}
- func (s *baseScheduler) Registered(
+ func (s *BaseScheduler) Registered(
_ sched.SchedulerDriver,
frameworkID *mesos.FrameworkID,
masterInfo *mesos.MasterInfo) {
s.LogFrameworkRegistered(frameworkID, masterInfo)
}
- func (s *baseScheduler) Reregistered(_ sched.SchedulerDriver, masterInfo *mesos.MasterInfo) {
+ func (s *BaseScheduler) Reregistered(_ sched.SchedulerDriver, masterInfo *mesos.MasterInfo) {
s.LogFrameworkReregistered(masterInfo)
}
- func (s *baseScheduler) Disconnected(sched.SchedulerDriver) {
+ func (s *BaseScheduler) Disconnected(sched.SchedulerDriver) {
s.LogDisconnected()
}
- func (s *baseScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
+ func (s *BaseScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
- utilities.RecordTotalResourceAvailability(offers)
s.curSchedPolicy.ConsumeOffers(s, driver, offers)
}
- func (s *baseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
+ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
s.LogTaskStatusUpdate(status)
if *status.State == mesos.TaskState_TASK_RUNNING {
s.tasksRunning++
@@ -188,14 +187,14 @@ func (s *baseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos
}
}
- func (s *baseScheduler) Log(lmt elecLogDef.LogMessageType, msg string) {
+ func (s *BaseScheduler) Log(lmt elecLogDef.LogMessageType, msg string) {
s.mutex.Lock()
s.logMsgType <- lmt
s.logMsg <- msg
s.mutex.Unlock()
}
- func (s *baseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
+ func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
lmt := elecLogDef.GENERAL
msgColor := elecLogDef.LogMessageColors[lmt]
var msg string
@@ -208,7 +207,7 @@ func (s *baseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
+ func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
lmt := elecLogDef.GENERAL
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprintf("Watts considered for task[%s] and host[%s] = %f Watts",
@@ -216,14 +215,14 @@ func (s *baseScheduler) LogTaskWattsConsideration(ts def.Task, host string, watt
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogOffersReceived(offers []*mesos.Offer) {
+ func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
lmt := elecLogDef.GENERAL
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprintf("Received %d resource offers", len(offers))
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
+ func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
lmt := elecLogDef.WARNING
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprintf("DECLINING OFFER for host[%s]... "+
@@ -231,14 +230,14 @@ func (s *baseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogNumberOfRunningTasks() {
+ func (s *BaseScheduler) LogNumberOfRunningTasks() {
lmt := elecLogDef.GENERAL
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprintf("Number of tasks still running = %d", s.tasksRunning)
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogCoLocatedTasks(slaveID string) {
+ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
lmt := elecLogDef.GENERAL
msgColor := elecLogDef.LogMessageColors[lmt]
buffer := bytes.Buffer{}
@@ -250,19 +249,19 @@ func (s *baseScheduler) LogCoLocatedTasks(slaveID string) {
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
+ func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
msg := fmt.Sprint(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
s.Log(elecLogDef.SCHED_TRACE, msg)
}
- func (s *baseScheduler) LogTerminateScheduler() {
+ func (s *BaseScheduler) LogTerminateScheduler() {
lmt := elecLogDef.GENERAL
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprint("Done scheduling all tasks!")
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
+ func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
offerResources ...interface{}) {
lmt := elecLogDef.WARNING
msgColor := elecLogDef.LogMessageColors[lmt]
@@ -273,28 +272,28 @@ func (s *baseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
+ func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
lmt := elecLogDef.ERROR
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprintf("OFFER RESCINDED: OfferID = %s", offerID)
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
+ func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
lmt := elecLogDef.ERROR
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprintf("SLAVE LOST: SlaveID = %s", slaveID)
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
+ func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
lmt := elecLogDef.ERROR
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprintf("EXECUTOR LOST: ExecutorID = %s, SlaveID = %s", executorID, slaveID)
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
+ func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
slaveID *mesos.SlaveID, message string) {
lmt := elecLogDef.GENERAL
msgColor := elecLogDef.LogMessageColors[lmt]
@@ -302,21 +301,21 @@ func (s *baseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogMesosError(err string) {
+ func (s *BaseScheduler) LogMesosError(err string) {
lmt := elecLogDef.ERROR
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprintf("MESOS ERROR: %s", err)
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogElectronError(err error) {
+ func (s *BaseScheduler) LogElectronError(err error) {
lmt := elecLogDef.ERROR
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprintf("ELECTRON ERROR: %v", err)
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
+ func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
masterInfo *mesos.MasterInfo) {
lmt := elecLogDef.SUCCESS
msgColor := elecLogDef.LogMessageColors[lmt]
@@ -325,21 +324,21 @@ func (s *baseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
+ func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
lmt := elecLogDef.GENERAL
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprintf("Framework re-registered with master %s", masterInfo)
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogDisconnected() {
+ func (s *BaseScheduler) LogDisconnected() {
lmt := elecLogDef.WARNING
msgColor := elecLogDef.LogMessageColors[lmt]
msg := msgColor.Sprint("Framework disconnected with master")
s.Log(lmt, msg)
}
- func (s *baseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
+ func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
var lmt elecLogDef.LogMessageType
switch *status.State {
case mesos.TaskState_TASK_ERROR, mesos.TaskState_TASK_FAILED,

File 4 of 7: BinPackSortedWatts policy

@@ -14,7 +14,7 @@ import (
// Decides if to take an offer or not
func (s *BinPackSortedWatts) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.Task, totalCPU, totalRAM, totalWatts float64) bool {
- baseSchedRef := spc.(*baseScheduler)
+ baseSchedRef := spc.(*BaseScheduler)
cpus, mem, watts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
@@ -37,7 +37,7 @@ type BinPackSortedWatts struct {
func (s *BinPackSortedWatts) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver, offers []*mesos.Offer) {
fmt.Println("BPSW scheduling...")
- baseSchedRef := spc.(*baseScheduler)
+ baseSchedRef := spc.(*BaseScheduler)
def.SortTasks(baseSchedRef.tasks, def.SortByWatts)
baseSchedRef.LogOffersReceived(offers)

File 5 of 7: FirstFit policy

@@ -12,7 +12,7 @@ import (
// Decides if to take an offer or not
func (s *FirstFit) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.Task) bool {
- baseSchedRef := spc.(*baseScheduler)
+ baseSchedRef := spc.(*BaseScheduler)
cpus, mem, watts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
@@ -36,7 +36,7 @@ type FirstFit struct {
func (s *FirstFit) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver, offers []*mesos.Offer) {
fmt.Println("FirstFit scheduling...")
- baseSchedRef := spc.(*baseScheduler)
+ baseSchedRef := spc.(*BaseScheduler)
baseSchedRef.LogOffersReceived(offers)
for _, offer := range offers {

File 6 of 7: scheduler options (schedPolicyOption helpers)

@@ -12,7 +12,7 @@ import (
"log"
)
- func coLocated(tasks map[string]bool, s baseScheduler) {
+ func coLocated(tasks map[string]bool, s BaseScheduler) {
for task := range tasks {
s.Log(elecLogDef.GENERAL, task)
@@ -39,7 +39,7 @@ func WithSchedPolicy(schedPolicyName string) schedPolicyOption {
if schedPolicy, ok := SchedPolicies[schedPolicyName]; !ok {
return errors.New("Incorrect scheduling policy.")
} else {
- s.(*baseScheduler).curSchedPolicy = schedPolicy
+ s.(*BaseScheduler).curSchedPolicy = schedPolicy
return nil
}
}
@@ -50,7 +50,7 @@ func WithTasks(ts []def.Task) schedPolicyOption {
if ts == nil {
return errors.New("Task[] is empty.")
} else {
- s.(*baseScheduler).tasks = ts
+ s.(*BaseScheduler).tasks = ts
return nil
}
}
@@ -58,21 +58,21 @@ func WithTasks(ts []def.Task) schedPolicyOption {
func WithWattsAsAResource(waar bool) schedPolicyOption {
return func(s ElectronScheduler) error {
- s.(*baseScheduler).wattsAsAResource = waar
+ s.(*BaseScheduler).wattsAsAResource = waar
return nil
}
}
func WithClassMapWatts(cmw bool) schedPolicyOption {
return func(s ElectronScheduler) error {
- s.(*baseScheduler).classMapWatts = cmw
+ s.(*BaseScheduler).classMapWatts = cmw
return nil
}
}
func WithRecordPCP(recordPCP *bool) schedPolicyOption {
return func(s ElectronScheduler) error {
- s.(*baseScheduler).RecordPCP = recordPCP
+ s.(*BaseScheduler).RecordPCP = recordPCP
return nil
}
}
@@ -82,7 +82,7 @@ func WithShutdown(shutdown chan struct{}) schedPolicyOption {
if shutdown == nil {
return errors.New("Shutdown channel is nil.")
} else {
- s.(*baseScheduler).Shutdown = shutdown
+ s.(*BaseScheduler).Shutdown = shutdown
return nil
}
}
@@ -93,7 +93,7 @@ func WithDone(done chan struct{}) schedPolicyOption {
if done == nil {
return errors.New("Done channel is nil.")
} else {
- s.(*baseScheduler).Done = done
+ s.(*BaseScheduler).Done = done
return nil
}
}
@@ -104,7 +104,7 @@ func WithPCPLog(pcpLog chan struct{}) schedPolicyOption {
if pcpLog == nil {
return errors.New("PCPLog channel is nil.")
} else {
- s.(*baseScheduler).PCPLog = pcpLog
+ s.(*BaseScheduler).PCPLog = pcpLog
return nil
}
}
@@ -112,15 +112,15 @@ func WithPCPLog(pcpLog chan struct{}) schedPolicyOption {
func WithLoggingChannels(lmt chan elecLogDef.LogMessageType, msg chan string) schedPolicyOption {
return func(s ElectronScheduler) error {
- s.(*baseScheduler).logMsgType = lmt
+ s.(*BaseScheduler).logMsgType = lmt
- s.(*baseScheduler).logMsg = msg
+ s.(*BaseScheduler).logMsg = msg
return nil
}
}
func WithSchedPolSwitchEnabled(enableSchedPolicySwitch bool) schedPolicyOption {
return func(s ElectronScheduler) error {
- s.(*baseScheduler).schedPolSwitchEnabled = enableSchedPolicySwitch
+ s.(*BaseScheduler).schedPolSwitchEnabled = enableSchedPolicySwitch
return nil
}
}

File 7 of 7: scheduler factory (SchedFactory)

@@ -26,7 +26,7 @@ func buildScheduler(s sched.Scheduler, opts ...schedPolicyOption) {
}
func SchedFactory(opts ...schedPolicyOption) sched.Scheduler {
- s := &baseScheduler{}
+ s := &BaseScheduler{}
buildScheduler(s, opts...)
return s
}
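The factory ties the exported type to the functional options changed elsewhere in this commit (WithSchedPolicy, WithTasks, WithLoggingChannels, and the rest). A hedged sketch of how a caller inside the same package might assemble a scheduler follows; the policy name "first-fit", the concrete option values, and the helper name buildExampleScheduler are assumptions for illustration, not taken from this diff.

// Hypothetical helper showing SchedFactory together with the options above.
func buildExampleScheduler(tasks []def.Task,
	logMsgType chan elecLogDef.LogMessageType, logMsg chan string) sched.Scheduler {
	recordPCP := false
	return SchedFactory(
		WithSchedPolicy("first-fit"), // assumed key in SchedPolicies
		WithTasks(tasks),
		WithWattsAsAResource(true),
		WithClassMapWatts(false),
		WithRecordPCP(&recordPCP),
		WithShutdown(make(chan struct{})),
		WithDone(make(chan struct{})),
		WithPCPLog(make(chan struct{})),
		WithLoggingChannels(logMsgType, logMsg),
		WithSchedPolSwitchEnabled(false),
	)
}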