Revert previous commit. Renamed wrong alias
parent 9952b9861d
commit 270c8669e6
24 changed files with 229 additions and 229 deletions
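All 24 files make the same mechanical change: the logrus import alias goes back from elekLog to log, so every call site in the diff (log.Fatal, log.InfoLevel, log.Fields{...}) resolves to logrus again. A minimal sketch of the aliasing pattern, assuming illustrative field values that are not taken from the repo:

package main

// Sketch of the import-alias pattern this commit reverts to: logrus is
// imported under the alias "log", so call sites keep compiling unchanged.
// Only the alias on the import line differs between the two commits.
import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// Structured logging with logrus, as used throughout the diff below.
	// The "host" and "task" values here are illustrative only.
	log.WithFields(log.Fields{
		"host": "example-agent",
		"task": "example-task",
	}).Info("TASK STARTING... ")

	// log.Fatal(err) would log at FatalLevel and then call os.Exit(1);
	// the diff below reverts those call sites from elekLog.Fatal back
	// to log.Fatal.
}

The full diff follows.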
@@ -19,7 +19,7 @@
 package schedulers
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
@@ -39,7 +39,7 @@ func (s *MaxGreedyMins) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, ta
 	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 	if err != nil {
 		// Error in determining wattsConsideration
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 	if (cpus >= (totalCPU + task.CPU)) && (mem >= (totalRAM + task.RAM)) &&
 		(!baseSchedRef.wattsAsAResource || (watts >= (totalWatts + wattsConsideration))) {
@@ -136,7 +136,7 @@ func (s *MaxGreedyMins) ConsumeOffers(spc SchedPolicyContext, driver sched.Sched
 	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 	if err != nil {
 		// Error in determining wattsConsideration
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 
 	// Don't take offer if it doesn't match our task's host requirement
@@ -161,7 +161,7 @@ func (s *MaxGreedyMins) ConsumeOffers(spc SchedPolicyContext, driver sched.Sched
 	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 	if err != nil {
 		// Error in determining wattsConsideration
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 
 	// Don't take offer if it doesn't match our task's host requirement
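Each takeOffer hunk in this commit guards on the same resource-fit predicate. A hedged sketch of that check, with stand-in types (Elektron's real def.Task and the scheduler's running totals are not reproduced here):

package main

import "fmt"

// Task stands in for Elektron's def.Task; only the fields used by the
// fit check are sketched.
type Task struct{ CPU, RAM, Watts float64 }

// fits mirrors the predicate in the takeOffer hunks: the offer's cpus and
// mem (and, when watts is treated as a resource, watts) must cover the
// running totals plus the next task's demands.
func fits(cpus, mem, watts, totalCPU, totalRAM, totalWatts float64,
	task Task, wattsAsAResource bool, wattsConsideration float64) bool {
	return cpus >= totalCPU+task.CPU &&
		mem >= totalRAM+task.RAM &&
		(!wattsAsAResource || watts >= totalWatts+wattsConsideration)
}

func main() {
	task := Task{CPU: 1.5, RAM: 1024, Watts: 50}
	fmt.Println(fits(4, 4096, 200, 2, 2048, 100, task, true, 50)) // true
}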
@@ -19,7 +19,7 @@
 package schedulers
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
@@ -39,7 +39,7 @@ func (s *MaxMin) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.
 	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 	if err != nil {
 		// Error in determining wattsConsideration
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 	if (cpus >= (totalCPU + task.CPU)) && (mem >= (totalRAM + task.RAM)) &&
 		(!baseSchedRef.wattsAsAResource || (watts >= (totalWatts + wattsConsideration))) {
@@ -148,7 +148,7 @@ func (s *MaxMin) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDri
 	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 	if err != nil {
 		// Error in determining wattsConsideration.
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 
 	// Don't take offer if it doesn't match our task's host requirement.

@@ -28,7 +28,7 @@ import (
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	"github.com/mesos/mesos-go/api/v0/mesosutil"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
@@ -67,7 +67,7 @@ type BaseScheduler struct {
 	// Controls when to shutdown pcp logging.
 	PCPLog chan struct{}
 
-	schedTrace *elekLog.Logger
+	schedTrace *log.Logger
 
 	mutex sync.Mutex
 
@@ -102,7 +102,7 @@ func (s *BaseScheduler) init(opts ...SchedulerOptions) {
 	for _, opt := range opts {
 		// applying options
 		if err := opt(s); err != nil {
-			elekLog.Fatal(err)
+			log.Fatal(err)
 		}
 	}
 	s.TasksRunningMutex.Lock()
@@ -251,12 +251,12 @@ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos
 func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 	lmt := elekLogTypes.GENERAL
 	if ts == nil {
-		elektronLogging.ElektronLog.Log(lmt, elekLog.InfoLevel,
-			elekLog.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}, "TASKS STARTING...")
+		elektronLogging.ElektronLog.Log(lmt, log.InfoLevel,
+			log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}, "TASKS STARTING...")
 	} else {
 		elektronLogging.ElektronLog.Log(lmt,
-			elekLog.InfoLevel,
-			elekLog.Fields{"task": fmt.Sprintf("%s", ts.Name),
+			log.InfoLevel,
+			log.Fields{"task": fmt.Sprintf("%s", ts.Name),
 				"Instance": fmt.Sprintf("%d", *ts.Instances), "host": fmt.Sprintf("%s", offer.GetHostname())},
 			"TASK STARTING... ")
 	}
@@ -265,29 +265,29 @@ func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}, "Watts considered for ")
+		log.InfoLevel,
+		log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}, "Watts considered for ")
 }
 
 func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}, "")
+		log.InfoLevel,
+		log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}, "")
 }
 
 func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
 	lmt := elekLogTypes.WARNING
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.WarnLevel,
-		elekLog.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}, "No tasks left to schedule ")
+		log.WarnLevel,
+		log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}, "No tasks left to schedule ")
 }
 
 func (s *BaseScheduler) LogNumberOfRunningTasks() {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}, "")
+		log.InfoLevel,
+		log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}, "")
 }
 
 func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
@@ -299,21 +299,21 @@ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
 	}
 	s.TasksRunningMutex.Unlock()
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}, "")
+		log.InfoLevel,
+		log.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}, "")
 }
 
 func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
 	elektronLogging.ElektronLog.Log(elekLogTypes.SCHED_TRACE,
-		elekLog.InfoLevel,
-		elekLog.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}, "")
+		log.InfoLevel,
+		log.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}, "")
 }
 
 func (s *BaseScheduler) LogTerminateScheduler() {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{}, "Done scheduling all tasks!")
+		log.InfoLevel,
+		log.Fields{}, "Done scheduling all tasks!")
 }
 
 func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
@@ -322,73 +322,73 @@ func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 	buffer := bytes.Buffer{}
 	buffer.WriteString(fmt.Sprintf("<CPU: %f, RAM: %f, Watts: %f>", offerResources...))
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.WarnLevel,
-		elekLog.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}, "DECLINING OFFER... Offer has insufficient resources to launch a task")
+		log.WarnLevel,
+		log.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}, "DECLINING OFFER... Offer has insufficient resources to launch a task")
 }
 
 func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
 	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.ErrorLevel,
-		elekLog.Fields{"OfferID": fmt.Sprintf("%s", offerID)}, "OFFER RESCINDED")
+		log.ErrorLevel,
+		log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}, "OFFER RESCINDED")
 }
 
 func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
 	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.ErrorLevel,
-		elekLog.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}, "SLAVE LOST")
+		log.ErrorLevel,
+		log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}, "SLAVE LOST")
 }
 
 func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
 	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.ErrorLevel,
-		elekLog.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}, "EXECUTOR LOST")
+		log.ErrorLevel,
+		log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}, "EXECUTOR LOST")
 }
 
 func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
 	slaveID *mesos.SlaveID, message string) {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Received Framework message from executor": executorID}, message)
+		log.InfoLevel,
+		log.Fields{"Received Framework message from executor": executorID}, message)
 }
 
 func (s *BaseScheduler) LogMesosError(err string) {
 	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.ErrorLevel,
-		elekLog.Fields{"MESOS ERROR": fmt.Sprintf("%v", err)}, "")
+		log.ErrorLevel,
+		log.Fields{"MESOS ERROR": fmt.Sprintf("%v", err)}, "")
 }
 
 func (s *BaseScheduler) LogElectronError(err error) {
 	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.ErrorLevel,
-		elekLog.Fields{"ELECTRON ERROR": fmt.Sprintf("%v", err)}, "")
+		log.ErrorLevel,
+		log.Fields{"ELECTRON ERROR": fmt.Sprintf("%v", err)}, "")
 }
 
 func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
 	masterInfo *mesos.MasterInfo) {
 	lmt := elekLogTypes.SUCCESS
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%s", masterInfo)}, "FRAMEWORK REGISTERED!")
+		log.InfoLevel,
+		log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%s", masterInfo)}, "FRAMEWORK REGISTERED!")
 }
 
 func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"master": fmt.Sprintf("%s", masterInfo)}, "Framework re-registered")
+		log.InfoLevel,
+		log.Fields{"master": fmt.Sprintf("%s", masterInfo)}, "Framework re-registered")
 }
 
 func (s *BaseScheduler) LogDisconnected() {
 	lmt := elekLogTypes.WARNING
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.WarnLevel,
-		elekLog.Fields{}, "Framework disconnected with master")
+		log.WarnLevel,
+		log.Fields{}, "Framework disconnected with master")
 }
 
 func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
@@ -403,15 +403,15 @@ func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
 		lmt = elekLogTypes.GENERAL
 	}
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}, "Task Status received")
+		log.InfoLevel,
+		log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}, "Task Status received")
 }
 
 func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicyState) {
 	logSPS := func() {
 		elektronLogging.ElektronLog.Log(elekLogTypes.SPS,
-			elekLog.InfoLevel,
-			elekLog.Fields{"Name": name}, "")
+			log.InfoLevel,
+			log.Fields{"Name": name}, "")
 	}
 	if s.hasReceivedResourceOffers && (s.curSchedPolicy != nextPolicy) {
 		logSPS()
@@ -421,13 +421,13 @@ func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicy
 	// Logging the size of the scheduling window and the scheduling policy
 	// that is going to schedule the tasks in the scheduling window.
 	elektronLogging.ElektronLog.Log(elekLogTypes.SCHED_WINDOW,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}, "")
+		log.InfoLevel,
+		log.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}, "")
 }
 
 func (s *BaseScheduler) LogClsfnAndTaskDistOverhead(overhead time.Duration) {
 	// Logging the overhead in microseconds.
 	elektronLogging.ElektronLog.Log(elekLogTypes.CLSFN_TASKDIST_OVERHEAD,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}, "")
+		log.InfoLevel,
+		log.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}, "")
 }

@@ -19,7 +19,7 @@
 package schedulers
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
@@ -39,7 +39,7 @@ func (s *BinPackSortedWatts) takeOffer(spc SchedPolicyContext, offer *mesos.Offe
 	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 	if err != nil {
 		// Error in determining wattsConsideration.
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 	if (cpus >= (totalCPU + task.CPU)) && (mem >= (totalRAM + task.RAM)) &&
 		(!baseSchedRef.wattsAsAResource || (watts >= (totalWatts + wattsConsideration))) {
@@ -83,7 +83,7 @@ func (s *BinPackSortedWatts) ConsumeOffers(spc SchedPolicyContext, driver sched.
 	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 	if err != nil {
 		// Error in determining wattsConsideration.
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 
 	// Don't take offer if it doesn't match our task's host requirement.

@@ -23,7 +23,7 @@ import (
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
 	"github.com/pkg/errors"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/constants"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
@@ -36,13 +36,13 @@ func coLocated(tasks map[string]bool, s BaseScheduler) {
 
 	for task := range tasks {
 		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-			elekLog.InfoLevel,
-			elekLog.Fields{"Task": task}, "")
+			log.InfoLevel,
+			log.Fields{"Task": task}, "")
 	}
 
 	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-		elekLog.InfoLevel,
-		elekLog.Fields{}, "---------------------")
+		log.InfoLevel,
+		log.Fields{}, "---------------------")
 }
 
 // Get the powerClass of the given hostname.
@@ -157,7 +157,7 @@ func WithSchedPolSwitchEnabled(enableSchedPolicySwitch bool, switchingCriteria s
 func WithNameOfFirstSchedPolToFix(nameOfFirstSchedPol string) SchedulerOptions {
 	return func(s ElectronScheduler) error {
 		if nameOfFirstSchedPol == "" {
-			elekLog.Println("First scheduling policy to deploy not mentioned. This is now" +
+			log.Println("First scheduling policy to deploy not mentioned. This is now" +
 				" going to be determined at runtime.")
 			return nil
 		}
@@ -175,7 +175,7 @@ func WithFixedSchedulingWindow(toFixSchedWindow bool, fixedSchedWindowSize int)
 		if fixedSchedWindowSize <= 0 {
 			return errors.New("Invalid value of scheduling window size. Please provide a value > 0.")
 		}
-		elekLog.Println(fmt.Sprintf("Fixing the size of the scheduling window to %d.."+
+		log.Println(fmt.Sprintf("Fixing the size of the scheduling window to %d.."+
 			".", fixedSchedWindowSize))
 		s.(*BaseScheduler).toFixSchedWindow = toFixSchedWindow
 		s.(*BaseScheduler).schedWindowSize = fixedSchedWindowSize

@@ -24,7 +24,7 @@ import (
 
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
@@ -91,8 +91,8 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
 	taskDist, err := def.GetTaskDistributionInWindow(baseSchedRef.schedWindowSize, baseSchedRef.tasks)
 	baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
 	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}, "Switching... ")
+		log.InfoLevel,
+		log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}, "Switching... ")
 	if err != nil {
 		// All the tasks in the window were only classified into 1 cluster.
 		// Max-Min and Max-GreedyMins would work the same way as Bin-Packing for this situation.
@@ -220,8 +220,8 @@ func (bsps *baseSchedPolicyState) SwitchIfNecessary(spc SchedPolicyContext) {
 		switchToPolicyName = switchBasedOn[baseSchedRef.schedPolSwitchCriteria](baseSchedRef)
 	} else {
 		// We continue working with the currently deployed scheduling policy.
-		elekLog.Println("Continuing with the current scheduling policy...")
-		elekLog.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
+		log.Println("Continuing with the current scheduling policy...")
+		log.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
 			baseSchedRef.schedWindowSize)
 		return
 	}
@@ -234,8 +234,8 @@ func (bsps *baseSchedPolicyState) SwitchIfNecessary(spc SchedPolicyContext) {
 		bsps.numTasksScheduled = 0
 	} else {
 		// We continue working with the currently deployed scheduling policy.
-		elekLog.Println("Continuing with the current scheduling policy...")
-		elekLog.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
+		log.Println("Continuing with the current scheduling policy...")
+		log.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
 			baseSchedRef.schedWindowSize)
 		return
 	}
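Taken together, the wrapper calls in this diff all share one shape: elektronLogging.ElektronLog.Log(messageType, level, fields, message), where the level and fields now come from the log (logrus) alias. A minimal sketch of a dispatcher with that shape, assuming nothing about Elektron's actual elektronLogging internals beyond the call signature visible above:

package main

import (
	log "github.com/sirupsen/logrus"
)

// messageType mirrors the role of the elekLogTypes constants seen in the
// diff (GENERAL, WARNING, ERROR, SCHED_TRACE, ...); the values here are
// illustrative stand-ins, not Elektron's definitions.
type messageType int

const (
	GENERAL messageType = iota
	WARNING
	ERROR
)

// logger sketches only the call shape used throughout the diff:
// Log(messageType, level, fields, message).
type logger struct{}

// Log forwards to logrus; a real implementation could also route by mt
// to per-type files or sinks.
func (l logger) Log(mt messageType, level log.Level, fields log.Fields, msg string) {
	log.WithFields(fields).Log(level, msg)
}

func main() {
	var elektronLog logger
	elektronLog.Log(GENERAL, log.InfoLevel,
		log.Fields{"Resource offers received": 2}, "")
}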