Used WithField() and Logf() where required.
Used elektronLogger#WithField(...) for a single key-value pair, and elektronLogger#Logf(...) where a formatted string is required.
parent 3b70a13cc8
commit 12f827c0fe
7 changed files with 91 additions and 72 deletions
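The pattern of the change, in brief: calls that log several fields keep elektronLogger#WithFields (now one entry per line), calls with a single key-value pair move to elektronLogger#WithField, and purely formatted messages move to elektronLogger#Logf. The snippet below is an illustrative sketch only; elekLog, CONSOLE, log.Fields and the logrus level constants are the identifiers used in the diff that follows, and imports and surrounding context are omitted.

    // Several fields: keep WithFields, one entry per line.
    elekLog.WithFields(log.Fields{
        "Host":  host,
        "Power": fmt.Sprintf("%f", power*pcp.RAPLUnits),
    }).Log(CONSOLE, log.InfoLevel, "")

    // Single key-value pair: prefer WithField.
    elekLog.WithField("host", host).Log(CONSOLE, log.InfoLevel, "New host detected")

    // Formatted message with no structured fields: prefer Logf.
    elekLog.Logf(CONSOLE, log.InfoLevel, "Uncapping host %s", host)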
@@ -111,8 +111,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 powerHistories[host].Value = power
 powerHistories[host] = powerHistories[host].Next()

-elekLog.WithFields(log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]),
-"Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))}).Log(CONSOLE, log.InfoLevel, "")
+elekLog.WithFields(log.Fields{
+"Host": indexToHost[powerIndex],
+"Power": fmt.Sprintf("%f", power*pcp.RAPLUnits),
+}).Log(CONSOLE, log.InfoLevel, "")

 totalPower += power
 }
@@ -123,12 +125,13 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh

 clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)

-elekLog.WithFields(log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
-"Sec Avg": fmt.Sprintf("%f", clusterMean)}).Log(CONSOLE, log.InfoLevel, "")
+elekLog.WithFields(log.Fields{
+"Total power": fmt.Sprintf("%f %d Sec", clusterPower, clusterPowerHist.Len()),
+"Avg": fmt.Sprintf("%f", clusterMean),
+}).Log(CONSOLE, log.InfoLevel, "")

 if clusterMean > hiThreshold {
-elekLog.Log(CONSOLE,
-log.InfoLevel, "Need to cap a node")
+elekLog.Log(CONSOLE, log.InfoLevel, "Need to cap a node")
 // Create statics for all victims and choose one to cap
 victims := make([]pcp.Victim, 0, 8)

@@ -149,12 +152,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 if !cappedHosts[victim.Host] {
 cappedHosts[victim.Host] = true
 orderCapped = append(orderCapped, victim.Host)
-elekLog.WithFields(log.Fields{"Capping Victim": fmt.Sprintf("%s", victim.Host),
-"Avg. Wattage": fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}).Log(CONSOLE, log.InfoLevel, "")
+elekLog.WithField("Avg. Wattage",
+fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)).Logf(CONSOLE, log.InfoLevel, "Capping Victim %s", victim.Host)
 if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
-elekLog.Log(CONSOLE,
-log.ErrorLevel,
-"Error capping host")
+elekLog.Log(CONSOLE, log.ErrorLevel, "Error capping host")
 }
 break // Only cap one machine at at time.
 }
@@ -167,8 +168,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 orderCapped = orderCapped[:len(orderCapped)-1]
 cappedHosts[host] = false
 // User RAPL package to send uncap.
-log.Printf("Uncapping host %s", host)
-elekLog.WithFields(log.Fields{"Uncapped host": host}).Log(CONSOLE, log.InfoLevel, "")
+elekLog.Logf(CONSOLE, log.InfoLevel, "Uncapping host %s", host)
 if err := rapl.Cap(host, "rapl", 100); err != nil {
 elekLog.Log(CONSOLE, log.ErrorLevel, "Error capping host")
 }
@@ -126,8 +126,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 powerHistories[host].Value = power
 powerHistories[host] = powerHistories[host].Next()

-elekLog.WithFields(log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]),
-"Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))}).Log(CONSOLE, log.InfoLevel, "")
+elekLog.WithFields(log.Fields{
+"Host": indexToHost[powerIndex],
+"Power": fmt.Sprintf("%f", power*pcp.RAPLUnits),
+}).Log(CONSOLE, log.InfoLevel, "")
 totalPower += power
 }
 clusterPower := totalPower * pcp.RAPLUnits
@@ -137,15 +139,16 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh

 clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)

-elekLog.WithFields(log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
-"Sec Avg": fmt.Sprintf("%f", clusterMean)}).Log(CONSOLE, log.InfoLevel, "")
+elekLog.WithFields(log.Fields{
+"Total power": fmt.Sprintf("%f %d Sec", clusterPower, clusterPowerHist.Len()),
+"Avg": fmt.Sprintf("%f", clusterMean),
+}).Log(CONSOLE, log.InfoLevel, "")

 if clusterMean >= hiThreshold {
 elekLog.Log(CONSOLE, log.InfoLevel, "Need to cap a node")
+elekLog.Logf(CONSOLE, log.InfoLevel, "Cap values of capped victims %v", cappedVictims)
+elekLog.Logf(CONSOLE, log.InfoLevel, "Cap values of victims to uncap %v", orderCappedVictims)

-elekLog.WithFields(log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
-
-elekLog.WithFields(log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
 // Create statics for all victims and choose one to cap
 victims := make([]pcp.Victim, 0, 8)

@@ -173,7 +176,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 // Need to cap this victim.
 if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {

-elekLog.WithFields(log.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}).Log(CONSOLE, log.ErrorLevel, "")
+elekLog.Logf(CONSOLE, log.ErrorLevel, "Error capping host %s", victims[i].Host)
 } else {

 elekLog.Logf(CONSOLE, log.InfoLevel, "Capped host[%s] at %f", victims[i].Host, 50.0)
@@ -200,11 +203,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 newCapValue := getNextCapValue(capValue, 2)
 if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {

-elekLog.WithFields(log.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}).Log(CONSOLE, log.ErrorLevel, "")
+elekLog.Logf(CONSOLE, log.ErrorLevel, "Error capping host %s", alreadyCappedHosts[i])
 } else {
 // Successful cap
-elekLog.Logf(CONSOLE, log.InfoLevel,
-"Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
+elekLog.Logf(CONSOLE, log.InfoLevel, "Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
 // Checking whether this victim can be capped further
 if newCapValue <= constants.LowerCapLimit {
 // Deleting victim from cappedVictims.
@@ -234,8 +236,8 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 } else if clusterMean < loThreshold {

 elekLog.Log(CONSOLE, log.InfoLevel, "Need to uncap a node")
-elekLog.WithFields(log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
-elekLog.WithFields(log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
+elekLog.Logf(CONSOLE, log.InfoLevel, "Cap values of capped victims - %v", cappedVictims)
+elekLog.Logf(CONSOLE, log.InfoLevel, "Cap values of victims to uncap - %v", orderCappedVictims)
 if len(orderCapped) > 0 {
 // We pick the host that is capped the most to uncap.
 orderCappedToSort := utilities.GetPairList(orderCappedVictims)
@@ -246,7 +248,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 newUncapValue := orderCappedVictims[hostToUncap] * 2.0
 if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {

-elekLog.WithFields(log.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}).Log(CONSOLE, log.ErrorLevel, "")
+elekLog.Logf(CONSOLE, log.ErrorLevel, "Error uncapping host %s", hostToUncap)
 } else {
 // Successful uncap
 elekLog.Logf(CONSOLE, log.InfoLevel, "Uncapped host[%s] to %f", hostToUncap, newUncapValue)
@@ -285,8 +285,10 @@ func main() {

 // Starting the scheduler driver.
 if status, err := driver.Run(); err != nil {
-elekLog.WithFields(log.Fields{"status": status.String(), "error": err.Error()}).Log(CONSOLE,
-log.ErrorLevel, "Framework stopped ")
+elekLog.WithFields(log.Fields{
+"status": status.String(),
+"error": err.Error(),
+}).Log(CONSOLE, log.ErrorLevel, "Framework stopped ")
 }
 elekLog.Log(CONSOLE, log.InfoLevel, "Exiting...")
 }
@@ -250,30 +250,34 @@ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos

 func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 if ts == nil {
-elekLog.WithFields(log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}).Log(CONSOLE, log.InfoLevel, "TASKS STARTING...")
+elekLog.WithField("host", offer.GetHostname()).Log(CONSOLE, log.InfoLevel, "TASKS STARTING...")
 } else {
-elekLog.WithFields(log.Fields{"task": fmt.Sprintf("%s", ts.Name), "Instance": fmt.Sprintf("%d", *ts.Instances),
-"host": fmt.Sprintf("%s", offer.GetHostname())}).Log(CONSOLE, log.InfoLevel, "TASK STARTING... ")
+elekLog.WithFields(log.Fields{
+"task": ts.Name,
+"Instance": fmt.Sprintf("%d", *ts.Instances),
+"host": offer.GetHostname(),
+}).Log(CONSOLE, log.InfoLevel, "TASK STARTING... ")
 }
 }

 func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
-elekLog.WithFields(log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}).Log(CONSOLE, log.InfoLevel, "Watts considered for ")
+elekLog.WithFields(log.Fields{
+"task": ts.Name,
+"host": host,
+"Watts": fmt.Sprintf("%f", wattsToConsider),
+}).Log(CONSOLE, log.InfoLevel, "Watts considered for ")
 }

 func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
-elekLog.WithFields(log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}).Log(CONSOLE,
-log.InfoLevel, "")
+elekLog.WithField("numOffers", fmt.Sprintf("%d", len(offers))).Log(CONSOLE, log.InfoLevel, "Resource offers received")
 }

 func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
-elekLog.WithFields(log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}).Log(CONSOLE,
-log.WarnLevel, "No tasks left to schedule ")
+elekLog.Logf(CONSOLE, log.WarnLevel, "DECLINING OFFER for host %s. No tasks left to schedule", offer.GetHostname())
 }

 func (s *BaseScheduler) LogNumberOfRunningTasks() {
-elekLog.WithFields(log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}).Log(CONSOLE,
-log.InfoLevel, "")
+elekLog.Logf(CONSOLE, log.InfoLevel, "Number of tasks still running %d", s.tasksRunning)
 }

 func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
@@ -283,12 +287,11 @@ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
 buffer.WriteString(fmt.Sprintln(taskName))
 }
 s.TasksRunningMutex.Unlock()
-elekLog.WithFields(log.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}).Log(CONSOLE,
-log.InfoLevel, "")
+elekLog.WithField("Tasks", buffer.String()).Log(CONSOLE, log.InfoLevel, "Colocated with")
 }

 func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
-elekLog.WithFields(log.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}).Log(SCHED_TRACE, log.InfoLevel, "")
+elekLog.WithField(offer.GetHostname(), taskToSchedule.GetTaskId().GetValue()).Log(SCHED_TRACE, log.InfoLevel, "")
 }

 func (s *BaseScheduler) LogTerminateScheduler() {
@@ -299,47 +302,48 @@ func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 offerResources ...interface{}) {
 buffer := bytes.Buffer{}
 buffer.WriteString(fmt.Sprintf("<CPU: %f, RAM: %f, Watts: %f>", offerResources...))
-elekLog.WithFields(log.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}).Log(CONSOLE,
+elekLog.WithField("Offer Resources", buffer.String()).Log(CONSOLE,
 log.WarnLevel, "DECLINING OFFER... Offer has insufficient resources to launch a task")
 }

 func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
-elekLog.WithFields(log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}).Log(CONSOLE,
-log.ErrorLevel, "OFFER RESCINDED")
+elekLog.WithField("OfferID", *offerID.Value).Log(CONSOLE, log.ErrorLevel, "OFFER RESCINDED")
 }

 func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
-elekLog.WithFields(log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}).Log(CONSOLE,
-log.ErrorLevel, "SLAVE LOST")
+elekLog.WithField("SlaveID", *slaveID.Value).Log(CONSOLE, log.ErrorLevel, "SLAVE LOST")
 }

 func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
-elekLog.WithFields(log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}).Log(CONSOLE, log.ErrorLevel, "EXECUTOR LOST")
+elekLog.WithFields(log.Fields{
+"ExecutorID": executorID,
+"SlaveID": slaveID,
+}).Log(CONSOLE, log.ErrorLevel, "EXECUTOR LOST")
 }

 func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
 slaveID *mesos.SlaveID, message string) {
-elekLog.WithFields(log.Fields{"Received Framework message from executor": executorID}).Log(CONSOLE,
-log.InfoLevel, message)
+elekLog.Logf(CONSOLE, log.InfoLevel, "Received Framework message from executor %v", executorID)
 }

 func (s *BaseScheduler) LogMesosError(err string) {
-elekLog.WithFields(log.Fields{"MESOS CONSOLE": fmt.Sprintf("%v", err)}).Log(CONSOLE,
-log.ErrorLevel, "")
+elekLog.Logf(CONSOLE, log.ErrorLevel, "MESOS CONSOLE %v", err)
 }

 func (s *BaseScheduler) LogElectronError(err error) {
-elekLog.WithFields(log.Fields{"ELECTRON CONSOLE": fmt.Sprintf("%v", err)}).Log(CONSOLE, log.ErrorLevel, "")
+elekLog.Logf(CONSOLE, log.ErrorLevel, "ELEKTRON CONSOLE %v", err)
 }

 func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
 masterInfo *mesos.MasterInfo) {
-elekLog.WithFields(log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%v", masterInfo)}).Log(CONSOLE, log.InfoLevel, "FRAMEWORK REGISTERED!")
+elekLog.WithFields(log.Fields{
+"frameworkID": frameworkID,
+"master": fmt.Sprintf("%v", masterInfo),
+}).Log(CONSOLE, log.InfoLevel, "FRAMEWORK REGISTERED!")
 }

 func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
-elekLog.WithFields(log.Fields{"master": fmt.Sprintf("%v", masterInfo)}).Log(CONSOLE,
-log.InfoLevel, "Framework re-registered")
+elekLog.WithField("master", fmt.Sprintf("%v", masterInfo)).Log(CONSOLE, log.InfoLevel, "Framework re-registered")
 }

 func (s *BaseScheduler) LogDisconnected() {
@@ -355,12 +359,15 @@ func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
 default:
 level = log.InfoLevel
 }
-elekLog.WithFields(log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}).Log(CONSOLE, level, "Task Status received")
+elekLog.WithFields(log.Fields{
+"task": *status.TaskId.Value,
+"state": NameFor(status.State),
+}).Log(CONSOLE, level, "Task Status received")
 }

 func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicyState) {
 logSPS := func() {
-elekLog.WithFields(log.Fields{"Name": name}).Log(SPS, log.InfoLevel, "")
+elekLog.WithField("Name", name).Log(SPS, log.InfoLevel, "")
 }
 if s.hasReceivedResourceOffers && (s.curSchedPolicy != nextPolicy) {
 logSPS()
@@ -369,10 +376,13 @@ func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicy
 }
 // Logging the size of the scheduling window and the scheduling policy
 // that is going to schedule the tasks in the scheduling window.
-elekLog.WithFields(log.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}).Log(SCHED_WINDOW, log.InfoLevel, "")
+elekLog.WithFields(log.Fields{
+"Window size": fmt.Sprintf("%d", s.schedWindowSize),
+"Name": name,
+}).Log(SCHED_WINDOW, log.InfoLevel, "")
 }

 func (s *BaseScheduler) LogClsfnAndTaskDistOverhead(overhead time.Duration) {
 // Logging the overhead in microseconds.
-elekLog.WithFields(log.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}).Log(CLSFN_TASKDISTR_OVERHEAD, log.InfoLevel, "")
+elekLog.WithField("Overhead in microseconds", fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)).Log(CLSFN_TASKDISTR_OVERHEAD, log.InfoLevel, "")
 }
@@ -36,7 +36,7 @@ import (
 func coLocated(tasks map[string]bool, s BaseScheduler) {

 for _, task := range tasks {
-elekLog.WithFields(log.Fields{"Task": task}).Log(CONSOLE, log.InfoLevel, "")
+elekLog.WithField("Task", fmt.Sprintf("%v", task)).Log(CONSOLE, log.InfoLevel, "")
 }

 elekLog.Log(CONSOLE, log.InfoLevel, "---------------------")
@@ -90,8 +90,7 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
 // Determine the distribution of tasks in the new scheduling window.
 taskDist, err := def.GetTaskDistributionInWindow(baseSchedRef.schedWindowSize, baseSchedRef.tasks)
 baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
-elekLog.WithFields(log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}).Log(CONSOLE,
-log.InfoLevel, "Switching... ")
+elekLog.WithField("Task Distribution", fmt.Sprintf("%f", taskDist)).Log(CONSOLE, log.InfoLevel, "Switching... ")
 if err != nil {
 // All the tasks in the window were only classified into 1 cluster.
 // Max-Min and Max-GreedyMins would work the same way as Bin-Packing for this situation.
@@ -219,9 +218,11 @@ func (bsps *baseSchedPolicyState) SwitchIfNecessary(spc SchedPolicyContext) {
 switchToPolicyName = switchBasedOn[baseSchedRef.schedPolSwitchCriteria](baseSchedRef)
 } else {
 // We continue working with the currently deployed scheduling policy.
-log.Println("Continuing with the current scheduling policy...")
-log.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
-baseSchedRef.schedWindowSize)
+elekLog.Log(CONSOLE, log.InfoLevel, "Continuing with the current scheduling policy...")
+elekLog.WithFields(log.Fields{
+"TasksScheduled": fmt.Sprintf("%d", bsps.numTasksScheduled),
+"SchedWindowSize": fmt.Sprintf("%d", baseSchedRef.schedWindowSize),
+}).Log(CONSOLE, log.InfoLevel, "")
 return
 }
 }
@@ -233,9 +234,11 @@ func (bsps *baseSchedPolicyState) SwitchIfNecessary(spc SchedPolicyContext) {
 bsps.numTasksScheduled = 0
 } else {
 // We continue working with the currently deployed scheduling policy.
-log.Println("Continuing with the current scheduling policy...")
-log.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
-baseSchedRef.schedWindowSize)
+elekLog.Log(CONSOLE, log.InfoLevel, "Continuing with the current scheduling policy...")
+elekLog.WithFields(log.Fields{
+"TasksScheduled": fmt.Sprintf("%d", bsps.numTasksScheduled),
+"SchedWindowSize": fmt.Sprintf("%d", baseSchedRef.schedWindowSize),
+}).Log(CONSOLE, log.InfoLevel, "")
 return
 }
 }
@@ -90,13 +90,15 @@ func UpdateEnvironment(offer *mesos.Offer) {
 var host = offer.GetHostname()
 // If this host is not present in the set of hosts.
 if _, ok := constants.Hosts[host]; !ok {
-elekLog.WithFields(log.Fields{"Adding host": host}).Log(CONSOLE, log.InfoLevel, "New host detected")
+elekLog.WithField("host", host).Log(CONSOLE, log.InfoLevel, "New host detected")
 // Add this host.
 constants.Hosts[host] = struct{}{}
 // Get the power class of this host.
 class := PowerClass(offer)
-elekLog.WithFields(log.Fields{"host": host, "PowerClass": class}).Log(CONSOLE,
-log.InfoLevel, "Registering the power class...")
+elekLog.WithFields(log.Fields{
+"host": host,
+"PowerClass": class,
+}).Log(CONSOLE, log.InfoLevel, "Registering the power class...")
 // If new power class, register the power class.
 if _, ok := constants.PowerClasses[class]; !ok {
 constants.PowerClasses[class] = make(map[string]struct{})