Used WithField() and Logf() where required.

Used elektronLogger#WithField(...) for a single key-value pair.
Used elektronLogger#Logf(...) where a formatted string is required.
Pradyumna Kaushik 2019-12-05 22:33:28 -05:00
parent 3b70a13cc8
commit 12f827c0fe
7 changed files with 91 additions and 72 deletions
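
For reference, a minimal sketch of the convention the commit message describes, pieced together from the diffs below. It assumes the project's elekLog logger instance, the CONSOLE log type, and the logrus-style log.Fields / log.InfoLevel values that appear throughout these changes; hostname and class are placeholder variables for illustration, not code taken from the repository.

// Single key-value pair: WithField instead of WithFields + fmt.Sprintf.
elekLog.WithField("host", hostname).Log(CONSOLE, log.InfoLevel, "New host detected")

// Multiple key-value pairs: a multi-line WithFields with one field per line.
elekLog.WithFields(log.Fields{
	"host":       hostname,
	"PowerClass": class,
}).Log(CONSOLE, log.InfoLevel, "Registering the power class...")

// Purely formatted message with no structured fields: Logf.
elekLog.Logf(CONSOLE, log.InfoLevel, "Uncapping host %s", hostname)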

View file

@@ -111,8 +111,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
powerHistories[host].Value = power
powerHistories[host] = powerHistories[host].Next()
elekLog.WithFields(log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]),
"Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))}).Log(CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{
"Host": indexToHost[powerIndex],
"Power": fmt.Sprintf("%f", power*pcp.RAPLUnits),
}).Log(CONSOLE, log.InfoLevel, "")
totalPower += power
}
@@ -123,12 +125,13 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
elekLog.WithFields(log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
"Sec Avg": fmt.Sprintf("%f", clusterMean)}).Log(CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{
"Total power": fmt.Sprintf("%f %d Sec", clusterPower, clusterPowerHist.Len()),
"Avg": fmt.Sprintf("%f", clusterMean),
}).Log(CONSOLE, log.InfoLevel, "")
if clusterMean > hiThreshold {
elekLog.Log(CONSOLE,
log.InfoLevel, "Need to cap a node")
elekLog.Log(CONSOLE, log.InfoLevel, "Need to cap a node")
// Create statistics for all victims and choose one to cap
victims := make([]pcp.Victim, 0, 8)
@@ -149,12 +152,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
if !cappedHosts[victim.Host] {
cappedHosts[victim.Host] = true
orderCapped = append(orderCapped, victim.Host)
elekLog.WithFields(log.Fields{"Capping Victim": fmt.Sprintf("%s", victim.Host),
"Avg. Wattage": fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}).Log(CONSOLE, log.InfoLevel, "")
elekLog.WithField("Avg. Wattage",
fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)).Logf(CONSOLE, log.InfoLevel, "Capping Victim %s", victim.Host)
if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
elekLog.Log(CONSOLE,
log.ErrorLevel,
"Error capping host")
elekLog.Log(CONSOLE, log.ErrorLevel, "Error capping host")
}
break // Only cap one machine at a time.
}
@@ -167,8 +168,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
orderCapped = orderCapped[:len(orderCapped)-1]
cappedHosts[host] = false
// Use RAPL package to send uncap.
log.Printf("Uncapping host %s", host)
elekLog.WithFields(log.Fields{"Uncapped host": host}).Log(CONSOLE, log.InfoLevel, "")
elekLog.Logf(CONSOLE, log.InfoLevel, "Uncapping host %s", host)
if err := rapl.Cap(host, "rapl", 100); err != nil {
elekLog.Log(CONSOLE, log.ErrorLevel, "Error capping host")
}

View file

@@ -126,8 +126,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
powerHistories[host].Value = power
powerHistories[host] = powerHistories[host].Next()
elekLog.WithFields(log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]),
"Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))}).Log(CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{
"Host": indexToHost[powerIndex],
"Power": fmt.Sprintf("%f", power*pcp.RAPLUnits),
}).Log(CONSOLE, log.InfoLevel, "")
totalPower += power
}
clusterPower := totalPower * pcp.RAPLUnits
@@ -137,15 +139,16 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
elekLog.WithFields(log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
"Sec Avg": fmt.Sprintf("%f", clusterMean)}).Log(CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{
"Total power": fmt.Sprintf("%f %d Sec", clusterPower, clusterPowerHist.Len()),
"Avg": fmt.Sprintf("%f", clusterMean),
}).Log(CONSOLE, log.InfoLevel, "")
if clusterMean >= hiThreshold {
elekLog.Log(CONSOLE, log.InfoLevel, "Need to cap a node")
elekLog.Logf(CONSOLE, log.InfoLevel, "Cap values of capped victims %v", cappedVictims)
elekLog.Logf(CONSOLE, log.InfoLevel, "Cap values of victims to uncap %v", orderCappedVictims)
elekLog.WithFields(log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
// Create statistics for all victims and choose one to cap
victims := make([]pcp.Victim, 0, 8)
@@ -173,7 +176,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
// Need to cap this victim.
if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {
elekLog.WithFields(log.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}).Log(CONSOLE, log.ErrorLevel, "")
elekLog.Logf(CONSOLE, log.ErrorLevel, "Error capping host %s", victims[i].Host)
} else {
elekLog.Logf(CONSOLE, log.InfoLevel, "Capped host[%s] at %f", victims[i].Host, 50.0)
@@ -200,11 +203,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
newCapValue := getNextCapValue(capValue, 2)
if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {
elekLog.WithFields(log.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}).Log(CONSOLE, log.ErrorLevel, "")
elekLog.Logf(CONSOLE, log.ErrorLevel, "Error capping host %s", alreadyCappedHosts[i])
} else {
// Successful cap
elekLog.Logf(CONSOLE, log.InfoLevel,
"Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
elekLog.Logf(CONSOLE, log.InfoLevel, "Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
// Checking whether this victim can be capped further
if newCapValue <= constants.LowerCapLimit {
// Deleting victim from cappedVictims.
@@ -234,8 +236,8 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
} else if clusterMean < loThreshold {
elekLog.Log(CONSOLE, log.InfoLevel, "Need to uncap a node")
elekLog.WithFields(log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
elekLog.Logf(CONSOLE, log.InfoLevel, "Cap values of capped victims - %v", cappedVictims)
elekLog.Logf(CONSOLE, log.InfoLevel, "Cap values of victims to uncap - %v", orderCappedVictims)
if len(orderCapped) > 0 {
// We pick the host that is capped the most to uncap.
orderCappedToSort := utilities.GetPairList(orderCappedVictims)
@@ -246,7 +248,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
newUncapValue := orderCappedVictims[hostToUncap] * 2.0
if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {
elekLog.WithFields(log.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}).Log(CONSOLE, log.ErrorLevel, "")
elekLog.Logf(CONSOLE, log.ErrorLevel, "Error uncapping host %s", hostToUncap)
} else {
// Successful uncap
elekLog.Logf(CONSOLE, log.InfoLevel, "Uncapped host[%s] to %f", hostToUncap, newUncapValue)

View file

@@ -285,8 +285,10 @@ func main() {
// Starting the scheduler driver.
if status, err := driver.Run(); err != nil {
elekLog.WithFields(log.Fields{"status": status.String(), "error": err.Error()}).Log(CONSOLE,
log.ErrorLevel, "Framework stopped ")
elekLog.WithFields(log.Fields{
"status": status.String(),
"error": err.Error(),
}).Log(CONSOLE, log.ErrorLevel, "Framework stopped ")
}
elekLog.Log(CONSOLE, log.InfoLevel, "Exiting...")
}

View file

@@ -250,30 +250,34 @@ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos
func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
if ts == nil {
elekLog.WithFields(log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}).Log(CONSOLE, log.InfoLevel, "TASKS STARTING...")
elekLog.WithField("host", offer.GetHostname()).Log(CONSOLE, log.InfoLevel, "TASKS STARTING...")
} else {
elekLog.WithFields(log.Fields{"task": fmt.Sprintf("%s", ts.Name), "Instance": fmt.Sprintf("%d", *ts.Instances),
"host": fmt.Sprintf("%s", offer.GetHostname())}).Log(CONSOLE, log.InfoLevel, "TASK STARTING... ")
elekLog.WithFields(log.Fields{
"task": ts.Name,
"Instance": fmt.Sprintf("%d", *ts.Instances),
"host": offer.GetHostname(),
}).Log(CONSOLE, log.InfoLevel, "TASK STARTING... ")
}
}
func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
elekLog.WithFields(log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}).Log(CONSOLE, log.InfoLevel, "Watts considered for ")
elekLog.WithFields(log.Fields{
"task": ts.Name,
"host": host,
"Watts": fmt.Sprintf("%f", wattsToConsider),
}).Log(CONSOLE, log.InfoLevel, "Watts considered for ")
}
func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
elekLog.WithFields(log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}).Log(CONSOLE,
log.InfoLevel, "")
elekLog.WithField("numOffers", fmt.Sprintf("%d", len(offers))).Log(CONSOLE, log.InfoLevel, "Resource offers received")
}
func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
elekLog.WithFields(log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}).Log(CONSOLE,
log.WarnLevel, "No tasks left to schedule ")
elekLog.Logf(CONSOLE, log.WarnLevel, "DECLINING OFFER for host %s. No tasks left to schedule", offer.GetHostname())
}
func (s *BaseScheduler) LogNumberOfRunningTasks() {
elekLog.WithFields(log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}).Log(CONSOLE,
log.InfoLevel, "")
elekLog.Logf(CONSOLE, log.InfoLevel, "Number of tasks still running %d", s.tasksRunning)
}
func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
@@ -283,12 +287,11 @@ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
buffer.WriteString(fmt.Sprintln(taskName))
}
s.TasksRunningMutex.Unlock()
elekLog.WithFields(log.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}).Log(CONSOLE,
log.InfoLevel, "")
elekLog.WithField("Tasks", buffer.String()).Log(CONSOLE, log.InfoLevel, "Colocated with")
}
func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
elekLog.WithFields(log.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}).Log(SCHED_TRACE, log.InfoLevel, "")
elekLog.WithField(offer.GetHostname(), taskToSchedule.GetTaskId().GetValue()).Log(SCHED_TRACE, log.InfoLevel, "")
}
func (s *BaseScheduler) LogTerminateScheduler() {
@@ -299,47 +302,48 @@ func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
offerResources ...interface{}) {
buffer := bytes.Buffer{}
buffer.WriteString(fmt.Sprintf("<CPU: %f, RAM: %f, Watts: %f>", offerResources...))
elekLog.WithFields(log.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}).Log(CONSOLE,
elekLog.WithField("Offer Resources", buffer.String()).Log(CONSOLE,
log.WarnLevel, "DECLINING OFFER... Offer has insufficient resources to launch a task")
}
func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
elekLog.WithFields(log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}).Log(CONSOLE,
log.ErrorLevel, "OFFER RESCINDED")
elekLog.WithField("OfferID", *offerID.Value).Log(CONSOLE, log.ErrorLevel, "OFFER RESCINDED")
}
func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
elekLog.WithFields(log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}).Log(CONSOLE,
log.ErrorLevel, "SLAVE LOST")
elekLog.WithField("SlaveID", *slaveID.Value).Log(CONSOLE, log.ErrorLevel, "SLAVE LOST")
}
func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
elekLog.WithFields(log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}).Log(CONSOLE, log.ErrorLevel, "EXECUTOR LOST")
elekLog.WithFields(log.Fields{
"ExecutorID": executorID,
"SlaveID": slaveID,
}).Log(CONSOLE, log.ErrorLevel, "EXECUTOR LOST")
}
func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
slaveID *mesos.SlaveID, message string) {
elekLog.WithFields(log.Fields{"Received Framework message from executor": executorID}).Log(CONSOLE,
log.InfoLevel, message)
elekLog.Logf(CONSOLE, log.InfoLevel, "Received Framework message from executor %v", executorID)
}
func (s *BaseScheduler) LogMesosError(err string) {
elekLog.WithFields(log.Fields{"MESOS CONSOLE": fmt.Sprintf("%v", err)}).Log(CONSOLE,
log.ErrorLevel, "")
elekLog.Logf(CONSOLE, log.ErrorLevel, "MESOS CONSOLE %v", err)
}
func (s *BaseScheduler) LogElectronError(err error) {
elekLog.WithFields(log.Fields{"ELECTRON CONSOLE": fmt.Sprintf("%v", err)}).Log(CONSOLE, log.ErrorLevel, "")
elekLog.Logf(CONSOLE, log.ErrorLevel, "ELEKTRON CONSOLE %v", err)
}
func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
masterInfo *mesos.MasterInfo) {
elekLog.WithFields(log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%v", masterInfo)}).Log(CONSOLE, log.InfoLevel, "FRAMEWORK REGISTERED!")
elekLog.WithFields(log.Fields{
"frameworkID": frameworkID,
"master": fmt.Sprintf("%v", masterInfo),
}).Log(CONSOLE, log.InfoLevel, "FRAMEWORK REGISTERED!")
}
func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
elekLog.WithFields(log.Fields{"master": fmt.Sprintf("%v", masterInfo)}).Log(CONSOLE,
log.InfoLevel, "Framework re-registered")
elekLog.WithField("master", fmt.Sprintf("%v", masterInfo)).Log(CONSOLE, log.InfoLevel, "Framework re-registered")
}
func (s *BaseScheduler) LogDisconnected() {
@@ -355,12 +359,15 @@ func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
default:
level = log.InfoLevel
}
elekLog.WithFields(log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}).Log(CONSOLE, level, "Task Status received")
elekLog.WithFields(log.Fields{
"task": *status.TaskId.Value,
"state": NameFor(status.State),
}).Log(CONSOLE, level, "Task Status received")
}
func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicyState) {
logSPS := func() {
elekLog.WithFields(log.Fields{"Name": name}).Log(SPS, log.InfoLevel, "")
elekLog.WithField("Name", name).Log(SPS, log.InfoLevel, "")
}
if s.hasReceivedResourceOffers && (s.curSchedPolicy != nextPolicy) {
logSPS()
@@ -369,10 +376,13 @@ func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicy
}
// Logging the size of the scheduling window and the scheduling policy
// that is going to schedule the tasks in the scheduling window.
elekLog.WithFields(log.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}).Log(SCHED_WINDOW, log.InfoLevel, "")
elekLog.WithFields(log.Fields{
"Window size": fmt.Sprintf("%d", s.schedWindowSize),
"Name": name,
}).Log(SCHED_WINDOW, log.InfoLevel, "")
}
func (s *BaseScheduler) LogClsfnAndTaskDistOverhead(overhead time.Duration) {
// Logging the overhead in microseconds.
elekLog.WithFields(log.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}).Log(CLSFN_TASKDISTR_OVERHEAD, log.InfoLevel, "")
elekLog.WithField("Overhead in microseconds", fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)).Log(CLSFN_TASKDISTR_OVERHEAD, log.InfoLevel, "")
}

View file

@@ -36,7 +36,7 @@ import (
func coLocated(tasks map[string]bool, s BaseScheduler) {
for _, task := range tasks {
elekLog.WithFields(log.Fields{"Task": task}).Log(CONSOLE, log.InfoLevel, "")
elekLog.WithField("Task", fmt.Sprintf("%v", task)).Log(CONSOLE, log.InfoLevel, "")
}
elekLog.Log(CONSOLE, log.InfoLevel, "---------------------")

View file

@@ -90,8 +90,7 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
// Determine the distribution of tasks in the new scheduling window.
taskDist, err := def.GetTaskDistributionInWindow(baseSchedRef.schedWindowSize, baseSchedRef.tasks)
baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
elekLog.WithFields(log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}).Log(CONSOLE,
log.InfoLevel, "Switching... ")
elekLog.WithField("Task Distribution", fmt.Sprintf("%f", taskDist)).Log(CONSOLE, log.InfoLevel, "Switching... ")
if err != nil {
// All the tasks in the window were only classified into 1 cluster.
// Max-Min and Max-GreedyMins would work the same way as Bin-Packing for this situation.
@@ -219,9 +218,11 @@ func (bsps *baseSchedPolicyState) SwitchIfNecessary(spc SchedPolicyContext) {
switchToPolicyName = switchBasedOn[baseSchedRef.schedPolSwitchCriteria](baseSchedRef)
} else {
// We continue working with the currently deployed scheduling policy.
log.Println("Continuing with the current scheduling policy...")
log.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
baseSchedRef.schedWindowSize)
elekLog.Log(CONSOLE, log.InfoLevel, "Continuing with the current scheduling policy...")
elekLog.WithFields(log.Fields{
"TasksScheduled": fmt.Sprintf("%d", bsps.numTasksScheduled),
"SchedWindowSize": fmt.Sprintf("%d", baseSchedRef.schedWindowSize),
}).Log(CONSOLE, log.InfoLevel, "")
return
}
}
@@ -233,9 +234,11 @@ func (bsps *baseSchedPolicyState) SwitchIfNecessary(spc SchedPolicyContext) {
bsps.numTasksScheduled = 0
} else {
// We continue working with the currently deployed scheduling policy.
log.Println("Continuing with the current scheduling policy...")
log.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
baseSchedRef.schedWindowSize)
elekLog.Log(CONSOLE, log.InfoLevel, "Continuing with the current scheduling policy...")
elekLog.WithFields(log.Fields{
"TasksScheduled": fmt.Sprintf("%d", bsps.numTasksScheduled),
"SchedWindowSize": fmt.Sprintf("%d", baseSchedRef.schedWindowSize),
}).Log(CONSOLE, log.InfoLevel, "")
return
}
}

View file

@@ -90,13 +90,15 @@ func UpdateEnvironment(offer *mesos.Offer) {
var host = offer.GetHostname()
// If this host is not present in the set of hosts.
if _, ok := constants.Hosts[host]; !ok {
elekLog.WithFields(log.Fields{"Adding host": host}).Log(CONSOLE, log.InfoLevel, "New host detected")
elekLog.WithField("host", host).Log(CONSOLE, log.InfoLevel, "New host detected")
// Add this host.
constants.Hosts[host] = struct{}{}
// Get the power class of this host.
class := PowerClass(offer)
elekLog.WithFields(log.Fields{"host": host, "PowerClass": class}).Log(CONSOLE,
log.InfoLevel, "Registering the power class...")
elekLog.WithFields(log.Fields{
"host": host,
"PowerClass": class,
}).Log(CONSOLE, log.InfoLevel, "Registering the power class...")
// If new power class, register the power class.
if _, ok := constants.PowerClasses[class]; !ok {
constants.PowerClasses[class] = make(map[string]struct{})