Removed ERROR, GENERAL, SUCCESS, WARNING log types

balandi1 2019-11-21 14:58:14 -05:00
parent 268df3cd51
commit cb5df9acc8
11 changed files with 71 additions and 76 deletions
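All four removed types tagged console output, so the commit collapses them into the single CONSOLE type and leaves the logrus level argument to carry severity. The call-site pattern, drawn from the hunks below:

    // Before: severity was encoded twice, once in the log type and once in the logrus level.
    elekLog.ElektronLog.Log(elekLogTypes.ERROR, log.ErrorLevel,
        log.Fields{}, "Error capping host")

    // After: one console log type; the logrus level alone conveys severity.
    elekLog.ElektronLog.Log(elekLogTypes.CONSOLE, log.ErrorLevel,
        log.Fields{}, "Error capping host")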

View file

@@ -52,7 +52,7 @@ func (tc TasksToClassify) taskObservationCalculator(task Task) []float64 {
 	} else if task.Watts != 0.0 {
 		return []float64{task.Watts}
 	} else {
-		elekLog.ElektronLog.Log(elekLogTypes.ERROR, log.FatalLevel,
+		elekLog.ElektronLog.Log(elekLogTypes.CONSOLE, log.FatalLevel,
 			log.Fields{}, "Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload")
 		return []float64{0.0} // Won't reach here.
 	}
@@ -108,7 +108,7 @@ func clusterSizeAvgMMMPU(tasks []Task, taskObservation func(task Task) []float64
 		} else {
 			// skip this value
 			// there is an error in the task config.
-			elekLog.ElektronLog.Log(elekLogTypes.ERROR, log.ErrorLevel,
+			elekLog.ElektronLog.Log(elekLogTypes.CONSOLE, log.ErrorLevel,
 				log.Fields{}, fmt.Sprintf("%s", err))
 		}
 	} else {
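Every call site touched by this commit assumes the same ElektronLog.Log(logType, level, fields, message) signature. The dispatcher itself is not part of this diff; the following is a hypothetical sketch, with names and structure assumed, of how such a wrapper might route by log type while deferring severity to logrus:

    // Hypothetical sketch; not the actual elektronLogging implementation.
    // Each log type picks a destination, and the logrus level sets severity.
    func (l *ElektronLogger) Log(logType int, level logrus.Level, fields logrus.Fields, msg string) {
        switch logType {
        case CONSOLE:
            l.consoleLogger.WithFields(fields).Log(level, msg)
        case PCP:
            l.pcpLogger.WithFields(fields).Log(level, msg)
            // SCHED_TRACE, SPS, and the rest would route to their own writers.
        }
    }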

View file

@@ -1,11 +1,7 @@
 package elektronLogging
 
 const (
-	ERROR = iota
-	WARNING
-	GENERAL
-	SUCCESS
-	CONSOLE
+	CONSOLE = iota
 	PCP
 	SCHED_TRACE
 	SPS
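The resulting constant block, as far as this hunk shows it (the hunk covers seven lines of the new file, so any types after SPS are not visible here):

    package elektronLogging

    const (
        CONSOLE = iota
        PCP
        SCHED_TRACE
        SPS
        // any remaining log types fall outside this hunk
    )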

View file

@@ -26,7 +26,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	elekLog "github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 )
 
 func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
@@ -47,7 +47,7 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 	scanner.Scan()
 	// Write to logfile
-	elekLog.ElektronLog.Log(elekLogT.PCP,
+	elekLog.ElektronLog.Log(elekLogTypes.PCP,
 		log.InfoLevel,
 		log.Fields{}, scanner.Text())
@@ -60,7 +60,7 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 		text := scanner.Text()
 		if *logging {
-			elekLog.ElektronLog.Log(elekLogT.PCP,
+			elekLog.ElektronLog.Log(elekLogTypes.PCP,
 				log.InfoLevel,
 				log.Fields{}, text)
 		}
@@ -69,7 +69,7 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 	}
 	}(logging)
-	elekLog.ElektronLog.Log(elekLogT.GENERAL,
+	elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 		log.InfoLevel,
 		log.Fields{}, "PCP logging started")
@@ -81,7 +81,7 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 	select {
 	case <-quit:
-		elekLog.ElektronLog.Log(elekLogT.GENERAL,
+		elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 			log.InfoLevel,
 			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)

View file

@@ -43,7 +43,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
 	if hiThreshold < loThreshold {
-		elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+		elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 			log.InfoLevel,
 			log.Fields{}, "High threshold is lower than low threshold!")
 	}
@@ -99,7 +99,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 		if *logging {
-			elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+			elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 				log.InfoLevel,
 				log.Fields{}, "Logging PCP...")
@@ -119,7 +119,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 			powerHistories[host].Value = power
 			powerHistories[host] = powerHistories[host].Next()
-			elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+			elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 				log.InfoLevel,
 				log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
 				"")
@@ -133,14 +133,14 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 			clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
-			elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+			elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 				log.InfoLevel,
 				log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
 					"Sec Avg": fmt.Sprintf("%f", clusterMean)},
 				"")
 			if clusterMean > hiThreshold {
-				elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+				elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 					log.InfoLevel,
 					log.Fields{}, "Need to cap a node")
 				// Create statics for all victims and choose one to cap
@@ -163,12 +163,12 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 				if !cappedHosts[victim.Host] {
 					cappedHosts[victim.Host] = true
 					orderCapped = append(orderCapped, victim.Host)
-					elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+					elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 						log.InfoLevel,
 						log.Fields{"Capping Victim": fmt.Sprintf("%s", victim.Host),
 							"Avg. Wattage": fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}, "")
 					if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
-						elekLog.ElektronLog.Log(elekLogTypes.ERROR,
+						elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 							log.ErrorLevel,
 							log.Fields{}, "Error capping host")
 					}
@@ -184,11 +184,11 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 					cappedHosts[host] = false
 					// User RAPL package to send uncap.
 					log.Printf("Uncapping host %s", host)
-					elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+					elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 						log.InfoLevel,
 						log.Fields{"Uncapped host": host}, "")
 					if err := rapl.Cap(host, "rapl", 100); err != nil {
-						elekLog.ElektronLog.Log(elekLogTypes.ERROR,
+						elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 							log.ErrorLevel,
 							log.Fields{}, "Error capping host")
 					}
@@ -200,7 +200,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 	}
 	}(logging, hiThreshold, loThreshold)
-	elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+	elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 		log.InfoLevel,
 		log.Fields{}, "PCP logging started")
@@ -212,7 +212,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 	select {
 	case <-quit:
-		elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+		elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 			log.InfoLevel,
 			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)

View file

@@ -56,7 +56,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
 	if hiThreshold < loThreshold {
-		elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+		elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 			log.InfoLevel,
 			log.Fields{}, "High threshold is lower than low threshold!")
 	}
@@ -115,7 +115,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 		for scanner.Scan() {
 			if *logging {
-				elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+				elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 					log.InfoLevel,
 					log.Fields{}, "Logging PCP...")
 				split := strings.Split(scanner.Text(), ",")
@@ -134,7 +134,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 				powerHistories[host].Value = power
 				powerHistories[host] = powerHistories[host].Next()
-				elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+				elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 					log.InfoLevel,
 					log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
 					"")
@@ -147,22 +147,22 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 				clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
-				elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+				elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 					log.InfoLevel,
 					log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
 						"Sec Avg": fmt.Sprintf("%f", clusterMean)},
 					"")
 				if clusterMean >= hiThreshold {
-					elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+					elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 						log.InfoLevel,
 						log.Fields{}, "Need to cap a node")
-					elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+					elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 						log.InfoLevel,
 						log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
-					elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+					elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 						log.InfoLevel,
 						log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")
 					// Create statics for all victims and choose one to cap
@@ -192,12 +192,12 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 						// Need to cap this victim.
 						if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {
-							elekLog.ElektronLog.Log(elekLogTypes.ERROR,
+							elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 								log.ErrorLevel,
 								log.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}, "")
 						} else {
-							elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+							elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 								log.InfoLevel,
 								log.Fields{}, fmt.Sprintf("Capped host[%s] at %f", victims[i].Host, 50.0))
 							// Keeping track of this victim and it's cap value
@@ -223,12 +223,12 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 						newCapValue := getNextCapValue(capValue, 2)
 						if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {
-							elekLog.ElektronLog.Log(elekLogTypes.ERROR,
+							elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 								log.ErrorLevel,
 								log.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}, "")
 						} else {
 							// Successful cap
-							elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+							elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 								log.InfoLevel,
 								log.Fields{}, fmt.Sprintf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue))
 							// Checking whether this victim can be capped further
@@ -253,7 +253,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 						}
 					}
 					if !canCapAlreadyCappedVictim {
-						elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+						elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 							log.InfoLevel,
 							log.Fields{}, "No Victim left to cap")
 					}
@@ -261,13 +261,13 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 				} else if clusterMean < loThreshold {
-					elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+					elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 						log.InfoLevel,
 						log.Fields{}, "Need to uncap a node")
-					elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+					elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 						log.InfoLevel,
 						log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
-					elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+					elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 						log.InfoLevel,
 						log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")
 					if len(orderCapped) > 0 {
@@ -280,12 +280,12 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 						newUncapValue := orderCappedVictims[hostToUncap] * 2.0
 						if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {
-							elekLog.ElektronLog.Log(elekLogTypes.ERROR,
+							elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 								log.ErrorLevel,
 								log.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}, "")
 						} else {
 							// Successful uncap
-							elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+							elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 								log.InfoLevel,
 								log.Fields{}, fmt.Sprintf("Uncapped host[%s] to %f", hostToUncap, newUncapValue))
 							// Can we uncap this host further. If not, then we remove its entry from orderCapped
@@ -308,7 +308,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 						}
 					}
 				} else {
-					elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+					elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 						log.InfoLevel,
 						log.Fields{}, "No host staged for Uncapped")
 				}
@@ -319,7 +319,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	}(logging, hiThreshold, loThreshold)
-	elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+	elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 		log.InfoLevel,
 		log.Fields{}, "PCP logging started")
 	if err := cmd.Start(); err != nil {
@@ -330,7 +330,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	select {
 	case <-quit:
-		elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+		elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 			log.InfoLevel,
 			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)

View file

@@ -278,11 +278,11 @@ func main() {
 	// Starting the scheduler driver.
 	if status, err := driver.Run(); err != nil {
-		elekLog.ElektronLog.Log(elekLogTypes.ERROR,
+		elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 			log.ErrorLevel,
 			log.Fields{"status": status.String(), "error": err.Error()}, "Framework stopped ")
 	}
-	elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+	elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 		log.InfoLevel,
 		log.Fields{}, "Exiting...")
 }

View file

@@ -249,7 +249,7 @@ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos
 }
 
 func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
-	lmt := elekLogTypes.GENERAL
+	lmt := elekLogTypes.CONSOLE
 	if ts == nil {
 		elekLog.ElektronLog.Log(lmt, log.InfoLevel,
 			log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}, "TASKS STARTING...")
@@ -263,35 +263,35 @@ func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 }
 
 func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
-	lmt := elekLogTypes.GENERAL
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}, "Watts considered for ")
 }
 
 func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
-	lmt := elekLogTypes.GENERAL
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}, "")
 }
 
 func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
-	lmt := elekLogTypes.WARNING
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.WarnLevel,
 		log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}, "No tasks left to schedule ")
 }
 
 func (s *BaseScheduler) LogNumberOfRunningTasks() {
-	lmt := elekLogTypes.GENERAL
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}, "")
 }
 
 func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
-	lmt := elekLogTypes.GENERAL
+	lmt := elekLogTypes.CONSOLE
 	buffer := bytes.Buffer{}
 	s.TasksRunningMutex.Lock()
 	for taskName := range s.Running[slaveID] {
@@ -310,7 +310,7 @@ func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mes
 }
 
 func (s *BaseScheduler) LogTerminateScheduler() {
-	lmt := elekLogTypes.GENERAL
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{}, "Done scheduling all tasks!")
@@ -318,7 +318,7 @@ func (s *BaseScheduler) LogTerminateScheduler() {
 func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 	offerResources ...interface{}) {
-	lmt := elekLogTypes.WARNING
+	lmt := elekLogTypes.CONSOLE
 	buffer := bytes.Buffer{}
 	buffer.WriteString(fmt.Sprintf("<CPU: %f, RAM: %f, Watts: %f>", offerResources...))
 	elekLog.ElektronLog.Log(lmt,
@@ -327,21 +327,21 @@ func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 }
 
 func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
-	lmt := elekLogTypes.ERROR
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.ErrorLevel,
 		log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}, "OFFER RESCINDED")
 }
 
 func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
-	lmt := elekLogTypes.ERROR
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.ErrorLevel,
 		log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}, "SLAVE LOST")
 }
 
 func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
-	lmt := elekLogTypes.ERROR
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.ErrorLevel,
 		log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}, "EXECUTOR LOST")
@@ -349,61 +349,60 @@ func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *m
 func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
 	slaveID *mesos.SlaveID, message string) {
-	lmt := elekLogTypes.GENERAL
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"Received Framework message from executor": executorID}, message)
 }
 
 func (s *BaseScheduler) LogMesosError(err string) {
-	lmt := elekLogTypes.ERROR
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.ErrorLevel,
-		log.Fields{"MESOS ERROR": fmt.Sprintf("%v", err)}, "")
+		log.Fields{"MESOS CONSOLE": fmt.Sprintf("%v", err)}, "")
 }
 
 func (s *BaseScheduler) LogElectronError(err error) {
-	lmt := elekLogTypes.ERROR
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.ErrorLevel,
-		log.Fields{"ELECTRON ERROR": fmt.Sprintf("%v", err)}, "")
+		log.Fields{"ELECTRON CONSOLE": fmt.Sprintf("%v", err)}, "")
 }
 
 func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
 	masterInfo *mesos.MasterInfo) {
-	lmt := elekLogTypes.SUCCESS
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%s", masterInfo)}, "FRAMEWORK REGISTERED!")
 }
 
 func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
-	lmt := elekLogTypes.GENERAL
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"master": fmt.Sprintf("%s", masterInfo)}, "Framework re-registered")
 }
 
 func (s *BaseScheduler) LogDisconnected() {
-	lmt := elekLogTypes.WARNING
+	lmt := elekLogTypes.CONSOLE
 	elekLog.ElektronLog.Log(lmt,
 		log.WarnLevel,
 		log.Fields{}, "Framework disconnected with master")
 }
 
 func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
-	lmt := elekLogTypes.GENERAL
+	lmt := elekLogTypes.CONSOLE
+	level := log.InfoLevel
 	switch *status.State {
 	case mesos.TaskState_TASK_ERROR, mesos.TaskState_TASK_FAILED,
 		mesos.TaskState_TASK_KILLED, mesos.TaskState_TASK_LOST:
-		lmt = elekLogTypes.ERROR
-	case mesos.TaskState_TASK_FINISHED:
-		lmt = elekLogTypes.SUCCESS
+		level = log.ErrorLevel
 	default:
-		lmt = elekLogTypes.GENERAL
+		level = log.InfoLevel
 	}
 	elekLog.ElektronLog.Log(lmt,
-		log.InfoLevel,
+		level,
 		log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}, "Task Status received")
 }
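Pieced together from the hunk above, the status-update logger now derives severity from the task state alone; TASK_FINISHED, which previously mapped to the SUCCESS type, now falls through to the default Info level:

    func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
        lmt := elekLogTypes.CONSOLE
        level := log.InfoLevel
        switch *status.State {
        case mesos.TaskState_TASK_ERROR, mesos.TaskState_TASK_FAILED,
            mesos.TaskState_TASK_KILLED, mesos.TaskState_TASK_LOST:
            level = log.ErrorLevel
        default:
            level = log.InfoLevel
        }
        elekLog.ElektronLog.Log(lmt, level,
            log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)},
            "Task Status received")
    }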

View file

@@ -35,12 +35,12 @@ import (
 func coLocated(tasks map[string]bool, s BaseScheduler) {
 	for task := range tasks {
-		elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+		elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 			log.InfoLevel,
 			log.Fields{"Task": task}, "")
 	}
-	elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+	elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 		log.InfoLevel,
 		log.Fields{}, "---------------------")
 }

View file

@@ -90,7 +90,7 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
 	// Determine the distribution of tasks in the new scheduling window.
 	taskDist, err := def.GetTaskDistributionInWindow(baseSchedRef.schedWindowSize, baseSchedRef.tasks)
 	baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
-	elekLog.ElektronLog.Log(elekLogTypes.GENERAL,
+	elekLog.ElektronLog.Log(elekLogTypes.CONSOLE,
 		log.InfoLevel,
 		log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}, "Switching... ")
 	if err != nil {

View file

@@ -90,13 +90,13 @@ func UpdateEnvironment(offer *mesos.Offer) {
 	var host = offer.GetHostname()
 	// If this host is not present in the set of hosts.
 	if _, ok := constants.Hosts[host]; !ok {
-		elekLog.ElektronLog.Log(elekLogTypes.GENERAL, log.InfoLevel,
+		elekLog.ElektronLog.Log(elekLogTypes.CONSOLE, log.InfoLevel,
 			log.Fields{"Adding host": fmt.Sprintf("%s", host)}, "New host detected")
 		// Add this host.
 		constants.Hosts[host] = struct{}{}
 		// Get the power class of this host.
 		class := PowerClass(offer)
-		elekLog.ElektronLog.Log(elekLogTypes.GENERAL, log.InfoLevel,
+		elekLog.ElektronLog.Log(elekLogTypes.CONSOLE, log.InfoLevel,
 			log.Fields{"host": fmt.Sprintf("%s", host), "PowerClass": fmt.Sprintf("%s", class)}, "Registering the power class...")
 		// If new power class, register the power class.
 		if _, ok := constants.PowerClasses[class]; !ok {

View file

@@ -79,7 +79,7 @@ func (s *fillNextOfferCycle) apply(taskQueue []def.Task) (int, int) {
 	for _, task := range taskQueue {
 		numberOfTasksTraversed++
 		for i := *task.Instances; i > 0; i-- {
-			elekLog.ElektronLog.Log(elekLogTypes.GENERAL, log.InfoLevel,
+			elekLog.ElektronLog.Log(elekLogTypes.CONSOLE, log.InfoLevel,
 				log.Fields{}, fmt.Sprintf("Checking if Instance #%d of Task[%s] can be scheduled "+
 					"during the next offer cycle...", i, task.Name))
 			if canSchedule(task) {