WIP: Elektron Logging library #16

Merged
balandi1 merged 50 commits from master into master 2019-12-10 01:15:34 +00:00
10 changed files with 82 additions and 82 deletions
Showing only changes of commit 3b70a13cc8

@@ -27,7 +27,7 @@ import (
pradykaushik commented 2019-11-21 00:31:57 +00:00 (Migrated from github.com)

I would alias this to _elekLog_ as you seem to be using that prefix when aliasing imports of sub-packages.

balandi1 commented 2019-11-21 19:30:07 +00:00 (Migrated from github.com)

Okay. Done

"github.com/montanaflynn/stats"
log "github.com/sirupsen/logrus"
elekLog "github.com/spdfg/elektron/logging"
elekLogTypes "github.com/spdfg/elektron/logging/types"
. "github.com/spdfg/elektron/logging/types"
)
// Information about a cluster of tasks.
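
For readers skimming the diff: the recurring change in the import blocks throughout this PR swaps the named alias `elekLogTypes` for a dot import of `logging/types`, which is why the call sites below drop the `elekLogTypes.` prefix and refer to `CONSOLE`, `PCP`, etc. directly. A minimal before/after sketch (illustration only, not part of the diff; package and function names here are placeholders):

```go
// Illustration only: the effect of replacing the elekLogTypes alias with a
// dot import of logging/types.
package example

import (
	log "github.com/sirupsen/logrus"

	elekLog "github.com/spdfg/elektron/logging"
	// Before: elekLogTypes "github.com/spdfg/elektron/logging/types"
	// After:  the dot import brings CONSOLE, PCP, etc. into scope unqualified.
	. "github.com/spdfg/elektron/logging/types"
)

func logExample() {
	// Before: elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "message")
	elekLog.Log(CONSOLE, log.InfoLevel, "message")
}
```
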
@@ -52,7 +52,7 @@ func (tc TasksToClassify) taskObservationCalculator(task Task) []float64 {
} else if task.Watts != 0.0 {
return []float64{task.Watts}
} else {
elekLog.Log(elekLogTypes.CONSOLE, log.FatalLevel, "Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload")
elekLog.Log(CONSOLE, log.FatalLevel, "Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload")
return []float64{0.0} // Won't reach here.
}
}
@@ -107,7 +107,7 @@ func clusterSizeAvgMMMPU(tasks []Task, taskObservation func(task Task) []float64
} else {
ridv commented 2019-11-19 02:56:12 +00:00 (Migrated from github.com)

Is Logf an option here instead of sprinting?

pradykaushik commented 2019-11-21 00:38:18 +00:00 (Migrated from github.com)

Replace `elekLogT.ERROR` with `elekLogT.CONSOLE`.

pradykaushik commented 2019-11-21 00:39:20 +00:00 (Migrated from github.com)

`err.Error()` returns the string representation.

balandi1 commented 2019-11-21 18:07:17 +00:00 (Migrated from github.com)

Yes. I will work on implementing Logf()

balandi1 commented 2019-11-21 19:59:56 +00:00 (Migrated from github.com)

Done

balandi1 commented 2019-11-21 20:07:53 +00:00 (Migrated from github.com)

Okay. Will do the change

// skip this value
// there is an error in the task config.
elekLog.Log(elekLogTypes.CONSOLE, log.ErrorLevel, err.Error())
elekLog.Log(CONSOLE, log.ErrorLevel, err.Error())
}
} else {
// There is only one observation for the task.
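
Regarding the `Logf` question above: the PR does add a printf-style `elekLog.Logf(...)` (it shows up later in the power-capping files), so messages no longer need to be assembled with `fmt.Sprintf` before being passed to `Log`. A rough sketch of the two call styles at a site like the one above, assuming only the `Log`/`Logf` signatures visible in this diff (helper name and message text are placeholders):

```go
// Sketch of the two call styles discussed in the review; only the Log/Logf
// signatures come from the diff, everything else is illustrative.
package example

import (
	"fmt"

	log "github.com/sirupsen/logrus"

	elekLog "github.com/spdfg/elektron/logging"
	. "github.com/spdfg/elektron/logging/types"
)

func reportTaskConfigError(err error) {
	// Building the message by hand and passing it to Log:
	elekLog.Log(CONSOLE, log.ErrorLevel, fmt.Sprintf("error in task config: %s", err.Error()))

	// The printf-style wrapper suggested in the review:
	elekLog.Logf(CONSOLE, log.ErrorLevel, "error in task config: %v", err)
}
```
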


@@ -26,7 +26,7 @@ import (
log "github.com/sirupsen/logrus"
elekLog "github.com/spdfg/elektron/logging"
elekLogTypes "github.com/spdfg/elektron/logging/types"
. "github.com/spdfg/elektron/logging/types"
)
func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
@@ -47,7 +47,7 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
scanner.Scan()
// Write to logfile
elekLog.Log(elekLogTypes.PCP, log.InfoLevel, scanner.Text())
elekLog.Log(PCP, log.InfoLevel, scanner.Text())
// Throw away first set of results
scanner.Scan()
@@ -58,14 +58,14 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
text := scanner.Text()
if *logging {
elekLog.Log(elekLogTypes.PCP, log.InfoLevel, text)
elekLog.Log(PCP, log.InfoLevel, text)
}
seconds++
}
}(logging)
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "PCP logging started")
elekLog.Log(CONSOLE, log.InfoLevel, "PCP logging started")
if err := cmd.Start(); err != nil {
log.Fatal(err)
@@ -75,7 +75,7 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
select {
case <-quit:
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Stopping PCP logging in 5 seconds")
elekLog.Log(CONSOLE, log.InfoLevel, "Stopping PCP logging in 5 seconds")
time.Sleep(5 * time.Second)
// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
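
Across this PR the call sites go through `elekLog.Log`, `elekLog.Logf` and `elekLog.WithFields(...).Log(...)`, with a log type (`CONSOLE`, `PCP`, `SCHED_TRACE`, ...) selecting the destination and a logrus level selecting severity. The actual implementation lives in `github.com/spdfg/elektron/logging`; the following is only a hypothetical sketch of wrappers with that surface, delegating to logrus, and may differ from the real package:

```go
// Hypothetical sketch of the wrapper surface implied by the call sites in
// this PR; the real github.com/spdfg/elektron/logging package may differ.
package logging

import log "github.com/sirupsen/logrus"

// LogType selects a destination (CONSOLE, PCP, SCHED_TRACE, ...); the real
// constants live in github.com/spdfg/elektron/logging/types.
type LogType int

// Log writes msg at the given level to whatever logger backs logType.
func Log(logType LogType, level log.Level, msg string) {
	log.StandardLogger().Log(level, msg) // real code would route on logType
}

// Logf is the printf-style variant discussed in the review comments.
func Logf(logType LogType, level log.Level, format string, args ...interface{}) {
	log.StandardLogger().Logf(level, format, args...)
}

// Entry supports the chained elekLog.WithFields(...).Log(...) style.
type Entry struct{ fields log.Fields }

func WithFields(fields log.Fields) *Entry { return &Entry{fields: fields} }

func (e *Entry) Log(logType LogType, level log.Level, msg string) {
	log.StandardLogger().WithFields(e.fields).Log(level, msg)
}
```
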

@@ -31,7 +31,7 @@ import (
log "github.com/sirupsen/logrus"
elekLog "github.com/spdfg/elektron/logging"
elekLogTypes "github.com/spdfg/elektron/logging/types"
. "github.com/spdfg/elektron/logging/types"
"github.com/spdfg/elektron/pcp"
"github.com/spdfg/elektron/rapl"
)
@@ -43,7 +43,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
if hiThreshold < loThreshold {
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "High threshold is lower than low threshold!")
elekLog.Log(CONSOLE, log.InfoLevel, "High threshold is lower than low threshold!")
}
pipe, err := cmd.StdoutPipe()
@@ -59,7 +59,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
scanner.Scan()
// Write to logfile
elekLog.Log(elekLogTypes.PCP, log.InfoLevel, scanner.Text())
elekLog.Log(PCP, log.InfoLevel, scanner.Text())
headers := strings.Split(scanner.Text(), ",")
@@ -95,12 +95,12 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
if *logging {
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Logging PCP...")
elekLog.Log(CONSOLE, log.InfoLevel, "Logging PCP...")
text := scanner.Text()
split := strings.Split(text, ",")
elekLog.Log(elekLogTypes.PCP, log.InfoLevel, text)
elekLog.Log(PCP, log.InfoLevel, text)
totalPower := 0.0
for _, powerIndex := range powerIndexes {
@@ -112,7 +112,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
powerHistories[host] = powerHistories[host].Next()
elekLog.WithFields(log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]),
"Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
"Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))}).Log(CONSOLE, log.InfoLevel, "")
totalPower += power
}
@@ -124,10 +124,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
elekLog.WithFields(log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
"Sec Avg": fmt.Sprintf("%f", clusterMean)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
"Sec Avg": fmt.Sprintf("%f", clusterMean)}).Log(CONSOLE, log.InfoLevel, "")
if clusterMean > hiThreshold {
elekLog.Log(elekLogTypes.CONSOLE,
elekLog.Log(CONSOLE,
log.InfoLevel, "Need to cap a node")
// Create statics for all victims and choose one to cap
victims := make([]pcp.Victim, 0, 8)
@@ -150,9 +150,9 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
cappedHosts[victim.Host] = true
orderCapped = append(orderCapped, victim.Host)
elekLog.WithFields(log.Fields{"Capping Victim": fmt.Sprintf("%s", victim.Host),
"Avg. Wattage": fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
"Avg. Wattage": fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}).Log(CONSOLE, log.InfoLevel, "")
if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
elekLog.Log(elekLogTypes.CONSOLE,
elekLog.Log(CONSOLE,
log.ErrorLevel,
"Error capping host")
}
@@ -168,9 +168,9 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
cappedHosts[host] = false
// User RAPL package to send uncap.
log.Printf("Uncapping host %s", host)
elekLog.WithFields(log.Fields{"Uncapped host": host}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Uncapped host": host}).Log(CONSOLE, log.InfoLevel, "")
if err := rapl.Cap(host, "rapl", 100); err != nil {
elekLog.Log(elekLogTypes.CONSOLE, log.ErrorLevel, "Error capping host")
elekLog.Log(CONSOLE, log.ErrorLevel, "Error capping host")
}
}
}
@@ -180,7 +180,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
}
}(logging, hiThreshold, loThreshold)
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "PCP logging started")
elekLog.Log(CONSOLE, log.InfoLevel, "PCP logging started")
if err := cmd.Start(); err != nil {
log.Fatal(err)
@@ -190,7 +190,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
select {
case <-quit:
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Stopping PCP logging in 5 seconds")
elekLog.Log(CONSOLE, log.InfoLevel, "Stopping PCP logging in 5 seconds")
time.Sleep(5 * time.Second)
// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly

@@ -33,7 +33,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spdfg/elektron/constants"
elekLog "github.com/spdfg/elektron/logging"
elekLogTypes "github.com/spdfg/elektron/logging/types"
. "github.com/spdfg/elektron/logging/types"
"github.com/spdfg/elektron/pcp"
"github.com/spdfg/elektron/rapl"
"github.com/spdfg/elektron/utilities"
@@ -56,7 +56,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
if hiThreshold < loThreshold {
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "High threshold is lower than low threshold!")
elekLog.Log(CONSOLE, log.InfoLevel, "High threshold is lower than low threshold!")
}
pipe, err := cmd.StdoutPipe()
@@ -72,7 +72,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
scanner.Scan()
// Write to logfile
elekLog.Log(elekLogTypes.PCP, log.InfoLevel, scanner.Text())
elekLog.Log(PCP, log.InfoLevel, scanner.Text())
headers := strings.Split(scanner.Text(), ",")
@@ -111,11 +111,11 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
for scanner.Scan() {
if *logging {
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Logging PCP...")
elekLog.Log(CONSOLE, log.InfoLevel, "Logging PCP...")
split := strings.Split(scanner.Text(), ",")
text := scanner.Text()
elekLog.Log(elekLogTypes.PCP, log.InfoLevel, text)
elekLog.Log(PCP, log.InfoLevel, text)
totalPower := 0.0
for _, powerIndex := range powerIndexes {
@@ -127,7 +127,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
powerHistories[host] = powerHistories[host].Next()
elekLog.WithFields(log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]),
"Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
"Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))}).Log(CONSOLE, log.InfoLevel, "")
totalPower += power
}
clusterPower := totalPower * pcp.RAPLUnits
@@ -138,14 +138,14 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
elekLog.WithFields(log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
"Sec Avg": fmt.Sprintf("%f", clusterMean)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
"Sec Avg": fmt.Sprintf("%f", clusterMean)}).Log(CONSOLE, log.InfoLevel, "")
if clusterMean >= hiThreshold {
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Need to cap a node")
elekLog.Log(CONSOLE, log.InfoLevel, "Need to cap a node")
elekLog.WithFields(log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
// Create statics for all victims and choose one to cap
victims := make([]pcp.Victim, 0, 8)
@@ -173,10 +173,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
// Need to cap this victim.
if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {
elekLog.WithFields(log.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}).Log(elekLogTypes.CONSOLE, log.ErrorLevel, "")
elekLog.WithFields(log.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}).Log(CONSOLE, log.ErrorLevel, "")
} else {
elekLog.Logf(elekLogTypes.CONSOLE, log.InfoLevel, "Capped host[%s] at %f", victims[i].Host, 50.0)
elekLog.Logf(CONSOLE, log.InfoLevel, "Capped host[%s] at %f", victims[i].Host, 50.0)
// Keeping track of this victim and it's cap value
cappedVictims[victims[i].Host] = 50.0
newVictimFound = true
@@ -200,10 +200,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
newCapValue := getNextCapValue(capValue, 2)
if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {
elekLog.WithFields(log.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}).Log(elekLogTypes.CONSOLE, log.ErrorLevel, "")
elekLog.WithFields(log.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}).Log(CONSOLE, log.ErrorLevel, "")
} else {
// Successful cap
elekLog.Logf(elekLogTypes.CONSOLE, log.InfoLevel,
elekLog.Logf(CONSOLE, log.InfoLevel,
"Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
// Checking whether this victim can be capped further
if newCapValue <= constants.LowerCapLimit {
@@ -227,15 +227,15 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
}
}
if !canCapAlreadyCappedVictim {
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "No Victim left to cap")
elekLog.Log(CONSOLE, log.InfoLevel, "No Victim left to cap")
}
}
} else if clusterMean < loThreshold {
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Need to uncap a node")
elekLog.WithFields(log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
elekLog.Log(CONSOLE, log.InfoLevel, "Need to uncap a node")
elekLog.WithFields(log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}).Log(CONSOLE, log.InfoLevel, "")
if len(orderCapped) > 0 {
// We pick the host that is capped the most to uncap.
orderCappedToSort := utilities.GetPairList(orderCappedVictims)
@@ -246,10 +246,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
newUncapValue := orderCappedVictims[hostToUncap] * 2.0
if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {
elekLog.WithFields(log.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}).Log(elekLogTypes.CONSOLE, log.ErrorLevel, "")
elekLog.WithFields(log.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}).Log(CONSOLE, log.ErrorLevel, "")
} else {
// Successful uncap
elekLog.Logf(elekLogTypes.CONSOLE, log.InfoLevel, "Uncapped host[%s] to %f", hostToUncap, newUncapValue)
elekLog.Logf(CONSOLE, log.InfoLevel, "Uncapped host[%s] to %f", hostToUncap, newUncapValue)
// Can we uncap this host further. If not, then we remove its entry from orderCapped
if newUncapValue >= 100.0 { // can compare using ==
// Deleting entry from orderCapped
@@ -270,7 +270,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
}
}
} else {
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "No host staged for Uncapped")
elekLog.Log(CONSOLE, log.InfoLevel, "No host staged for Uncapped")
}
}
}
@@ -279,7 +279,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
}(logging, hiThreshold, loThreshold)
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "PCP logging started")
elekLog.Log(CONSOLE, log.InfoLevel, "PCP logging started")
if err := cmd.Start(); err != nil {
log.Fatal(err)
}
@@ -288,7 +288,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
select {
case <-quit:
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Stopping PCP logging in 5 seconds")
elekLog.Log(CONSOLE, log.InfoLevel, "Stopping PCP logging in 5 seconds")
time.Sleep(5 * time.Second)
// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
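
For context on the loop above: when the cluster mean stays above the high threshold, a victim host is first capped at 50% and then capped further via `getNextCapValue(capValue, 2)`; when the mean falls below the low threshold, the most-capped host is uncapped by doubling its cap (`* 2.0`) until it is back at 100%. A tiny standalone illustration of that progression, assuming `getNextCapValue` halves the current cap (an inference from its call site, not the repo's actual helper):

```go
// Standalone illustration of the progressive cap/uncap arithmetic; the
// halving step is an assumption inferred from getNextCapValue(capValue, 2).
package main

import "fmt"

func main() {
	capValue := 100.0
	// Capping: start at 50% and keep halving while the cluster stays hot,
	// stopping once the cap would fall below the lower cap limit.
	for i := 0; i < 3; i++ {
		capValue /= 2
		fmt.Printf("cap victim at %.1f%%\n", capValue) // 50.0, 25.0, 12.5
	}
	// Uncapping: double the cap back toward 100% once the cluster cools down.
	for capValue < 100.0 {
		capValue *= 2.0
		fmt.Printf("uncap victim to %.1f%%\n", capValue) // 25.0, 50.0, 100.0
	}
}
```
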

@@ -32,7 +32,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spdfg/elektron/def"
elekLog "github.com/spdfg/elektron/logging"
elekLogTypes "github.com/spdfg/elektron/logging/types"
. "github.com/spdfg/elektron/logging/types"
"github.com/spdfg/elektron/pcp"
"github.com/spdfg/elektron/powerCap"
"github.com/spdfg/elektron/schedulers"
@@ -285,8 +285,8 @@ func main() {
// Starting the scheduler driver.
if status, err := driver.Run(); err != nil {
elekLog.WithFields(log.Fields{"status": status.String(), "error": err.Error()}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"status": status.String(), "error": err.Error()}).Log(CONSOLE,
log.ErrorLevel, "Framework stopped ")
}
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Exiting...")
elekLog.Log(CONSOLE, log.InfoLevel, "Exiting...")
}

@@ -31,7 +31,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spdfg/elektron/def"
elekLog "github.com/spdfg/elektron/logging"
elekLogTypes "github.com/spdfg/elektron/logging/types"
. "github.com/spdfg/elektron/logging/types"
"github.com/spdfg/elektron/utilities"
"github.com/spdfg/elektron/utilities/schedUtils"
)
@@ -250,29 +250,29 @@ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos
func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
if ts == nil {
elekLog.WithFields(log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "TASKS STARTING...")
elekLog.WithFields(log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}).Log(CONSOLE, log.InfoLevel, "TASKS STARTING...")
} else {
elekLog.WithFields(log.Fields{"task": fmt.Sprintf("%s", ts.Name), "Instance": fmt.Sprintf("%d", *ts.Instances),
"host": fmt.Sprintf("%s", offer.GetHostname())}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "TASK STARTING... ")
"host": fmt.Sprintf("%s", offer.GetHostname())}).Log(CONSOLE, log.InfoLevel, "TASK STARTING... ")
}
}
func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
elekLog.WithFields(log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "Watts considered for ")
elekLog.WithFields(log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}).Log(CONSOLE, log.InfoLevel, "Watts considered for ")
}
func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
elekLog.WithFields(log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}).Log(CONSOLE,
log.InfoLevel, "")
}
func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
elekLog.WithFields(log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}).Log(CONSOLE,
log.WarnLevel, "No tasks left to schedule ")
}
func (s *BaseScheduler) LogNumberOfRunningTasks() {
elekLog.WithFields(log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}).Log(CONSOLE,
log.InfoLevel, "")
}
@@ -283,67 +283,67 @@ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
buffer.WriteString(fmt.Sprintln(taskName))
}
s.TasksRunningMutex.Unlock()
elekLog.WithFields(log.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}).Log(CONSOLE,
log.InfoLevel, "")
}
func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
elekLog.WithFields(log.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}).Log(elekLogTypes.SCHED_TRACE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}).Log(SCHED_TRACE, log.InfoLevel, "")
}
func (s *BaseScheduler) LogTerminateScheduler() {
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Done scheduling all tasks!")
elekLog.Log(CONSOLE, log.InfoLevel, "Done scheduling all tasks!")
}
func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
offerResources ...interface{}) {
buffer := bytes.Buffer{}
buffer.WriteString(fmt.Sprintf("<CPU: %f, RAM: %f, Watts: %f>", offerResources...))
elekLog.WithFields(log.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}).Log(CONSOLE,
log.WarnLevel, "DECLINING OFFER... Offer has insufficient resources to launch a task")
}
func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
elekLog.WithFields(log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}).Log(CONSOLE,
log.ErrorLevel, "OFFER RESCINDED")
}
func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
elekLog.WithFields(log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}).Log(CONSOLE,
log.ErrorLevel, "SLAVE LOST")
}
func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
elekLog.WithFields(log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}).Log(elekLogTypes.CONSOLE, log.ErrorLevel, "EXECUTOR LOST")
elekLog.WithFields(log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}).Log(CONSOLE, log.ErrorLevel, "EXECUTOR LOST")
}
func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
slaveID *mesos.SlaveID, message string) {
elekLog.WithFields(log.Fields{"Received Framework message from executor": executorID}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"Received Framework message from executor": executorID}).Log(CONSOLE,
log.InfoLevel, message)
}
func (s *BaseScheduler) LogMesosError(err string) {
elekLog.WithFields(log.Fields{"MESOS CONSOLE": fmt.Sprintf("%v", err)}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"MESOS CONSOLE": fmt.Sprintf("%v", err)}).Log(CONSOLE,
log.ErrorLevel, "")
}
func (s *BaseScheduler) LogElectronError(err error) {
elekLog.WithFields(log.Fields{"ELECTRON CONSOLE": fmt.Sprintf("%v", err)}).Log(elekLogTypes.CONSOLE, log.ErrorLevel, "")
elekLog.WithFields(log.Fields{"ELECTRON CONSOLE": fmt.Sprintf("%v", err)}).Log(CONSOLE, log.ErrorLevel, "")
}
func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
masterInfo *mesos.MasterInfo) {
elekLog.WithFields(log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%v", masterInfo)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "FRAMEWORK REGISTERED!")
elekLog.WithFields(log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%v", masterInfo)}).Log(CONSOLE, log.InfoLevel, "FRAMEWORK REGISTERED!")
}
func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
elekLog.WithFields(log.Fields{"master": fmt.Sprintf("%v", masterInfo)}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"master": fmt.Sprintf("%v", masterInfo)}).Log(CONSOLE,
log.InfoLevel, "Framework re-registered")
}
func (s *BaseScheduler) LogDisconnected() {
elekLog.Log(elekLogTypes.CONSOLE, log.WarnLevel, "Framework disconnected with master")
elekLog.Log(CONSOLE, log.WarnLevel, "Framework disconnected with master")
}
func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
@@ -355,12 +355,12 @@ func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
default:
level = log.InfoLevel
}
elekLog.WithFields(log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}).Log(elekLogTypes.CONSOLE, level, "Task Status received")
elekLog.WithFields(log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}).Log(CONSOLE, level, "Task Status received")
}
func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicyState) {
logSPS := func() {
elekLog.WithFields(log.Fields{"Name": name}).Log(elekLogTypes.SPS, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Name": name}).Log(SPS, log.InfoLevel, "")
}
if s.hasReceivedResourceOffers && (s.curSchedPolicy != nextPolicy) {
logSPS()
@@ -369,10 +369,10 @@ func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicy
}
// Logging the size of the scheduling window and the scheduling policy
// that is going to schedule the tasks in the scheduling window.
elekLog.WithFields(log.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}).Log(elekLogTypes.SCHED_WINDOW, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}).Log(SCHED_WINDOW, log.InfoLevel, "")
}
func (s *BaseScheduler) LogClsfnAndTaskDistOverhead(overhead time.Duration) {
// Logging the overhead in microseconds.
elekLog.WithFields(log.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}).Log(elekLogTypes.CLSFN_TASKDISTR_OVERHEAD, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}).Log(CLSFN_TASKDISTR_OVERHEAD, log.InfoLevel, "")
}

@@ -28,7 +28,7 @@ import (
"github.com/spdfg/elektron/constants"
"github.com/spdfg/elektron/def"
elekLog "github.com/spdfg/elektron/logging"
elekLogTypes "github.com/spdfg/elektron/logging/types"
. "github.com/spdfg/elektron/logging/types"
"github.com/spdfg/elektron/utilities"
"github.com/spdfg/elektron/utilities/mesosUtils"
)
@@ -36,10 +36,10 @@ import (
func coLocated(tasks map[string]bool, s BaseScheduler) {
for _, task := range tasks {
elekLog.WithFields(log.Fields{"Task": task}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
elekLog.WithFields(log.Fields{"Task": task}).Log(CONSOLE, log.InfoLevel, "")
}
elekLog.Log(elekLogTypes.CONSOLE, log.InfoLevel, "---------------------")
elekLog.Log(CONSOLE, log.InfoLevel, "---------------------")
}
// Get the powerClass of the given hostname.

@@ -27,7 +27,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spdfg/elektron/def"
elekLog "github.com/spdfg/elektron/logging"
elekLogTypes "github.com/spdfg/elektron/logging/types"
. "github.com/spdfg/elektron/logging/types"
)
type SchedPolicyContext interface {
@@ -90,7 +90,7 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
// Determine the distribution of tasks in the new scheduling window.
taskDist, err := def.GetTaskDistributionInWindow(baseSchedRef.schedWindowSize, baseSchedRef.tasks)
baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
elekLog.WithFields(log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}).Log(CONSOLE,
log.InfoLevel, "Switching... ")
if err != nil {
// All the tasks in the window were only classified into 1 cluster.

@@ -25,7 +25,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spdfg/elektron/constants"
elekLog "github.com/spdfg/elektron/logging"
elekLogTypes "github.com/spdfg/elektron/logging/types"
. "github.com/spdfg/elektron/logging/types"
)
func OfferAgg(offer *mesos.Offer) (float64, float64, float64) {
@@ -90,12 +90,12 @@ func UpdateEnvironment(offer *mesos.Offer) {
var host = offer.GetHostname()
// If this host is not present in the set of hosts.
if _, ok := constants.Hosts[host]; !ok {
elekLog.WithFields(log.Fields{"Adding host": host}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "New host detected")
elekLog.WithFields(log.Fields{"Adding host": host}).Log(CONSOLE, log.InfoLevel, "New host detected")
// Add this host.
constants.Hosts[host] = struct{}{}
// Get the power class of this host.
class := PowerClass(offer)
elekLog.WithFields(log.Fields{"host": host, "PowerClass": class}).Log(elekLogTypes.CONSOLE,
elekLog.WithFields(log.Fields{"host": host, "PowerClass": class}).Log(CONSOLE,
log.InfoLevel, "Registering the power class...")
// If new power class, register the power class.
if _, ok := constants.PowerClasses[class]; !ok {

@@ -22,7 +22,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spdfg/elektron/def"
elekLog "github.com/spdfg/elektron/logging"
elekLogTypes "github.com/spdfg/elektron/logging/types"
. "github.com/spdfg/elektron/logging/types"
"github.com/spdfg/elektron/utilities"
)
@@ -78,7 +78,7 @@ func (s *fillNextOfferCycle) apply(taskQueue []def.Task) (int, int) {
for _, task := range taskQueue {
numberOfTasksTraversed++
for i := *task.Instances; i > 0; i-- {
elekLog.Logf(elekLogTypes.CONSOLE, log.InfoLevel,
elekLog.Logf(CONSOLE, log.InfoLevel,
"Checking if Instance #%d of Task[%s] can be scheduled "+
"during the next offer cycle...", i, task.Name)
if canSchedule(task) {