Changed alias elekLogT to elekLogTypes

parent 20f2f1e8c5
commit 13c3de44be

10 changed files with 85 additions and 82 deletions
@@ -27,7 +27,7 @@ import (
 	"github.com/montanaflynn/stats"
 	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 )
 
 // Information about a cluster of tasks.
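For readers less familiar with Go, the rename is purely cosmetic: an import alias only changes the local name under which a package's identifiers are referenced, never the package itself. A minimal, self-contained sketch using a standard-library package (the alias and string here are illustrative, not from the Elektron codebase):

package main

import (
	"fmt"
	str "strings" // aliased import: call sites write str.X instead of strings.X
)

func main() {
	// Renaming the alias (here str -> stringsPkg, in this commit
	// elekLogT -> elekLogTypes) only touches the import line and the
	// call sites; the imported package is unchanged.
	fmt.Println(str.ToUpper("elektron"))
}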
@@ -52,7 +52,7 @@ func (tc TasksToClassify) taskObservationCalculator(task Task) []float64 {
 	} else if task.Watts != 0.0 {
 		return []float64{task.Watts}
 	} else {
-		elektronLogging.ElektronLog.Log(elekLogT.ERROR, log.FatalLevel,
+		elektronLogging.ElektronLog.Log(elekLogTypes.ERROR, log.FatalLevel,
 			log.Fields{}, "Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload")
 		return []float64{0.0} // Won't reach here.
 	}
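Every change in this commit follows the same pattern, so it helps to see the call shape once. A sketch of the logger interface these call sites imply; the concrete declaration lives in the elektronLogging package, and the integer message-type enum here is an assumption, not the repository's exact code:

package elektronLogging

import log "github.com/sirupsen/logrus"

// MessageType stands in for the enum behind elekLogTypes.GENERAL,
// elekLogTypes.ERROR, elekLogTypes.PCP, etc. (representation assumed).
type MessageType int

// Logger is inferred from the call sites in this diff: a message type,
// a logrus level, structured fields, and a free-form message.
type Logger interface {
	Log(msgType MessageType, level log.Level, fields log.Fields, message string)
}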
@@ -108,7 +108,7 @@ func clusterSizeAvgMMMPU(tasks []Task, taskObservation func(task Task) []float64
 		} else {
 			// skip this value
 			// there is an error in the task config.
-			elektronLogging.ElektronLog.Log(elekLogT.ERROR, log.ErrorLevel,
+			elektronLogging.ElektronLog.Log(elekLogTypes.ERROR, log.ErrorLevel,
 				log.Fields{}, fmt.Sprintf("%s", err))
 		}
 	} else {
@@ -3,6 +3,7 @@ package elektronLogging
 import (
 	log "github.com/sirupsen/logrus"
 	. "github.com/spdfg/elektron/elektronLogging/types"
+	"os"
 	"strings"
 	"time"
 )
@@ -25,9 +26,11 @@ func BuildLogger(prefix string) {
 
 	prefix = strings.Join([]string{prefix, formattedStartTime}, "_")
 
-	logger = log.New()
-	logger.SetFormatter(&formatter)
+	//create a single logrus instance and set its formatter to ElektronFormatter
+	logger = &log.Logger{
+		Out:       os.Stderr,
+		Level:     log.DebugLevel,
+		Formatter: &formatter,
+	}
 
 	// create a chain of loggers
 	head := &LoggerImpl{}
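The BuildLogger change above swaps a log.New() plus a setter call for a single struct literal. Both are valid ways to configure a logrus logger; a runnable sketch of the two styles, with JSONFormatter standing in for Elektron's custom ElektronFormatter:

package main

import (
	"os"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Old style: construct with defaults, then mutate.
	a := log.New()
	a.SetFormatter(&log.JSONFormatter{})
	a.SetLevel(log.DebugLevel)

	// New style: one literal with output, level, and formatter explicit at
	// construction time. The commit's version omits Hooks; logrus tolerates
	// a nil hook map, but initializing it is the safer default.
	b := &log.Logger{
		Out:       os.Stderr,
		Level:     log.DebugLevel,
		Formatter: &log.JSONFormatter{},
		Hooks:     make(log.LevelHooks),
	}

	a.Debug("configured via log.New and setters")
	b.Debug("configured via struct literal")
}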
@@ -31,7 +31,7 @@ import (
 
 	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 	"github.com/spdfg/elektron/pcp"
 	"github.com/spdfg/elektron/rapl"
 )
@@ -43,7 +43,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
 
 	if hiThreshold < loThreshold {
-		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 			log.InfoLevel,
 			log.Fields{}, "High threshold is lower than low threshold!")
 	}
@@ -61,7 +61,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 		scanner.Scan()
 
 		// Write to logfile
-		elektronLogging.ElektronLog.Log(elekLogT.PCP,
+		elektronLogging.ElektronLog.Log(elekLogTypes.PCP,
 			log.InfoLevel,
 			log.Fields{}, scanner.Text())
 
@@ -99,14 +99,14 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 
 		if *logging {
 
-			elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+			elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 				log.InfoLevel,
 				log.Fields{}, "Logging PCP...")
 
 			text := scanner.Text()
 			split := strings.Split(text, ",")
 
-			elektronLogging.ElektronLog.Log(elekLogT.PCP,
+			elektronLogging.ElektronLog.Log(elekLogTypes.PCP,
 				log.InfoLevel,
 				log.Fields{}, text)
 
@@ -119,7 +119,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 				powerHistories[host].Value = power
 				powerHistories[host] = powerHistories[host].Next()
 
-				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 					log.InfoLevel,
 					log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
 					"")
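The powerHistories bookkeeping above uses container/ring from the standard library: write Value, step Next(), and a fixed-size window of recent samples is kept per host. A self-contained sketch of that pattern (the window size and sample values are illustrative):

package main

import (
	"container/ring"
	"fmt"
)

func main() {
	window := ring.New(5) // per-host history; a window of 5 is an assumption
	for _, sample := range []float64{95, 100, 110, 120, 90, 105} {
		window.Value = sample // same two steps as powerHistories above
		window = window.Next() // the sixth write overwrites the oldest entry
	}

	// Average over the window, as a cluster-mean style calculation would.
	sum, n := 0.0, 0
	window.Do(func(v interface{}) {
		if v != nil {
			sum += v.(float64)
			n++
		}
	})
	fmt.Printf("window mean: %.2f over %d samples\n", sum/float64(n), n)
}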
@@ -133,14 +133,14 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 
 			clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
 
-			elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+			elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 				log.InfoLevel,
 				log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
 					"Sec Avg": fmt.Sprintf("%f", clusterMean)},
 				"")
 
 			if clusterMean > hiThreshold {
-				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 					log.InfoLevel,
 					log.Fields{}, "Need to cap a node")
 				// Create statics for all victims and choose one to cap
@@ -163,12 +163,12 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 				if !cappedHosts[victim.Host] {
 					cappedHosts[victim.Host] = true
 					orderCapped = append(orderCapped, victim.Host)
-					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 						log.InfoLevel,
 						log.Fields{"Capping Victim": fmt.Sprintf("%s", victim.Host),
 							"Avg. Wattage": fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}, "")
 					if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
-						elektronLogging.ElektronLog.Log(elekLogT.ERROR,
+						elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
 							log.ErrorLevel,
 							log.Fields{}, "Error capping host")
 					}
@@ -184,11 +184,11 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 					cappedHosts[host] = false
 					// User RAPL package to send uncap.
 					log.Printf("Uncapping host %s", host)
-					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 						log.InfoLevel,
 						log.Fields{"Uncapped host": host}, "")
 					if err := rapl.Cap(host, "rapl", 100); err != nil {
-						elektronLogging.ElektronLog.Log(elekLogT.ERROR,
+						elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
 							log.ErrorLevel,
 							log.Fields{}, "Error capping host")
 					}
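The extrema capper in the two hunks above tracks which hosts are currently capped (cappedHosts) and the order in which they were capped (orderCapped), so uncapping can be driven from that order. A minimal sketch of the bookkeeping, with illustrative hostnames and first-in-first-out uncapping assumed:

package main

import "fmt"

func main() {
	cappedHosts := map[string]bool{}
	orderCapped := []string{}

	// Cap victims as the control loop selects them; ignore repeats.
	for _, victim := range []string{"host-a", "host-b", "host-a"} {
		if !cappedHosts[victim] {
			cappedHosts[victim] = true
			orderCapped = append(orderCapped, victim)
		}
	}

	// Uncap the earliest-capped host first, mirroring the uncap path above.
	host := orderCapped[0]
	orderCapped = orderCapped[1:]
	cappedHosts[host] = false
	fmt.Println("uncapped:", host, "still capped:", orderCapped)
}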
@@ -200,7 +200,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 		}
 	}(logging, hiThreshold, loThreshold)
 
-	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 		log.InfoLevel,
 		log.Fields{}, "PCP logging started")
 
@@ -212,7 +212,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 
 	select {
 	case <-quit:
-		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 			log.InfoLevel,
 			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)
@@ -33,7 +33,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/constants"
 	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 	"github.com/spdfg/elektron/pcp"
 	"github.com/spdfg/elektron/rapl"
 	"github.com/spdfg/elektron/utilities"
@@ -56,7 +56,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
 
 	if hiThreshold < loThreshold {
-		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 			log.InfoLevel,
 			log.Fields{}, "High threshold is lower than low threshold!")
 	}
@@ -74,7 +74,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 		scanner.Scan()
 
 		// Write to logfile
-		elektronLogging.ElektronLog.Log(elekLogT.PCP,
+		elektronLogging.ElektronLog.Log(elekLogTypes.PCP,
 			log.InfoLevel,
 			log.Fields{}, scanner.Text())
 
@@ -115,13 +115,13 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 
 	for scanner.Scan() {
 		if *logging {
-			elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+			elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 				log.InfoLevel,
 				log.Fields{}, "Logging PCP...")
 			split := strings.Split(scanner.Text(), ",")
 
 			text := scanner.Text()
-			elektronLogging.ElektronLog.Log(elekLogT.PCP,
+			elektronLogging.ElektronLog.Log(elekLogTypes.PCP,
 				log.InfoLevel,
 				log.Fields{}, text)
 
@@ -134,7 +134,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 				powerHistories[host].Value = power
 				powerHistories[host] = powerHistories[host].Next()
 
-				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 					log.InfoLevel,
 					log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
 					"")
@@ -147,22 +147,22 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 
 			clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
 
-			elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+			elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 				log.InfoLevel,
 				log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
 					"Sec Avg": fmt.Sprintf("%f", clusterMean)},
 				"")
 
 			if clusterMean >= hiThreshold {
-				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 					log.InfoLevel,
 					log.Fields{}, "Need to cap a node")
 
-				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 					log.InfoLevel,
 					log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
 
-				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 					log.InfoLevel,
 					log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")
 				// Create statics for all victims and choose one to cap
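The control loop above compares the one-second cluster mean against two thresholds: capping starts when the mean reaches hiThreshold, uncapping when it drops below loThreshold, and nothing happens in the dead band between them. A runnable sketch of that decision, with illustrative threshold values:

package main

import "fmt"

func action(clusterMean, hiThreshold, loThreshold float64) string {
	switch {
	case clusterMean >= hiThreshold:
		return "cap a node"
	case clusterMean < loThreshold:
		return "uncap a node"
	default:
		return "hold" // inside the dead band: no action
	}
}

func main() {
	for _, mean := range []float64{120, 95, 60} {
		fmt.Printf("cluster mean %.0f W -> %s\n", mean, action(mean, 110, 70))
	}
}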
@@ -192,12 +192,12 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					// Need to cap this victim.
 					if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {
 
-						elektronLogging.ElektronLog.Log(elekLogT.ERROR,
+						elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
 							log.ErrorLevel,
 							log.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}, "")
 					} else {
 
-						elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+						elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 							log.InfoLevel,
 							log.Fields{}, fmt.Sprintf("Capped host[%s] at %f", victims[i].Host, 50.0))
 						// Keeping track of this victim and it's cap value
@@ -223,12 +223,12 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					newCapValue := getNextCapValue(capValue, 2)
 					if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {
 
-						elektronLogging.ElektronLog.Log(elekLogT.ERROR,
+						elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
 							log.ErrorLevel,
 							log.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}, "")
 					} else {
 						// Successful cap
-						elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+						elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 							log.InfoLevel,
 							log.Fields{}, fmt.Sprintf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue))
 						// Checking whether this victim can be capped further
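The "progressive" part of this capper is visible in the calls above and in the uncap hunk further below: getNextCapValue(capValue, 2) halves the current cap on each re-cap, and uncapping doubles it back, so a persistently hot host converges geometrically toward a floor. A sketch of that progression under assumed bounds; the real floor and ceiling live in the scheduler, and getNextCapValue's exact rounding is not shown in this diff:

package main

import "fmt"

func main() {
	capValue := 100.0
	for capValue > 12.5 { // assumed lower bound, for illustration only
		capValue /= 2 // what getNextCapValue(capValue, 2) suggests
		fmt.Printf("capped to %.2f%%\n", capValue)
	}
	for capValue < 100.0 {
		capValue *= 2.0 // mirrors newUncapValue := orderCappedVictims[host] * 2.0
		fmt.Printf("uncapped to %.2f%%\n", capValue)
	}
}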
@@ -253,7 +253,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					}
 				}
 				if !canCapAlreadyCappedVictim {
-					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 						log.InfoLevel,
 						log.Fields{}, "No Victim left to cap")
 				}
@@ -261,13 +261,13 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 
 			} else if clusterMean < loThreshold {
 
-				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 					log.InfoLevel,
 					log.Fields{}, "Need to uncap a node")
-				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 					log.InfoLevel,
 					log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
-				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 					log.InfoLevel,
 					log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")
 				if len(orderCapped) > 0 {
@@ -280,12 +280,12 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					newUncapValue := orderCappedVictims[hostToUncap] * 2.0
 					if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {
 
-						elektronLogging.ElektronLog.Log(elekLogT.ERROR,
+						elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
 							log.ErrorLevel,
 							log.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}, "")
 					} else {
 						// Successful uncap
-						elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+						elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 							log.InfoLevel,
 							log.Fields{}, fmt.Sprintf("Uncapped host[%s] to %f", hostToUncap, newUncapValue))
 						// Can we uncap this host further. If not, then we remove its entry from orderCapped
@@ -308,7 +308,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					}
 				}
 			} else {
-				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 					log.InfoLevel,
 					log.Fields{}, "No host staged for Uncapped")
 			}
@@ -319,7 +319,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 
 	}(logging, hiThreshold, loThreshold)
 
-	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 		log.InfoLevel,
 		log.Fields{}, "PCP logging started")
 	if err := cmd.Start(); err != nil {
@@ -330,7 +330,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 
 	select {
 	case <-quit:
-		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 			log.InfoLevel,
 			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)
@@ -27,7 +27,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 	"github.com/spdfg/elektron/pcp"
 	"github.com/spdfg/elektron/powerCap"
 	"github.com/spdfg/elektron/schedulers"
@@ -278,11 +278,11 @@ func main() {
 
 	// Starting the scheduler driver.
 	if status, err := driver.Run(); err != nil {
-		elektronLogging.ElektronLog.Log(elekLogT.ERROR,
+		elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
 			log.ErrorLevel,
 			log.Fields{"status": status.String(), "error": err.Error()}, "Framework stopped ")
 	}
-	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 		log.InfoLevel,
 		log.Fields{}, "Exiting...")
 }
@@ -31,7 +31,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 	"github.com/spdfg/elektron/utilities"
 	"github.com/spdfg/elektron/utilities/schedUtils"
 )
@@ -249,7 +249,7 @@ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos
 }
 
 func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
-	lmt := elekLogT.GENERAL
+	lmt := elekLogTypes.GENERAL
 	if ts == nil {
 		elektronLogging.ElektronLog.Log(lmt, log.InfoLevel,
 			log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}, "TASKS STARTING...")
@@ -263,35 +263,35 @@ func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 }
 
 func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
-	lmt := elekLogT.GENERAL
+	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}, "Watts considered for ")
 }
 
 func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
-	lmt := elekLogT.GENERAL
+	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}, "")
 }
 
 func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
-	lmt := elekLogT.WARNING
+	lmt := elekLogTypes.WARNING
 	elektronLogging.ElektronLog.Log(lmt,
 		log.WarnLevel,
 		log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}, "No tasks left to schedule ")
 }
 
 func (s *BaseScheduler) LogNumberOfRunningTasks() {
-	lmt := elekLogT.GENERAL
+	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}, "")
 }
 
 func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
-	lmt := elekLogT.GENERAL
+	lmt := elekLogTypes.GENERAL
 	buffer := bytes.Buffer{}
 	s.TasksRunningMutex.Lock()
 	for taskName := range s.Running[slaveID] {
@@ -304,13 +304,13 @@ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
 }
 
 func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
-	elektronLogging.ElektronLog.Log(elekLogT.SCHED_TRACE,
+	elektronLogging.ElektronLog.Log(elekLogTypes.SCHED_TRACE,
 		log.InfoLevel,
 		log.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}, "")
 }
 
 func (s *BaseScheduler) LogTerminateScheduler() {
-	lmt := elekLogT.GENERAL
+	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{}, "Done scheduling all tasks!")
@@ -318,7 +318,7 @@ func (s *BaseScheduler) LogTerminateScheduler() {
 
 func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 	offerResources ...interface{}) {
-	lmt := elekLogT.WARNING
+	lmt := elekLogTypes.WARNING
 	buffer := bytes.Buffer{}
 	buffer.WriteString(fmt.Sprintf("<CPU: %f, RAM: %f, Watts: %f>", offerResources...))
 	elektronLogging.ElektronLog.Log(lmt,
@@ -327,21 +327,21 @@ func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 }
 
 func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
-	lmt := elekLogT.ERROR
+	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
 		log.ErrorLevel,
 		log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}, "OFFER RESCINDED")
 }
 
 func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
-	lmt := elekLogT.ERROR
+	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
 		log.ErrorLevel,
 		log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}, "SLAVE LOST")
 }
 
 func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
-	lmt := elekLogT.ERROR
+	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
 		log.ErrorLevel,
 		log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}, "EXECUTOR LOST")
@@ -349,21 +349,21 @@ func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *m
 
 func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
 	slaveID *mesos.SlaveID, message string) {
-	lmt := elekLogT.GENERAL
+	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"Received Framework message from executor": executorID}, message)
 }
 
 func (s *BaseScheduler) LogMesosError(err string) {
-	lmt := elekLogT.ERROR
+	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
 		log.ErrorLevel,
 		log.Fields{"MESOS ERROR": fmt.Sprintf("%v", err)}, "")
 }
 
 func (s *BaseScheduler) LogElectronError(err error) {
-	lmt := elekLogT.ERROR
+	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
 		log.ErrorLevel,
 		log.Fields{"ELECTRON ERROR": fmt.Sprintf("%v", err)}, "")
@@ -371,36 +371,36 @@ func (s *BaseScheduler) LogElectronError(err error) {
 
 func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
 	masterInfo *mesos.MasterInfo) {
-	lmt := elekLogT.SUCCESS
+	lmt := elekLogTypes.SUCCESS
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%s", masterInfo)}, "FRAMEWORK REGISTERED!")
 }
 
 func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
-	lmt := elekLogT.GENERAL
+	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
 		log.Fields{"master": fmt.Sprintf("%s", masterInfo)}, "Framework re-registered")
 }
 
 func (s *BaseScheduler) LogDisconnected() {
-	lmt := elekLogT.WARNING
+	lmt := elekLogTypes.WARNING
 	elektronLogging.ElektronLog.Log(lmt,
 		log.WarnLevel,
 		log.Fields{}, "Framework disconnected with master")
 }
 
 func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
-	lmt := elekLogT.GENERAL
+	lmt := elekLogTypes.GENERAL
 	switch *status.State {
 	case mesos.TaskState_TASK_ERROR, mesos.TaskState_TASK_FAILED,
 		mesos.TaskState_TASK_KILLED, mesos.TaskState_TASK_LOST:
-		lmt = elekLogT.ERROR
+		lmt = elekLogTypes.ERROR
 	case mesos.TaskState_TASK_FINISHED:
-		lmt = elekLogT.SUCCESS
+		lmt = elekLogTypes.SUCCESS
 	default:
-		lmt = elekLogT.GENERAL
+		lmt = elekLogTypes.GENERAL
 	}
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
@@ -409,7 +409,7 @@ func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
 
 func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicyState) {
 	logSPS := func() {
-		elektronLogging.ElektronLog.Log(elekLogT.SPS,
+		elektronLogging.ElektronLog.Log(elekLogTypes.SPS,
 			log.InfoLevel,
 			log.Fields{"Name": name}, "")
 	}
@@ -420,14 +420,14 @@ func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicy
 	}
 	// Logging the size of the scheduling window and the scheduling policy
 	// that is going to schedule the tasks in the scheduling window.
-	elektronLogging.ElektronLog.Log(elekLogT.SCHED_WINDOW,
+	elektronLogging.ElektronLog.Log(elekLogTypes.SCHED_WINDOW,
 		log.InfoLevel,
 		log.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}, "")
 }
 
 func (s *BaseScheduler) LogClsfnAndTaskDistOverhead(overhead time.Duration) {
 	// Logging the overhead in microseconds.
-	elektronLogging.ElektronLog.Log(elekLogT.CLSFN_TASKDIST_OVERHEAD,
+	elektronLogging.ElektronLog.Log(elekLogTypes.CLSFN_TASKDIST_OVERHEAD,
 		log.InfoLevel,
 		log.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}, "")
 }
@@ -27,7 +27,7 @@ import (
 	"github.com/spdfg/elektron/constants"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 	"github.com/spdfg/elektron/utilities"
 	"github.com/spdfg/elektron/utilities/mesosUtils"
 )
@@ -35,12 +35,12 @@
 func coLocated(tasks map[string]bool, s BaseScheduler) {
 
 	for task := range tasks {
-		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 			log.InfoLevel,
 			log.Fields{"Task": task}, "")
 	}
 
-	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 		log.InfoLevel,
 		log.Fields{}, "---------------------")
 }
@@ -27,7 +27,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 )
 
 type SchedPolicyContext interface {
@@ -90,7 +90,7 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
 	// Determine the distribution of tasks in the new scheduling window.
 	taskDist, err := def.GetTaskDistributionInWindow(baseSchedRef.schedWindowSize, baseSchedRef.tasks)
 	baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
-	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
+	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
 		log.InfoLevel,
 		log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}, "Switching... ")
 	if err != nil {
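switchTaskDistBased times the classification step with time.Now().Sub(startTime), and LogClsfnAndTaskDistOverhead (above) reports it in microseconds. A sketch of the same measurement; time.Since is the idiomatic shorthand, and the sleep stands in for the classification work:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(3 * time.Millisecond) // stand-in for GetTaskDistributionInWindow
	overhead := time.Since(start)    // equivalent to time.Now().Sub(start)
	fmt.Printf("overhead: %.1f microseconds\n",
		float64(overhead.Nanoseconds())/1000.0)
}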
@@ -24,7 +24,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/constants"
 	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 	"strings"
 )
 
@@ -90,13 +90,13 @@ func UpdateEnvironment(offer *mesos.Offer) {
 	var host = offer.GetHostname()
 	// If this host is not present in the set of hosts.
 	if _, ok := constants.Hosts[host]; !ok {
-		elektronLogging.ElektronLog.Log(elekLogT.GENERAL, log.InfoLevel,
+		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL, log.InfoLevel,
 			log.Fields{"Adding host": fmt.Sprintf("%s", host)}, "New host detected")
 		// Add this host.
 		constants.Hosts[host] = struct{}{}
 		// Get the power class of this host.
 		class := PowerClass(offer)
-		elektronLogging.ElektronLog.Log(elekLogT.GENERAL, log.InfoLevel,
+		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL, log.InfoLevel,
 			log.Fields{"host": fmt.Sprintf("%s", host), "PowerClass": fmt.Sprintf("%s", class)}, "Registering the power class...")
 		// If new power class, register the power class.
 		if _, ok := constants.PowerClasses[class]; !ok {
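UpdateEnvironment treats constants.Hosts as a set via the idiomatic Go map[string]struct{} pattern: membership is the comma-ok read, and insertion stores the zero-size struct{}. A self-contained sketch with illustrative hostnames:

package main

import "fmt"

func main() {
	hosts := map[string]struct{}{}
	for _, h := range []string{"node-1", "node-2", "node-1"} {
		if _, ok := hosts[h]; !ok { // same membership test as above
			fmt.Println("New host detected:", h)
			hosts[h] = struct{}{} // struct{} occupies zero bytes
		}
	}
	fmt.Println("distinct hosts:", len(hosts))
}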
@@ -23,7 +23,7 @@ import (
 	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 	"github.com/spdfg/elektron/utilities"
 )
 
@@ -79,7 +79,7 @@ func (s *fillNextOfferCycle) apply(taskQueue []def.Task) (int, int) {
 	for _, task := range taskQueue {
 		numberOfTasksTraversed++
 		for i := *task.Instances; i > 0; i-- {
-			elektronLogging.ElektronLog.Log(elekLogT.GENERAL, log.InfoLevel,
+			elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL, log.InfoLevel,
 				log.Fields{}, fmt.Sprintf("Checking if Instance #%d of Task[%s] can be scheduled "+
 					"during the next offer cycle...", i, task.Name))
 			if canSchedule(task) {
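The fillNextOfferCycle policy walks the pending queue and checks every remaining instance of every task against a feasibility predicate, returning how much of the window it filled and how many tasks it traversed. A sketch of that traversal shape; the task type, queue contents, and canSchedule predicate here are stand-ins, not the repository's definitions:

package main

import "fmt"

type task struct {
	name      string
	instances int
}

func apply(queue []task, canSchedule func(task) bool) (filled, traversed int) {
	for _, t := range queue {
		traversed++
		for i := t.instances; i > 0; i-- {
			fmt.Printf("Checking if Instance #%d of Task[%s] can be scheduled "+
				"during the next offer cycle...\n", i, t.name)
			if canSchedule(t) {
				filled++
			}
		}
	}
	return filled, traversed
}

func main() {
	cheap := func(t task) bool { return t.instances <= 3 } // assumed predicate
	f, n := apply([]task{{"minife", 2}, {"dgemm", 4}}, cheap)
	fmt.Println("window filled:", f, "tasks traversed:", n)
}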