Added functions to logging library
Added Logf() and WithFields() functions: Logf() logs formatted messages, and WithFields() handles optional log fields.
parent b9592ed31c
commit bedfa52d7a
18 changed files with 319 additions and 260 deletions
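For reference, a minimal, self-contained sketch of the three call shapes this change introduces. It is written against plain logrus rather than the project's ElektronLogger wrapper (the wrapper additionally takes a log type such as elekLogTypes.CONSOLE or elekLogTypes.PCP as its first argument and routes through a chain of loggers, as the diff below shows); the host value is a made-up example.

// Illustration only: maps each logrus call to the corresponding ElektronLogger call.
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	host := "stratos-001" // hypothetical value, for illustration only

	// Plain message: ElektronLogger.Log(logType, level, message).
	log.StandardLogger().Log(log.InfoLevel, "PCP logging started")

	// Formatted message: ElektronLogger.Logf(logType, level, format, args...).
	log.StandardLogger().Logf(log.InfoLevel, "Capped host[%s] at %f", host, 50.0)

	// Optional fields: ElektronLogger.WithFields(...).Log(logType, level, message).
	log.WithFields(log.Fields{"host": host}).Log(log.InfoLevel, "New host detected")
}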
@@ -52,8 +52,7 @@ func (tc TasksToClassify) taskObservationCalculator(task Task) []float64 {
 } else if task.Watts != 0.0 {
 return []float64{task.Watts}
 } else {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.FatalLevel,
-log.Fields{}, "Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.FatalLevel, "Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload")
 return []float64{0.0} // Won't reach here.
 }
 }

@@ -108,8 +107,7 @@ func clusterSizeAvgMMMPU(tasks []Task, taskObservation func(task Task) []float64
 } else {
 // skip this value
 // there is an error in the task config.
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.ErrorLevel,
-log.Fields{}, err.Error())
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.ErrorLevel, err.Error())
 }
 } else {
 // There is only one observation for the task.
@@ -12,29 +12,53 @@ type ClsfnTaskDistrOverheadLogger struct {
 LoggerImpl
 }
-func NewClsfnTaskDistrOverheadLogger(logType int, prefix string) *ClsfnTaskDistrOverheadLogger {
+func NewClsfnTaskDistrOverheadLogger(b *baseLogData, logType int, prefix string) *ClsfnTaskDistrOverheadLogger {
 cLog := &ClsfnTaskDistrOverheadLogger{}
 cLog.Type = logType
 cLog.CreateLogFile(prefix)
-cLog.next = nil
+cLog.baseLogData = b
 return cLog
 }
-func (cLog ClsfnTaskDistrOverheadLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
+func (cLog ClsfnTaskDistrOverheadLogger) Log(logType int, level log.Level, message string) {
 if config.TaskDistrConfig.Enabled {
 if cLog.Type == logType {
 logger.SetLevel(level)
 if cLog.AllowOnConsole {
 logger.SetOutput(os.Stdout)
-logger.WithFields(logData).Println(message)
+logger.WithFields(cLog.data).Log(level, message)
 }
 logger.SetOutput(cLog.LogFile)
-logger.WithFields(logData).Println(message)
+logger.WithFields(cLog.data).Log(level, message)
 }
 if cLog.next != nil {
-cLog.next.Log(logType, level, logData, message)
+cLog.next.Log(logType, level, message)
+} else {
+// Clearing the fields.
+cLog.resetFields()
 }
 }
 }
+func (cLog ClsfnTaskDistrOverheadLogger) Logf(logType int, level log.Level, msgFmtString string, args ...interface{}) {
+if config.TaskDistrConfig.Enabled {
+if cLog.Type == logType {
+if cLog.AllowOnConsole {
+logger.SetOutput(os.Stdout)
+logger.WithFields(cLog.data).Logf(level, msgFmtString, args...)
+}
+logger.SetOutput(cLog.LogFile)
+logger.WithFields(cLog.data).Logf(level, msgFmtString, args...)
+}
+if cLog.next != nil {
+cLog.next.Logf(logType, level, msgFmtString, args...)
+} else {
+// Clearing the fields.
+cLog.resetFields()
+}
+}
+}
@@ -12,26 +12,48 @@ type ConsoleLogger struct {
 LoggerImpl
 }
-func NewConsoleLogger(logType int, prefix string) *ConsoleLogger {
+func NewConsoleLogger(b *baseLogData, logType int, prefix string) *ConsoleLogger {
 cLog := &ConsoleLogger{}
 cLog.Type = logType
 cLog.CreateLogFile(prefix)
-cLog.next = nil
+cLog.baseLogData = b
 return cLog
 }
-func (cLog ConsoleLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
+func (cLog ConsoleLogger) Log(logType int, level log.Level, message string) {
 if config.ConsoleConfig.Enabled {
 if logType <= cLog.Type {
 logger.SetLevel(level)
 logger.SetOutput(os.Stdout)
-logger.WithFields(logData).Println(message)
+logger.WithFields(cLog.data).Log(level, message)
 logger.SetOutput(cLog.LogFile)
-logger.WithFields(logData).Println(message)
+logger.WithFields(cLog.data).Log(level, message)
 }
 if cLog.next != nil {
-cLog.next.Log(logType, level, logData, message)
+cLog.next.Log(logType, level, message)
+} else {
+// Clearing the fields.
+cLog.resetFields()
 }
 }
 }
+func (cLog ConsoleLogger) Logf(logType int, level log.Level, msgFmtString string, args ...interface{}) {
+if config.ConsoleConfig.Enabled {
+if logType <= cLog.Type {
+logger.SetOutput(os.Stdout)
+logger.WithFields(cLog.data).Logf(level, msgFmtString, args...)
+logger.SetOutput(cLog.LogFile)
+logger.WithFields(cLog.data).Logf(level, msgFmtString, args...)
+}
+if cLog.next != nil {
+cLog.next.Logf(logType, level, msgFmtString, args...)
+} else {
+// Clearing the fields.
+cLog.resetFields()
+}
+}
+}
@@ -12,7 +12,7 @@ import (
 var config LoggerConfig
 var logger *log.Logger
 var formatter ElektronFormatter
-var ElektronLogger *ConsoleLogger
+var ElektronLogger *LoggerImpl
 var logDir logDirectory

 func BuildLogger(prefix string, logConfigFilename string) {

@@ -35,18 +35,21 @@ func BuildLogger(prefix string, logConfigFilename string) {
 }
 // Create a chain of loggers.
-cLog := NewConsoleLogger(CONSOLE, prefix)
-pLog := NewPCPLogger(PCP, prefix)
-schedTraceLog := NewSchedTraceLogger(SCHED_TRACE, prefix)
-spsLog := NewSchedPolicySwitchLogger(SPS, prefix)
-schedWindowLog := NewSchedWindowLogger(SCHED_WINDOW, prefix)
-tskDistLog := NewClsfnTaskDistrOverheadLogger(CLSFN_TASKDIST_OVERHEAD, prefix)
+b := &baseLogData{data: log.Fields{}}
+head := &LoggerImpl{baseLogData: b}
+cLog := NewConsoleLogger(b, CONSOLE, prefix)
+pLog := NewPCPLogger(b, PCP, prefix)
+schedTraceLog := NewSchedTraceLogger(b, SCHED_TRACE, prefix)
+spsLog := NewSchedPolicySwitchLogger(b, SPS, prefix)
+schedWindowLog := NewSchedWindowLogger(b, SCHED_WINDOW, prefix)
+tskDistLog := NewClsfnTaskDistrOverheadLogger(b, CLSFN_TASKDIST_OVERHEAD, prefix)
+head.SetNext(cLog)
 cLog.SetNext(pLog)
 pLog.SetNext(schedTraceLog)
 schedTraceLog.SetNext(spsLog)
 spsLog.SetNext(schedWindowLog)
 schedWindowLog.SetNext(tskDistLog)
-ElektronLogger = cLog
+ElektronLogger = head
 }
@@ -7,22 +7,48 @@ import (
 type Logger interface {
 SetNext(logType Logger)
-Log(logType int, level log.Level, logData log.Fields, message string)
+Log(logType int, level log.Level, message string)
+Logf(logType int, level log.Level, msgFmtString string, args ...interface{})
 CreateLogFile(prefix string)
 }
+type baseLogData struct {
+data log.Fields
+}
 type LoggerImpl struct {
+*baseLogData
 Type int
 AllowOnConsole bool
 LogFile *os.File
 next Logger
 }
+func (l *LoggerImpl) WithFields(logData log.Fields) *LoggerImpl {
+l.data = logData
+return l
+}
+func (l *LoggerImpl) WithField(key string, value string) *LoggerImpl {
+l.data[key] = value
+return l
+}
 func (l *LoggerImpl) SetNext(logType Logger) {
 l.next = logType
 }
-func (l *LoggerImpl) Log(logType int, level log.Level, logData log.Fields, message string) {
+func (l LoggerImpl) Log(logType int, level log.Level, message string) {
 if l.next != nil {
-l.next.Log(logType, level, logData, message)
+l.next.Log(logType, level, message)
 }
 }
+func (l LoggerImpl) Logf(logType int, level log.Level, msgFmtString string, args ...interface{}) {
+if l.next != nil {
+l.next.Logf(logType, level, msgFmtString, args...)
+}
+}
+func (l *LoggerImpl) resetFields() {
+l.data = nil
+l.data = log.Fields{}
+}
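The interface above is a chain of responsibility: every concrete logger embeds LoggerImpl, handles a message when its log type matches, forwards it to next, and the tail of the chain clears the shared fields via resetFields. A generic, runnable sketch of that dispatch pattern follows; it is not the project's code, and the handler names are made up for illustration.

package main

import "fmt"

// handler is a stand-in for LoggerImpl and its concrete subtypes.
type handler struct {
	kind string   // which log type this handler is responsible for
	next *handler // next logger in the chain, like LoggerImpl.next
}

func (h *handler) log(kind, msg string) {
	if h == nil {
		return // end of the chain (the real code clears shared fields here)
	}
	if h.kind == kind {
		fmt.Printf("[%s] %s\n", h.kind, msg) // handle the message
	}
	h.next.log(kind, msg) // always forward, like cLog.next.Log(...)
}

func main() {
	// Assemble a two-element chain, mirroring what BuildLogger does with SetNext.
	pcp := &handler{kind: "PCP"}
	console := &handler{kind: "CONSOLE", next: pcp}

	console.log("PCP", "PCP logging started") // only the PCP element prints it
}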
@@ -12,29 +12,53 @@ type PCPLogger struct {
 LoggerImpl
 }
-func NewPCPLogger(logType int, prefix string) *PCPLogger {
+func NewPCPLogger(b *baseLogData, logType int, prefix string) *PCPLogger {
 pLog := &PCPLogger{}
 pLog.Type = logType
 pLog.CreateLogFile(prefix)
-pLog.next = nil
+pLog.baseLogData = b
 return pLog
 }
-func (pLog PCPLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
+func (pLog PCPLogger) Log(logType int, level log.Level, message string) {
 if config.PCPConfig.Enabled {
 if pLog.Type == logType {
 logger.SetLevel(level)
 if pLog.AllowOnConsole {
 logger.SetOutput(os.Stdout)
-logger.WithFields(logData).Println(message)
+logger.WithFields(pLog.data).Log(level, message)
 }
 logger.SetOutput(pLog.LogFile)
-logger.WithFields(logData).Println(message)
+logger.WithFields(pLog.data).Log(level, message)
 }
 if pLog.next != nil {
-pLog.next.Log(logType, level, logData, message)
+pLog.next.Log(logType, level, message)
+} else {
+// Clearing the fields.
+pLog.resetFields()
 }
 }
 }
+func (pLog PCPLogger) Logf(logType int, level log.Level, msgFmtString string, args ...interface{}) {
+if config.PCPConfig.Enabled {
+if pLog.Type == logType {
+if pLog.AllowOnConsole {
+logger.SetOutput(os.Stdout)
+logger.WithFields(pLog.data).Logf(level, msgFmtString, args...)
+}
+logger.SetOutput(pLog.LogFile)
+logger.WithFields(pLog.data).Logf(level, msgFmtString, args...)
+}
+if pLog.next != nil {
+pLog.next.Logf(logType, level, msgFmtString, args...)
+} else {
+// Clearing the fields.
+pLog.resetFields()
+}
+}
+}
@@ -12,29 +12,53 @@ type SchedPolicySwitchLogger struct {
 LoggerImpl
 }
-func NewSchedPolicySwitchLogger(logType int, prefix string) *SchedPolicySwitchLogger {
+func NewSchedPolicySwitchLogger(b *baseLogData, logType int, prefix string) *SchedPolicySwitchLogger {
 sLog := &SchedPolicySwitchLogger{}
 sLog.Type = logType
 sLog.CreateLogFile(prefix)
-sLog.next = nil
+sLog.baseLogData = b
 return sLog
 }
-func (sLog SchedPolicySwitchLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
+func (sLog SchedPolicySwitchLogger) Log(logType int, level log.Level, message string) {
 if config.SPSConfig.Enabled {
 if sLog.Type == logType {
 logger.SetLevel(level)
 if sLog.AllowOnConsole {
 logger.SetOutput(os.Stdout)
-logger.WithFields(logData).Println(message)
+logger.WithFields(sLog.data).Log(level, message)
 }
 logger.SetOutput(sLog.LogFile)
-logger.WithFields(logData).Println(message)
+logger.WithFields(sLog.data).Log(level, message)
 }
 if sLog.next != nil {
-sLog.next.Log(logType, level, logData, message)
+sLog.next.Log(logType, level, message)
+} else {
+// Clearing the fields.
+sLog.resetFields()
 }
 }
 }
+func (sLog SchedPolicySwitchLogger) Logf(logType int, level log.Level, msgFmtString string, args ...interface{}) {
+if config.SPSConfig.Enabled {
+if sLog.Type == logType {
+if sLog.AllowOnConsole {
+logger.SetOutput(os.Stdout)
+logger.WithFields(sLog.data).Logf(level, msgFmtString, args...)
+}
+logger.SetOutput(sLog.LogFile)
+logger.WithFields(sLog.data).Logf(level, msgFmtString, args...)
+}
+if sLog.next != nil {
+sLog.next.Logf(logType, level, msgFmtString, args...)
+} else {
+// Clearing the fields.
+sLog.resetFields()
+}
+}
+}
@@ -12,29 +12,53 @@ type SchedTraceLogger struct {
 LoggerImpl
 }
-func NewSchedTraceLogger(logType int, prefix string) *SchedTraceLogger {
+func NewSchedTraceLogger(b *baseLogData, logType int, prefix string) *SchedTraceLogger {
 sLog := &SchedTraceLogger{}
 sLog.Type = logType
 sLog.CreateLogFile(prefix)
-sLog.next = nil
+sLog.baseLogData = b
 return sLog
 }
-func (sLog SchedTraceLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
+func (sLog SchedTraceLogger) Log(logType int, level log.Level, message string) {
 if config.SchedTraceConfig.Enabled {
 if sLog.Type == logType {
 logger.SetLevel(level)
 if sLog.AllowOnConsole {
 logger.SetOutput(os.Stdout)
-logger.WithFields(logData).Println(message)
+logger.WithFields(sLog.data).Log(level, message)
 }
 logger.SetOutput(sLog.LogFile)
-logger.WithFields(logData).Println(message)
+logger.WithFields(sLog.data).Log(level, message)
 }
 if sLog.next != nil {
-sLog.next.Log(logType, level, logData, message)
+sLog.next.Log(logType, level, message)
+} else {
+// Clearing the fields.
+sLog.resetFields()
 }
 }
 }
+func (sLog SchedTraceLogger) Logf(logType int, level log.Level, msgFmtString string, args ...interface{}) {
+if config.SchedTraceConfig.Enabled {
+if sLog.Type == logType {
+if sLog.AllowOnConsole {
+logger.SetOutput(os.Stdout)
+logger.WithFields(sLog.data).Logf(level, msgFmtString, args...)
+}
+logger.SetOutput(sLog.LogFile)
+logger.WithFields(sLog.data).Logf(level, msgFmtString, args...)
+}
+if sLog.next != nil {
+sLog.next.Logf(logType, level, msgFmtString, args...)
+} else {
+// Clearing the fields.
+sLog.resetFields()
+}
+}
+}
@@ -12,29 +12,53 @@ type SchedWindowLogger struct {
 LoggerImpl
 }
-func NewSchedWindowLogger(logType int, prefix string) *SchedWindowLogger {
+func NewSchedWindowLogger(b *baseLogData, logType int, prefix string) *SchedWindowLogger {
 sLog := &SchedWindowLogger{}
 sLog.Type = logType
 sLog.CreateLogFile(prefix)
-sLog.next = nil
+sLog.baseLogData = b
 return sLog
 }
-func (sLog SchedWindowLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
+func (sLog SchedWindowLogger) Log(logType int, level log.Level, message string) {
 if config.SchedWindowConfig.Enabled {
 if sLog.Type == logType {
 logger.SetLevel(level)
 if sLog.AllowOnConsole {
 logger.SetOutput(os.Stdout)
-logger.WithFields(logData).Println(message)
+logger.WithFields(sLog.data).Log(level, message)
 }
 logger.SetOutput(sLog.LogFile)
-logger.WithFields(logData).Println(message)
+logger.WithFields(sLog.data).Log(level, message)
 }
 if sLog.next != nil {
-sLog.next.Log(logType, level, logData, message)
+sLog.next.Log(logType, level, message)
+} else {
+// Clearing the fields.
+sLog.resetFields()
 }
 }
 }
+func (sLog SchedWindowLogger) Logf(logType int, level log.Level, msgFmtString string, args ...interface{}) {
+if config.SchedWindowConfig.Enabled {
+if sLog.Type == logType {
+if sLog.AllowOnConsole {
+logger.SetOutput(os.Stdout)
+logger.WithFields(sLog.data).Logf(level, msgFmtString, args...)
+}
+logger.SetOutput(sLog.LogFile)
+logger.WithFields(sLog.data).Logf(level, msgFmtString, args...)
+}
+if sLog.next != nil {
+sLog.next.Logf(logType, level, msgFmtString, args...)
+} else {
+// Clearing the fields.
+sLog.resetFields()
+}
+}
+}
pcp/pcp.go (16 changed lines)

@@ -47,9 +47,7 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 scanner.Scan()
 // Write to logfile
-elekLog.ElektronLogger.Log(elekLogTypes.PCP,
-log.InfoLevel,
-log.Fields{}, scanner.Text())
+elekLog.ElektronLogger.Log(elekLogTypes.PCP, log.InfoLevel, scanner.Text())
 // Throw away first set of results
 scanner.Scan()

@@ -60,18 +58,14 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 text := scanner.Text()
 if *logging {
-elekLog.ElektronLogger.Log(elekLogTypes.PCP,
-log.InfoLevel,
-log.Fields{}, text)
+elekLog.ElektronLogger.Log(elekLogTypes.PCP, log.InfoLevel, text)
 }
 seconds++
 }
 }(logging)
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "PCP logging started")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "PCP logging started")
 if err := cmd.Start(); err != nil {
 log.Fatal(err)

@@ -81,9 +75,7 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 select {
 case <-quit:
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "Stopping PCP logging in 5 seconds")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Stopping PCP logging in 5 seconds")
 time.Sleep(5 * time.Second)
 // http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
@@ -43,9 +43,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
 if hiThreshold < loThreshold {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "High threshold is lower than low threshold!")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "High threshold is lower than low threshold!")
 }
 pipe, err := cmd.StdoutPipe()

@@ -61,9 +59,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 scanner.Scan()
 // Write to logfile
-elekLog.ElektronLogger.Log(elekLogTypes.PCP,
-log.InfoLevel,
-log.Fields{}, scanner.Text())
+elekLog.ElektronLogger.Log(elekLogTypes.PCP, log.InfoLevel, scanner.Text())
 headers := strings.Split(scanner.Text(), ",")

@@ -99,16 +95,12 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 if *logging {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "Logging PCP...")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Logging PCP...")
 text := scanner.Text()
 split := strings.Split(text, ",")
-elekLog.ElektronLogger.Log(elekLogTypes.PCP,
-log.InfoLevel,
-log.Fields{}, text)
+elekLog.ElektronLogger.Log(elekLogTypes.PCP, log.InfoLevel, text)
 totalPower := 0.0
 for _, powerIndex := range powerIndexes {

@@ -119,10 +111,8 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 powerHistories[host].Value = power
 powerHistories[host] = powerHistories[host].Next()
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
-"")
+elekLog.ElektronLogger.WithFields(log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]),
+"Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
 totalPower += power
 }

@@ -133,16 +123,12 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
-"Sec Avg": fmt.Sprintf("%f", clusterMean)},
-"")
+elekLog.ElektronLogger.WithFields(log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
+"Sec Avg": fmt.Sprintf("%f", clusterMean)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
 if clusterMean > hiThreshold {
 elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "Need to cap a node")
+log.InfoLevel, "Need to cap a node")
 // Create statics for all victims and choose one to cap
 victims := make([]pcp.Victim, 0, 8)

@@ -163,14 +149,12 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 if !cappedHosts[victim.Host] {
 cappedHosts[victim.Host] = true
 orderCapped = append(orderCapped, victim.Host)
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Capping Victim": fmt.Sprintf("%s", victim.Host),
-"Avg. Wattage": fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Capping Victim": fmt.Sprintf("%s", victim.Host),
+"Avg. Wattage": fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
 if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
 elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
 log.ErrorLevel,
-log.Fields{}, "Error capping host")
+"Error capping host")
 }
 break // Only cap one machine at at time.
 }

@@ -184,13 +168,9 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 cappedHosts[host] = false
 // User RAPL package to send uncap.
 log.Printf("Uncapping host %s", host)
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Uncapped host": host}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Uncapped host": host}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
 if err := rapl.Cap(host, "rapl", 100); err != nil {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.ErrorLevel,
-log.Fields{}, "Error capping host")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.ErrorLevel, "Error capping host")
 }
 }
 }

@@ -200,9 +180,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 }
 }(logging, hiThreshold, loThreshold)
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "PCP logging started")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "PCP logging started")
 if err := cmd.Start(); err != nil {
 log.Fatal(err)

@@ -212,9 +190,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 select {
 case <-quit:
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "Stopping PCP logging in 5 seconds")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Stopping PCP logging in 5 seconds")
 time.Sleep(5 * time.Second)
 // http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
@@ -56,9 +56,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
 if hiThreshold < loThreshold {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "High threshold is lower than low threshold!")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "High threshold is lower than low threshold!")
 }
 pipe, err := cmd.StdoutPipe()

@@ -74,9 +72,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 scanner.Scan()
 // Write to logfile
-elekLog.ElektronLogger.Log(elekLogTypes.PCP,
-log.InfoLevel,
-log.Fields{}, scanner.Text())
+elekLog.ElektronLogger.Log(elekLogTypes.PCP, log.InfoLevel, scanner.Text())
 headers := strings.Split(scanner.Text(), ",")

@@ -115,15 +111,11 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 for scanner.Scan() {
 if *logging {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "Logging PCP...")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Logging PCP...")
 split := strings.Split(scanner.Text(), ",")
 text := scanner.Text()
-elekLog.ElektronLogger.Log(elekLogTypes.PCP,
-log.InfoLevel,
-log.Fields{}, text)
+elekLog.ElektronLogger.Log(elekLogTypes.PCP, log.InfoLevel, text)
 totalPower := 0.0
 for _, powerIndex := range powerIndexes {

@@ -134,10 +126,8 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 powerHistories[host].Value = power
 powerHistories[host] = powerHistories[host].Next()
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
-"")
+elekLog.ElektronLogger.WithFields(log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]),
+"Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
 totalPower += power
 }
 clusterPower := totalPower * pcp.RAPLUnits

@@ -147,24 +137,15 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
-"Sec Avg": fmt.Sprintf("%f", clusterMean)},
-"")
+elekLog.ElektronLogger.WithFields(log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
+"Sec Avg": fmt.Sprintf("%f", clusterMean)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
 if clusterMean >= hiThreshold {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "Need to cap a node")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Need to cap a node")
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
 // Create statics for all victims and choose one to cap
 victims := make([]pcp.Victim, 0, 8)

@@ -192,14 +173,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 // Need to cap this victim.
 if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.ErrorLevel,
-log.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}).Log(elekLogTypes.CONSOLE, log.ErrorLevel, "")
 } else {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, fmt.Sprintf("Capped host[%s] at %f", victims[i].Host, 50.0))
+elekLog.ElektronLogger.Logf(elekLogTypes.CONSOLE, log.InfoLevel, "Capped host[%s] at %f", victims[i].Host, 50.0)
 // Keeping track of this victim and it's cap value
 cappedVictims[victims[i].Host] = 50.0
 newVictimFound = true

@@ -223,14 +200,11 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 newCapValue := getNextCapValue(capValue, 2)
 if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.ErrorLevel,
-log.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}).Log(elekLogTypes.CONSOLE, log.ErrorLevel, "")
 } else {
 // Successful cap
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, fmt.Sprintf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue))
+elekLog.ElektronLogger.Logf(elekLogTypes.CONSOLE, log.InfoLevel,
+"Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
 // Checking whether this victim can be capped further
 if newCapValue <= constants.LowerCapLimit {
 // Deleting victim from cappedVictims.

@@ -253,23 +227,15 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 }
 }
 if !canCapAlreadyCappedVictim {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "No Victim left to cap")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "No Victim left to cap")
 }
 }
 } else if clusterMean < loThreshold {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "Need to uncap a node")
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Need to uncap a node")
+elekLog.ElektronLogger.WithFields(log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
 if len(orderCapped) > 0 {
 // We pick the host that is capped the most to uncap.
 orderCappedToSort := utilities.GetPairList(orderCappedVictims)

@@ -280,14 +246,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 newUncapValue := orderCappedVictims[hostToUncap] * 2.0
 if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.ErrorLevel,
-log.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}).Log(elekLogTypes.CONSOLE, log.ErrorLevel, "")
 } else {
 // Successful uncap
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, fmt.Sprintf("Uncapped host[%s] to %f", hostToUncap, newUncapValue))
+elekLog.ElektronLogger.Logf(elekLogTypes.CONSOLE, log.InfoLevel, "Uncapped host[%s] to %f", hostToUncap, newUncapValue)
 // Can we uncap this host further. If not, then we remove its entry from orderCapped
 if newUncapValue >= 100.0 { // can compare using ==
 // Deleting entry from orderCapped

@@ -308,9 +270,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 }
 }
 } else {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "No host staged for Uncapped")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "No host staged for Uncapped")
 }
 }
 }

@@ -319,9 +279,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 }(logging, hiThreshold, loThreshold)
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "PCP logging started")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "PCP logging started")
 if err := cmd.Start(); err != nil {
 log.Fatal(err)
 }

@@ -330,9 +288,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 select {
 case <-quit:
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "Stopping PCP logging in 5 seconds")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Stopping PCP logging in 5 seconds")
 time.Sleep(5 * time.Second)
 // http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
@@ -282,11 +282,8 @@ func main() {
 // Starting the scheduler driver.
 if status, err := driver.Run(); err != nil {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.ErrorLevel,
-log.Fields{"status": status.String(), "error": err.Error()}, "Framework stopped ")
+elekLog.ElektronLogger.WithFields(log.Fields{"status": status.String(), "error": err.Error()}).Log(elekLogTypes.CONSOLE,
+log.ErrorLevel, "Framework stopped ")
 }
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{}, "Exiting...")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Exiting...")
 }
@@ -250,39 +250,31 @@ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos
 func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 if ts == nil {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel,
-log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}, "TASKS STARTING...")
+elekLog.ElektronLogger.WithFields(log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "TASKS STARTING...")
 } else {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"task": fmt.Sprintf("%s", ts.Name),
-"Instance": fmt.Sprintf("%d", *ts.Instances), "host": fmt.Sprintf("%s", offer.GetHostname())},
-"TASK STARTING... ")
+elekLog.ElektronLogger.WithFields(log.Fields{"task": fmt.Sprintf("%s", ts.Name),
+"Instance": fmt.Sprintf("%d", *ts.Instances), "host": fmt.Sprintf("%s", offer.GetHostname())}).Log(elekLogTypes.CONSOLE,
+log.InfoLevel, "TASK STARTING... ")
 }
 }
 func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}, "Watts considered for ")
+elekLog.ElektronLogger.WithFields(log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "Watts considered for ")
 }
 func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}).Log(elekLogTypes.CONSOLE,
+log.InfoLevel, "")
 }
 func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.WarnLevel,
-log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}, "No tasks left to schedule ")
+elekLog.ElektronLogger.WithFields(log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}).Log(elekLogTypes.CONSOLE,
+log.WarnLevel, "No tasks left to schedule ")
 }
 func (s *BaseScheduler) LogNumberOfRunningTasks() {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}).Log(elekLogTypes.CONSOLE,
+log.InfoLevel, "")
 }
 func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {

@@ -292,81 +284,67 @@ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
 buffer.WriteString(fmt.Sprintln(taskName))
 }
 s.TasksRunningMutex.Unlock()
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}).Log(elekLogTypes.CONSOLE,
+log.InfoLevel, "")
 }
 func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
-elekLog.ElektronLogger.Log(elekLogTypes.SCHED_TRACE,
-log.InfoLevel,
-log.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}).Log(elekLogTypes.SCHED_TRACE, log.InfoLevel, "")
 }
 func (s *BaseScheduler) LogTerminateScheduler() {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, log.Fields{}, "Done scheduling all tasks!")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "Done scheduling all tasks!")
 }
 func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 offerResources ...interface{}) {
 buffer := bytes.Buffer{}
 buffer.WriteString(fmt.Sprintf("<CPU: %f, RAM: %f, Watts: %f>", offerResources...))
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.WarnLevel,
-log.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}, "DECLINING OFFER... Offer has insufficient resources to launch a task")
+elekLog.ElektronLogger.WithFields(log.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}).Log(elekLogTypes.CONSOLE,
+log.WarnLevel, "DECLINING OFFER... Offer has insufficient resources to launch a task")
 }
 func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.ErrorLevel,
-log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}, "OFFER RESCINDED")
+elekLog.ElektronLogger.WithFields(log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}).Log(elekLogTypes.CONSOLE,
+log.ErrorLevel, "OFFER RESCINDED")
 }
 func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.ErrorLevel,
-log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}, "SLAVE LOST")
+elekLog.ElektronLogger.WithFields(log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}).Log(elekLogTypes.CONSOLE,
+log.ErrorLevel, "SLAVE LOST")
 }
 func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.ErrorLevel,
-log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}, "EXECUTOR LOST")
+elekLog.ElektronLogger.WithFields(log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}).Log(elekLogTypes.CONSOLE, log.ErrorLevel, "EXECUTOR LOST")
 }
 func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
 slaveID *mesos.SlaveID, message string) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Received Framework message from executor": executorID}, message)
+elekLog.ElektronLogger.WithFields(log.Fields{"Received Framework message from executor": executorID}).Log(elekLogTypes.CONSOLE,
+log.InfoLevel, message)
 }
 func (s *BaseScheduler) LogMesosError(err string) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.ErrorLevel,
-log.Fields{"MESOS CONSOLE": fmt.Sprintf("%v", err)}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"MESOS CONSOLE": fmt.Sprintf("%v", err)}).Log(elekLogTypes.CONSOLE,
+log.ErrorLevel, "")
 }
 func (s *BaseScheduler) LogElectronError(err error) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.ErrorLevel, log.Fields{"ELECTRON CONSOLE": fmt.Sprintf("%v", err)}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"ELECTRON CONSOLE": fmt.Sprintf("%v", err)}).Log(elekLogTypes.CONSOLE, log.ErrorLevel, "")
 }
 func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
 masterInfo *mesos.MasterInfo) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%v", masterInfo)}, "FRAMEWORK REGISTERED!")
+elekLog.ElektronLogger.WithFields(log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%v", masterInfo)}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "FRAMEWORK REGISTERED!")
 }
 func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"master": fmt.Sprintf("%v", masterInfo)}, "Framework re-registered")
+elekLog.ElektronLogger.WithFields(log.Fields{"master": fmt.Sprintf("%v", masterInfo)}).Log(elekLogTypes.CONSOLE,
+log.InfoLevel, "Framework re-registered")
 }
 func (s *BaseScheduler) LogDisconnected() {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.WarnLevel, log.Fields{}, "Framework disconnected with master")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.WarnLevel, "Framework disconnected with master")
 }
 func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {

@@ -378,13 +356,12 @@ func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
 default:
 level = log.InfoLevel
 }
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-level, log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}, "Task Status received")
+elekLog.ElektronLogger.WithFields(log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}).Log(elekLogTypes.CONSOLE, level, "Task Status received")
 }
 func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicyState) {
 logSPS := func() {
-elekLog.ElektronLogger.Log(elekLogTypes.SPS, log.InfoLevel, log.Fields{"Name": name}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Name": name}).Log(elekLogTypes.SPS, log.InfoLevel, "")
 }
 if s.hasReceivedResourceOffers && (s.curSchedPolicy != nextPolicy) {
 logSPS()

@@ -393,14 +370,10 @@ func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicy
 }
 // Logging the size of the scheduling window and the scheduling policy
 // that is going to schedule the tasks in the scheduling window.
-elekLog.ElektronLogger.Log(elekLogTypes.SCHED_WINDOW,
-log.InfoLevel,
-log.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}).Log(elekLogTypes.SCHED_WINDOW, log.InfoLevel, "")
 }
 func (s *BaseScheduler) LogClsfnAndTaskDistOverhead(overhead time.Duration) {
 // Logging the overhead in microseconds.
-elekLog.ElektronLogger.Log(elekLogTypes.CLSFN_TASKDIST_OVERHEAD,
-log.InfoLevel,
-log.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}).Log(elekLogTypes.CLSFN_TASKDIST_OVERHEAD, log.InfoLevel, "")
 }
@@ -36,10 +36,10 @@ import (
 func coLocated(tasks map[string]bool, s BaseScheduler) {
 for _, task := range tasks {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, log.Fields{"Task": task}, "")
+elekLog.ElektronLogger.WithFields(log.Fields{"Task": task}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "")
 }
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, log.Fields{}, "---------------------")
+elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel, "---------------------")
 }
 // Get the powerClass of the given hostname.
@@ -90,9 +90,8 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
 // Determine the distribution of tasks in the new scheduling window.
 taskDist, err := def.GetTaskDistributionInWindow(baseSchedRef.schedWindowSize, baseSchedRef.tasks)
 baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE,
-log.InfoLevel,
-log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}, "Switching... ")
+elekLog.ElektronLogger.WithFields(log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}).Log(elekLogTypes.CONSOLE,
+log.InfoLevel, "Switching... ")
 if err != nil {
 // All the tasks in the window were only classified into 1 cluster.
 // Max-Min and Max-GreedyMins would work the same way as Bin-Packing for this situation.
@@ -90,14 +90,13 @@ func UpdateEnvironment(offer *mesos.Offer) {
 var host = offer.GetHostname()
 // If this host is not present in the set of hosts.
 if _, ok := constants.Hosts[host]; !ok {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel,
-log.Fields{"Adding host": host}, "New host detected")
+elekLog.ElektronLogger.WithFields(log.Fields{"Adding host": host}).Log(elekLogTypes.CONSOLE, log.InfoLevel, "New host detected")
 // Add this host.
 constants.Hosts[host] = struct{}{}
 // Get the power class of this host.
 class := PowerClass(offer)
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel,
-log.Fields{"host": host, "PowerClass": class}, "Registering the power class...")
+elekLog.ElektronLogger.WithFields(log.Fields{"host": host, "PowerClass": class}).Log(elekLogTypes.CONSOLE,
+log.InfoLevel, "Registering the power class...")
 // If new power class, register the power class.
 if _, ok := constants.PowerClasses[class]; !ok {
 constants.PowerClasses[class] = make(map[string]struct{})
@@ -19,8 +19,6 @@
 package schedUtils
 import (
-"fmt"
 log "github.com/sirupsen/logrus"
 "github.com/spdfg/elektron/def"
 elekLog "github.com/spdfg/elektron/elektronLogging"

@@ -80,9 +78,9 @@ func (s *fillNextOfferCycle) apply(taskQueue []def.Task) (int, int) {
 for _, task := range taskQueue {
 numberOfTasksTraversed++
 for i := *task.Instances; i > 0; i-- {
-elekLog.ElektronLogger.Log(elekLogTypes.CONSOLE, log.InfoLevel,
-log.Fields{}, fmt.Sprintf("Checking if Instance #%d of Task[%s] can be scheduled "+
-"during the next offer cycle...", i, task.Name))
+elekLog.ElektronLogger.Logf(elekLogTypes.CONSOLE, log.InfoLevel,
+"Checking if Instance #%d of Task[%s] can be scheduled "+
+"during the next offer cycle...", i, task.Name)
 if canSchedule(task) {
 filledCPU += task.CPU
 filledRAM += task.RAM
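The hunk above shows the same migration in miniature: instead of building the message with fmt.Sprintf and handing the pre-built string to Log (which also let the "fmt" import be dropped), the call site now passes the format string and arguments to Logf. A plain-logrus sketch of the before and after, with made-up values for illustration:

package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	i, name := 1, "task-A" // hypothetical instance number and task name

	// Before: format first, then log the pre-built string.
	log.StandardLogger().Log(log.InfoLevel,
		fmt.Sprintf("Checking if Instance #%d of Task[%s] can be scheduled during the next offer cycle...", i, name))

	// After: let the logger do the formatting.
	log.StandardLogger().Logf(log.InfoLevel,
		"Checking if Instance #%d of Task[%s] can be scheduled during the next offer cycle...", i, name)
}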