Revert previous commit. Renamed wrong alias
parent 9952b9861d
commit 270c8669e6
24 changed files with 229 additions and 229 deletions
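The change is mechanical: an earlier commit had renamed the conventional `log` alias for the logrus import to `elekLog` in places where that was not intended (even rewriting message strings such as "Unable to create elekLog directory"), and this commit restores the plain `log` alias across all 24 files, leaving Elektron's own `elekLogTypes`/`elekLogT` aliases untouched. A minimal standalone sketch of the aliasing pattern involved (an illustration, not code from this commit):

package main

// A Go import alias only rebinds the package identifier; the logrus API is
// identical either way. The state being reverted imported logrus as:
//
//	import elekLog "github.com/sirupsen/logrus"
//
// after which every call site read elekLog.Fields, elekLog.Fatal, and so on.
import (
	log "github.com/sirupsen/logrus"
)

func main() {
	// With the restored alias, call sites use the familiar log.* form.
	log.WithFields(log.Fields{"component": "example"}).Info("logrus alias demo")
}
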
@@ -25,7 +25,7 @@ import (
 
 	"github.com/mash/gokmeans"
 	"github.com/montanaflynn/stats"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 )
@@ -52,8 +52,8 @@ func (tc TasksToClassify) taskObservationCalculator(task Task) []float64 {
 	} else if task.Watts != 0.0 {
 		return []float64{task.Watts}
 	} else {
-		elektronLogging.ElektronLog.Log(elekLogTypes.ERROR, elekLog.FatalLevel,
-			elekLog.Fields{}, "Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload")
+		elektronLogging.ElektronLog.Log(elekLogTypes.ERROR, log.FatalLevel,
+			log.Fields{}, "Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload")
 		return []float64{0.0} // Won't reach here.
 	}
 }
@@ -108,8 +108,8 @@ func clusterSizeAvgMMMPU(tasks []Task, taskObservation func(task Task) []float64
 		} else {
 			// skip this value
 			// there is an error in the task config.
-			elektronLogging.ElektronLog.Log(elekLogTypes.ERROR, elekLog.ErrorLevel,
-				elekLog.Fields{}, fmt.Sprintf("%s", err))
+			elektronLogging.ElektronLog.Log(elekLogTypes.ERROR, log.ErrorLevel,
+				log.Fields{}, fmt.Sprintf("%s", err))
 		}
 	} else {
 		// There is only one observation for the task.
@@ -3,7 +3,7 @@ package elektronLogging
 import (
 	"bytes"
 	"github.com/fatih/color"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"strings"
 )
 
@@ -11,21 +11,21 @@ type ElektronFormatter struct {
 	TimestampFormat string
 }
 
-func (f ElektronFormatter) getColor(entry *elekLog.Entry) *color.Color {
+func (f ElektronFormatter) getColor(entry *log.Entry) *color.Color {
 	switch entry.Level {
-	case elekLog.InfoLevel:
+	case log.InfoLevel:
 		return color.New(color.FgGreen, color.Bold)
-	case elekLog.WarnLevel:
+	case log.WarnLevel:
 		return color.New(color.FgYellow, color.Bold)
-	case elekLog.ErrorLevel:
+	case log.ErrorLevel:
 		return color.New(color.FgRed, color.Bold)
-	case elekLog.FatalLevel:
+	case log.FatalLevel:
 		return color.New(color.FgRed, color.Bold)
 	default:
 		return color.New(color.FgWhite, color.Bold)
 	}
 }
-func (f ElektronFormatter) Format(entry *elekLog.Entry) ([]byte, error) {
+func (f ElektronFormatter) Format(entry *log.Entry) ([]byte, error) {
 	var b *bytes.Buffer
 
 	if entry.Buffer != nil {
@@ -1,7 +1,7 @@
 package elektronLogging
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"os"
 	"strings"
 )
@@ -17,7 +17,7 @@ func NewClsfnTaskDistOverheadLogger(logType int, prefix string) *ClsfnTaskDistOv
 	return cLog
 }
 
-func (cLog *ClsfnTaskDistOverheadLogger) Log(logType int, level elekLog.Level, logData elekLog.Fields, message string) {
+func (cLog *ClsfnTaskDistOverheadLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
 	if cLog.Type == logType {
 
 		logger.SetLevel(level)
@@ -43,7 +43,7 @@ func (cLog *ClsfnTaskDistOverheadLogger) SetLogFile(prefix string) {
 		tskDistLogPrefix = strings.Join([]string{dirName, tskDistLogPrefix}, "/")
 	}
 	if logFile, err := os.Create(tskDistLogPrefix); err != nil {
-		elekLog.Fatal("Unable to create logFile: ", err)
+		log.Fatal("Unable to create logFile: ", err)
 	} else {
 		cLog.LogFileName = logFile
 		cLog.AllowOnConsole = config.TaskDistConfig.AllowOnConsole
@@ -1,7 +1,7 @@
 package elektronLogging
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"os"
 	"strings"
 )
@@ -16,7 +16,7 @@ func NewConsoleLogger(logType int, prefix string) *ConsoleLogger {
 	cLog.SetLogFile(prefix)
 	return cLog
 }
-func (cLog *ConsoleLogger) Log(logType int, level elekLog.Level, logData elekLog.Fields, message string) {
+func (cLog *ConsoleLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
 	if logType <= cLog.Type {
 
 		logger.SetLevel(level)
@@ -40,7 +40,7 @@ func (cLog *ConsoleLogger) SetLogFile(prefix string) {
 		consoleLogPrefix = strings.Join([]string{dirName, consoleLogPrefix}, "/")
 	}
 	if logFile, err := os.Create(consoleLogPrefix); err != nil {
-		elekLog.Fatal("Unable to create logFile: ", err)
+		log.Fatal("Unable to create logFile: ", err)
 	} else {
 		cLog.LogFileName = logFile
 		cLog.AllowOnConsole = true
@@ -1,7 +1,7 @@
 package elektronLogging
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"os"
 	"strconv"
 	"strings"
@@ -28,7 +28,7 @@ func (logD *logDirectory) createLogDir(prefix string, startTime time.Time) {
 		if _, err := os.Stat(logDirName); os.IsNotExist(err) {
 			os.Mkdir(logDirName, 0755)
 		} else {
-			elekLog.Println("Unable to create elekLog directory: ", err)
+			log.Println("Unable to create log directory: ", err)
 			logDirName = ""
 		}
 
@@ -1,7 +1,7 @@
 package elektronLogging
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	. "github.com/spdfg/elektron/elektronLogging/types"
 	"os"
 	"strings"
@@ -9,7 +9,7 @@ import (
 )
 
 var config LoggerConfig
-var logger *elekLog.Logger
+var logger *log.Logger
 var formatter ElektronFormatter
 var ElektronLog *LoggerImpl
 var logDir logDirectory
@@ -19,16 +19,16 @@ func BuildLogger(prefix string) {
 	// read configuration from yaml
 	config.GetConfig()
 
-	// create the elekLog directory
+	// create the log directory
 	startTime := time.Now()
 	formatter.TimestampFormat = "2006-01-02 15:04:05"
 	formattedStartTime := startTime.Format("20060102150405")
 
 	logDir.createLogDir(prefix, startTime)
 	prefix = strings.Join([]string{prefix, formattedStartTime}, "_")
-	logger = &elekLog.Logger{
+	logger = &log.Logger{
 		Out:       os.Stderr,
-		Level:     elekLog.DebugLevel,
+		Level:     log.DebugLevel,
 		Formatter: &formatter,
 	}
 
@@ -1,13 +1,13 @@
 package elektronLogging
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"os"
 )
 
 type Logger interface {
 	SetNext(logType Logger)
-	Log(logType int, level elekLog.Level, logData elekLog.Fields, message string)
+	Log(logType int, level log.Level, logData log.Fields, message string)
 	SetLogFile(prefix string)
 }
 type LoggerImpl struct {
@@ -21,7 +21,7 @@ func (l *LoggerImpl) SetNext(logType Logger) {
 	l.next = logType
 }
 
-func (l *LoggerImpl) Log(logType int, level elekLog.Level, logData elekLog.Fields, message string) {
+func (l *LoggerImpl) Log(logType int, level log.Level, logData log.Fields, message string) {
 	if l.next != nil {
 		l.next.Log(logType, level, logData, message)
 	}
@@ -1,7 +1,7 @@
 package elektronLogging
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	elekEnv "github.com/spdfg/elektron/environment"
 	"gopkg.in/yaml.v2"
 	"io/ioutil"
@@ -57,11 +57,11 @@ func (c *LoggerConfig) GetConfig() *LoggerConfig {
 
 	yamlFile, err := ioutil.ReadFile(elekEnv.LogConfigYaml)
 	if err != nil {
-		elekLog.Printf("yamlFile.Get err #%v ", err)
+		log.Printf("yamlFile.Get err #%v ", err)
 	}
 	err = yaml.Unmarshal(yamlFile, c)
 	if err != nil {
-		elekLog.Fatalf("Unmarshal: %v", err)
+		log.Fatalf("Unmarshal: %v", err)
 	}
 
 	return c
@@ -1,7 +1,7 @@
 package elektronLogging
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"os"
 	"strings"
 )
@@ -17,7 +17,7 @@ func NewPcpLogger(logType int, prefix string) *PcpLogger {
 	return pLog
 }
 
-func (pLog *PcpLogger) Log(logType int, level elekLog.Level, logData elekLog.Fields, message string) {
+func (pLog *PcpLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
 	if pLog.Type == logType {
 
 		logger.SetLevel(level)
@@ -43,7 +43,7 @@ func (plog *PcpLogger) SetLogFile(prefix string) {
 		pcpLogPrefix = strings.Join([]string{dirName, pcpLogPrefix}, "/")
 	}
 	if logFile, err := os.Create(pcpLogPrefix); err != nil {
-		elekLog.Fatal("Unable to create logFile: ", err)
+		log.Fatal("Unable to create logFile: ", err)
 	} else {
 		plog.LogFileName = logFile
 		plog.AllowOnConsole = config.PCPConfig.AllowOnConsole
@@ -1,7 +1,7 @@
 package elektronLogging
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"os"
 	"strings"
 )
@@ -17,7 +17,7 @@ func NewSchedPolicySwitchLogger(logType int, prefix string) *SchedPolicySwitchLo
 	return sLog
 }
 
-func (sLog *SchedPolicySwitchLogger) Log(logType int, level elekLog.Level, logData elekLog.Fields, message string) {
+func (sLog *SchedPolicySwitchLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
 	if sLog.Type == logType {
 
 		logger.SetLevel(level)
@@ -43,7 +43,7 @@ func (sLog *SchedPolicySwitchLogger) SetLogFile(prefix string) {
 		spsLogPrefix = strings.Join([]string{dirName, spsLogPrefix}, "/")
 	}
 	if logFile, err := os.Create(spsLogPrefix); err != nil {
-		elekLog.Fatal("Unable to create logFile: ", err)
+		log.Fatal("Unable to create logFile: ", err)
 	} else {
 		sLog.LogFileName = logFile
 		sLog.AllowOnConsole = config.SPSConfig.AllowOnConsole
@@ -1,7 +1,7 @@
 package elektronLogging
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"os"
 	"strings"
 )
@@ -17,7 +17,7 @@ func NewSchedTraceLogger(logType int, prefix string) *SchedTraceLogger {
 	return sLog
 }
 
-func (sLog *SchedTraceLogger) Log(logType int, level elekLog.Level, logData elekLog.Fields, message string) {
+func (sLog *SchedTraceLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
 	if sLog.Type == logType {
 
 		logger.SetLevel(level)
@@ -43,7 +43,7 @@ func (sLog *SchedTraceLogger) SetLogFile(prefix string) {
 		schedTraceLogPrefix = strings.Join([]string{dirName, schedTraceLogPrefix}, "/")
 	}
 	if logFile, err := os.Create(schedTraceLogPrefix); err != nil {
-		elekLog.Fatal("Unable to create logFile: ", err)
+		log.Fatal("Unable to create logFile: ", err)
 	} else {
 		sLog.LogFileName = logFile
 		sLog.AllowOnConsole = config.SchedTraceConfig.AllowOnConsole
@@ -1,7 +1,7 @@
 package elektronLogging
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"os"
 	"strings"
 )
@@ -17,7 +17,7 @@ func NewSchedWindowLogger(logType int, prefix string) *SchedWindowLogger {
 	return sLog
 }
 
-func (sLog *SchedWindowLogger) Log(logType int, level elekLog.Level, logData elekLog.Fields, message string) {
+func (sLog *SchedWindowLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
 	if sLog.Type == logType {
 
 		logger.SetLevel(level)
@@ -43,7 +43,7 @@ func (sLog *SchedWindowLogger) SetLogFile(prefix string) {
 		schedWindowLogPrefix = strings.Join([]string{dirName, schedWindowLogPrefix}, "/")
 	}
 	if logFile, err := os.Create(schedWindowLogPrefix); err != nil {
-		elekLog.Fatal("Unable to create logFile: ", err)
+		log.Fatal("Unable to create logFile: ", err)
 	} else {
 		sLog.LogFileName = logFile
 		sLog.AllowOnConsole = config.SchedWindowConfig.AllowOnConsole
pcp/pcp.go
@@ -24,7 +24,7 @@ import (
 	"syscall"
 	"time"
 
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
 )
@@ -36,7 +36,7 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 
 	pipe, err := cmd.StdoutPipe()
 	if err != nil {
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 	//cmd.Stdout = stdout
 
@@ -48,8 +48,8 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 
 		// Write to logfile
 		elektronLogging.ElektronLog.Log(elekLogT.PCP,
-			elekLog.InfoLevel,
-			elekLog.Fields{}, scanner.Text())
+			log.InfoLevel,
+			log.Fields{}, scanner.Text())
 
 		// Throw away first set of results
 		scanner.Scan()
@@ -61,8 +61,8 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 
 			if *logging {
 				elektronLogging.ElektronLog.Log(elekLogT.PCP,
-					elekLog.InfoLevel,
-					elekLog.Fields{}, text)
+					log.InfoLevel,
+					log.Fields{}, text)
 			}
 
 			seconds++
@@ -70,11 +70,11 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 	}(logging)
 
 	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
-		elekLog.InfoLevel,
-		elekLog.Fields{}, "PCP logging started")
+		log.InfoLevel,
+		log.Fields{}, "PCP logging started")
 
 	if err := cmd.Start(); err != nil {
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 
 	pgid, err := syscall.Getpgid(cmd.Process.Pid)
@@ -82,8 +82,8 @@ func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 	select {
 	case <-quit:
 		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
-			elekLog.InfoLevel,
-			elekLog.Fields{}, "Stopping PCP logging in 5 seconds")
+			log.InfoLevel,
+			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)
 
 		// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
@@ -29,7 +29,7 @@ import (
 	"syscall"
 	"time"
 
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
 	"github.com/spdfg/elektron/pcp"
@@ -44,13 +44,13 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 
 	if hiThreshold < loThreshold {
 		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-			elekLog.InfoLevel,
-			elekLog.Fields{}, "High threshold is lower than low threshold!")
+			log.InfoLevel,
+			log.Fields{}, "High threshold is lower than low threshold!")
 	}
 
 	pipe, err := cmd.StdoutPipe()
 	if err != nil {
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 	//cmd.Stdout = stdout
 
@@ -62,8 +62,8 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 
 		// Write to logfile
 		elektronLogging.ElektronLog.Log(elekLogTypes.PCP,
-			elekLog.InfoLevel,
-			elekLog.Fields{}, scanner.Text())
+			log.InfoLevel,
+			log.Fields{}, scanner.Text())
 
 		headers := strings.Split(scanner.Text(), ",")
 
@@ -100,15 +100,15 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 			if *logging {
 
 				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-					elekLog.InfoLevel,
-					elekLog.Fields{}, "Logging PCP...")
+					log.InfoLevel,
+					log.Fields{}, "Logging PCP...")
 
 				text := scanner.Text()
 				split := strings.Split(text, ",")
 
 				elektronLogging.ElektronLog.Log(elekLogTypes.PCP,
-					elekLog.InfoLevel,
-					elekLog.Fields{}, text)
+					log.InfoLevel,
+					log.Fields{}, text)
 
 				totalPower := 0.0
 				for _, powerIndex := range powerIndexes {
@@ -120,8 +120,8 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 					powerHistories[host] = powerHistories[host].Next()
 
 					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-						elekLog.InfoLevel,
-						elekLog.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
+						log.InfoLevel,
+						log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
 						"")
 
 					totalPower += power
@@ -134,15 +134,15 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 				clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
 
 				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-					elekLog.InfoLevel,
-					elekLog.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
+					log.InfoLevel,
+					log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
 						"Sec Avg": fmt.Sprintf("%f", clusterMean)},
 					"")
 
 				if clusterMean > hiThreshold {
 					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-						elekLog.InfoLevel,
-						elekLog.Fields{}, "Need to cap a node")
+						log.InfoLevel,
+						log.Fields{}, "Need to cap a node")
 					// Create statics for all victims and choose one to cap
 					victims := make([]pcp.Victim, 0, 8)
 
@@ -164,13 +164,13 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 						cappedHosts[victim.Host] = true
 						orderCapped = append(orderCapped, victim.Host)
 						elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-							elekLog.InfoLevel,
-							elekLog.Fields{"Capping Victim": fmt.Sprintf("%s", victim.Host),
+							log.InfoLevel,
+							log.Fields{"Capping Victim": fmt.Sprintf("%s", victim.Host),
 								"Avg. Wattage": fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}, "")
 						if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
 							elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
-								elekLog.ErrorLevel,
-								elekLog.Fields{}, "Error capping host")
+								log.ErrorLevel,
+								log.Fields{}, "Error capping host")
 						}
 						break // Only cap one machine at at time.
 					}
@@ -183,14 +183,14 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 					orderCapped = orderCapped[:len(orderCapped)-1]
 					cappedHosts[host] = false
 					// User RAPL package to send uncap.
-					elekLog.Printf("Uncapping host %s", host)
+					log.Printf("Uncapping host %s", host)
 					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-						elekLog.InfoLevel,
-						elekLog.Fields{"Uncapped host": host}, "")
+						log.InfoLevel,
+						log.Fields{"Uncapped host": host}, "")
 					if err := rapl.Cap(host, "rapl", 100); err != nil {
 						elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
-							elekLog.ErrorLevel,
-							elekLog.Fields{}, "Error capping host")
+							log.ErrorLevel,
+							log.Fields{}, "Error capping host")
 					}
 				}
 			}
@@ -201,11 +201,11 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 	}(logging, hiThreshold, loThreshold)
 
 	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-		elekLog.InfoLevel,
-		elekLog.Fields{}, "PCP logging started")
+		log.InfoLevel,
+		log.Fields{}, "PCP logging started")
 
 	if err := cmd.Start(); err != nil {
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 
 	pgid, err := syscall.Getpgid(cmd.Process.Pid)
@@ -213,8 +213,8 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 	select {
 	case <-quit:
 		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-			elekLog.InfoLevel,
-			elekLog.Fields{}, "Stopping PCP logging in 5 seconds")
+			log.InfoLevel,
+			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)
 
 		// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
@@ -30,7 +30,7 @@ import (
 	"syscall"
 	"time"
 
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/constants"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
@@ -57,13 +57,13 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 
 	if hiThreshold < loThreshold {
 		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-			elekLog.InfoLevel,
-			elekLog.Fields{}, "High threshold is lower than low threshold!")
+			log.InfoLevel,
+			log.Fields{}, "High threshold is lower than low threshold!")
 	}
 
 	pipe, err := cmd.StdoutPipe()
 	if err != nil {
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 	//cmd.Stdout = stdout
 
@@ -75,8 +75,8 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 
 		// Write to logfile
 		elektronLogging.ElektronLog.Log(elekLogTypes.PCP,
-			elekLog.InfoLevel,
-			elekLog.Fields{}, scanner.Text())
+			log.InfoLevel,
+			log.Fields{}, scanner.Text())
 
 		headers := strings.Split(scanner.Text(), ",")
 
@@ -116,14 +116,14 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 		for scanner.Scan() {
 			if *logging {
 				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-					elekLog.InfoLevel,
-					elekLog.Fields{}, "Logging PCP...")
+					log.InfoLevel,
+					log.Fields{}, "Logging PCP...")
 				split := strings.Split(scanner.Text(), ",")
 
 				text := scanner.Text()
 				elektronLogging.ElektronLog.Log(elekLogTypes.PCP,
-					elekLog.InfoLevel,
-					elekLog.Fields{}, text)
+					log.InfoLevel,
+					log.Fields{}, text)
 
 				totalPower := 0.0
 				for _, powerIndex := range powerIndexes {
@@ -135,8 +135,8 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					powerHistories[host] = powerHistories[host].Next()
 
 					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-						elekLog.InfoLevel,
-						elekLog.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
+						log.InfoLevel,
+						log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
 						"")
 					totalPower += power
 				}
@@ -148,23 +148,23 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 				clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
 
 				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-					elekLog.InfoLevel,
-					elekLog.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
+					log.InfoLevel,
+					log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
 						"Sec Avg": fmt.Sprintf("%f", clusterMean)},
 					"")
 
 				if clusterMean >= hiThreshold {
 					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-						elekLog.InfoLevel,
-						elekLog.Fields{}, "Need to cap a node")
+						log.InfoLevel,
+						log.Fields{}, "Need to cap a node")
 
 					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-						elekLog.InfoLevel,
-						elekLog.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
+						log.InfoLevel,
+						log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
 
 					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-						elekLog.InfoLevel,
-						elekLog.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")
+						log.InfoLevel,
+						log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")
 					// Create statics for all victims and choose one to cap
 					victims := make([]pcp.Victim, 0, 8)
 
@@ -193,13 +193,13 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 							if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {
 
 								elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
-									elekLog.ErrorLevel,
-									elekLog.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}, "")
+									log.ErrorLevel,
+									log.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}, "")
 							} else {
 
 								elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-									elekLog.InfoLevel,
-									elekLog.Fields{}, fmt.Sprintf("Capped host[%s] at %f", victims[i].Host, 50.0))
+									log.InfoLevel,
+									log.Fields{}, fmt.Sprintf("Capped host[%s] at %f", victims[i].Host, 50.0))
 								// Keeping track of this victim and it's cap value
 								cappedVictims[victims[i].Host] = 50.0
 								newVictimFound = true
@@ -224,13 +224,13 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 						if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {
 
 							elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
-								elekLog.ErrorLevel,
-								elekLog.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}, "")
+								log.ErrorLevel,
+								log.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}, "")
 						} else {
 							// Successful cap
 							elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-								elekLog.InfoLevel,
-								elekLog.Fields{}, fmt.Sprintf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue))
+								log.InfoLevel,
+								log.Fields{}, fmt.Sprintf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue))
 							// Checking whether this victim can be capped further
 							if newCapValue <= constants.LowerCapLimit {
 								// Deleting victim from cappedVictims.
@@ -254,22 +254,22 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					}
 					if !canCapAlreadyCappedVictim {
 						elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-							elekLog.InfoLevel,
-							elekLog.Fields{}, "No Victim left to cap")
+							log.InfoLevel,
+							log.Fields{}, "No Victim left to cap")
 					}
 				}
 
 			} else if clusterMean < loThreshold {
 
 				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-					elekLog.InfoLevel,
-					elekLog.Fields{}, "Need to uncap a node")
+					log.InfoLevel,
+					log.Fields{}, "Need to uncap a node")
 				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-					elekLog.InfoLevel,
-					elekLog.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
+					log.InfoLevel,
+					log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
 				elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-					elekLog.InfoLevel,
-					elekLog.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")
+					log.InfoLevel,
+					log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")
 				if len(orderCapped) > 0 {
 					// We pick the host that is capped the most to uncap.
 					orderCappedToSort := utilities.GetPairList(orderCappedVictims)
@@ -281,13 +281,13 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {
 
 						elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
-							elekLog.ErrorLevel,
-							elekLog.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}, "")
+							log.ErrorLevel,
+							log.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}, "")
 					} else {
 						// Successful uncap
 						elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-							elekLog.InfoLevel,
-							elekLog.Fields{}, fmt.Sprintf("Uncapped host[%s] to %f", hostToUncap, newUncapValue))
+							log.InfoLevel,
+							log.Fields{}, fmt.Sprintf("Uncapped host[%s] to %f", hostToUncap, newUncapValue))
 						// Can we uncap this host further. If not, then we remove its entry from orderCapped
 						if newUncapValue >= 100.0 { // can compare using ==
 							// Deleting entry from orderCapped
@@ -309,8 +309,8 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					}
 				} else {
 					elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-						elekLog.InfoLevel,
-						elekLog.Fields{}, "No host staged for Uncapped")
+						log.InfoLevel,
+						log.Fields{}, "No host staged for Uncapped")
 				}
 			}
 		}
@@ -320,10 +320,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	}(logging, hiThreshold, loThreshold)
 
 	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-		elekLog.InfoLevel,
-		elekLog.Fields{}, "PCP logging started")
+		log.InfoLevel,
+		log.Fields{}, "PCP logging started")
 	if err := cmd.Start(); err != nil {
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 
 	pgid, err := syscall.Getpgid(cmd.Process.Pid)
@@ -331,8 +331,8 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	select {
 	case <-quit:
 		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-			elekLog.InfoLevel,
-			elekLog.Fields{}, "Stopping PCP logging in 5 seconds")
+			log.InfoLevel,
+			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)
 
 		// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
scheduler.go
@@ -24,7 +24,7 @@ import (
 	"github.com/golang/protobuf/proto"
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
@@ -104,7 +104,7 @@ func main() {
 	if *schedPolicyName != "first-fit" {
 		if _, ok := schedulers.SchedPolicies[*schedPolicyName]; !ok {
 			// invalid scheduling policy
-			elekLog.Println("Invalid scheduling policy given. The possible scheduling policies are:")
+			log.Println("Invalid scheduling policy given. The possible scheduling policies are:")
 			listAllSchedulingPolicies()
 			os.Exit(1)
 		}
@@ -130,7 +130,7 @@ func main() {
 	if *enableSchedPolicySwitch {
 		// Scheduling policy config file required.
 		if spcf := *schedPolConfigFile; spcf == "" {
-			elekLog.Fatal("Scheduling policy characteristics file not provided.")
+			log.Fatal("Scheduling policy characteristics file not provided.")
 		} else {
 			// Initializing the characteristics of the scheduling policies.
 			schedulers.InitSchedPolicyCharacteristics(spcf)
@@ -148,7 +148,7 @@ func main() {
 	// If CMW is disabled, then the Median of Medians Max Peak Power Usage value is used
 	// as the watts value for each task.
 	if *wattsAsAResource {
-		elekLog.Println("WaaR enabled...")
+		log.Println("WaaR enabled...")
 		schedOptions = append(schedOptions, schedulers.WithWattsAsAResource(*wattsAsAResource))
 		schedOptions = append(schedOptions, schedulers.WithClassMapWatts(*classMapWatts))
 	}
@@ -165,7 +165,7 @@ func main() {
 		"prog-extrema": {},
 	}
 	if _, ok := powercapValues[*powerCapPolicy]; !ok {
-		elekLog.Fatal("Incorrect power-capping algorithm specified.")
+		log.Fatal("Incorrect power-capping algorithm specified.")
 	} else {
 		// Indicating which power capping algorithm to use, if any.
 		// The pcp-logging with/without power capping will be run after the
@@ -185,7 +185,7 @@ func main() {
 		// These values are not used to configure the scheduler.
 		// hiThreshold and loThreshold are passed to the powercappers.
 		if *hiThreshold < *loThreshold {
-			elekLog.Fatal("High threshold is of a lower value than low " +
+			log.Fatal("High threshold is of a lower value than low " +
 				"threshold.")
 		}
 	}
@@ -195,11 +195,11 @@ func main() {
 	// Tasks
 	// If httpServer is disabled, then path of file containing workload needs to be provided.
 	if *tasksFile == "" {
-		elekLog.Fatal("Tasks specifications file not provided.")
+		log.Fatal("Tasks specifications file not provided.")
 	}
 	tasks, err := def.TasksFromJSON(*tasksFile)
 	if err != nil || len(tasks) == 0 {
-		elekLog.Fatal("Invalid tasks specification file provided.")
+		log.Fatal("Invalid tasks specification file provided.")
 	}
 	schedOptions = append(schedOptions, schedulers.WithTasks(tasks))
 
@@ -216,12 +216,12 @@ func main() {
 		Scheduler: scheduler,
 	})
 	if err != nil {
-		elekLog.Fatal(fmt.Sprintf("Unable to create scheduler driver: %s", err))
+		log.Fatal(fmt.Sprintf("Unable to create scheduler driver: %s", err))
 	}
 
 	// Checking if prefix contains any special characters.
 	if strings.Contains(*pcplogPrefix, "/") {
-		elekLog.Fatal("elekLog file prefix should not contain '/'.")
+		log.Fatal("log file prefix should not contain '/'.")
 	}
 	elektronLogging.BuildLogger(*pcplogPrefix)
 
@@ -236,7 +236,7 @@ func main() {
 			*loThreshold, *pcpConfigFile)
 	}
 
-	// Take a second between starting PCP elekLog and continuing.
+	// Take a second between starting PCP log and continuing.
 	time.Sleep(1 * time.Second)
 
 	// Attempt to handle SIGINT to not leave pmdumptext running.
@@ -250,7 +250,7 @@ func main() {
 			return
 		}
 
-		elekLog.Println("Received SIGINT... stopping")
+		log.Println("Received SIGINT... stopping")
 		close(done)
 	}()
 
@@ -266,7 +266,7 @@ func main() {
 		select {
 		case <-done:
 			close(pcpLog)
-			time.Sleep(5 * time.Second) //Wait for PCP to elekLog a few more seconds
+			time.Sleep(5 * time.Second) //Wait for PCP to log a few more seconds
 			// Closing logging channels.
 			//case <-time.After(shutdownTimeout):
 		}
@@ -279,10 +279,10 @@ func main() {
 	// Starting the scheduler driver.
 	if status, err := driver.Run(); err != nil {
 		elektronLogging.ElektronLog.Log(elekLogTypes.ERROR,
-			elekLog.ErrorLevel,
-			elekLog.Fields{"status": status.String(), "error": err.Error()}, "Framework stopped ")
+			log.ErrorLevel,
+			log.Fields{"status": status.String(), "error": err.Error()}, "Framework stopped ")
 	}
 	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-		elekLog.InfoLevel,
-		elekLog.Fields{}, "Exiting...")
+		log.InfoLevel,
+		log.Fields{}, "Exiting...")
 }
@@ -19,7 +19,7 @@
 package schedulers
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
@@ -39,7 +39,7 @@ func (s *MaxGreedyMins) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, ta
 	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 	if err != nil {
 		// Error in determining wattsConsideration
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 	if (cpus >= (totalCPU + task.CPU)) && (mem >= (totalRAM + task.RAM)) &&
 		(!baseSchedRef.wattsAsAResource || (watts >= (totalWatts + wattsConsideration))) {
@@ -136,7 +136,7 @@ func (s *MaxGreedyMins) ConsumeOffers(spc SchedPolicyContext, driver sched.Sched
 			wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 			if err != nil {
 				// Error in determining wattsConsideration
-				elekLog.Fatal(err)
+				log.Fatal(err)
 			}
 
 			// Don't take offer if it doesn't match our task's host requirement
@@ -161,7 +161,7 @@ func (s *MaxGreedyMins) ConsumeOffers(spc SchedPolicyContext, driver sched.Sched
 			wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 			if err != nil {
 				// Error in determining wattsConsideration
-				elekLog.Fatal(err)
+				log.Fatal(err)
 			}
 
 			// Don't take offer if it doesn't match our task's host requirement
@@ -19,7 +19,7 @@
 package schedulers
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
@@ -39,7 +39,7 @@ func (s *MaxMin) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.
 	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 	if err != nil {
 		// Error in determining wattsConsideration
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 	if (cpus >= (totalCPU + task.CPU)) && (mem >= (totalRAM + task.RAM)) &&
 		(!baseSchedRef.wattsAsAResource || (watts >= (totalWatts + wattsConsideration))) {
@@ -148,7 +148,7 @@ func (s *MaxMin) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDri
 			wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 			if err != nil {
 				// Error in determining wattsConsideration.
-				elekLog.Fatal(err)
+				log.Fatal(err)
 			}
 
 			// Don't take offer if it doesn't match our task's host requirement.
@@ -28,7 +28,7 @@ import (
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	"github.com/mesos/mesos-go/api/v0/mesosutil"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
@@ -67,7 +67,7 @@ type BaseScheduler struct {
 	// Controls when to shutdown pcp logging.
 	PCPLog chan struct{}
 
-	schedTrace *elekLog.Logger
+	schedTrace *log.Logger
 
 	mutex sync.Mutex
 
@@ -102,7 +102,7 @@ func (s *BaseScheduler) init(opts ...SchedulerOptions) {
 	for _, opt := range opts {
 		// applying options
 		if err := opt(s); err != nil {
-			elekLog.Fatal(err)
+			log.Fatal(err)
 		}
 	}
 	s.TasksRunningMutex.Lock()
@@ -251,12 +251,12 @@ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos
 func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 	lmt := elekLogTypes.GENERAL
 	if ts == nil {
-		elektronLogging.ElektronLog.Log(lmt, elekLog.InfoLevel,
-			elekLog.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}, "TASKS STARTING...")
+		elektronLogging.ElektronLog.Log(lmt, log.InfoLevel,
+			log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}, "TASKS STARTING...")
 	} else {
 		elektronLogging.ElektronLog.Log(lmt,
-			elekLog.InfoLevel,
-			elekLog.Fields{"task": fmt.Sprintf("%s", ts.Name),
+			log.InfoLevel,
+			log.Fields{"task": fmt.Sprintf("%s", ts.Name),
 				"Instance": fmt.Sprintf("%d", *ts.Instances), "host": fmt.Sprintf("%s", offer.GetHostname())},
 			"TASK STARTING... ")
 	}
@@ -265,29 +265,29 @@ func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}, "Watts considered for ")
+		log.InfoLevel,
+		log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}, "Watts considered for ")
 }
 
 func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}, "")
+		log.InfoLevel,
+		log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}, "")
}
 
 func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
 	lmt := elekLogTypes.WARNING
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.WarnLevel,
-		elekLog.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}, "No tasks left to schedule ")
+		log.WarnLevel,
+		log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}, "No tasks left to schedule ")
 }
 
 func (s *BaseScheduler) LogNumberOfRunningTasks() {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}, "")
+		log.InfoLevel,
+		log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}, "")
 }
 
 func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
@@ -299,21 +299,21 @@ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
 	}
 	s.TasksRunningMutex.Unlock()
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}, "")
+		log.InfoLevel,
+		log.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}, "")
 }
 
 func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
 	elektronLogging.ElektronLog.Log(elekLogTypes.SCHED_TRACE,
-		elekLog.InfoLevel,
-		elekLog.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}, "")
+		log.InfoLevel,
+		log.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}, "")
 }
 
 func (s *BaseScheduler) LogTerminateScheduler() {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{}, "Done scheduling all tasks!")
+		log.InfoLevel,
+		log.Fields{}, "Done scheduling all tasks!")
 }
 
 func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
@@ -322,73 +322,73 @@ func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 	buffer := bytes.Buffer{}
 	buffer.WriteString(fmt.Sprintf("<CPU: %f, RAM: %f, Watts: %f>", offerResources...))
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.WarnLevel,
-		elekLog.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}, "DECLINING OFFER... Offer has insufficient resources to launch a task")
+		log.WarnLevel,
+		log.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}, "DECLINING OFFER... Offer has insufficient resources to launch a task")
 }
 
 func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
 	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.ErrorLevel,
-		elekLog.Fields{"OfferID": fmt.Sprintf("%s", offerID)}, "OFFER RESCINDED")
+		log.ErrorLevel,
+		log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}, "OFFER RESCINDED")
 }
 
 func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
 	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.ErrorLevel,
-		elekLog.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}, "SLAVE LOST")
+		log.ErrorLevel,
+		log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}, "SLAVE LOST")
 }
 
 func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
 	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.ErrorLevel,
-		elekLog.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}, "EXECUTOR LOST")
+		log.ErrorLevel,
+		log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}, "EXECUTOR LOST")
 }
 
 func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
 	slaveID *mesos.SlaveID, message string) {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Received Framework message from executor": executorID}, message)
+		log.InfoLevel,
+		log.Fields{"Received Framework message from executor": executorID}, message)
 }
 
 func (s *BaseScheduler) LogMesosError(err string) {
 	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.ErrorLevel,
-		elekLog.Fields{"MESOS ERROR": fmt.Sprintf("%v", err)}, "")
+		log.ErrorLevel,
+		log.Fields{"MESOS ERROR": fmt.Sprintf("%v", err)}, "")
 }
 
 func (s *BaseScheduler) LogElectronError(err error) {
 	lmt := elekLogTypes.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.ErrorLevel,
-		elekLog.Fields{"ELECTRON ERROR": fmt.Sprintf("%v", err)}, "")
+		log.ErrorLevel,
+		log.Fields{"ELECTRON ERROR": fmt.Sprintf("%v", err)}, "")
 }
 
 func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
 	masterInfo *mesos.MasterInfo) {
 	lmt := elekLogTypes.SUCCESS
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%s", masterInfo)}, "FRAMEWORK REGISTERED!")
+		log.InfoLevel,
+		log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%s", masterInfo)}, "FRAMEWORK REGISTERED!")
 }
 
 func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
 	lmt := elekLogTypes.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"master": fmt.Sprintf("%s", masterInfo)}, "Framework re-registered")
+		log.InfoLevel,
+		log.Fields{"master": fmt.Sprintf("%s", masterInfo)}, "Framework re-registered")
 }
 
 func (s *BaseScheduler) LogDisconnected() {
 	lmt := elekLogTypes.WARNING
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.WarnLevel,
-		elekLog.Fields{}, "Framework disconnected with master")
+		log.WarnLevel,
+		log.Fields{}, "Framework disconnected with master")
 }
 
 func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
@@ -403,15 +403,15 @@ func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
 		lmt = elekLogTypes.GENERAL
 	}
 	elektronLogging.ElektronLog.Log(lmt,
-		elekLog.InfoLevel,
-		elekLog.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}, "Task Status received")
+		log.InfoLevel,
+		log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}, "Task Status received")
 }
 
 func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicyState) {
 	logSPS := func() {
 		elektronLogging.ElektronLog.Log(elekLogTypes.SPS,
-			elekLog.InfoLevel,
-			elekLog.Fields{"Name": name}, "")
+			log.InfoLevel,
+			log.Fields{"Name": name}, "")
 	}
 	if s.hasReceivedResourceOffers && (s.curSchedPolicy != nextPolicy) {
 		logSPS()
@@ -421,13 +421,13 @@ func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicy
 	// Logging the size of the scheduling window and the scheduling policy
 	// that is going to schedule the tasks in the scheduling window.
 	elektronLogging.ElektronLog.Log(elekLogTypes.SCHED_WINDOW,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}, "")
+		log.InfoLevel,
+		log.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}, "")
 }
 
 func (s *BaseScheduler) LogClsfnAndTaskDistOverhead(overhead time.Duration) {
 	// Logging the overhead in microseconds.
 	elektronLogging.ElektronLog.Log(elekLogTypes.CLSFN_TASKDIST_OVERHEAD,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}, "")
+		log.InfoLevel,
+		log.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}, "")
 }
@@ -19,7 +19,7 @@
 package schedulers
 
 import (
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
@@ -39,7 +39,7 @@ func (s *BinPackSortedWatts) takeOffer(spc SchedPolicyContext, offer *mesos.Offe
 	wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 	if err != nil {
 		// Error in determining wattsConsideration.
-		elekLog.Fatal(err)
+		log.Fatal(err)
 	}
 	if (cpus >= (totalCPU + task.CPU)) && (mem >= (totalRAM + task.RAM)) &&
 		(!baseSchedRef.wattsAsAResource || (watts >= (totalWatts + wattsConsideration))) {
@@ -83,7 +83,7 @@ func (s *BinPackSortedWatts) ConsumeOffers(spc SchedPolicyContext, driver sched.
 			wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
 			if err != nil {
 				// Error in determining wattsConsideration.
-				elekLog.Fatal(err)
+				log.Fatal(err)
 			}
 
 			// Don't take offer if it doesn't match our task's host requirement.
@@ -23,7 +23,7 @@ import (
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
 	"github.com/pkg/errors"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/constants"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
@@ -36,13 +36,13 @@ func coLocated(tasks map[string]bool, s BaseScheduler) {
 
 	for task := range tasks {
 		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-			elekLog.InfoLevel,
-			elekLog.Fields{"Task": task}, "")
+			log.InfoLevel,
+			log.Fields{"Task": task}, "")
 	}
 
 	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-		elekLog.InfoLevel,
-		elekLog.Fields{}, "---------------------")
+		log.InfoLevel,
+		log.Fields{}, "---------------------")
 }
 
 // Get the powerClass of the given hostname.
@@ -157,7 +157,7 @@ func WithSchedPolSwitchEnabled(enableSchedPolicySwitch bool, switchingCriteria s
 func WithNameOfFirstSchedPolToFix(nameOfFirstSchedPol string) SchedulerOptions {
 	return func(s ElectronScheduler) error {
 		if nameOfFirstSchedPol == "" {
-			elekLog.Println("First scheduling policy to deploy not mentioned. This is now" +
+			log.Println("First scheduling policy to deploy not mentioned. This is now" +
 				" going to be determined at runtime.")
 			return nil
 		}
@@ -175,7 +175,7 @@ func WithFixedSchedulingWindow(toFixSchedWindow bool, fixedSchedWindowSize int)
 			if fixedSchedWindowSize <= 0 {
 				return errors.New("Invalid value of scheduling window size. Please provide a value > 0.")
 			}
-			elekLog.Println(fmt.Sprintf("Fixing the size of the scheduling window to %d.."+
+			log.Println(fmt.Sprintf("Fixing the size of the scheduling window to %d.."+
 				".", fixedSchedWindowSize))
 			s.(*BaseScheduler).toFixSchedWindow = toFixSchedWindow
 			s.(*BaseScheduler).schedWindowSize = fixedSchedWindowSize
@@ -24,7 +24,7 @@ import (
 
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
@@ -91,8 +91,8 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
 	taskDist, err := def.GetTaskDistributionInWindow(baseSchedRef.schedWindowSize, baseSchedRef.tasks)
 	baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
 	elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL,
-		elekLog.InfoLevel,
-		elekLog.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}, "Switching... ")
+		log.InfoLevel,
+		log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}, "Switching... ")
 	if err != nil {
 		// All the tasks in the window were only classified into 1 cluster.
 		// Max-Min and Max-GreedyMins would work the same way as Bin-Packing for this situation.
@@ -220,8 +220,8 @@ func (bsps *baseSchedPolicyState) SwitchIfNecessary(spc SchedPolicyContext) {
 			switchToPolicyName = switchBasedOn[baseSchedRef.schedPolSwitchCriteria](baseSchedRef)
 		} else {
 			// We continue working with the currently deployed scheduling policy.
-			elekLog.Println("Continuing with the current scheduling policy...")
-			elekLog.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
+			log.Println("Continuing with the current scheduling policy...")
+			log.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
 				baseSchedRef.schedWindowSize)
 			return
 		}
@@ -234,8 +234,8 @@ func (bsps *baseSchedPolicyState) SwitchIfNecessary(spc SchedPolicyContext) {
 			bsps.numTasksScheduled = 0
 		} else {
 			// We continue working with the currently deployed scheduling policy.
-			elekLog.Println("Continuing with the current scheduling policy...")
-			elekLog.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
+			log.Println("Continuing with the current scheduling policy...")
+			log.Printf("TasksScheduled[%d], SchedWindowSize[%d]", bsps.numTasksScheduled,
 				baseSchedRef.schedWindowSize)
 			return
 		}
@@ -21,7 +21,7 @@ package offerUtils
 import (
 	"fmt"
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/constants"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
@@ -90,14 +90,14 @@ func UpdateEnvironment(offer *mesos.Offer) {
 	var host = offer.GetHostname()
 	// If this host is not present in the set of hosts.
 	if _, ok := constants.Hosts[host]; !ok {
-		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL, elekLog.InfoLevel,
-			elekLog.Fields{"Adding host": fmt.Sprintf("%s", host)}, "New host detected")
+		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL, log.InfoLevel,
+			log.Fields{"Adding host": fmt.Sprintf("%s", host)}, "New host detected")
 		// Add this host.
 		constants.Hosts[host] = struct{}{}
 		// Get the power class of this host.
 		class := PowerClass(offer)
-		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL, elekLog.InfoLevel,
-			elekLog.Fields{"host": fmt.Sprintf("%s", host), "PowerClass": fmt.Sprintf("%s", class)}, "Registering the power class...")
+		elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL, log.InfoLevel,
+			log.Fields{"host": fmt.Sprintf("%s", host), "PowerClass": fmt.Sprintf("%s", class)}, "Registering the power class...")
 		// If new power class, register the power class.
 		if _, ok := constants.PowerClasses[class]; !ok {
 			constants.PowerClasses[class] = make(map[string]struct{})
@@ -20,7 +20,7 @@ package schedUtils
 
 import (
 	"fmt"
-	elekLog "github.com/sirupsen/logrus"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogTypes "github.com/spdfg/elektron/elektronLogging/types"
@@ -79,8 +79,8 @@ func (s *fillNextOfferCycle) apply(taskQueue []def.Task) (int, int) {
 	for _, task := range taskQueue {
 		numberOfTasksTraversed++
 		for i := *task.Instances; i > 0; i-- {
-			elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL, elekLog.InfoLevel,
-				elekLog.Fields{}, fmt.Sprintf("Checking if Instance #%d of Task[%s] can be scheduled "+
+			elektronLogging.ElektronLog.Log(elekLogTypes.GENERAL, log.InfoLevel,
+				log.Fields{}, fmt.Sprintf("Checking if Instance #%d of Task[%s] can be scheduled "+
 					"during the next offer cycle...", i, task.Name))
 			if canSchedule(task) {
 				filledCPU += task.CPU