Code formatting

balandi1 2019-11-20 13:33:46 -05:00
parent ef8bd6ac1f
commit 4d15e59d4c
41 changed files with 472 additions and 475 deletions
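All changes in this commit are mechanical gofmt/goimports-style cleanups: each import block is re-sorted into lexical order by import path, and composite literals such as log.Fields lose the stray space before the opening brace and gain a single space after each colon and comma. A minimal sketch of the before/after pattern (a hypothetical snippet, not code from this repository):

package main

import (
	"fmt" // gofmt keeps each import block sorted lexically by path
	"strings"
)

func main() {
	// Before formatting (the style this commit removes):
	//   fields := map[string]string {"host" : "node1","power" : "42.0"}
	// After gofmt: no space before '{', one space after each ':' and ','.
	fields := map[string]string{"host": "node1", "power": "42.0"}
	fmt.Println(strings.ToUpper(fields["host"]))
}

Running gofmt (or goimports) over the tree produces exactly this class of diff, which is why a formatting pass can touch 41 files while leaving behavior essentially unchanged.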

View file

@@ -25,9 +25,9 @@ import (
 	"github.com/mash/gokmeans"
 	"github.com/montanaflynn/stats"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
-	log "github.com/sirupsen/logrus"
 )

 // Information about a cluster of tasks.
@@ -53,7 +53,7 @@ func (tc TasksToClassify) taskObservationCalculator(task Task) []float64 {
 		return []float64{task.Watts}
 	} else {
 		elektronLogging.ElektronLog.Log(elekLogT.ERROR, log.FatalLevel,
-			log.Fields {}, "Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload")
+			log.Fields{}, "Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload")
 		return []float64{0.0} // Won't reach here.
 	}
 }
@@ -109,7 +109,7 @@ func clusterSizeAvgMMMPU(tasks []Task, taskObservation func(task Task) []float64
 				// skip this value
 				// there is an error in the task config.
 				elektronLogging.ElektronLog.Log(elekLogT.ERROR, log.ErrorLevel,
-					log.Fields {}, fmt.Sprintf("%s",err))
+					log.Fields{}, fmt.Sprintf("%s", err))
 			}
 		} else {
 			// There is only one observation for the task.

View file

@@ -3,9 +3,9 @@ package elektronLogging
 import (
 	"bytes"
 	"fmt"
-	"strings"
 	"github.com/fatih/color"
 	log "github.com/sirupsen/logrus"
+	"strings"
 )

 type ElektronFormatter struct {
@@ -36,10 +36,10 @@ func (f ElektronFormatter) Format(entry *log.Entry) ([]byte, error) {
 	}
 	levelColor := f.getColor(entry)
-	level := levelColor.Sprintf("[%s]:",strings.ToUpper(entry.Level.String()))
-	message := fmt.Sprintf("%s %s ",level,entry.Time.Format(f.TimestampFormat))
+	level := levelColor.Sprintf("[%s]:", strings.ToUpper(entry.Level.String()))
+	message := fmt.Sprintf("%s %s ", level, entry.Time.Format(f.TimestampFormat))
 	if entry.Message != "" {
-		message = fmt.Sprintf("%s %s %s ",level,entry.Time.Format(f.TimestampFormat), entry.Message)
+		message = fmt.Sprintf("%s %s %s ", level, entry.Time.Format(f.TimestampFormat), entry.Message)
 	}

 	var formattedFields []string
 	for key, value := range entry.Data {
@@ -47,7 +47,6 @@ func (f ElektronFormatter) Format(entry *log.Entry) ([]byte, error) {
 			strings.Join([]string{key, fmt.Sprintf("%s", value)}, "="))
 	}
-
 	b.WriteString(message)
 	b.WriteString(strings.Join(formattedFields, ", "))
 	b.WriteByte('\n')

View file

@@ -1,8 +1,8 @@
 package elektronLogging

 import (
-	"os"
 	log "github.com/sirupsen/logrus"
+	"os"
 )

 type ClsfnTaskDistOverheadLogger struct {
@@ -16,7 +16,7 @@ func NewClsfnTaskDistOverheadLogger(logType int, prefix string) *ClsfnTaskDistOv
 	return cLog
 }

-func (cLog *ClsfnTaskDistOverheadLogger) Log(logType int, level log.Level, logData log.Fields,message string) {
+func (cLog *ClsfnTaskDistOverheadLogger) Log(logType int, level log.Level, logData log.Fields, message string) {
 	if cLog.Type == logType {
 		logger.SetLevel(level)

View file

@@ -1,8 +1,8 @@
 package elektronLogging

 import (
-	"os"
 	log "github.com/sirupsen/logrus"
+	"os"
 )

 type ConsoleLogger struct {

View file

@@ -1,10 +1,10 @@
 package elektronLogging

 import (
+	logrus "github.com/sirupsen/logrus"
 	"os"
 	"strconv"
 	"time"
-	logrus "github.com/sirupsen/logrus"
 )

 var logDir string

View file

@@ -1,11 +1,11 @@
 package elektronLogging

 import (
-	"time"
 	"fmt"
-	"strconv"
-	. "github.com/spdfg/elektron/elektronLogging/types"
 	log "github.com/sirupsen/logrus"
+	. "github.com/spdfg/elektron/elektronLogging/types"
+	"strconv"
+	"time"
 )

 var config LoggerConfig
@@ -22,8 +22,8 @@ func BuildLogger() *LoggerImpl {
 	formatter.TimestampFormat = "2006-01-02 15:04:05"
 	GetLogDir(startTime, "_")
-	prefix := fmt.Sprintf("_%d%d%s%s%s%s",startTime.Year(), startTime.Month(),strconv.Itoa(startTime.Day()),
-		strconv.Itoa(startTime.Hour()),strconv.Itoa(startTime.Minute()),strconv.Itoa(startTime.Second()))
+	prefix := fmt.Sprintf("_%d%d%s%s%s%s", startTime.Year(), startTime.Month(), strconv.Itoa(startTime.Day()),
+		strconv.Itoa(startTime.Hour()), strconv.Itoa(startTime.Minute()), strconv.Itoa(startTime.Second()))

 	//create a single logrus instance and set its formatter to ElektronFormatter
 	logger = log.New()
@@ -31,8 +31,8 @@ func BuildLogger() *LoggerImpl {
 	// create a chain of loggers
 	head := new(LoggerImpl)
-	cLog := NewConsoleLogger(CONSOLE,prefix)
-	pLog := NewPcpLogger(PCP,prefix)
+	cLog := NewConsoleLogger(CONSOLE, prefix)
+	pLog := NewPcpLogger(PCP, prefix)
 	schedTraceLog := NewSchedTraceLogger(SCHED_TRACE, prefix)
 	spsLog := NewSchedPolicySwitchLogger(SPS, prefix)
 	schedWindowLog := NewSchedWindowLogger(SCHED_WINDOW, prefix)

View file

@@ -1,13 +1,13 @@
 package elektronLogging

 import (
-	"gopkg.in/yaml.v2"
-	"io/ioutil"
 	log "github.com/sirupsen/logrus"
 	elekEnv "github.com/spdfg/elektron/environment"
+	"gopkg.in/yaml.v2"
+	"io/ioutil"
 )

 type LoggerConfig struct {
 	SchedTraceConfig struct {
 		Enabled bool `yaml:"enabled"`
 		FilenameExtension string `yaml:"filenameExtension"`
@@ -50,7 +50,7 @@ type LoggerConfig struct {
 		AllowOnConsole bool `yaml:"allowOnConsole"`
 	} `yaml:"schedWindow"`

-	Format[] string `yaml:"format"`
+	Format []string `yaml:"format"`
 }

 func (c *LoggerConfig) GetConfig() *LoggerConfig {

View file

@@ -1,8 +1,8 @@
 package elektronLogging

 import (
-	"os"
 	log "github.com/sirupsen/logrus"
+	"os"
 )

 type PcpLogger struct {

View file

@@ -1,8 +1,8 @@
 package elektronLogging

 import (
-	"os"
 	log "github.com/sirupsen/logrus"
+	"os"
 )

 type SchedPolicySwitchLogger struct {

View file

@@ -1,8 +1,8 @@
 package elektronLogging

 import (
-	"os"
 	log "github.com/sirupsen/logrus"
+	"os"
 )

 type SchedTraceLogger struct {

View file

@@ -1,8 +1,8 @@
 package elektronLogging

 import (
-	"os"
 	log "github.com/sirupsen/logrus"
+	"os"
 )

 type SchedWindowLogger struct {

View file

@@ -1,6 +1,5 @@
 package elektronLogging

-
 const (
 	ERROR = iota
 	WARNING = iota
@@ -13,4 +12,3 @@ const (
 	SCHED_WINDOW = iota
 	CLSFN_TASKDIST_OVERHEAD = iota
 )
-

View file

@@ -24,12 +24,12 @@ import (
 	"syscall"
 	"time"

+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
-	log "github.com/sirupsen/logrus"
 )

-func Start(quit chan struct{}, logging *bool,pcpConfigFile string) {
+func Start(quit chan struct{}, logging *bool, pcpConfigFile string) {
 	var pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c " + pcpConfigFile
 	cmd := exec.Command("sh", "-c", pcpCommand)
 	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
@@ -49,7 +49,7 @@ func Start(quit chan struct{}, logging *bool,pcpConfigFile string) {
 		// Write to logfile
 		elektronLogging.ElektronLog.Log(elekLogT.PCP,
 			log.InfoLevel,
-			log.Fields {}, scanner.Text())
+			log.Fields{}, scanner.Text())

 		// Throw away first set of results
 		scanner.Scan()
@@ -62,7 +62,7 @@ func Start(quit chan struct{}, logging *bool,pcpConfigFile string) {
 			if *logging {
 				elektronLogging.ElektronLog.Log(elekLogT.PCP,
 					log.InfoLevel,
-					log.Fields {}, text)
+					log.Fields{}, text)
 			}
 			seconds++
@@ -71,7 +71,7 @@ func Start(quit chan struct{}, logging *bool,pcpConfigFile string) {
 	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 		log.InfoLevel,
-		log.Fields {}, "PCP logging started")
+		log.Fields{}, "PCP logging started")

 	if err := cmd.Start(); err != nil {
 		log.Fatal(err)
@@ -83,7 +83,7 @@ func Start(quit chan struct{}, logging *bool,pcpConfigFile string) {
 	case <-quit:
 		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 			log.InfoLevel,
-			log.Fields {}, "Stopping PCP logging in 5 seconds")
+			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)

 		// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly

View file

@@ -29,14 +29,14 @@ import (
 	"syscall"
 	"time"

-	"github.com/spdfg/elektron/pcp"
-	"github.com/spdfg/elektron/rapl"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
-	log "github.com/sirupsen/logrus"
+	"github.com/spdfg/elektron/pcp"
+	"github.com/spdfg/elektron/rapl"
 )

-func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThreshold, loThreshold float64,pcpConfigFile string) {
+func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThreshold, loThreshold float64, pcpConfigFile string) {
 	var pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c " + pcpConfigFile
 	cmd := exec.Command("sh", "-c", pcpCommand, pcpConfigFile)
@@ -45,7 +45,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 	if hiThreshold < loThreshold {
 		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 			log.InfoLevel,
-			log.Fields {}, "High threshold is lower than low threshold!")
+			log.Fields{}, "High threshold is lower than low threshold!")
 	}

 	pipe, err := cmd.StdoutPipe()
@@ -63,7 +63,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 		// Write to logfile
 		elektronLogging.ElektronLog.Log(elekLogT.PCP,
 			log.InfoLevel,
-			log.Fields {}, scanner.Text())
+			log.Fields{}, scanner.Text())

 		headers := strings.Split(scanner.Text(), ",")
@@ -101,14 +101,14 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 					log.InfoLevel,
-					log.Fields {}, "Logging PCP...")
+					log.Fields{}, "Logging PCP...")

 				text := scanner.Text()
 				split := strings.Split(text, ",")

 				elektronLogging.ElektronLog.Log(elekLogT.PCP,
 					log.InfoLevel,
-					log.Fields {}, text)
+					log.Fields{}, text)

 				totalPower := 0.0
 				for _, powerIndex := range powerIndexes {
@@ -121,7 +121,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 						log.InfoLevel,
-						log.Fields {"Host" : fmt.Sprintf("%s",indexToHost[powerIndex]), "Power" : fmt.Sprintf("%f",(power * pcp.RAPLUnits))},
+						log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
 						"")

 					totalPower += power
@@ -135,14 +135,14 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 					log.InfoLevel,
-					log.Fields {"Total power" : fmt.Sprintf("%f %d",clusterPower,clusterPowerHist.Len()),
-						"Sec Avg" : fmt.Sprintf("%f",clusterMean)},
+					log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
+						"Sec Avg": fmt.Sprintf("%f", clusterMean)},
 					"")

 				if clusterMean > hiThreshold {
 					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 						log.InfoLevel,
-						log.Fields {}, "Need to cap a node")
+						log.Fields{}, "Need to cap a node")

 					// Create statics for all victims and choose one to cap
 					victims := make([]pcp.Victim, 0, 8)
@@ -165,12 +165,12 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 						orderCapped = append(orderCapped, victim.Host)
 						elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 							log.InfoLevel,
-							log.Fields {"Capping Victim" : fmt.Sprintf("%s",victim.Host),
-								"Avg. Wattage" : fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}, "")
+							log.Fields{"Capping Victim": fmt.Sprintf("%s", victim.Host),
+								"Avg. Wattage": fmt.Sprintf("%f", victim.Watts*pcp.RAPLUnits)}, "")

 						if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
 							elektronLogging.ElektronLog.Log(elekLogT.ERROR,
 								log.ErrorLevel,
-								log.Fields {}, "Error capping host")
+								log.Fields{}, "Error capping host")
 						}
 						break // Only cap one machine at at time.
 					}
@@ -186,11 +186,11 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 					log.Printf("Uncapping host %s", host)
 					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 						log.InfoLevel,
-						log.Fields {"Uncapped host" : host}, "")
+						log.Fields{"Uncapped host": host}, "")
 					if err := rapl.Cap(host, "rapl", 100); err != nil {
 						elektronLogging.ElektronLog.Log(elekLogT.ERROR,
 							log.ErrorLevel,
-							log.Fields {}, "Error capping host")
+							log.Fields{}, "Error capping host")
 					}
 				}
 			}
@@ -202,7 +202,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 		log.InfoLevel,
-		log.Fields {}, "PCP logging started")
+		log.Fields{}, "PCP logging started")

 	if err := cmd.Start(); err != nil {
 		log.Fatal(err)
@@ -214,7 +214,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 	case <-quit:
 		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 			log.InfoLevel,
-			log.Fields {}, "Stopping PCP logging in 5 seconds")
+			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)

 		// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly

View file

@@ -30,13 +30,13 @@ import (
 	"syscall"
 	"time"

+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/constants"
+	"github.com/spdfg/elektron/elektronLogging"
+	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
 	"github.com/spdfg/elektron/pcp"
 	"github.com/spdfg/elektron/rapl"
 	"github.com/spdfg/elektron/utilities"
-	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
-	log "github.com/sirupsen/logrus"
 )

 func round(num float64) int {
@@ -49,7 +49,7 @@ func getNextCapValue(curCapValue float64, precision int) float64 {
 	return float64(round(curCapValue*output)) / output
 }

-func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiThreshold, loThreshold float64,pcpConfigFile string) {
+func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiThreshold, loThreshold float64, pcpConfigFile string) {
 	var pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c " + pcpConfigFile
 	cmd := exec.Command("sh", "-c", pcpCommand, pcpConfigFile)
@@ -58,7 +58,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	if hiThreshold < loThreshold {
 		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 			log.InfoLevel,
-			log.Fields {}, "High threshold is lower than low threshold!")
+			log.Fields{}, "High threshold is lower than low threshold!")
 	}

 	pipe, err := cmd.StdoutPipe()
@@ -76,7 +76,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 		// Write to logfile
 		elektronLogging.ElektronLog.Log(elekLogT.PCP,
 			log.InfoLevel,
-			log.Fields {}, scanner.Text())
+			log.Fields{}, scanner.Text())

 		headers := strings.Split(scanner.Text(), ",")
@@ -117,13 +117,13 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 			if *logging {
 				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 					log.InfoLevel,
-					log.Fields {}, "Logging PCP...")
+					log.Fields{}, "Logging PCP...")

 				split := strings.Split(scanner.Text(), ",")
 				text := scanner.Text()

 				elektronLogging.ElektronLog.Log(elekLogT.PCP,
 					log.InfoLevel,
-					log.Fields {}, text)
+					log.Fields{}, text)

 				totalPower := 0.0
 				for _, powerIndex := range powerIndexes {
@@ -136,7 +136,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 						log.InfoLevel,
-						log.Fields {"Host" : fmt.Sprintf("%s",indexToHost[powerIndex]), "Power" : fmt.Sprintf("%f",(power * pcp.RAPLUnits))},
+						log.Fields{"Host": fmt.Sprintf("%s", indexToHost[powerIndex]), "Power": fmt.Sprintf("%f", (power * pcp.RAPLUnits))},
 						"")
 					totalPower += power
 				}
@@ -149,22 +149,22 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 				elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 					log.InfoLevel,
-					log.Fields {"Total power" : fmt.Sprintf("%f %d",clusterPower,clusterPowerHist.Len()),
-						"Sec Avg" : fmt.Sprintf("%f",clusterMean)},
+					log.Fields{"Total power": fmt.Sprintf("%f %d", clusterPower, clusterPowerHist.Len()),
+						"Sec Avg": fmt.Sprintf("%f", clusterMean)},
 					"")

 				if clusterMean >= hiThreshold {
 					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 						log.InfoLevel,
-						log.Fields {}, "Need to cap a node")
+						log.Fields{}, "Need to cap a node")
 					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 						log.InfoLevel,
-						log.Fields {"Cap values of capped victims" : fmt.Sprintf("%v",cappedVictims)}, "")
+						log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
 					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 						log.InfoLevel,
-						log.Fields {"Cap values of victims to uncap" : fmt.Sprintf("%v",orderCappedVictims)}, "")
+						log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")

 					// Create statics for all victims and choose one to cap
 					victims := make([]pcp.Victim, 0, 8)
@@ -194,12 +194,12 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 							elektronLogging.ElektronLog.Log(elekLogT.ERROR,
 								log.ErrorLevel,
-								log.Fields {"Error capping host" : fmt.Sprintf("%s",victims[i].Host)}, "")
+								log.Fields{"Error capping host": fmt.Sprintf("%s", victims[i].Host)}, "")
 						} else {
 							elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 								log.InfoLevel,
-								log.Fields {}, fmt.Sprintf("Capped host[%s] at %f", victims[i].Host, 50.0))
+								log.Fields{}, fmt.Sprintf("Capped host[%s] at %f", victims[i].Host, 50.0))
 							// Keeping track of this victim and it's cap value
 							cappedVictims[victims[i].Host] = 50.0
 							newVictimFound = true
@@ -225,12 +225,12 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 								elektronLogging.ElektronLog.Log(elekLogT.ERROR,
 									log.ErrorLevel,
-									log.Fields {"Error capping host" : fmt.Sprintf("%s",alreadyCappedHosts[i])}, "")
+									log.Fields{"Error capping host": fmt.Sprintf("%s", alreadyCappedHosts[i])}, "")
 							} else {
 								// Successful cap
 								elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 									log.InfoLevel,
-									log.Fields {}, fmt.Sprintf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue))
+									log.Fields{}, fmt.Sprintf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue))
 								// Checking whether this victim can be capped further
 								if newCapValue <= constants.LowerCapLimit {
 									// Deleting victim from cappedVictims.
@@ -255,7 +255,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 						if !canCapAlreadyCappedVictim {
 							elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 								log.InfoLevel,
-								log.Fields {}, "No Victim left to cap")
+								log.Fields{}, "No Victim left to cap")
 						}
 					}
@@ -263,13 +263,13 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 						log.InfoLevel,
-						log.Fields {}, "Need to uncap a node")
+						log.Fields{}, "Need to uncap a node")
 					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 						log.InfoLevel,
-						log.Fields {"Cap values of capped victims" : fmt.Sprintf("%v",cappedVictims)}, "")
+						log.Fields{"Cap values of capped victims": fmt.Sprintf("%v", cappedVictims)}, "")
 					elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 						log.InfoLevel,
-						log.Fields {"Cap values of victims to uncap" : fmt.Sprintf("%v",orderCappedVictims)}, "")
+						log.Fields{"Cap values of victims to uncap": fmt.Sprintf("%v", orderCappedVictims)}, "")

 					if len(orderCapped) > 0 {
 						// We pick the host that is capped the most to uncap.
 						orderCappedToSort := utilities.GetPairList(orderCappedVictims)
@@ -282,12 +282,12 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 							elektronLogging.ElektronLog.Log(elekLogT.ERROR,
 								log.ErrorLevel,
-								log.Fields {"Error uncapping host" : fmt.Sprintf("%s",hostToUncap)}, "")
+								log.Fields{"Error uncapping host": fmt.Sprintf("%s", hostToUncap)}, "")
 						} else {
 							// Successful uncap
 							elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 								log.InfoLevel,
-								log.Fields {}, fmt.Sprintf("Uncapped host[%s] to %f", hostToUncap, newUncapValue))
+								log.Fields{}, fmt.Sprintf("Uncapped host[%s] to %f", hostToUncap, newUncapValue))
 							// Can we uncap this host further. If not, then we remove its entry from orderCapped
 							if newUncapValue >= 100.0 { // can compare using ==
 								// Deleting entry from orderCapped
@@ -310,7 +310,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					} else {
 						elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 							log.InfoLevel,
-							log.Fields {}, "No host staged for Uncapped")
+							log.Fields{}, "No host staged for Uncapped")
 					}
 				}
 			}
@@ -321,7 +321,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 		log.InfoLevel,
-		log.Fields {}, "PCP logging started")
+		log.Fields{}, "PCP logging started")

 	if err := cmd.Start(); err != nil {
 		log.Fatal(err)
 	}
@@ -332,7 +332,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	case <-quit:
 		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 			log.InfoLevel,
-			log.Fields {}, "Stopping PCP logging in 5 seconds")
+			log.Fields{}, "Stopping PCP logging in 5 seconds")
 		time.Sleep(5 * time.Second)

 		// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly

View file

@@ -28,13 +28,13 @@ import (
 	"github.com/golang/protobuf/proto"
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
+	"github.com/spdfg/elektron/elektronLogging"
+	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
 	"github.com/spdfg/elektron/pcp"
 	"github.com/spdfg/elektron/powerCap"
 	"github.com/spdfg/elektron/schedulers"
-	"github.com/spdfg/elektron/elektronLogging"
-	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
-	log "github.com/sirupsen/logrus"
 )

 var master = flag.String("master", "", "Location of leading Mesos master -- <mesos-master>:<port>")
@@ -284,9 +284,9 @@ func main() {
 	if status, err := driver.Run(); err != nil {
 		elektronLogging.ElektronLog.Log(elekLogT.ERROR,
 			log.ErrorLevel,
-			log.Fields {"status" : status.String(), "error" : err.Error()}, "Framework stopped ")
+			log.Fields{"status": status.String(), "error": err.Error()}, "Framework stopped ")
 	}
 	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 		log.InfoLevel,
-		log.Fields {}, "Exiting...")
+		log.Fields{}, "Exiting...")
 }

View file

@@ -28,12 +28,12 @@ import (
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	"github.com/mesos/mesos-go/api/v0/mesosutil"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
-	"github.com/spdfg/elektron/utilities"
-	"github.com/spdfg/elektron/utilities/schedUtils"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
-	log "github.com/sirupsen/logrus"
+	"github.com/spdfg/elektron/utilities"
+	"github.com/spdfg/elektron/utilities/schedUtils"
 )

 type BaseScheduler struct {
@@ -252,12 +252,12 @@ func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 	lmt := elekLogT.GENERAL
 	if ts == nil {
 		elektronLogging.ElektronLog.Log(lmt, log.InfoLevel,
-			log.Fields {"host" : fmt.Sprintf("%s",offer.GetHostname())}, "TASKS STARTING...")
+			log.Fields{"host": fmt.Sprintf("%s", offer.GetHostname())}, "TASKS STARTING...")
 	} else {
 		elektronLogging.ElektronLog.Log(lmt,
 			log.InfoLevel,
-			log.Fields {"task" : fmt.Sprintf("%s",ts.Name),
-				"Instance" : fmt.Sprintf("%d",*ts.Instances), "host" : fmt.Sprintf("%s",offer.GetHostname())},
+			log.Fields{"task": fmt.Sprintf("%s", ts.Name),
+				"Instance": fmt.Sprintf("%d", *ts.Instances), "host": fmt.Sprintf("%s", offer.GetHostname())},
 			"TASK STARTING... ")
 	}
 }
@@ -266,28 +266,28 @@ func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, watt
 	lmt := elekLogT.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
-		log.Fields {"task" : ts.Name, "host" : host, "Watts" : fmt.Sprintf("%f",wattsToConsider)}, "Watts considered for ")
+		log.Fields{"task": ts.Name, "host": host, "Watts": fmt.Sprintf("%f", wattsToConsider)}, "Watts considered for ")
 }

 func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
 	lmt := elekLogT.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
-		log.Fields {"Resource offers received" : fmt.Sprintf("%d",len(offers))}, "")
+		log.Fields{"Resource offers received": fmt.Sprintf("%d", len(offers))}, "")
 }

 func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
 	lmt := elekLogT.WARNING
 	elektronLogging.ElektronLog.Log(lmt,
 		log.WarnLevel,
-		log.Fields {"DECLINING OFFER for host" : fmt.Sprintf("%s",offer.GetHostname())}, "No tasks left to schedule ")
+		log.Fields{"DECLINING OFFER for host": fmt.Sprintf("%s", offer.GetHostname())}, "No tasks left to schedule ")
 }

 func (s *BaseScheduler) LogNumberOfRunningTasks() {
 	lmt := elekLogT.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
-		log.Fields {"Number of tasks still Running" : fmt.Sprintf("%d",s.tasksRunning)}, "")
+		log.Fields{"Number of tasks still Running": fmt.Sprintf("%d", s.tasksRunning)}, "")
 }

 func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
@@ -300,20 +300,20 @@ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
 	s.TasksRunningMutex.Unlock()
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
-		log.Fields {"Colocated with" : fmt.Sprintf("%s",buffer.String())}, "")
+		log.Fields{"Colocated with": fmt.Sprintf("%s", buffer.String())}, "")
 }

 func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
 	elektronLogging.ElektronLog.Log(elekLogT.SCHED_TRACE,
 		log.InfoLevel,
-		log.Fields {offer.GetHostname() : fmt.Sprintf("%s",taskToSchedule.GetTaskId().GetValue())}, "")
+		log.Fields{offer.GetHostname(): fmt.Sprintf("%s", taskToSchedule.GetTaskId().GetValue())}, "")
 }

 func (s *BaseScheduler) LogTerminateScheduler() {
 	lmt := elekLogT.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
-		log.Fields {}, "Done scheduling all tasks!")
+		log.Fields{}, "Done scheduling all tasks!")
 }

 func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
@@ -323,28 +323,28 @@ func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 	buffer.WriteString(fmt.Sprintf("<CPU: %f, RAM: %f, Watts: %f>", offerResources...))
 	elektronLogging.ElektronLog.Log(lmt,
 		log.WarnLevel,
-		log.Fields {"Offer Resources" : fmt.Sprintf("%s",buffer.String())}, "DECLINING OFFER... Offer has insufficient resources to launch a task")
+		log.Fields{"Offer Resources": fmt.Sprintf("%s", buffer.String())}, "DECLINING OFFER... Offer has insufficient resources to launch a task")
 }

 func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
 	lmt := elekLogT.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
 		log.ErrorLevel,
-		log.Fields {"OfferID" : fmt.Sprintf("%s",offerID)}, "OFFER RESCINDED")
+		log.Fields{"OfferID": fmt.Sprintf("%s", offerID)}, "OFFER RESCINDED")
 }

 func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
 	lmt := elekLogT.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
 		log.ErrorLevel,
-		log.Fields {"SlaveID" : fmt.Sprintf("%s",slaveID)}, "SLAVE LOST")
+		log.Fields{"SlaveID": fmt.Sprintf("%s", slaveID)}, "SLAVE LOST")
 }

 func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
 	lmt := elekLogT.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
 		log.ErrorLevel,
-		log.Fields {"ExecutorID" : fmt.Sprintf("%s",executorID), "SlaveID" : fmt.Sprintf("%s", slaveID)}, "EXECUTOR LOST")
+		log.Fields{"ExecutorID": fmt.Sprintf("%s", executorID), "SlaveID": fmt.Sprintf("%s", slaveID)}, "EXECUTOR LOST")
 }

 func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
@@ -352,21 +352,21 @@ func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
 	lmt := elekLogT.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
-		log.Fields {"Received Framework message from executor" : executorID}, message)
+		log.Fields{"Received Framework message from executor": executorID}, message)
 }

 func (s *BaseScheduler) LogMesosError(err string) {
 	lmt := elekLogT.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
 		log.ErrorLevel,
-		log.Fields {"MESOS ERROR" : fmt.Sprintf("%v", err)}, "")
+		log.Fields{"MESOS ERROR": fmt.Sprintf("%v", err)}, "")
 }

 func (s *BaseScheduler) LogElectronError(err error) {
 	lmt := elekLogT.ERROR
 	elektronLogging.ElektronLog.Log(lmt,
 		log.ErrorLevel,
-		log.Fields {"ELECTRON ERROR" : fmt.Sprintf("%v",err)}, "")
+		log.Fields{"ELECTRON ERROR": fmt.Sprintf("%v", err)}, "")
 }

 func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
@@ -374,21 +374,21 @@ func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
 	lmt := elekLogT.SUCCESS
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
-		log.Fields {"frameworkID" : fmt.Sprintf("%s",frameworkID), "master" : fmt.Sprintf("%s",masterInfo)}, "FRAMEWORK REGISTERED!")
+		log.Fields{"frameworkID": fmt.Sprintf("%s", frameworkID), "master": fmt.Sprintf("%s", masterInfo)}, "FRAMEWORK REGISTERED!")
 }

 func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
 	lmt := elekLogT.GENERAL
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
-		log.Fields {"master" : fmt.Sprintf("%s",masterInfo)}, "Framework re-registered")
+		log.Fields{"master": fmt.Sprintf("%s", masterInfo)}, "Framework re-registered")
 }

 func (s *BaseScheduler) LogDisconnected() {
 	lmt := elekLogT.WARNING
 	elektronLogging.ElektronLog.Log(lmt,
 		log.WarnLevel,
-		log.Fields {}, "Framework disconnected with master")
+		log.Fields{}, "Framework disconnected with master")
 }

 func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
@@ -404,14 +404,14 @@ func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
 	}
 	elektronLogging.ElektronLog.Log(lmt,
 		log.InfoLevel,
-		log.Fields {"task" : fmt.Sprintf("%s",*status.TaskId.Value), "state" : NameFor(status.State)}, "Task Status received")
+		log.Fields{"task": fmt.Sprintf("%s", *status.TaskId.Value), "state": NameFor(status.State)}, "Task Status received")
 }

 func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicyState) {
 	logSPS := func() {
 		elektronLogging.ElektronLog.Log(elekLogT.SPS,
 			log.InfoLevel,
-			log.Fields {"Name" : name}, "")
+			log.Fields{"Name": name}, "")
 	}
 	if s.hasReceivedResourceOffers && (s.curSchedPolicy != nextPolicy) {
 		logSPS()
@@ -422,12 +422,12 @@ func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicy
 	// that is going to schedule the tasks in the scheduling window.
 	elektronLogging.ElektronLog.Log(elekLogT.SCHED_WINDOW,
 		log.InfoLevel,
-		log.Fields {"Window size" : fmt.Sprintf("%d",s.schedWindowSize), "Name" : name}, "")
+		log.Fields{"Window size": fmt.Sprintf("%d", s.schedWindowSize), "Name": name}, "")
 }

 func (s *BaseScheduler) LogClsfnAndTaskDistOverhead(overhead time.Duration) {
 	// Logging the overhead in microseconds.
 	elektronLogging.ElektronLog.Log(elekLogT.CLSFN_TASKDIST_OVERHEAD,
 		log.InfoLevel,
-		log.Fields {"Overhead in microseconds" : fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}, "")
+		log.Fields{"Overhead in microseconds": fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0)}, "")
 }

View file

@@ -23,13 +23,13 @@ import (
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
 	"github.com/pkg/errors"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/constants"
 	"github.com/spdfg/elektron/def"
-	"github.com/spdfg/elektron/utilities"
-	"github.com/spdfg/elektron/utilities/mesosUtils"
-	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
+	"github.com/spdfg/elektron/utilities"
+	"github.com/spdfg/elektron/utilities/mesosUtils"
 )

 func coLocated(tasks map[string]bool, s BaseScheduler) {
@@ -37,12 +37,12 @@ func coLocated(tasks map[string]bool, s BaseScheduler) {

 	for task := range tasks {
 		elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 			log.InfoLevel,
-			log.Fields {"Task" : task}, "")
+			log.Fields{"Task": task}, "")
 	}

 	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 		log.InfoLevel,
-		log.Fields {}, "---------------------")
+		log.Fields{}, "---------------------")
 }

 // Get the powerClass of the given hostname.

View file

@@ -24,10 +24,10 @@ import (
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
-	log "github.com/sirupsen/logrus"
 )

 type SchedPolicyContext interface {
@@ -92,7 +92,7 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
 	baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
 	elektronLogging.ElektronLog.Log(elekLogT.GENERAL,
 		log.InfoLevel,
-		log.Fields {"Task Distribution" : fmt.Sprintf("%f",taskDist)}, "Switching... ")
+		log.Fields{"Task Distribution": fmt.Sprintf("%f", taskDist)}, "Switching... ")
 	if err != nil {
 		// All the tasks in the window were only classified into 1 cluster.
 		// Max-Min and Max-GreedyMins would work the same way as Bin-Packing for this situation.

View file

@@ -19,13 +19,13 @@
 package offerUtils

 import (
-	"strings"
 	"fmt"
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/constants"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
-	log "github.com/sirupsen/logrus"
+	"strings"
 )

 func OfferAgg(offer *mesos.Offer) (float64, float64, float64) {
@@ -91,13 +91,13 @@ func UpdateEnvironment(offer *mesos.Offer) {
 	// If this host is not present in the set of hosts.
 	if _, ok := constants.Hosts[host]; !ok {
 		elektronLogging.ElektronLog.Log(elekLogT.GENERAL, log.InfoLevel,
-			log.Fields {"Adding host" : fmt.Sprintf("%s",host)}, "New host detected")
+			log.Fields{"Adding host": fmt.Sprintf("%s", host)}, "New host detected")
 		// Add this host.
 		constants.Hosts[host] = struct{}{}
 		// Get the power class of this host.
 		class := PowerClass(offer)
 		elektronLogging.ElektronLog.Log(elekLogT.GENERAL, log.InfoLevel,
-			log.Fields {"host" : fmt.Sprintf("%s",host), "PowerClass" : fmt.Sprintf("%s", class)}, "Registering the power class...")
+			log.Fields{"host": fmt.Sprintf("%s", host), "PowerClass": fmt.Sprintf("%s", class)}, "Registering the power class...")
 		// If new power class, register the power class.
 		if _, ok := constants.PowerClasses[class]; !ok {
 			constants.PowerClasses[class] = make(map[string]struct{})

View file

@@ -20,11 +20,11 @@ package schedUtils

 import (
 	"fmt"
+	log "github.com/sirupsen/logrus"
 	"github.com/spdfg/elektron/def"
-	"github.com/spdfg/elektron/utilities"
 	"github.com/spdfg/elektron/elektronLogging"
 	elekLogT "github.com/spdfg/elektron/elektronLogging/types"
-	log "github.com/sirupsen/logrus"
+	"github.com/spdfg/elektron/utilities"
 )

 // Criteria for resizing the scheduling window.
@@ -80,7 +80,7 @@ func (s *fillNextOfferCycle) apply(taskQueue []def.Task) (int, int) {
 		numberOfTasksTraversed++
 		for i := *task.Instances; i > 0; i-- {
 			elektronLogging.ElektronLog.Log(elekLogT.GENERAL, log.InfoLevel,
-				log.Fields {}, fmt.Sprintf("Checking if Instance #%d of Task[%s] can be scheduled "+
+				log.Fields{}, fmt.Sprintf("Checking if Instance #%d of Task[%s] can be scheduled "+
 					"during the next offer cycle...", i, task.Name))
 			if canSchedule(task) {
 				filledCPU += task.CPU