Fixed import alias prefix.

Changed the import alias prefix from 'elec' to 'elek'.
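The rename is purely mechanical: Go lets an import be bound to a local alias, and only that alias changes here; the import path itself is untouched. A minimal sketch of the pattern (an illustration, not a complete file):

import (
	// Only the local name changes; the package path stays the same.
	elekLogDef "gitlab.com/spdf/elektron/logging/def" // was: elecLogDef "gitlab.com/spdf/elektron/logging/def"
)

Every use site (elecLogDef.GENERAL, elecLogDef.PCP, ...) is then updated to the new alias, which is why the diff touches 115 lines on each side without changing behavior.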
Pradyumna Kaushik 2018-10-04 13:45:31 -04:00
parent b06bdeba59
commit 8e87bcb439
8 changed files with 115 additions and 115 deletions

View file

@@ -10,11 +10,11 @@ import (
 	"github.com/mesos/mesos-go/api/v0/scheduler"
 	"github.com/montanaflynn/stats"
-	elecLogDef "gitlab.com/spdf/elektron/logging/def"
+	elekLogDef "gitlab.com/spdf/elektron/logging/def"
 	"gitlab.com/spdf/elektron/schedulers"
 )

-func Start(quit chan struct{}, logging *bool, logMType chan elecLogDef.LogMessageType, logMsg chan string, s scheduler.Scheduler) {
+func Start(quit chan struct{}, logging *bool, logMType chan elekLogDef.LogMessageType, logMsg chan string, s scheduler.Scheduler) {
 	baseSchedRef := s.(*schedulers.BaseScheduler)
 	const pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c config"
 	cmd := exec.Command("sh", "-c", pcpCommand)
@@ -33,10 +33,10 @@ func Start(quit chan struct{}, logging *bool, logMType chan elecLogDef.LogMessag
 		scanner.Scan()
 		// Write to logfile
-		logMType <- elecLogDef.PCP
+		logMType <- elekLogDef.PCP
 		logMsg <- scanner.Text()
-		logMType <- elecLogDef.DEG_COL
+		logMType <- elekLogDef.DEG_COL
 		logMsg <- "CPU Variance, CPU Task Share Variance, Memory Variance, Memory Task Share Variance"
 		// Throw away first set of results
@@ -48,7 +48,7 @@ func Start(quit chan struct{}, logging *bool, logMType chan elecLogDef.LogMessag
 			text := scanner.Text()
 			if *logging {
-				logMType <- elecLogDef.PCP
+				logMType <- elekLogDef.PCP
 				logMsg <- text
 			}
@@ -84,12 +84,12 @@ func Start(quit chan struct{}, logging *bool, logMType chan elecLogDef.LogMessag
 			memVariance, _ := stats.Variance(memUtils)
 			memTaskSharesVariance, _ := stats.Variance(memTaskShares)
-			logMType <- elecLogDef.DEG_COL
+			logMType <- elekLogDef.DEG_COL
 			logMsg <- fmt.Sprintf("%f, %f, %f, %f", cpuVariance, cpuTaskSharesVariance, memVariance, memTaskSharesVariance)
 		}
 	}(logging)
-	logMType <- elecLogDef.GENERAL
+	logMType <- elekLogDef.GENERAL
 	logMsg <- "PCP logging started"
 	if err := cmd.Start(); err != nil {
@@ -100,7 +100,7 @@ func Start(quit chan struct{}, logging *bool, logMType chan elecLogDef.LogMessag
 	select {
 	case <-quit:
-		logMType <- elecLogDef.GENERAL
+		logMType <- elekLogDef.GENERAL
 		logMsg <- "Stopping PCP logging in 5 seconds"
 		time.Sleep(5 * time.Second)
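This file and the ones below all drive the logger through a pair of channels: a producer first sends the message category on logMType, then the message body on logMsg. The consuming side is not part of this commit; the following self-contained sketch (with a stand-in for elekLogDef.LogMessageType, which is an assumption) shows how such a listener would pair the two channels:

package main

import (
	"fmt"
	"time"
)

// Stand-in for elekLogDef.LogMessageType; the real type lives in
// gitlab.com/spdf/elektron/logging/def and is not shown in this diff.
type LogMessageType int

const (
	GENERAL LogMessageType = iota
	PCP
	DEG_COL
)

// listen pairs each category with the message that follows it, mirroring the
// send order used by the producers in the diff (type first, then text).
func listen(logMType <-chan LogMessageType, logMsg <-chan string) {
	for {
		mType := <-logMType // category sent first (GENERAL, PCP, DEG_COL, ...)
		msg := <-logMsg     // then the message belonging to that category
		fmt.Printf("[%v] %s\n", mType, msg)
	}
}

func main() {
	logMType := make(chan LogMessageType)
	logMsg := make(chan string)
	go listen(logMType, logMsg)
	logMType <- GENERAL
	logMsg <- "PCP logging started"
	time.Sleep(10 * time.Millisecond) // give the listener time to print (demo only)
}

Because both channels are unbuffered, each send blocks until the listener receives it, which is what keeps the (type, message) pairs in lockstep.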

View file

@@ -12,13 +12,13 @@ import (
 	"syscall"
 	"time"

-	elecLogDef "gitlab.com/spdf/elektron/logging/def"
+	elekLogDef "gitlab.com/spdf/elektron/logging/def"
 	"gitlab.com/spdf/elektron/pcp"
 	"gitlab.com/spdf/elektron/rapl"
 )

 func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThreshold, loThreshold float64,
-	logMType chan elecLogDef.LogMessageType, logMsg chan string) {
+	logMType chan elekLogDef.LogMessageType, logMsg chan string) {
 	const pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c config"
 	cmd := exec.Command("sh", "-c", pcpCommand)
@@ -41,7 +41,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 		scanner.Scan()
 		// Write to logfile
-		logMType <- elecLogDef.PCP
+		logMType <- elekLogDef.PCP
 		logMsg <- scanner.Text()
 		headers := strings.Split(scanner.Text(), ",")
@@ -77,10 +77,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 		for scanner.Scan() {
 			if *logging {
-				logMType <- elecLogDef.GENERAL
+				logMType <- elekLogDef.GENERAL
 				logMsg <- "Logging PCP..."
 				split := strings.Split(scanner.Text(), ",")
-				logMType <- elecLogDef.PCP
+				logMType <- elekLogDef.PCP
 				logMsg <- scanner.Text()
 				totalPower := 0.0
@@ -92,7 +92,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 				powerHistories[host].Value = power
 				powerHistories[host] = powerHistories[host].Next()
-				logMType <- elecLogDef.GENERAL
+				logMType <- elekLogDef.GENERAL
 				logMsg <- fmt.Sprintf("Host: %s, Power: %f", indexToHost[powerIndex], (power * pcp.RAPLUnits))
 				totalPower += power
@@ -104,11 +104,11 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 				clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
-				logMType <- elecLogDef.GENERAL
+				logMType <- elekLogDef.GENERAL
 				logMsg <- fmt.Sprintf("Total power: %f, %d Sec Avg: %f", clusterPower, clusterPowerHist.Len(), clusterMean)
 				if clusterMean > hiThreshold {
-					logMType <- elecLogDef.GENERAL
+					logMType <- elekLogDef.GENERAL
 					logMsg <- "Need to cap a node"
 					// Create statistics for all victims and choose one to cap
 					victims := make([]pcp.Victim, 0, 8)
@@ -130,10 +130,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 						if !cappedHosts[victim.Host] {
 							cappedHosts[victim.Host] = true
 							orderCapped = append(orderCapped, victim.Host)
-							logMType <- elecLogDef.GENERAL
+							logMType <- elekLogDef.GENERAL
 							logMsg <- fmt.Sprintf("Capping Victim %s Avg. Wattage: %f", victim.Host, victim.Watts*pcp.RAPLUnits)
 							if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
-								logMType <- elecLogDef.ERROR
+								logMType <- elekLogDef.ERROR
 								logMsg <- "Error capping host"
 							}
 							break // Only cap one machine at a time.
@@ -148,10 +148,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 						cappedHosts[host] = false
 						// Use RAPL package to send uncap.
 						log.Printf("Uncapping host %s", host)
-						logMType <- elecLogDef.GENERAL
+						logMType <- elekLogDef.GENERAL
 						logMsg <- fmt.Sprintf("Uncapped host %s", host)
 						if err := rapl.Cap(host, "rapl", 100); err != nil {
-							logMType <- elecLogDef.ERROR
+							logMType <- elekLogDef.ERROR
 							logMsg <- "Error capping host"
 						}
 					}
@@ -162,7 +162,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 		}
 	}(logging, hiThreshold, loThreshold)
-	logMType <- elecLogDef.GENERAL
+	logMType <- elekLogDef.GENERAL
 	logMsg <- "PCP logging started"
 	if err := cmd.Start(); err != nil {
@@ -173,7 +173,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 	select {
 	case <-quit:
-		logMType <- elecLogDef.GENERAL
+		logMType <- elekLogDef.GENERAL
 		logMsg <- "Stopping PCP logging in 5 seconds"
 		time.Sleep(5 * time.Second)

View file

@@ -14,7 +14,7 @@ import (
 	"time"

 	"gitlab.com/spdf/elektron/constants"
-	elecLogDef "gitlab.com/spdf/elektron/logging/def"
+	elekLogDef "gitlab.com/spdf/elektron/logging/def"
 	"gitlab.com/spdf/elektron/pcp"
 	"gitlab.com/spdf/elektron/rapl"
 	"gitlab.com/spdf/elektron/utilities"
@@ -31,14 +31,14 @@ func getNextCapValue(curCapValue float64, precision int) float64 {
 }

 func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiThreshold, loThreshold float64,
-	logMType chan elecLogDef.LogMessageType, logMsg chan string) {
+	logMType chan elekLogDef.LogMessageType, logMsg chan string) {
 	const pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c config"
 	cmd := exec.Command("sh", "-c", pcpCommand)
 	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

 	if hiThreshold < loThreshold {
-		logMType <- elecLogDef.GENERAL
+		logMType <- elekLogDef.GENERAL
 		logMsg <- "High threshold is lower than low threshold!"
 	}
@@ -55,7 +55,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 		scanner.Scan()
 		// Write to logfile
-		logMType <- elecLogDef.PCP
+		logMType <- elekLogDef.PCP
 		logMsg <- scanner.Text()
 		headers := strings.Split(scanner.Text(), ",")
@@ -95,10 +95,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 		for scanner.Scan() {
 			if *logging {
-				logMType <- elecLogDef.GENERAL
+				logMType <- elekLogDef.GENERAL
 				logMsg <- "Logging PCP..."
 				split := strings.Split(scanner.Text(), ",")
-				logMType <- elecLogDef.PCP
+				logMType <- elekLogDef.PCP
 				logMsg <- scanner.Text()
 				totalPower := 0.0
@@ -110,7 +110,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 				powerHistories[host].Value = power
 				powerHistories[host] = powerHistories[host].Next()
-				logMType <- elecLogDef.GENERAL
+				logMType <- elekLogDef.GENERAL
 				logMsg <- fmt.Sprintf("Host: %s, Power %f",
 					indexToHost[powerIndex], (power * pcp.RAPLUnits))
@@ -123,15 +123,15 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 				clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
-				logMType <- elecLogDef.GENERAL
+				logMType <- elekLogDef.GENERAL
 				logMsg <- fmt.Sprintf("Total power: %f, %d Sec Avg: %f", clusterPower, clusterPowerHist.Len(), clusterMean)
 				if clusterMean >= hiThreshold {
-					logMType <- elecLogDef.GENERAL
+					logMType <- elekLogDef.GENERAL
 					logMsg <- "Need to cap a node"
-					logMType <- elecLogDef.GENERAL
+					logMType <- elekLogDef.GENERAL
 					logMsg <- fmt.Sprintf("Cap values of capped victims: %v", cappedVictims)
-					logMType <- elecLogDef.GENERAL
+					logMType <- elekLogDef.GENERAL
 					logMsg <- fmt.Sprintf("Cap values of victims to uncap: %v", orderCappedVictims)
 					// Create statistics for all victims and choose one to cap
 					victims := make([]pcp.Victim, 0, 8)
@@ -159,10 +159,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					}
 					// Need to cap this victim.
 					if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {
-						logMType <- elecLogDef.GENERAL
+						logMType <- elekLogDef.GENERAL
 						logMsg <- fmt.Sprintf("Error capping host %s", victims[i].Host)
 					} else {
-						logMType <- elecLogDef.GENERAL
+						logMType <- elekLogDef.GENERAL
 						logMsg <- fmt.Sprintf("Capped host[%s] at %f", victims[i].Host, 50.0)
 						// Keeping track of this victim and its cap value
 						cappedVictims[victims[i].Host] = 50.0
@@ -186,11 +186,11 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					if capValue > constants.LowerCapLimit {
 						newCapValue := getNextCapValue(capValue, 2)
 						if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {
-							logMType <- elecLogDef.ERROR
+							logMType <- elekLogDef.ERROR
 							logMsg <- fmt.Sprintf("Error capping host[%s]", alreadyCappedHosts[i])
 						} else {
 							// Successful cap
-							logMType <- elecLogDef.GENERAL
+							logMType <- elekLogDef.GENERAL
 							logMsg <- fmt.Sprintf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
 							// Checking whether this victim can be capped further
 							if newCapValue <= constants.LowerCapLimit {
@@ -214,17 +214,17 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 						}
 					}
 					if !canCapAlreadyCappedVictim {
-						logMType <- elecLogDef.GENERAL
+						logMType <- elekLogDef.GENERAL
 						logMsg <- "No Victim left to cap."
 					}
 				}
 			} else if clusterMean < loThreshold {
-				logMType <- elecLogDef.GENERAL
+				logMType <- elekLogDef.GENERAL
 				logMsg <- "Need to uncap a node"
-				logMType <- elecLogDef.GENERAL
+				logMType <- elekLogDef.GENERAL
 				logMsg <- fmt.Sprintf("Cap values of capped victims: %v", cappedVictims)
-				logMType <- elecLogDef.GENERAL
+				logMType <- elekLogDef.GENERAL
 				logMsg <- fmt.Sprintf("Cap values of victims to uncap: %v", orderCappedVictims)
 				if len(orderCapped) > 0 {
 					// We pick the host that is capped the most to uncap.
@@ -235,11 +235,11 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					// This is a floating point operation and might suffer from precision loss.
 					newUncapValue := orderCappedVictims[hostToUncap] * 2.0
 					if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {
-						logMType <- elecLogDef.ERROR
+						logMType <- elekLogDef.ERROR
 						logMsg <- fmt.Sprintf("Error uncapping host[%s]", hostToUncap)
 					} else {
 						// Successful uncap
-						logMType <- elecLogDef.GENERAL
+						logMType <- elekLogDef.GENERAL
 						logMsg <- fmt.Sprintf("Uncapped host[%s] to %f", hostToUncap, newUncapValue)
 						// Can we uncap this host further? If not, then we remove its entry from orderCapped.
 						if newUncapValue >= 100.0 { // can compare using ==
@@ -261,7 +261,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 					}
 				}
 			} else {
-				logMType <- elecLogDef.GENERAL
+				logMType <- elekLogDef.GENERAL
 				logMsg <- "No host staged for Uncapped"
 			}
 		}
@@ -271,7 +271,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	}(logging, hiThreshold, loThreshold)
-	logMType <- elecLogDef.GENERAL
+	logMType <- elekLogDef.GENERAL
 	logMsg <- "PCP logging started"
 	if err := cmd.Start(); err != nil {
@@ -282,7 +282,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 	select {
 	case <-quit:
-		logMType <- elecLogDef.GENERAL
+		logMType <- elekLogDef.GENERAL
 		logMsg <- "Stopping PCP logging in 5 seconds"
 		time.Sleep(5 * time.Second)
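The progressive policy halves a victim's cap on the way down (getNextCapValue(capValue, 2)) and doubles it on the way up (newUncapValue := orderCappedVictims[hostToUncap] * 2.0). The body of getNextCapValue sits outside the hunks shown above, so the following is an assumed reconstruction inferred only from its call sites: halve the value, then round to the requested number of decimal places.

import "math"

// Assumed implementation, inferred from the call sites in this diff:
// halve the current cap value and round to 'precision' decimal places.
func getNextCapValue(curCapValue float64, precision int) float64 {
	halved := curCapValue / 2.0
	factor := math.Pow(10.0, float64(precision))
	return math.Round(halved*factor) / factor
}

Under this reading, getNextCapValue(50.0, 2) yields 25.0, and repeated halving stops once the value falls to constants.LowerCapLimit or below, matching the newCapValue <= constants.LowerCapLimit check in the diff.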

View file

@@ -13,7 +13,7 @@ import (
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
 	"gitlab.com/spdf/elektron/def"
-	elecLogDef "gitlab.com/spdf/elektron/logging/def"
+	elekLogDef "gitlab.com/spdf/elektron/logging/def"
 	"gitlab.com/spdf/elektron/pcp"
 	"gitlab.com/spdf/elektron/schedulers"
 	"gitlab.com/spdf/elektron/powerCap"
@@ -82,9 +82,9 @@ func main() {
 	logPrefix := *pcplogPrefix + "_" + formattedStartTime

 	// creating logger and attaching different logging platforms
-	logger := elecLogDef.BuildLogger(startTime, logPrefix)
+	logger := elekLogDef.BuildLogger(startTime, logPrefix)

 	// logging channels
-	logMType := make(chan elecLogDef.LogMessageType)
+	logMType := make(chan elekLogDef.LogMessageType)
 	logMsg := make(chan string)
 	go logger.Listen(logMType, logMsg)
@@ -101,32 +101,32 @@ func main() {
 	if *tasksFile == "" {
 		//fmt.Println("No file containing tasks specification provided.")
-		logger.WriteLog(elecLogDef.ERROR, "No file containing tasks specification provided")
+		logger.WriteLog(elekLogDef.ERROR, "No file containing tasks specification provided")
 		os.Exit(1)
 	}

 	if *hiThreshold < *loThreshold {
 		//fmt.Println("High threshold is of a lower value than low threshold.")
-		logger.WriteLog(elecLogDef.ERROR, "High threshold is of a lower value than low threshold")
+		logger.WriteLog(elekLogDef.ERROR, "High threshold is of a lower value than low threshold")
 		os.Exit(1)
 	}

 	tasks, err := def.TasksFromJSON(*tasksFile)
 	if err != nil || len(tasks) == 0 {
 		//fmt.Println("Invalid tasks specification file provided")
-		logger.WriteLog(elecLogDef.ERROR, "Invalid tasks specification file provided")
+		logger.WriteLog(elekLogDef.ERROR, "Invalid tasks specification file provided")
 		os.Exit(1)
 	}

 	//log.Println("Scheduling the following tasks:")
-	logger.WriteLog(elecLogDef.GENERAL, "Scheduling the following tasks:")
+	logger.WriteLog(elekLogDef.GENERAL, "Scheduling the following tasks:")
 	for _, task := range tasks {
 		fmt.Println(task)
 	}

 	if *enableSchedPolicySwitch {
 		if spcf := *schedPolConfigFile; spcf == "" {
-			logger.WriteLog(elecLogDef.ERROR, "No file containing characteristics for scheduling policies")
+			logger.WriteLog(elekLogDef.ERROR, "No file containing characteristics for scheduling policies")
 		} else {
 			// Initializing the characteristics of the scheduling policies.
 			schedulers.InitSchedPolicyCharacteristics(spcf)
@@ -172,7 +172,7 @@ func main() {
 	}
 	if _, ok := powercapValues[*powerCapPolicy]; !ok {
-		logger.WriteLog(elecLogDef.ERROR, "Incorrect power capping policy specified.")
+		logger.WriteLog(elekLogDef.ERROR, "Incorrect power capping policy specified.")
 		os.Exit(1)
 	} else {
 		// Indicating which power capping policy to use, if any.
@@ -188,7 +188,7 @@ func main() {
 	// These values are not used to configure the scheduler.
 	// hiThreshold and loThreshold are passed to the powercappers.
 	if *hiThreshold < *loThreshold {
-		logger.WriteLog(elecLogDef.ERROR, "High threshold is of a"+
+		logger.WriteLog(elekLogDef.ERROR, "High threshold is of a"+
 			" lower value than low threshold.")
 		os.Exit(1)
 	}

View file

@@ -12,7 +12,7 @@ import (
 	"github.com/mesos/mesos-go/api/v0/mesosutil"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
 	"gitlab.com/spdf/elektron/def"
-	elecLogDef "gitlab.com/spdf/elektron/logging/def"
+	elekLogDef "gitlab.com/spdf/elektron/logging/def"
 	"gitlab.com/spdf/elektron/utilities"
 	"gitlab.com/spdf/elektron/utilities/schedUtils"
 )
@@ -51,7 +51,7 @@ type BaseScheduler struct {
 	schedTrace *log.Logger

 	// Send the type of the message to be logged
-	logMsgType chan elecLogDef.LogMessageType
+	logMsgType chan elekLogDef.LogMessageType
 	// Send the message to be logged
 	logMsg chan string
@@ -234,7 +234,7 @@ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos
 	}
 }

-func (s *BaseScheduler) Log(lmt elecLogDef.LogMessageType, msg string) {
+func (s *BaseScheduler) Log(lmt elekLogDef.LogMessageType, msg string) {
 	s.mutex.Lock()
 	s.logMsgType <- lmt
 	s.logMsg <- msg
@@ -242,8 +242,8 @@ func (s *BaseScheduler) Log(lmt elecLogDef.LogMessageType, msg string) {
 }

 func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
-	lmt := elecLogDef.GENERAL
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.GENERAL
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	var msg string
 	if ts == nil {
 		msg = msgColor.Sprintf("TASKS STARTING... host = [%s]", offer.GetHostname())
@@ -255,38 +255,38 @@ func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 }

 func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
-	lmt := elecLogDef.GENERAL
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.GENERAL
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("Watts considered for task[%s] and host[%s] = %f Watts",
 		ts.Name, host, wattsToConsider)
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
-	lmt := elecLogDef.GENERAL
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.GENERAL
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("Received %d resource offers", len(offers))
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
-	lmt := elecLogDef.WARNING
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.WARNING
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("DECLINING OFFER for host[%s]... "+
 		"No tasks left to schedule", offer.GetHostname())
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogNumberOfRunningTasks() {
-	lmt := elecLogDef.GENERAL
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.GENERAL
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("Number of tasks still Running = %d", s.tasksRunning)
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
-	lmt := elecLogDef.GENERAL
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.GENERAL
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	buffer := bytes.Buffer{}
 	buffer.WriteString(fmt.Sprintln("Colocated with:"))
 	s.TasksRunningMutex.Lock()
@@ -300,20 +300,20 @@ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
 func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
 	msg := fmt.Sprint(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
-	s.Log(elecLogDef.SCHED_TRACE, msg)
+	s.Log(elekLogDef.SCHED_TRACE, msg)
 }

 func (s *BaseScheduler) LogTerminateScheduler() {
-	lmt := elecLogDef.GENERAL
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.GENERAL
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprint("Done scheduling all tasks!")
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 	offerResources ...interface{}) {
-	lmt := elecLogDef.WARNING
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.WARNING
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	buffer := bytes.Buffer{}
 	buffer.WriteString(fmt.Sprintln("DECLINING OFFER... Offer has insufficient resources to launch a task"))
 	buffer.WriteString(fmt.Sprintf("Offer Resources <CPU: %f, RAM: %f, Watts: %f>", offerResources...))
@@ -322,91 +322,91 @@ func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 }

 func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
-	lmt := elecLogDef.ERROR
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.ERROR
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("OFFER RESCINDED: OfferID = %s", offerID)
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
-	lmt := elecLogDef.ERROR
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.ERROR
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("SLAVE LOST: SlaveID = %s", slaveID)
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
-	lmt := elecLogDef.ERROR
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.ERROR
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("EXECUTOR LOST: ExecutorID = %s, SlaveID = %s", executorID, slaveID)
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
 	slaveID *mesos.SlaveID, message string) {
-	lmt := elecLogDef.GENERAL
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.GENERAL
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("Received Framework message from executor [%s]: %s", executorID, message)
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogMesosError(err string) {
-	lmt := elecLogDef.ERROR
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.ERROR
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("MESOS ERROR: %s", err)
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogElectronError(err error) {
-	lmt := elecLogDef.ERROR
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.ERROR
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("ELECTRON ERROR: %v", err)
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
 	masterInfo *mesos.MasterInfo) {
-	lmt := elecLogDef.SUCCESS
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.SUCCESS
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("FRAMEWORK REGISTERED! frameworkID = %s, master = %s",
 		frameworkID, masterInfo)
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
-	lmt := elecLogDef.GENERAL
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.GENERAL
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprintf("Framework re-registered with master %s", masterInfo)
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogDisconnected() {
-	lmt := elecLogDef.WARNING
-	msgColor := elecLogDef.LogMessageColors[lmt]
+	lmt := elekLogDef.WARNING
+	msgColor := elekLogDef.LogMessageColors[lmt]
 	msg := msgColor.Sprint("Framework disconnected with master")
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
-	var lmt elecLogDef.LogMessageType
+	var lmt elekLogDef.LogMessageType
 	switch *status.State {
 	case mesos.TaskState_TASK_ERROR, mesos.TaskState_TASK_FAILED,
 		mesos.TaskState_TASK_KILLED, mesos.TaskState_TASK_LOST:
-		lmt = elecLogDef.ERROR
+		lmt = elekLogDef.ERROR
 	case mesos.TaskState_TASK_FINISHED:
-		lmt = elecLogDef.SUCCESS
+		lmt = elekLogDef.SUCCESS
 	default:
-		lmt = elecLogDef.GENERAL
+		lmt = elekLogDef.GENERAL
 	}
-	msgColor := elecLogDef.LogMessageColors[lmt]
-	msg := elecLogDef.LogMessageColors[elecLogDef.GENERAL].Sprintf("Task Status received for task [%s] --> %s",
+	msgColor := elekLogDef.LogMessageColors[lmt]
+	msg := elekLogDef.LogMessageColors[elekLogDef.GENERAL].Sprintf("Task Status received for task [%s] --> %s",
 		*status.TaskId.Value, msgColor.Sprint(NameFor(status.State)))
 	s.Log(lmt, msg)
 }

 func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicyState) {
 	logSPS := func() {
-		s.Log(elecLogDef.SPS, name)
+		s.Log(elekLogDef.SPS, name)
 	}
 	if s.hasReceivedResourceOffers && (s.curSchedPolicy != nextPolicy) {
 		logSPS()
@@ -415,10 +415,10 @@ func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicy
 	}
 	// Logging the size of the scheduling window and the scheduling policy
 	// that is going to schedule the tasks in the scheduling window.
-	s.Log(elecLogDef.SCHED_WINDOW, fmt.Sprintf("%d %s", s.schedWindowSize, name))
+	s.Log(elekLogDef.SCHED_WINDOW, fmt.Sprintf("%d %s", s.schedWindowSize, name))
 }

 func (s *BaseScheduler) LogClsfnAndTaskDistOverhead(overhead time.Duration) {
 	// Logging the overhead in microseconds.
-	s.Log(elecLogDef.CLSFN_TASKDIST_OVERHEAD, fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0))
+	s.Log(elekLogDef.CLSFN_TASKDIST_OVERHEAD, fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0))
 }
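Each Log* helper above looks up a color printer in elekLogDef.LogMessageColors and formats via msgColor.Sprintf or msgColor.Sprint. The map itself lives in logging/def and is outside this commit; the Sprintf/Sprint usage is consistent with *color.Color from github.com/fatih/color, but that binding is an assumption. A hedged sketch of what such a table could look like:

import "github.com/fatih/color"

// Stand-in for the real LogMessageType in logging/def.
type LogMessageType int

const (
	GENERAL LogMessageType = iota
	WARNING
	ERROR
	SUCCESS
)

// Hypothetical color table; the real LogMessageColors is defined in
// gitlab.com/spdf/elektron/logging/def and is not shown in this diff.
var LogMessageColors = map[LogMessageType]*color.Color{
	GENERAL: color.New(color.FgWhite),
	WARNING: color.New(color.FgYellow),
	ERROR:   color.New(color.FgRed),
	SUCCESS: color.New(color.FgGreen),
}

With a table like this, msgColor := LogMessageColors[WARNING] followed by msgColor.Sprintf(...) reproduces the pattern used throughout the methods above.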

View file

@@ -6,7 +6,7 @@ import (
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
 	"gitlab.com/spdf/elektron/def"
-	elecLogDef "gitlab.com/spdf/elektron/logging/def"
+	elekLogDef "gitlab.com/spdf/elektron/logging/def"
 )

 // Implements mesos scheduler.
@@ -21,7 +21,7 @@ type ElectronScheduler interface {
 	// log message type, and the log message to the corresponding channels.
 	// Pass the logMessageType and the logMessage to the loggers for logging.
-	Log(logMType elecLogDef.LogMessageType, logMsg string)
+	Log(logMType elekLogDef.LogMessageType, logMsg string)

 	// To be called when about to launch a task.
 	// Log message indicating that a task is about to start executing.
 	// Also, log the host on which the task is going to be launched.

View file

@@ -6,7 +6,7 @@ import (
 	"github.com/pkg/errors"
 	"gitlab.com/spdf/elektron/constants"
 	"gitlab.com/spdf/elektron/def"
-	elecLogDef "gitlab.com/spdf/elektron/logging/def"
+	elekLogDef "gitlab.com/spdf/elektron/logging/def"
 	"gitlab.com/spdf/elektron/utilities"
 	"gitlab.com/spdf/elektron/utilities/mesosUtils"
 )
@@ -14,10 +14,10 @@ import (
 func coLocated(tasks map[string]bool, s BaseScheduler) {
 	for task := range tasks {
-		s.Log(elecLogDef.GENERAL, task)
+		s.Log(elekLogDef.GENERAL, task)
 	}

-	s.Log(elecLogDef.GENERAL, "---------------------")
+	s.Log(elekLogDef.GENERAL, "---------------------")
 }

 // Get the powerClass of the given hostname.
@@ -109,7 +109,7 @@ func WithPCPLog(pcpLog chan struct{}) schedulerOptions {
 	}
 }

-func WithLoggingChannels(lmt chan elecLogDef.LogMessageType, msg chan string) schedulerOptions {
+func WithLoggingChannels(lmt chan elekLogDef.LogMessageType, msg chan string) schedulerOptions {
 	return func(s ElectronScheduler) error {
 		s.(*BaseScheduler).logMsgType = lmt
 		s.(*BaseScheduler).logMsg = msg
@@ -132,8 +132,8 @@ func WithSchedPolSwitchEnabled(enableSchedPolicySwitch bool, switchingCriteria s
 func WithNameOfFirstSchedPolToFix(nameOfFirstSchedPol string) schedulerOptions {
 	return func(s ElectronScheduler) error {
 		if nameOfFirstSchedPol == "" {
-			lmt := elecLogDef.WARNING
-			msgColor := elecLogDef.LogMessageColors[lmt]
+			lmt := elekLogDef.WARNING
+			msgColor := elekLogDef.LogMessageColors[lmt]
 			msg := msgColor.Sprintf("First scheduling policy to deploy not mentioned. This is now going to be determined at runtime.")
 			s.(*BaseScheduler).Log(lmt, msg)
 			return nil
@@ -152,8 +152,8 @@ func WithFixedSchedulingWindow(toFixSchedWindow bool, fixedSchedWindowSize int)
 		if fixedSchedWindowSize <= 0 {
 			return errors.New("Invalid value of scheduling window size. Please provide a value > 0.")
 		}
-		lmt := elecLogDef.WARNING
-		msgColor := elecLogDef.LogMessageColors[lmt]
+		lmt := elekLogDef.WARNING
+		msgColor := elekLogDef.LogMessageColors[lmt]
 		msg := msgColor.Sprintf("Fixing the size of the scheduling window to %d...", fixedSchedWindowSize)
 		s.(*BaseScheduler).Log(lmt, msg)
 		s.(*BaseScheduler).toFixSchedWindow = toFixSchedWindow
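The With* helpers in this file follow Go's functional-options idiom: each returns a schedulerOptions closure that configures the scheduler and may fail. A minimal sketch of how such options are typically applied (the constructor name below is hypothetical; only the schedulerOptions shape and the With* helpers appear in this diff, and *BaseScheduler is assumed to satisfy ElectronScheduler as it does in this repository):

// Mirrors the option type used by the With* helpers above.
type schedulerOptions func(e ElectronScheduler) error

// hypotheticalNewScheduler illustrates how the options would be applied;
// the real constructor is not part of this commit.
func hypotheticalNewScheduler(opts ...schedulerOptions) (*BaseScheduler, error) {
	s := &BaseScheduler{}
	for _, opt := range opts {
		// Each option mutates the scheduler; the first failure aborts construction.
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

This is why WithLoggingChannels and WithFixedSchedulingWindow can validate their arguments and return an error instead of panicking at configuration time.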

View file

@@ -8,7 +8,7 @@ import (
 	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 	sched "github.com/mesos/mesos-go/api/v0/scheduler"
 	"gitlab.com/spdf/elektron/def"
-	elecLogDef "gitlab.com/spdf/elektron/logging/def"
+	elekLogDef "gitlab.com/spdf/elektron/logging/def"
 )

 type SchedPolicyContext interface {
@@ -71,7 +71,7 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
 	// Determine the distribution of tasks in the new scheduling window.
 	taskDist, err := def.GetTaskDistributionInWindow(baseSchedRef.schedWindowSize, baseSchedRef.tasks)
 	baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
-	baseSchedRef.Log(elecLogDef.GENERAL, fmt.Sprintf("Switching... TaskDistribution[%f]", taskDist))
+	baseSchedRef.Log(elekLogDef.GENERAL, fmt.Sprintf("Switching... TaskDistribution[%f]", taskDist))
 	if err != nil {
 		// All the tasks in the window were only classified into 1 cluster.
 		// Max-Min and Max-GreedyMins would work the same way as Bin-Packing for this situation.