Merged in experimentation/schedPolicySwitcher (pull request #1)
Experimentation/schedPolicySwitcher

1. Initial commit for consolidated loggers using the observer pattern (a hedged sketch of the idea follows this list).
2. Class factory for schedulers.
3. Using the scheduling policy class factory in schedulers/store.go and the scheduler builder helpers in schedulers/helpers.go, added the ability to plug in a scheduling policy of your choice from the command line (right now only first-fit and bin-packing are possible; the class factory will be updated to include other scheduling policies as well). A sketch of the factory idea also appears after this list.
4. Removed TODO for using generic task sorters. Modified TODO for a config file input to run Electron.
5. Added other schedulers to the factory.
6. Partially retrofitted the other scheduling policies to use the logging library.
7. Retrofitted extrema and progressive to use the consolidated logging library. Fixed a parameter issue with s.base.Disconnected(). Formatted the project.
8. Moved statusUpdate(...) into base.go to remove redundant code.
9. Converted the baseScheduler into a state machine whose state is a scheduling policy that defines an approach to consuming resource offers (see the state-machine sketch below).
10. Added another command line argument to enable switching of scheduling policies, and retrofitted the scheduling policies to switch only when that feature is enabled. Changed the argument to coLocated(...) to take the base type rather than the ElectronScheduler type. Also prepended the prefix to the log directory so that it is easier to tell what the files in a directory correspond to without viewing their contents.

Defined methods in ElectronScheduler, each corresponding to a type of log that an ElectronScheduler would make; each of these methods needs to be implemented by the scheduling policy. Electron has only one scheduler that implements the Mesos scheduler interface; all the scheduling policies are just different implementations of ways to consume Mesos resource offers. Retrofitted the scheduling policies to embed SchedPolicyState instead of baseScheduler.

Approved-by: Pradyumna Kaushik <pkaushi1@binghamton.edu>
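The consolidated logging in item 1 routes every log line through a message-type channel and a message channel (the diff below sends on logMType and logMsg with types such as elecLogDef.GENERAL, elecLogDef.PCP, and elecLogDef.ERROR). A minimal, hypothetical sketch of that observer-pattern wiring follows; the type and function names here stand in for the real elecLogDef package and are assumptions, not its actual API.

```go
// Sketch only: a dispatcher reads (type, message) pairs from two channels and
// notifies every registered logger observer. Names are illustrative.
package main

import "fmt"

type LogMessageType int

const (
	GENERAL LogMessageType = iota
	PCP
	ERROR
)

// loggerObserver is the observer interface; concrete loggers (console logger,
// PCP file logger, ...) would implement Log.
type loggerObserver interface {
	Log(msgType LogMessageType, msg string)
}

type consoleLogger struct{}

func (consoleLogger) Log(msgType LogMessageType, msg string) {
	fmt.Printf("[%d] %s\n", msgType, msg)
}

// dispatch fans each received message out to all observers.
func dispatch(logMType <-chan LogMessageType, logMsg <-chan string,
	observers []loggerObserver, done chan<- struct{}) {
	for mt := range logMType {
		msg := <-logMsg
		for _, o := range observers {
			o.Log(mt, msg)
		}
	}
	done <- struct{}{}
}

func main() {
	logMType := make(chan LogMessageType)
	logMsg := make(chan string)
	done := make(chan struct{})
	go dispatch(logMType, logMsg, []loggerObserver{consoleLogger{}}, done)

	// Producers (e.g. StartPCPLogAndProgressiveExtremaCap) log like this:
	logMType <- GENERAL
	logMsg <- "PCP logging started"

	close(logMType)
	<-done
}
```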
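Item 3 describes a class factory keyed by the policy name given on the command line. A rough sketch of that idea is below; the identifiers (SchedPolicy, policyFactory, the -schedPolicy flag) are illustrative assumptions, not the actual contents of schedulers/store.go or schedulers/helpers.go.

```go
// Sketch only: map a command-line policy name to a builder function.
package main

import (
	"flag"
	"fmt"
	"os"
)

// SchedPolicy stands in for a scheduling policy implementation.
type SchedPolicy interface {
	Name() string
}

type firstFit struct{}

func (firstFit) Name() string { return "first-fit" }

type binPacking struct{}

func (binPacking) Name() string { return "bin-packing" }

// policyFactory is the class factory: policy name -> builder.
var policyFactory = map[string]func() SchedPolicy{
	"first-fit":   func() SchedPolicy { return firstFit{} },
	"bin-packing": func() SchedPolicy { return binPacking{} },
}

func main() {
	policyName := flag.String("schedPolicy", "first-fit", "scheduling policy to use")
	flag.Parse()

	build, ok := policyFactory[*policyName]
	if !ok {
		fmt.Fprintf(os.Stderr, "unknown scheduling policy %q\n", *policyName)
		os.Exit(1)
	}
	fmt.Println("using policy:", build().Name())
}
```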
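Items 9 and 10 turn the single Mesos scheduler into a state machine whose state is the current scheduling policy, switchable only when the feature is enabled; there is one implementation of the Mesos scheduler interface, and policies differ only in how they consume offers. A hedged sketch of that shape follows; the interface and field names are assumptions rather than the real SchedPolicyState definition.

```go
// Sketch only: the base scheduler delegates offer consumption to its current
// policy (the state) and may swap the state between offer cycles.
package main

import "fmt"

// offer stands in for a Mesos resource offer.
type offer struct{ host string }

// schedPolicyState is the state interface: each scheduling policy implements
// its own way of consuming offers.
type schedPolicyState interface {
	ConsumeOffers(s *baseScheduler, offers []offer)
}

type firstFitPolicy struct{}

func (firstFitPolicy) ConsumeOffers(s *baseScheduler, offers []offer) {
	fmt.Println("first-fit consuming", len(offers), "offers")
}

type binPackingPolicy struct{}

func (binPackingPolicy) ConsumeOffers(s *baseScheduler, offers []offer) {
	fmt.Println("bin-packing consuming", len(offers), "offers")
}

// baseScheduler is the one scheduler implementing the Mesos interface; it
// delegates ResourceOffers to whatever policy is currently its state.
type baseScheduler struct {
	curPolicy        schedPolicyState
	switchingEnabled bool
}

func (s *baseScheduler) ResourceOffers(offers []offer) {
	s.curPolicy.ConsumeOffers(s, offers)
	if s.switchingEnabled {
		// Placeholder for whatever criterion picks the next policy.
		s.curPolicy = binPackingPolicy{}
	}
}

func main() {
	s := &baseScheduler{curPolicy: firstFitPolicy{}, switchingEnabled: true}
	s.ResourceOffers([]offer{{host: "node-1"}})
	s.ResourceOffers([]offer{{host: "node-2"}})
}
```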
parent cb71153362
commit 065705d480
24 changed files with 1392 additions and 917 deletions
@@ -5,11 +5,12 @@ import (
"bitbucket.org/sunybingcloud/elektron/pcp"
"bitbucket.org/sunybingcloud/elektron/rapl"
"bitbucket.org/sunybingcloud/elektron/utilities"
elecLogDef "bitbucket.org/sunybingcloud/elektron/logging/def"
"bufio"
"container/ring"
"fmt"
"log"
"math"
"os"
"os/exec"
"sort"
"strconv"
@@ -28,23 +29,18 @@ func getNextCapValue(curCapValue float64, precision int) float64 {
return float64(round(curCapValue*output)) / output
}

func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, prefix string, hiThreshold, loThreshold float64) {
log.Println("Inside Log and Progressive Extrema")
func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiThreshold, loThreshold float64,
logMType chan elecLogDef.LogMessageType, logMsg chan string) {

const pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c config"
cmd := exec.Command("sh", "-c", pcpCommand)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}

if hiThreshold < loThreshold {
log.Println("High threshold is lower than low threshold!")
logMType <- elecLogDef.GENERAL
logMsg <- "High threshold is lower than low threshold!"
}

logFile, err := os.Create("./" + prefix + ".pcplog")
if err != nil {
log.Fatal(err)
}

defer logFile.Close()

pipe, err := cmd.StdoutPipe()
if err != nil {
log.Fatal(err)
@@ -57,8 +53,9 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
// Get names of the columns.
scanner.Scan()

// Write to logfile.
logFile.WriteString(scanner.Text() + "\n")
// Write to logfile
logMType <- elecLogDef.PCP
logMsg <- scanner.Text()

headers := strings.Split(scanner.Text(), ",")
@@ -97,9 +94,11 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
for scanner.Scan() {
if *logging {
log.Println("Logging PCP...")
logMType <- elecLogDef.GENERAL
logMsg <- "Logging PCP..."
split := strings.Split(scanner.Text(), ",")
logFile.WriteString(scanner.Text() + "\n")
logMType <- elecLogDef.PCP
logMsg <- scanner.Text()

totalPower := 0.0
for _, powerIndex := range powerIndexes {
@@ -110,7 +109,9 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
powerHistories[host].Value = power
powerHistories[host] = powerHistories[host].Next()

log.Printf("Host: %s, Power: %f", indexToHost[powerIndex], (power * pcp.RAPLUnits))
logMType <- elecLogDef.GENERAL
logMsg <- fmt.Sprintf("Host: %s, Power %f",
indexToHost[powerIndex], (power * pcp.RAPLUnits))

totalPower += power
}
@@ -121,13 +122,17 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)

log.Printf("Total power: %f, %d Sec Avg: %f", clusterPower, clusterPowerHist.Len(), clusterMean)
logMType <- elecLogDef.GENERAL
logMsg <- fmt.Sprintf("Total power: %f, %d Sec Avg: %f", clusterPower, clusterPowerHist.Len(), clusterMean)

if clusterMean >= hiThreshold {
log.Println("Need to cap a node")
log.Printf("Cap values of capped victims: %v", cappedVictims)
log.Printf("Cap values of victims to uncap: %v", orderCappedVictims)
// Create statics for all victims and choose one to cap.
logMType <- elecLogDef.GENERAL
logMsg <- "Need to cap a node"
logMType <- elecLogDef.GENERAL
logMsg <- fmt.Sprintf("Cap values of capped victims: %v", cappedVictims)
logMType <- elecLogDef.GENERAL
logMsg <- fmt.Sprintf("Cap values of victims to uncap: %v", orderCappedVictims)
// Create statics for all victims and choose one to cap
victims := make([]pcp.Victim, 0, 8)

// TODO: Just keep track of the largest to reduce fron nlogn to n
@@ -153,10 +158,12 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
}
// Need to cap this victim.
if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {
log.Printf("Error capping host %s", victims[i].Host)
logMType <- elecLogDef.GENERAL
logMsg <- fmt.Sprintf("Error capping host %s", victims[i].Host)
} else {
log.Printf("Capped host[%s] at %f", victims[i].Host, 50.0)
// Keeping track of this victim and it's cap value.
logMType <- elecLogDef.GENERAL
logMsg <- fmt.Sprintf("Capped host[%s] at %f", victims[i].Host, 50.0)
// Keeping track of this victim and it's cap value
cappedVictims[victims[i].Host] = 50.0
newVictimFound = true
// This node can be uncapped and hence adding to orderCapped.
@@ -178,11 +185,13 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
if capValue > constants.LowerCapLimit {
newCapValue := getNextCapValue(capValue, 2)
if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {
log.Printf("Error capping host[%s]", alreadyCappedHosts[i])
logMType <- elecLogDef.ERROR
logMsg <- fmt.Sprintf("Error capping host[%s]", alreadyCappedHosts[i])
} else {
// Successful cap
log.Printf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
// Checking whether this victim can be capped further.
logMType <- elecLogDef.GENERAL
logMsg <- fmt.Sprintf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
// Checking whether this victim can be capped further
if newCapValue <= constants.LowerCapLimit {
// Deleting victim from cappedVictims.
delete(cappedVictims, alreadyCappedHosts[i])
@@ -204,14 +213,18 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
}
}
if !canCapAlreadyCappedVictim {
log.Println("No Victim left to cap.")
logMType <- elecLogDef.GENERAL
logMsg <- "No Victim left to cap."
}
}

} else if clusterMean < loThreshold {
log.Println("Need to uncap a node")
log.Printf("Cap values of capped victims: %v", cappedVictims)
log.Printf("Cap values of victims to uncap: %v", orderCappedVictims)
logMType <- elecLogDef.GENERAL
logMsg <- "Need to uncap a node"
logMType <- elecLogDef.GENERAL
logMsg <- fmt.Sprintf("Cap values of capped victims: %v", cappedVictims)
logMType <- elecLogDef.GENERAL
logMsg <- fmt.Sprintf("Cap values of victims to uncap: %v", orderCappedVictims)
if len(orderCapped) > 0 {
// We pick the host that is capped the most to uncap.
orderCappedToSort := utilities.GetPairList(orderCappedVictims)
@@ -221,13 +234,15 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
// This is a floating point operation and might suffer from precision loss.
newUncapValue := orderCappedVictims[hostToUncap] * 2.0
if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {
log.Printf("Error uncapping host[%s]", hostToUncap)
logMType <- elecLogDef.ERROR
logMsg <- fmt.Sprintf("Error uncapping host[%s]", hostToUncap)
} else {
// Successful uncap.
log.Printf("Uncapped host[%s] to %f", hostToUncap, newUncapValue)
// Can we uncap this host further. If not, then we remove its entry from orderCapped.
if newUncapValue >= 100.0 { // Can compare using ==
// Deleting entry from orderCapped.
// Successful uncap
logMType <- elecLogDef.GENERAL
logMsg <- fmt.Sprintf("Uncapped host[%s] to %f", hostToUncap, newUncapValue)
// Can we uncap this host further. If not, then we remove its entry from orderCapped
if newUncapValue >= 100.0 { // can compare using ==
// Deleting entry from orderCapped
for i, victimHost := range orderCapped {
if victimHost == hostToUncap {
orderCapped = append(orderCapped[:i], orderCapped[i+1:]...)
@@ -245,7 +260,8 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
}
}
} else {
log.Println("No host staged for Uncapping")
logMType <- elecLogDef.GENERAL
logMsg <- "No host staged for Uncapped"
}
}
}
@@ -254,7 +270,8 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
}(logging, hiThreshold, loThreshold)

log.Println("PCP logging started")
logMType <- elecLogDef.GENERAL
logMsg <- "PCP logging started"

if err := cmd.Start(); err != nil {
log.Fatal(err)
@@ -264,7 +281,8 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
select {
case <-quit:
log.Println("Stopping PCP logging in 5 seconds")
logMType <- elecLogDef.GENERAL
logMsg <- "Stopping PCP logging in 5 seconds"
time.Sleep(5 * time.Second)

// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
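The last hunk cites a Stack Overflow thread on killing child processes correctly, and the pmdumptext command is started with Setpgid: true earlier in this file; the usual pattern is to signal the whole process group on shutdown so the shell's children die too. A small sketch under that assumption (Unix only; the sleep command is just a stand-in for pmdumptext):

```go
// Sketch only: kill the entire process group created by Setpgid.
package main

import (
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sh", "-c", "sleep 60")
	// With Setpgid set, the child's process-group ID equals its PID.
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	time.Sleep(1 * time.Second)
	// A negative PID signals the entire process group.
	syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)
	cmd.Wait()
}
```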