Merged in experimentation/schedPolicySwitcher (pull request #1)
Experimentation/schedPolicySwitcher 1. Initial commit for consolidated loggers using observer pattern. 2. Class factory for schedulers. 3. Using the scheduling policy class factory in schedulers/store.go and the scheduler builder helpers in schedulers/helpers.go, added a feature to be able to plug a scheduling policy of your choice from the command line (right now only first-fit and bin-packing are possible). Will be updating the class factory to include other scheduling policies as well. 4. Removed TODO for using generic task sorters. Modified TODO for a config file input to run electron. 5. Added other schedulers to the factory. 6. Partially retrofitted the other scheduling policies to use the logging library. 7. Retrofitted extrema and progressive to use the consolidated logging library. Fixed parameter issue with s.base.Disconnected(). Formatted project. 8. Moved statusUpdate(...) into base.go to remove redundant code. 9. Converted the baseScheduler into a state machine where the state is a scheduling policy that defines an approach to consume resource offers. 10. Added another command line argument to be used to enable switching of scheduling policies. Retrofitted scheduling policies to switch only if the particular feature has been enabled. Changed the argument to coLocated(...) to take the base type rather than the ElectronScheduler type. Also, prepended the prefix to the directory of the logs so that it would be easier to determine what the files in a directory correspond to without viewing the contents of the directory. Defined methods in ElectronScheduler; each of these methods corresponds to a type of log that an ElectronScheduler would make, and each would need to be implemented by the scheduling policy. Electron has only one scheduler that implements the mesos scheduler interface. All the scheduling policies are just different implementations of ways to consume mesos resource offers. 
Retrofitted scheduling policies to now embed SchedPolicyState instead of baseScheduler. Approved-by: Pradyumna Kaushik <pkaushi1@binghamton.edu>
This commit is contained in:
parent
cb71153362
commit
065705d480
24 changed files with 1392 additions and 917 deletions
|
@ -3,10 +3,11 @@ package pcp
|
|||
import (
|
||||
"bitbucket.org/sunybingcloud/elektron/pcp"
|
||||
"bitbucket.org/sunybingcloud/elektron/rapl"
|
||||
elecLogDef "bitbucket.org/sunybingcloud/elektron/logging/def"
|
||||
"bufio"
|
||||
"container/ring"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
@ -15,7 +16,9 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix string, hiThreshold, loThreshold float64) {
|
||||
func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThreshold, loThreshold float64,
|
||||
logMType chan elecLogDef.LogMessageType, logMsg chan string) {
|
||||
|
||||
const pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c config"
|
||||
cmd := exec.Command("sh", "-c", pcpCommand)
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
|
||||
|
@ -24,13 +27,6 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
log.Println("High threshold is lower than low threshold!")
|
||||
}
|
||||
|
||||
logFile, err := os.Create("./" + prefix + ".pcplog")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
defer logFile.Close()
|
||||
|
||||
pipe, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
|
@ -43,8 +39,9 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
// Get names of the columns.
|
||||
scanner.Scan()
|
||||
|
||||
// Write to logfile.
|
||||
logFile.WriteString(scanner.Text() + "\n")
|
||||
// Write to logfile
|
||||
logMType <- elecLogDef.PCP
|
||||
logMsg <- scanner.Text()
|
||||
|
||||
headers := strings.Split(scanner.Text(), ",")
|
||||
|
||||
|
@ -79,9 +76,11 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
for scanner.Scan() {
|
||||
|
||||
if *logging {
|
||||
log.Println("Logging PCP...")
|
||||
logMType <- elecLogDef.GENERAL
|
||||
logMsg <- "Logging PCP..."
|
||||
split := strings.Split(scanner.Text(), ",")
|
||||
logFile.WriteString(scanner.Text() + "\n")
|
||||
logMType <- elecLogDef.PCP
|
||||
logMsg <- scanner.Text()
|
||||
|
||||
totalPower := 0.0
|
||||
for _, powerIndex := range powerIndexes {
|
||||
|
@ -92,7 +91,8 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
powerHistories[host].Value = power
|
||||
powerHistories[host] = powerHistories[host].Next()
|
||||
|
||||
log.Printf("Host: %s, Power: %f", indexToHost[powerIndex], (power * pcp.RAPLUnits))
|
||||
logMType <- elecLogDef.GENERAL
|
||||
logMsg <- fmt.Sprintf("Host: %s, Power: %f", indexToHost[powerIndex], (power * pcp.RAPLUnits))
|
||||
|
||||
totalPower += power
|
||||
}
|
||||
|
@ -103,11 +103,13 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
|
||||
clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
|
||||
|
||||
log.Printf("Total power: %f, %d Sec Avg: %f", clusterPower, clusterPowerHist.Len(), clusterMean)
|
||||
logMType <- elecLogDef.GENERAL
|
||||
logMsg <- fmt.Sprintf("Total power: %f, %d Sec Avg: %f", clusterPower, clusterPowerHist.Len(), clusterMean)
|
||||
|
||||
if clusterMean > hiThreshold {
|
||||
log.Printf("Need to cap a node")
|
||||
// Create statics for all victims and choose one to cap.
|
||||
logMType <- elecLogDef.GENERAL
|
||||
logMsg <- "Need to cap a node"
|
||||
// Create statics for all victims and choose one to cap
|
||||
victims := make([]pcp.Victim, 0, 8)
|
||||
|
||||
// TODO: Just keep track of the largest to reduce fron nlogn to n
|
||||
|
@ -127,9 +129,11 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
if !cappedHosts[victim.Host] {
|
||||
cappedHosts[victim.Host] = true
|
||||
orderCapped = append(orderCapped, victim.Host)
|
||||
log.Printf("Capping Victim %s Avg. Wattage: %f", victim.Host, victim.Watts*pcp.RAPLUnits)
|
||||
logMType <- elecLogDef.GENERAL
|
||||
logMsg <- fmt.Sprintf("Capping Victim %s Avg. Wattage: %f", victim.Host, victim.Watts * pcp.RAPLUnits)
|
||||
if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
|
||||
log.Print("Error capping host")
|
||||
logMType <- elecLogDef.ERROR
|
||||
logMsg <- "Error capping host"
|
||||
}
|
||||
break // Only cap one machine at at time.
|
||||
}
|
||||
|
@ -143,8 +147,11 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
cappedHosts[host] = false
|
||||
// User RAPL package to send uncap.
|
||||
log.Printf("Uncapping host %s", host)
|
||||
logMType <- elecLogDef.GENERAL
|
||||
logMsg <- fmt.Sprintf("Uncapped host %s", host)
|
||||
if err := rapl.Cap(host, "rapl", 100); err != nil {
|
||||
log.Print("Error uncapping host")
|
||||
logMType <- elecLogDef.ERROR
|
||||
logMsg <- "Error capping host"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -154,7 +161,8 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
}
|
||||
}(logging, hiThreshold, loThreshold)
|
||||
|
||||
log.Println("PCP logging started")
|
||||
logMType <- elecLogDef.GENERAL
|
||||
logMsg <- "PCP logging started"
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Fatal(err)
|
||||
|
@ -164,7 +172,8 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
|
||||
select {
|
||||
case <-quit:
|
||||
log.Println("Stopping PCP logging in 5 seconds")
|
||||
logMType <- elecLogDef.GENERAL
|
||||
logMsg <- "Stopping PCP logging in 5 seconds"
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
|
||||
|
|
Reference in a new issue