Merged in maxMinAndTrace (pull request #4)

Bin Packed Max Min and the scheduling trace
Renan DelValle 2017-01-06 18:53:57 -05:00
commit 3acdfd9975
11 changed files with 352 additions and 26 deletions

View file

@@ -11,6 +11,7 @@ To Do:
* Write test code for each scheduler (This should be after the design change)
* Some of the constants in constants/constants.go can vary based on the environment.
Is it possible to set up the constants at runtime based on the environment?
* Retrofit schedulers for scheduling tracing
**Requires [Performance Co-Pilot](http://pcp.io/) tool pmdumptext to be installed on the

View file

@@ -8,15 +8,15 @@ import (
)
type Task struct {
Name string `json:"name"`
CPU float64 `json:"cpu"`
RAM float64 `json:"ram"`
Watts float64 `json:"watts"`
Image string `json:"image"`
CMD string `json:"cmd"`
Instances *int `json:"inst"`
Host string `json:"host"`
TaskID string `json:"taskID"`
Name string `json:"name"`
CPU float64 `json:"cpu"`
RAM float64 `json:"ram"`
Watts float64 `json:"watts"`
Image string `json:"image"`
CMD string `json:"cmd"`
Instances *int `json:"inst"`
Host string `json:"host"`
TaskID string `json:"taskID"`
ClassToWatts map[string]float64 `json:"class_to_watts"`
}
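For reference, this struct is decoded from the workload JSON handed to electron, so the new class_to_watts key maps straight onto ClassToWatts. The snippet below is a minimal sketch of that decoding; the file contents and values are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"

	"bitbucket.org/sunybingcloud/electron/def"
)

func main() {
	// Hypothetical workload entry; keys match the JSON tags on def.Task above.
	raw := []byte(`{
		"name": "minife",
		"cpu": 3.0,
		"ram": 4096,
		"watts": 63.141,
		"image": "rdelvalle/minife:electron1",
		"cmd": "mpirun -np 3 miniFE.x -nx 100 -ny 100 -nz 100",
		"inst": 10,
		"class_to_watts": {"A": 93.0, "B": 65.0, "C": 57.0}
	}`)

	var task def.Task
	if err := json.Unmarshal(raw, &task); err != nil {
		panic(err)
	}
	fmt.Println(task.Name, *task.Instances, task.ClassToWatts)
}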

View file

@@ -61,13 +61,12 @@ func StartLogAndDynamicCap(quit chan struct{}, logging *bool, prefix string, hiT
const pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c config"
cmd := exec.Command("sh", "-c", pcpCommand)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
startTime := time.Now().Format("20060102150405")
if hiThreshold < loThreshold {
log.Println("High threshold is lower than low threshold!")
}
logFile, err := os.Create("./" + prefix + startTime + ".pcplog")
logFile, err := os.Create("./" + prefix + ".pcplog")
if err != nil {
log.Fatal(err)
}

View file

@@ -13,9 +13,8 @@ func Start(quit chan struct{}, logging *bool, prefix string) {
const pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c config"
cmd := exec.Command("sh", "-c", pcpCommand)
cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
startTime := time.Now().Format("20060102150405")
logFile, err := os.Create("./" + prefix + startTime + ".pcplog")
logFile, err := os.Create("./" + prefix + ".pcplog")
if err != nil {
log.Fatal(err)
}

View file

@@ -29,7 +29,7 @@ func (tw taskWrapper) ID() string {
}
// Cluster wide capper
type ClusterwideCapper struct {}
type ClusterwideCapper struct{}
// Constructor for ClusterwideCapper. Please don't call this directly; use GetClusterwideCapperInstance() instead.
func newClusterwideCapper() *ClusterwideCapper {

View file

@@ -2,8 +2,8 @@ package main
import (
"bitbucket.org/sunybingcloud/electron/def"
"bitbucket.org/sunybingcloud/electron/schedulers"
"bitbucket.org/sunybingcloud/electron/pcp"
"bitbucket.org/sunybingcloud/electron/schedulers"
"flag"
"fmt"
"github.com/golang/protobuf/proto"
@@ -55,8 +55,10 @@ func main() {
for _, task := range tasks {
fmt.Println(task)
}
startTime := time.Now().Format("20060102150405")
logPrefix := *pcplogPrefix + "_" + startTime
scheduler := schedulers.NewPistonCapper(tasks, *ignoreWatts)
scheduler := schedulers.NewBPMaxMinWatts(tasks, *ignoreWatts, logPrefix)
driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{
Master: *master,
Framework: &mesos.FrameworkInfo{
@@ -70,9 +72,9 @@
return
}
go pcp.Start(scheduler.PCPLog, &scheduler.RecordPCP, *pcplogPrefix)
//go pcp.StartLogAndDynamicCap(scheduler.PCPLog, &scheduler.RecordPCP, *pcplogPrefix, *hiThreshold, *loThreshold)
time.Sleep(1 * time.Second)
go pcp.Start(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix)
//go pcp.StartLogAndDynamicCap(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix, *hiThreshold, *loThreshold)
time.Sleep(1 * time.Second) // Take a second between starting PCP log and continuing
// Attempt to handle SIGINT so we don't leave pmdumptext running
// Catch interrupt
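A note on the naming change above: the timestamp that used to be appended inside the pcp package is now built once in main and shared, so the PCP log and the new scheduler trace carry the same prefix. A minimal sketch of the resulting file names follows; the "electron" prefix value is only an example.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Same format string main.go uses above.
	startTime := time.Now().Format("20060102150405")
	logPrefix := "electron" + "_" + startTime

	fmt.Println(logPrefix + ".pcplog")          // created by pcp.Start
	fmt.Println(logPrefix + "_schedTrace.log")  // created by NewBPMaxMinWatts
}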

schedulers/base.go (new file, 50 lines)
View file

@@ -0,0 +1,50 @@
package schedulers
import (
mesos "github.com/mesos/mesos-go/mesosproto"
sched "github.com/mesos/mesos-go/scheduler"
"log"
)
type base struct{}
func (s *base) OfferRescinded(_ sched.SchedulerDriver, offerID *mesos.OfferID) {
log.Printf("Offer %s rescinded", offerID)
}
func (s *base) SlaveLost(_ sched.SchedulerDriver, slaveID *mesos.SlaveID) {
log.Printf("Slave %s lost", slaveID)
}
func (s *base) ExecutorLost(_ sched.SchedulerDriver,
executorID *mesos.ExecutorID,
slaveID *mesos.SlaveID, status int) {
log.Printf("Executor %s on slave %s was lost", executorID, slaveID)
}
func (s *base) Error(_ sched.SchedulerDriver, err string) {
log.Printf("Receiving an error: %s", err)
}
func (s *base) FrameworkMessage(
driver sched.SchedulerDriver,
executorID *mesos.ExecutorID,
slaveID *mesos.SlaveID,
message string) {
log.Println("Getting a framework message: ", message)
log.Printf("Received a framework message from some unknown source: %s", *executorID.Value)
}
func (s *base) Registered(
_ sched.SchedulerDriver,
frameworkID *mesos.FrameworkID,
masterInfo *mesos.MasterInfo) {
log.Printf("Framework %s registered with master %s", frameworkID, masterInfo)
}
func (s *base) Reregistered(_ sched.SchedulerDriver, masterInfo *mesos.MasterInfo) {
log.Printf("Framework re-registered with master %s", masterInfo)
}
func (s *base) Disconnected(sched.SchedulerDriver) {
log.Println("Framework disconnected with master")
}
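base exists so that concrete schedulers can embed it and pick up these default logging callbacks, overriding only what they need. The fragment below is a minimal sketch of that pattern (the myScheduler type is hypothetical), mirroring what BPMaxMinWatts does in the next file.

package schedulers

import (
	mesos "github.com/mesos/mesos-go/mesosproto"
	sched "github.com/mesos/mesos-go/scheduler"
	"log"
)

// Hypothetical scheduler: embedding base supplies Registered, Disconnected,
// Error, FrameworkMessage, etc.; only ResourceOffers and StatusUpdate, which
// base does not provide, are implemented here.
type myScheduler struct {
	base
}

func (s *myScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		log.Printf("Declining offer from %s", offer.GetHostname())
		driver.DeclineOffer(offer.Id, defaultFilter)
	}
}

func (s *myScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Printf("Task %s is in state %s", *status.TaskId.Value, NameFor(status.State))
}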

schedulers/bpMaxMin.go (new file, 275 lines)
View file

@@ -0,0 +1,275 @@
package schedulers
import (
"bitbucket.org/sunybingcloud/electron/def"
"fmt"
"github.com/golang/protobuf/proto"
mesos "github.com/mesos/mesos-go/mesosproto"
"github.com/mesos/mesos-go/mesosutil"
sched "github.com/mesos/mesos-go/scheduler"
"log"
"os"
"sort"
"strings"
"time"
)
// Decides whether or not to take an offer
func (*BPMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
cpus, mem, watts := OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
return true
}
return false
}
type BPMaxMinWatts struct {
base //Type embedding to inherit common functions
tasksCreated int
tasksRunning int
tasks []def.Task
metrics map[string]def.Metric
running map[string]map[string]bool
ignoreWatts bool
// The first set of PCP values are garbage; signal the logger to start recording when we're
// about to schedule a new task
RecordPCP bool
// This channel is closed when the program receives an interrupt,
// signalling that the program should shut down.
Shutdown chan struct{}
// This channel is closed after shutdown is closed, and only when all
// outstanding tasks have been cleaned up
Done chan struct{}
// Controls when to shutdown pcp logging
PCPLog chan struct{}
schedTrace *log.Logger
}
// New electron scheduler
func NewBPMaxMinWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BPMaxMinWatts {
sort.Sort(def.WattsSorter(tasks))
logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
if err != nil {
log.Fatal(err)
}
s := &BPMaxMinWatts{
tasks: tasks,
ignoreWatts: ignoreWatts,
Shutdown: make(chan struct{}),
Done: make(chan struct{}),
PCPLog: make(chan struct{}),
running: make(map[string]map[string]bool),
RecordPCP: false,
schedTrace: log.New(logFile, "", log.LstdFlags),
}
return s
}
func (s *BPMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
s.tasksCreated++
// Start recording only when we're creating the first task
if !s.RecordPCP {
// Turn on logging
s.RecordPCP = true
time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
}
// If this is our first time running into this Agent
if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
}
// Add task to list of tasks running on node
s.running[offer.GetSlaveId().GoString()][taskName] = true
resources := []*mesos.Resource{
mesosutil.NewScalarResource("cpus", task.CPU),
mesosutil.NewScalarResource("mem", task.RAM),
}
if !s.ignoreWatts {
resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
}
return &mesos.TaskInfo{
Name: proto.String(taskName),
TaskId: &mesos.TaskID{
Value: proto.String("electron-" + taskName),
},
SlaveId: offer.SlaveId,
Resources: resources,
Command: &mesos.CommandInfo{
Value: proto.String(task.CMD),
},
Container: &mesos.ContainerInfo{
Type: mesos.ContainerInfo_DOCKER.Enum(),
Docker: &mesos.ContainerInfo_DockerInfo{
Image: proto.String(task.Image),
Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
},
},
}
}
// Determine if the remaining space inside of the offer is enough for the
// task we need to create. If it is, create a TaskInfo and return it.
func (s *BPMaxMinWatts) CheckFit(i int,
task def.Task,
offer *mesos.Offer,
totalCPU *float64,
totalRAM *float64,
totalWatts *float64) (bool, *mesos.TaskInfo) {
offerCPU, offerRAM, offerWatts := OfferAgg(offer)
// Does the task fit
if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) &&
(offerCPU >= (*totalCPU + task.CPU)) &&
(offerRAM >= (*totalRAM + task.RAM)) {
*totalWatts += task.Watts
*totalCPU += task.CPU
*totalRAM += task.RAM
log.Println("Co-Located with: ")
coLocated(s.running[offer.GetSlaveId().GoString()])
taskToSchedule := s.newTask(offer, task)
fmt.Println("Inst: ", *task.Instances)
s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
*task.Instances--
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
if len(s.tasks) <= 0 {
log.Println("Done scheduling all tasks")
close(s.Shutdown)
}
}
return true, taskToSchedule
}
return false, nil
}
func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
log.Printf("Received %d resource offers", len(offers))
for _, offer := range offers {
select {
case <-s.Shutdown:
log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
driver.DeclineOffer(offer.Id, longFilter)
log.Println("Number of tasks still running: ", s.tasksRunning)
continue
default:
}
tasks := []*mesos.TaskInfo{}
offerTaken := false
totalWatts := 0.0
totalCPU := 0.0
totalRAM := 0.0
// Assumes s.tasks is ordered in non-decreasing order of watts (median of max peak).
// Attempt to schedule a single instance of the heaviest workload available first.
// Start from the back until one fits.
for i := len(s.tasks) - 1; i >= 0; i-- {
task := s.tasks[i]
// Check host if it exists
if task.Host != "" {
// Don't take offer if it doesn't match our task's host requirement
if !strings.HasPrefix(*offer.Hostname, task.Host) {
continue
}
}
// TODO: Fix this so index doesn't need to be passed
taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts)
if taken {
offerTaken = true
tasks = append(tasks, taskToSchedule)
break
}
}
// Pack the rest of the offer with the smallest tasks
for i, task := range s.tasks {
// Check host if it exists
if task.Host != "" {
// Don't take offer if it doesn't match our task's host requirement
if !strings.HasPrefix(*offer.Hostname, task.Host) {
continue
}
}
for *task.Instances > 0 {
// TODO: Fix this so index doesn't need to be passed
taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts)
if taken {
offerTaken = true
tasks = append(tasks, taskToSchedule)
} else {
break // Continue on to next task
}
}
}
if offerTaken {
log.Printf("Starting on [%s]\n", offer.GetHostname())
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter)
} else {
// If there was no match for the task
fmt.Println("There is not enough resources to launch a task:")
cpus, mem, watts := OfferAgg(offer)
log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
driver.DeclineOffer(offer.Id, defaultFilter)
}
}
}
func (s *BPMaxMinWatts) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
if *status.State == mesos.TaskState_TASK_RUNNING {
s.tasksRunning++
} else if IsTerminal(status.State) {
delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
s.tasksRunning--
if s.tasksRunning == 0 {
select {
case <-s.Shutdown:
close(s.Done)
default:
}
}
}
log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
}
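The offer loop above is the bin-packed max-min heuristic: with s.tasks sorted by watts, one instance of the heaviest still-pending task is placed first, and the rest of the offer is then filled greedily starting from the lightest tasks. The standalone sketch below illustrates the same ordering on plain watts values; the numbers and the 120 W capacity are made up.

package main

import "fmt"

func main() {
	// Watts estimates sorted non-decreasing, like s.tasks after def.WattsSorter.
	watts := []float64{20, 35, 50, 75, 90}
	capacity := 120.0
	used := 0.0
	var packed []float64

	// Max step: walk from the back and place one instance of the heaviest task that fits.
	for i := len(watts) - 1; i >= 0; i-- {
		if used+watts[i] <= capacity {
			used += watts[i]
			packed = append(packed, watts[i])
			break
		}
	}

	// Min step: pack the remaining capacity with the smallest tasks first.
	for _, w := range watts {
		if used+w <= capacity {
			used += w
			packed = append(packed, w)
		}
	}

	fmt.Println("packed:", packed, "used:", used) // packed: [90 20] used: 110
}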

View file

@@ -170,7 +170,7 @@ func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers
// Does the task fit
// Lazy OR evaluation: if ignoreWatts is set to true, the second operand won't
// be evaluated.
if (s.ignoreWatts || (offerWatts >= (totalWatts+task.ClassToWatts[nodeClass]))) &&
if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[nodeClass]))) &&
(offerCPU >= (totalCPU + task.CPU)) &&
(offerRAM >= (totalRAM + task.RAM)) {

View file

@@ -264,7 +264,7 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri
}
// sorting the tasks in ascending order of watts.
if (len(s.tasks) > 0) {
if len(s.tasks) > 0 {
sort.Sort(def.WattsSorter(s.tasks))
// calculating the total number of tasks ranked.
numberOfRankedTasks := 0

View file

@@ -7,8 +7,8 @@ One should implement Val() to be able to use this utility.
package runAvg
import (
"errors"
"container/list"
"errors"
)
type Interface interface {
@ -19,7 +19,7 @@ type Interface interface {
}
type runningAverageCalculator struct {
window list.List
window list.List
windowSize int
currentSum float64
}
@@ -30,7 +30,7 @@ var racSingleton *runningAverageCalculator
// return single instance
func getInstance(curSum float64, wSize int) *runningAverageCalculator {
if racSingleton == nil {
racSingleton = &runningAverageCalculator {
racSingleton = &runningAverageCalculator{
windowSize: wSize,
currentSum: curSum,
}
@@ -55,7 +55,7 @@ func (rac *runningAverageCalculator) calculate(data Interface) float64 {
elementToRemove := rac.window.Front()
rac.currentSum -= elementToRemove.Value.(Interface).Val()
rac.window.Remove(elementToRemove)
// adding new element to the window
rac.window.PushBack(data)
rac.currentSum += data.Val()
@@ -105,4 +105,4 @@ func Init() {
racSingleton.window.Init()
racSingleton.windowSize = 0
racSingleton.currentSum = 0.0
}
}
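Since the comment at the top of runAvg says callers should implement Val(), here is a minimal sketch of a type that satisfies that requirement. The wattsSample type and its values are hypothetical; judging from taskWrapper earlier in this diff, an ID() string method is expected alongside Val().

package main

import "fmt"

// Hypothetical sample type for the running-average utility: Val() returns the
// value to average, and ID() identifies the element inside the window.
type wattsSample struct {
	id    string
	watts float64
}

func (w wattsSample) Val() float64 { return w.watts }
func (w wattsSample) ID() string   { return w.id }

func main() {
	s := wattsSample{id: "electron-minife-7", watts: 93.0}
	fmt.Println(s.ID(), s.Val())
}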