Added sorting based on watts value. Bin packing based on watts is almost complete.

Renan DelValle 2016-10-15 21:24:14 -04:00
parent 97002ea353
commit 617fb8ac0f
3 changed files with 287 additions and 25 deletions

View file

@@ -33,3 +33,17 @@ func TasksFromJSON(uri string) ([]Task, error) {
    return tasks, nil
}

type WattsSorter []Task

func (slice WattsSorter) Len() int {
    return len(slice)
}

func (slice WattsSorter) Less(i, j int) bool {
    return slice[i].Watts < slice[j].Watts
}

func (slice WattsSorter) Swap(i, j int) {
    slice[i], slice[j] = slice[j], slice[i]
}
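
The WattsSorter added above satisfies sort.Interface, so a slice of tasks can be ordered by watts requirement with a single sort.Sort call. A minimal self-contained sketch of the same pattern (the Task type here is an illustrative stand-in, not the repo's def.Task):

package main

import (
    "fmt"
    "sort"
)

// Task is a stand-in carrying only the field WattsSorter compares.
type Task struct {
    Name  string
    Watts float64
}

// WattsSorter orders tasks by ascending watts requirement.
type WattsSorter []Task

func (slice WattsSorter) Len() int           { return len(slice) }
func (slice WattsSorter) Less(i, j int) bool { return slice[i].Watts < slice[j].Watts }
func (slice WattsSorter) Swap(i, j int)      { slice[i], slice[j] = slice[j], slice[i] }

func main() {
    tasks := []Task{{"train", 310.2}, {"index", 35.0}, {"render", 120.5}}
    sort.Sort(WattsSorter(tasks))
    fmt.Println(tasks) // [{index 35} {render 120.5} {train 310.2}]
}

Sorting ascending means the bin-packing pass below tries the smallest-watts tasks first, which tends to fill an offer's watts budget more completely.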

View file

@@ -8,6 +8,7 @@ import (
    "github.com/mesos/mesos-go/mesosutil"
    sched "github.com/mesos/mesos-go/scheduler"
    "log"
    "sort"
    "strings"
    "time"
)
@@ -51,6 +52,7 @@ type BinPackWatts struct {
// New electron scheduler
func NewBinPackWatts(tasks []def.Task, ignoreWatts bool) *BinPackWatts {
    sort.Sort(def.WattsSorter(tasks))

    s := &BinPackWatts{
        tasks:       tasks,
@@ -142,9 +144,10 @@ func (s *BinPackWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
        tasks := []*mesos.TaskInfo{}

        // First fit strategy
        _, _, offer_watts := OfferAgg(offer)

        taken := false
        totalWatts := 0.0
        for i, task := range s.tasks {
            // Check host if it exists
@@ -155,45 +158,46 @@ func (s *BinPackWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
                }
            }

            for *task.Instances > 0 {
                // Does the task fit
                if offer_watts >= (totalWatts + task.Watts) {

                    taken = true
                    totalWatts += task.Watts

                    log.Println("Co-Located with: ")
                    coLocated(s.running[offer.GetSlaveId().GoString()])

                    tasks = append(tasks, s.newTask(offer, task))

                    fmt.Println("Inst: ", *task.Instances)
                    *task.Instances--

                    if *task.Instances <= 0 {
                        // All instances of task have been scheduled, remove it
                        s.tasks[i] = s.tasks[len(s.tasks)-1]
                        s.tasks = s.tasks[:len(s.tasks)-1]

                        if len(s.tasks) <= 0 {
                            log.Println("Done scheduling all tasks")
                            close(s.Shutdown)
                        }
                    }
                } else {
                    break // Continue on to next offer
                }
            }
        }

        if taken {
            log.Printf("Starting on [%s]\n", offer.GetHostname())
            driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter)
        } else {
            // If there was no match for the task
            fmt.Println("There is not enough resources to launch a task:")
            cpus, mem, watts := OfferAgg(offer)

            log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
            driver.DeclineOffer(offer.Id, defaultFilter)
        }
    }
}
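
Stripped of the Mesos plumbing, the new loop above is a greedy bin pack over a single offer: keep a running totalWatts and keep packing instances while the next one still fits under the offer's watts. A self-contained sketch of just that decision (packOffer and this Task type are hypothetical names, not part of the repo):

package main

import "fmt"

// Task is an illustrative stand-in for def.Task.
type Task struct {
    Name      string
    Watts     float64
    Instances int
}

// packOffer greedily packs task instances into one offer's watts budget,
// mirroring the inner loop of BinPackWatts.ResourceOffers.
func packOffer(offerWatts float64, tasks []Task) []string {
    packed := []string{}
    totalWatts := 0.0
    for i := range tasks {
        for tasks[i].Instances > 0 {
            // Does the next instance fit in the remaining watts?
            if offerWatts >= totalWatts+tasks[i].Watts {
                totalWatts += tasks[i].Watts
                tasks[i].Instances--
                packed = append(packed, tasks[i].Name)
            } else {
                break // this task no longer fits; try the next one
            }
        }
    }
    return packed
}

func main() {
    tasks := []Task{{"small", 40, 3}, {"large", 150, 2}}
    fmt.Println(packOffer(300, tasks)) // [small small small large]
}

Unlike the first-fit schedulers, which launch one task per offer and move on, this version accumulates a batch of TaskInfos and launches them with a single LaunchTasks call once the offer is full.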

View file

@@ -0,0 +1,244 @@
package schedulers

import (
    "bitbucket.org/bingcloud/electron/def"
    "fmt"
    "github.com/golang/protobuf/proto"
    mesos "github.com/mesos/mesos-go/mesosproto"
    "github.com/mesos/mesos-go/mesosutil"
    sched "github.com/mesos/mesos-go/scheduler"
    "log"
    "sort"
    "strings"
    "time"
)
// Decides whether to take an offer or not
func (*FirstFitSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
    cpus, mem, watts := OfferAgg(offer)

    //TODO: Insert watts calculation here instead of taking them as a parameter
    if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
        return true
    }

    return false
}
// electronScheduler implements the Scheduler interface
type FirstFitSortedWatts struct {
    tasksCreated int
    tasksRunning int
    tasks        []def.Task
    metrics      map[string]def.Metric
    running      map[string]map[string]bool
    ignoreWatts  bool

    // First set of PCP values are garbage values, signal to logger to start recording when we're
    // about to schedule a new task
    RecordPCP bool

    // This channel is closed when the program receives an interrupt,
    // signalling that the program should shut down.
    Shutdown chan struct{}

    // This channel is closed after shutdown is closed, and only when all
    // outstanding tasks have been cleaned up
    Done chan struct{}

    // Controls when to shutdown pcp logging
    PCPLog chan struct{}
}
// New electron scheduler
func NewFirstFitSortedWatts(tasks []def.Task, ignoreWatts bool) *FirstFitSortedWatts {
    sort.Sort(def.WattsSorter(tasks))

    s := &FirstFitSortedWatts{
        tasks:       tasks,
        ignoreWatts: ignoreWatts,
        Shutdown:    make(chan struct{}),
        Done:        make(chan struct{}),
        PCPLog:      make(chan struct{}),
        running:     make(map[string]map[string]bool),
        RecordPCP:   false,
    }
    return s
}
func (s *FirstFitSortedWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
    taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
    s.tasksCreated++

    if !s.RecordPCP {
        // Turn on logging
        s.RecordPCP = true
        time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
    }

    // If this is our first time running into this Agent
    if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
        s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
    }

    // Add task to list of tasks running on node
    s.running[offer.GetSlaveId().GoString()][taskName] = true

    resources := []*mesos.Resource{
        mesosutil.NewScalarResource("cpus", task.CPU),
        mesosutil.NewScalarResource("mem", task.RAM),
    }

    if !s.ignoreWatts {
        resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
    }

    return &mesos.TaskInfo{
        Name: proto.String(taskName),
        TaskId: &mesos.TaskID{
            Value: proto.String("electron-" + taskName),
        },
        SlaveId:   offer.SlaveId,
        Resources: resources,
        Command: &mesos.CommandInfo{
            Value: proto.String(task.CMD),
        },
        Container: &mesos.ContainerInfo{
            Type: mesos.ContainerInfo_DOCKER.Enum(),
            Docker: &mesos.ContainerInfo_DockerInfo{
                Image:   proto.String(task.Image),
                Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
            },
        },
    }
}
func (s *FirstFitSortedWatts) Registered(
    _ sched.SchedulerDriver,
    frameworkID *mesos.FrameworkID,
    masterInfo *mesos.MasterInfo) {
    log.Printf("Framework %s registered with master %s", frameworkID, masterInfo)
}

func (s *FirstFitSortedWatts) Reregistered(_ sched.SchedulerDriver, masterInfo *mesos.MasterInfo) {
    log.Printf("Framework re-registered with master %s", masterInfo)
}

func (s *FirstFitSortedWatts) Disconnected(sched.SchedulerDriver) {
    log.Println("Framework disconnected with master")
}
func (s *FirstFitSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
    log.Printf("Received %d resource offers", len(offers))

    for _, offer := range offers {
        select {
        case <-s.Shutdown:
            log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
            driver.DeclineOffer(offer.Id, longFilter)

            log.Println("Number of tasks still running: ", s.tasksRunning)
            continue
        default:
        }

        tasks := []*mesos.TaskInfo{}

        // First fit strategy
        taken := false
        for i, task := range s.tasks {

            // Check host if it exists
            if task.Host != "" {
                // Don't take offer if it doesn't match our task's host requirement
                if !strings.HasPrefix(*offer.Hostname, task.Host) {
                    continue
                }
            }

            // Decision to take the offer or not
            if s.takeOffer(offer, task) {

                log.Println("Co-Located with: ")
                coLocated(s.running[offer.GetSlaveId().GoString()])

                tasks = append(tasks, s.newTask(offer, task))

                log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname())
                driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter)

                taken = true

                fmt.Println("Inst: ", *task.Instances)
                *task.Instances--

                if *task.Instances <= 0 {
                    // All instances of task have been scheduled, remove it
                    s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)

                    if len(s.tasks) <= 0 {
                        log.Println("Done scheduling all tasks")
                        close(s.Shutdown)
                    }
                }
                break // Offer taken, move on
            }
        }

        // If there was no match for the task
        if !taken {
            fmt.Println("There is not enough resources to launch a task:")
            cpus, mem, watts := OfferAgg(offer)

            log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
            driver.DeclineOffer(offer.Id, defaultFilter)
        }
    }
}
func (s *FirstFitSortedWatts) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
    log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)

    if *status.State == mesos.TaskState_TASK_RUNNING {
        s.tasksRunning++
    } else if IsTerminal(status.State) {
        delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
        s.tasksRunning--
        if s.tasksRunning == 0 {
            select {
            case <-s.Shutdown:
                close(s.Done)
            default:
            }
        }
    }
    log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
}
func (s *FirstFitSortedWatts) FrameworkMessage(
    driver sched.SchedulerDriver,
    executorID *mesos.ExecutorID,
    slaveID *mesos.SlaveID,
    message string) {

    log.Println("Getting a framework message: ", message)
    log.Printf("Received a framework message from some unknown source: %s", *executorID.Value)
}

func (s *FirstFitSortedWatts) OfferRescinded(_ sched.SchedulerDriver, offerID *mesos.OfferID) {
    log.Printf("Offer %s rescinded", offerID)
}

func (s *FirstFitSortedWatts) SlaveLost(_ sched.SchedulerDriver, slaveID *mesos.SlaveID) {
    log.Printf("Slave %s lost", slaveID)
}

func (s *FirstFitSortedWatts) ExecutorLost(_ sched.SchedulerDriver, executorID *mesos.ExecutorID, slaveID *mesos.SlaveID, status int) {
    log.Printf("Executor %s on slave %s was lost", executorID, slaveID)
}

func (s *FirstFitSortedWatts) Error(_ sched.SchedulerDriver, err string) {
    log.Printf("Receiving an error: %s", err)
}