package schedulers

import (
	"fmt"
	"log"
	"strings"
	"time"

	"bitbucket.org/bingcloud/electron/def"
	"github.com/golang/protobuf/proto"
	mesos "github.com/mesos/mesos-go/mesosproto"
	"github.com/mesos/mesos-go/mesosutil"
	sched "github.com/mesos/mesos-go/scheduler"
)

// Decides whether to take an offer or not.
func (*BinPackWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {

	cpus, mem, watts := OfferAgg(offer)

	//TODO: Insert watts calculation here instead of taking them as a parameter

	return cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts
}

type BinPackWatts struct {
	tasksCreated int
	tasksRunning int
	tasks        []def.Task
	metrics      map[string]def.Metric
	running      map[string]map[string]bool
	ignoreWatts  bool

	// The first set of PCP values are garbage values; this flag signals the
	// logger to start recording when we're about to schedule a new task.
	RecordPCP bool

	// This channel is closed when the program receives an interrupt,
	// signalling that the program should shut down.
	Shutdown chan struct{}

	// This channel is closed after Shutdown is closed, and only when all
	// outstanding tasks have been cleaned up.
	Done chan struct{}

	// Controls when to shut down PCP logging.
	PCPLog chan struct{}
}
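
// Illustration (an assumption, not in the original source): the running map is
// keyed by the agent ID's string form and then by task name, e.g.
//
//	s.running[agentID]["some-task-5"] = true // "some-task-5" is a hypothetical task name
//
// coLocated() walks the inner map to log which tasks share a node.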

// NewBinPackWatts creates a new electron scheduler.
func NewBinPackWatts(tasks []def.Task, ignoreWatts bool) *BinPackWatts {

	s := &BinPackWatts{
		tasks:       tasks,
		ignoreWatts: ignoreWatts,
		Shutdown:    make(chan struct{}),
		Done:        make(chan struct{}),
		PCPLog:      make(chan struct{}),
		running:     make(map[string]map[string]bool),
		RecordPCP:   false,
	}
	return s
}
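
// A minimal usage sketch (assumptions, not taken from this file) of wiring the
// scheduler into a mesos-go driver; the master address and framework name are
// hypothetical placeholders:
//
//	scheduler := NewBinPackWatts(tasks, ignoreWatts)
//	driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{
//		Master: "localhost:5050", // hypothetical master address
//		Framework: &mesos.FrameworkInfo{
//			Name: proto.String("Electron"),
//			User: proto.String(""),
//		},
//		Scheduler: scheduler,
//	})
//	if err != nil {
//		log.Fatalf("Unable to create scheduler driver: %s", err)
//	}
//	driver.Run()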

func (s *BinPackWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
	s.tasksCreated++

	if !s.RecordPCP {
		// Turn on logging
		s.RecordPCP = true
		time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
	}

	// If this is our first time running into this Agent
	if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
		s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
	}

	// Add task to list of tasks running on node
	s.running[offer.GetSlaveId().GoString()][taskName] = true

	resources := []*mesos.Resource{
		mesosutil.NewScalarResource("cpus", task.CPU),
		mesosutil.NewScalarResource("mem", task.RAM),
	}

	if !s.ignoreWatts {
		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
	}

	return &mesos.TaskInfo{
		Name: proto.String(taskName),
		TaskId: &mesos.TaskID{
			Value: proto.String("electron-" + taskName),
		},
		SlaveId:   offer.SlaveId,
		Resources: resources,
		Command: &mesos.CommandInfo{
			Value: proto.String(task.CMD),
		},
		Container: &mesos.ContainerInfo{
			Type: mesos.ContainerInfo_DOCKER.Enum(),
			Docker: &mesos.ContainerInfo_DockerInfo{
				Image:   proto.String(task.Image),
				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
			},
		},
	}
}

func (s *BinPackWatts) Registered(
	_ sched.SchedulerDriver,
	frameworkID *mesos.FrameworkID,
	masterInfo *mesos.MasterInfo) {
	log.Printf("Framework %s registered with master %s", frameworkID, masterInfo)
}

func (s *BinPackWatts) Reregistered(_ sched.SchedulerDriver, masterInfo *mesos.MasterInfo) {
	log.Printf("Framework re-registered with master %s", masterInfo)
}

func (s *BinPackWatts) Disconnected(sched.SchedulerDriver) {
	log.Println("Framework disconnected from master")
}

func (s *BinPackWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	log.Printf("Received %d resource offers", len(offers))

	for _, offer := range offers {
		select {
		case <-s.Shutdown:
			log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
			driver.DeclineOffer(offer.Id, longFilter)

			log.Println("Number of tasks still running: ", s.tasksRunning)
			continue
		default:
		}

		tasks := []*mesos.TaskInfo{}

		// First fit strategy
		taken := false
		for i, task := range s.tasks {

			// Check host if it exists
			if task.Host != "" {
				// Don't take the offer if it doesn't match our task's host requirement
				if !strings.HasPrefix(*offer.Hostname, task.Host) {
					continue
				}
			}

			// Decide whether to take the offer or not
			if s.takeOffer(offer, task) {

				log.Println("Co-Located with: ")
				coLocated(s.running[offer.GetSlaveId().GoString()])

				tasks = append(tasks, s.newTask(offer, task))

				log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname())
				driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter)

				taken = true

				fmt.Println("Inst: ", *task.Instances)
				*task.Instances--

				if *task.Instances <= 0 {
					// All instances of the task have been scheduled; remove it
					s.tasks[i] = s.tasks[len(s.tasks)-1]
					s.tasks = s.tasks[:len(s.tasks)-1]

					if len(s.tasks) <= 0 {
						log.Println("Done scheduling all tasks")
						close(s.Shutdown)
					}
				}
				break // Offer taken, move on
			}
		}

		// If there was no match for the task
		if !taken {
			fmt.Println("There are not enough resources to launch a task:")
			cpus, mem, watts := OfferAgg(offer)

			log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
			driver.DeclineOffer(offer.Id, defaultFilter)
		}
	}
}

func (s *BinPackWatts) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)

	if *status.State == mesos.TaskState_TASK_RUNNING {
		s.tasksRunning++
	} else if IsTerminal(status.State) {
		delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
		s.tasksRunning--
		if s.tasksRunning == 0 {
			select {
			case <-s.Shutdown:
				close(s.Done)
			default:
			}
		}
	}
	log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
}

func (s *BinPackWatts) FrameworkMessage(
	driver sched.SchedulerDriver,
	executorID *mesos.ExecutorID,
	slaveID *mesos.SlaveID,
	message string) {

	log.Println("Getting a framework message: ", message)
	log.Printf("Received a framework message from some unknown source: %s", *executorID.Value)
}

func (s *BinPackWatts) OfferRescinded(_ sched.SchedulerDriver, offerID *mesos.OfferID) {
	log.Printf("Offer %s rescinded", offerID)
}

func (s *BinPackWatts) SlaveLost(_ sched.SchedulerDriver, slaveID *mesos.SlaveID) {
	log.Printf("Slave %s lost", slaveID)
}

func (s *BinPackWatts) ExecutorLost(_ sched.SchedulerDriver, executorID *mesos.ExecutorID, slaveID *mesos.SlaveID, status int) {
	log.Printf("Executor %s on slave %s was lost", executorID, slaveID)
}

func (s *BinPackWatts) Error(_ sched.SchedulerDriver, err string) {
	log.Printf("Received an error: %s", err)
}
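
// A sketch (an assumption about the surrounding program, not part of this file)
// of the shutdown protocol the channels above imply: an interrupt handler closes
// Shutdown, StatusUpdate closes Done once the last task terminates, and the
// caller then stops PCP logging and the driver. Note that ResourceOffers also
// closes Shutdown once every task has been scheduled, so a real caller would
// have to guard against closing it twice.
//
//	go func() {
//		sig := make(chan os.Signal, 1)
//		signal.Notify(sig, os.Interrupt)
//		<-sig
//		close(scheduler.Shutdown) // stop accepting offers
//	}()
//
//	<-scheduler.Done        // closed by StatusUpdate when tasksRunning reaches zero
//	close(scheduler.PCPLog) // tell the PCP logger to stop
//	driver.Stop(false)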