package schedulers

import (
	"bitbucket.org/sunybingcloud/electron/def"
	"fmt"
	"github.com/golang/protobuf/proto"
	mesos "github.com/mesos/mesos-go/mesosproto"
	"github.com/mesos/mesos-go/mesosutil"
	sched "github.com/mesos/mesos-go/scheduler"
	"log"
	"os"
	"sort"
	"strings"
	"time"
)

// Decides whether to take an offer or not.
func (*BPMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
	cpus, mem, watts := OfferAgg(offer)

	//TODO: Insert watts calculation here instead of taking them as a parameter

	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
		return true
	}

	return false
}
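
// BPMaxMinWatts is an Electron scheduler that bin-packs tasks into offers
// using a Max-Min heuristic: it first places a single instance of the
// heaviest remaining workload, then fills the rest of the offer with the
// lightest workloads that still fit (see ResourceOffers).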
type BPMaxMinWatts struct {
	base         // Type embedding to inherit common functions
	tasksCreated int
	tasksRunning int
	tasks        []def.Task
	metrics      map[string]def.Metric
	running      map[string]map[string]bool
	ignoreWatts  bool

	// The first set of PCP values are garbage values, so we signal the logger
	// to start recording only when we're about to schedule a new task.
	RecordPCP bool

	// This channel is closed when the program receives an interrupt,
	// signalling that the program should shut down.
	Shutdown chan struct{}

	// This channel is closed after Shutdown is closed, and only when all
	// outstanding tasks have been cleaned up.
	Done chan struct{}

	// Controls when to shut down PCP logging.
	PCPLog chan struct{}

	schedTrace *log.Logger
}

// NewBPMaxMinWatts creates a new Electron scheduler, with tasks sorted in
// non-decreasing order of their watts requirement.
func NewBPMaxMinWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BPMaxMinWatts {
	sort.Sort(def.WattsSorter(tasks))

	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
	if err != nil {
		log.Fatal(err)
	}

	s := &BPMaxMinWatts{
		tasks:       tasks,
		ignoreWatts: ignoreWatts,
		Shutdown:    make(chan struct{}),
		Done:        make(chan struct{}),
		PCPLog:      make(chan struct{}),
		running:     make(map[string]map[string]bool),
		RecordPCP:   false,
		schedTrace:  log.New(logFile, "", log.LstdFlags),
	}
	return s
}
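
// A minimal usage sketch, assuming the usual mesos-go driver wiring; the
// schedTrace prefix is arbitrary, and other DriverConfig fields (Framework,
// Master, etc.) are omitted here:
//
//	s := NewBPMaxMinWatts(tasks, false, "bpMaxMinWatts")
//	driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{
//		Scheduler: s,
//	})
//	if err == nil {
//		driver.Run()
//	}

// newTask builds the mesos.TaskInfo for one instance of the given task,
// records it as running on the offer's agent, and requests watts as a
// scalar resource unless ignoreWatts is set.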
func (s *BPMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
	s.tasksCreated++

	// Start recording only when we're creating the first task
	if !s.RecordPCP {
		// Turn on logging
		s.RecordPCP = true
		time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
	}

	// If this is our first time running into this Agent
	if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
		s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
	}

	// Add task to list of tasks running on node
	s.running[offer.GetSlaveId().GoString()][taskName] = true

	resources := []*mesos.Resource{
		mesosutil.NewScalarResource("cpus", task.CPU),
		mesosutil.NewScalarResource("mem", task.RAM),
	}

	if !s.ignoreWatts {
		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
	}

	return &mesos.TaskInfo{
		Name: proto.String(taskName),
		TaskId: &mesos.TaskID{
			Value: proto.String("electron-" + taskName),
		},
		SlaveId:   offer.SlaveId,
		Resources: resources,
		Command: &mesos.CommandInfo{
			Value: proto.String(task.CMD),
		},
		Container: &mesos.ContainerInfo{
			Type: mesos.ContainerInfo_DOCKER.Enum(),
			Docker: &mesos.ContainerInfo_DockerInfo{
				Image:   proto.String(task.Image),
				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
			},
		},
	}
}

// CheckFit determines whether the remaining space inside the offer is enough
// for the task we need to create. If it is, it creates a TaskInfo and returns
// it. The running totals are updated in place, and a task whose last instance
// has been scheduled is removed from s.tasks.
func (s *BPMaxMinWatts) CheckFit(i int,
	task def.Task,
	offer *mesos.Offer,
	totalCPU *float64,
	totalRAM *float64,
	totalWatts *float64) (bool, *mesos.TaskInfo) {

	offerCPU, offerRAM, offerWatts := OfferAgg(offer)

	// Does the task fit?
	if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) &&
		(offerCPU >= (*totalCPU + task.CPU)) &&
		(offerRAM >= (*totalRAM + task.RAM)) {

		*totalWatts += task.Watts
		*totalCPU += task.CPU
		*totalRAM += task.RAM
		log.Println("Co-Located with: ")
		coLocated(s.running[offer.GetSlaveId().GoString()])

		taskToSchedule := s.newTask(offer, task)

		fmt.Println("Inst: ", *task.Instances)
		s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
		*task.Instances--

		if *task.Instances <= 0 {
			// All instances of task have been scheduled, remove it
			s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)

			if len(s.tasks) <= 0 {
				log.Println("Done scheduling all tasks")
				close(s.Shutdown)
			}
		}

		return true, taskToSchedule
	}

	return false, nil
}
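
// ResourceOffers handles incoming resource offers in two passes: it first
// tries to place a single instance of the heaviest still-unscheduled
// workload, then packs the remaining space with as many of the lighter
// workloads as will fit, declining the offer if nothing fits.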
func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	log.Printf("Received %d resource offers", len(offers))

	for _, offer := range offers {
		select {
		case <-s.Shutdown:
			log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
			driver.DeclineOffer(offer.Id, longFilter)

			log.Println("Number of tasks still running: ", s.tasksRunning)
			continue
		default:
		}

		tasks := []*mesos.TaskInfo{}

		offerTaken := false
		totalWatts := 0.0
		totalCPU := 0.0
		totalRAM := 0.0

		// Assumes s.tasks is sorted in non-decreasing order of the tasks' watts
		// requirement (the median of max power peaks); see NewBPMaxMinWatts.
		// Attempt to schedule a single instance of the heaviest workload available first:
		// start from the back of the list until one fits.
		for i := len(s.tasks) - 1; i >= 0; i-- {
			task := s.tasks[i]

			// Check host if it exists
			if task.Host != "" {
				// Don't take offer if it doesn't match our task's host requirement
				if !strings.HasPrefix(*offer.Hostname, task.Host) {
					continue
				}
			}

			// TODO: Fix this so index doesn't need to be passed
			taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts)

			if taken {
				offerTaken = true
				tasks = append(tasks, taskToSchedule)
				break
			}
		}

		// Pack the rest of the offer with the smallest tasks
		for i, task := range s.tasks {
			// Check host if it exists
			if task.Host != "" {
				// Don't take offer if it doesn't match our task's host requirement
				if !strings.HasPrefix(*offer.Hostname, task.Host) {
					continue
				}
			}

			for *task.Instances > 0 {
				// TODO: Fix this so index doesn't need to be passed
				taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts)

				if taken {
					offerTaken = true
					tasks = append(tasks, taskToSchedule)
				} else {
					break // Doesn't fit; continue on to the next task
				}
			}
		}

		if offerTaken {
			log.Printf("Starting on [%s]\n", offer.GetHostname())
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter)
		} else {
			// If there was no match for the task
			fmt.Println("There are not enough resources to launch a task:")
			cpus, mem, watts := OfferAgg(offer)

			log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
			driver.DeclineOffer(offer.Id, defaultFilter)
		}
	}
}
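
// StatusUpdate tracks how many tasks are currently running and, once Shutdown
// has been closed and the last task reaches a terminal state, closes Done to
// signal that cleanup is complete.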
func (s *BPMaxMinWatts) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)

	if *status.State == mesos.TaskState_TASK_RUNNING {
		s.tasksRunning++
	} else if IsTerminal(status.State) {
		delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
		s.tasksRunning--
		if s.tasksRunning == 0 {
			select {
			case <-s.Shutdown:
				close(s.Done)
			default:
			}
		}
	}
	log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
}