Fix: mesos-go imports

Author: Akash Kothawale, 2018-01-19 17:46:35 -05:00 (committed by Pradyumna Kaushik)
commit 3b80af6d8f (parent 065705d480)
20 changed files with 1088 additions and 23 deletions
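The mesos-go change is mechanical: the library moved its original protobuf-based (v0) bindings under an api/v0/ subtree, so every import of mesosproto, mesosutil, and scheduler gains that prefix while the package names and aliases stay the same. A minimal, self-contained sketch (not part of this commit) showing the relocated import paths in use:

package main

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
	"github.com/mesos/mesos-go/api/v0/mesosutil"
)

func main() {
	// Build a scalar resource through the relocated mesosutil package,
	// just to confirm the api/v0 packages resolve and interoperate.
	cpu := mesosutil.NewScalarResource("cpus", 1.0)
	var _ *mesos.Resource = cpu
	fmt.Println(cpu.GetName(), cpu.GetScalar().GetValue())
}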


@@ -4,7 +4,7 @@ import (
"bitbucket.org/sunybingcloud/elektron/constants"
"bitbucket.org/sunybingcloud/elektron/utilities/offerUtils"
"encoding/json"
-mesos "github.com/mesos/mesos-go/mesosproto"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
"github.com/pkg/errors"
"os"
)


@@ -1,7 +1,7 @@
package def
import (
-"github.com/mdesenfants/gokmeans"
+"github.com/mash/gokmeans"
"log"
"sort"
)


@@ -8,8 +8,8 @@ import (
"flag"
"fmt"
"github.com/golang/protobuf/proto"
-mesos "github.com/mesos/mesos-go/mesosproto"
-sched "github.com/mesos/mesos-go/scheduler"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
+sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"os"
"os/signal"


@@ -5,8 +5,8 @@ import (
"bitbucket.org/sunybingcloud/elektron/utilities/mesosUtils"
"bitbucket.org/sunybingcloud/elektron/utilities/offerUtils"
"fmt"
-mesos "github.com/mesos/mesos-go/mesosproto"
-sched "github.com/mesos/mesos-go/scheduler"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
+sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"math/rand"
)


@@ -5,8 +5,8 @@ import (
"bitbucket.org/sunybingcloud/elektron/utilities/mesosUtils"
"bitbucket.org/sunybingcloud/elektron/utilities/offerUtils"
"fmt"
-mesos "github.com/mesos/mesos-go/mesosproto"
-sched "github.com/mesos/mesos-go/scheduler"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
+sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"math/rand"
)


@@ -6,9 +6,9 @@ import (
"bytes"
"fmt"
"github.com/golang/protobuf/proto"
-mesos "github.com/mesos/mesos-go/mesosproto"
-"github.com/mesos/mesos-go/mesosutil"
-sched "github.com/mesos/mesos-go/scheduler"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
+"github.com/mesos/mesos-go/api/v0/mesosutil"
+sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"sync"
"time"


@@ -5,8 +5,8 @@ import (
"bitbucket.org/sunybingcloud/elektron/utilities/mesosUtils"
"bitbucket.org/sunybingcloud/elektron/utilities/offerUtils"
"fmt"
-mesos "github.com/mesos/mesos-go/mesosproto"
-sched "github.com/mesos/mesos-go/scheduler"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
+sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"math/rand"
)


@@ -0,0 +1,145 @@
package schedulers
import (
"bitbucket.org/sunybingcloud/electron/def"
elecLogDef "bitbucket.org/sunybingcloud/electron/logging/def"
"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
"bytes"
"fmt"
mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"math/rand"
"sort"
)
// Decides whether to take an offer or not
func (s *BinPackSortedWattsSortedOffers) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.Task,
totalCPU, totalRAM, totalWatts float64) bool {
baseSchedRef := spc.(*baseScheduler)
offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
log.Fatal(err)
}
if (offerCPU >= (totalCPU + task.CPU)) && (offerRAM >= (totalRAM + task.RAM)) &&
(!baseSchedRef.wattsAsAResource || (offerWatts >= (totalWatts + wattsConsideration))) {
return true
}
return false
}
type BinPackSortedWattsSortedOffers struct {
SchedPolicyState
}
func (s *BinPackSortedWattsSortedOffers) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver, offers []*mesos.Offer) {
fmt.Println("BPSWSO scheduling...")
baseSchedRef := spc.(*baseScheduler)
def.SortTasks(baseSchedRef.tasks, def.SortByWatts)
baseSchedRef.LogOffersReceived(offers)
// Sorting the offers
sort.Sort(offerUtils.OffersSorter(offers))
// Printing the sorted offers and the corresponding CPU resource availability
buffer := bytes.Buffer{}
buffer.WriteString(fmt.Sprint("Sorted Offers:\n"))
for i := 0; i < len(offers); i++ {
offer := offers[i]
offerUtils.UpdateEnvironment(offer)
offerCPU, _, _ := offerUtils.OfferAgg(offer)
buffer.WriteString(fmt.Sprintf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU))
}
baseSchedRef.Log(elecLogDef.GENERAL, buffer.String())
for _, offer := range offers {
select {
case <-baseSchedRef.Shutdown:
baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
baseSchedRef.LogNumberOfRunningTasks()
continue
default:
}
tasks := []*mesos.TaskInfo{}
offerTaken := false
totalWatts := 0.0
totalCPU := 0.0
totalRAM := 0.0
for i := 0; i < len(baseSchedRef.tasks); i++ {
task := baseSchedRef.tasks[i]
wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
log.Fatal(err)
}
// Don't take offer if it doesn't match our task's host requirement
if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
continue
}
for *task.Instances > 0 {
// Does the task fit
if s.takeOffer(spc, offer, task, totalCPU, totalRAM, totalWatts) {
offerTaken = true
totalWatts += wattsConsideration
totalCPU += task.CPU
totalRAM += task.RAM
baseSchedRef.LogCoLocatedTasks(offer.GetSlaveId().GoString())
taskToSchedule := baseSchedRef.newTask(offer, task)
tasks = append(tasks, taskToSchedule)
baseSchedRef.LogSchedTrace(taskToSchedule, offer)
*task.Instances--
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
baseSchedRef.tasks = append(baseSchedRef.tasks[:i],
baseSchedRef.tasks[i+1:]...)
if len(baseSchedRef.tasks) <= 0 {
baseSchedRef.LogTerminateScheduler()
close(baseSchedRef.Shutdown)
}
}
} else {
break // Continue on to the next task
}
}
}
if offerTaken {
baseSchedRef.LogTaskStarting(nil, offer)
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
} else {
// If there was no match for the task
cpus, mem, watts := offerUtils.OfferAgg(offer)
baseSchedRef.LogInsufficientResourcesDeclineOffer(offer, cpus, mem, watts)
driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
}
}
// Switch scheduling policy only if feature enabled from CLI
if baseSchedRef.schedPolSwitchEnabled {
// Switching to a random scheduling policy.
// TODO: Switch based on some criteria.
index := rand.Intn(len(SchedPolicies))
for _, v := range SchedPolicies {
if index == 0 {
spc.SwitchSchedPol(v)
break
}
index--
}
}
}
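The new scheduler files in this commit all end with the same policy-switch idiom: draw a random index with rand.Intn and walk the SchedPolicies map that many entries before switching. A self-contained sketch of the idiom (the map contents here are placeholders, not the repository's actual policies):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	policies := map[string]string{"bin-packing": "bp", "first-fit": "ff", "bottom-heavy": "bh"}
	// rand.Intn picks a uniformly random offset, and map iteration
	// (whatever its order) stops once that offset is consumed, so each
	// entry is selected with equal probability.
	index := rand.Intn(len(policies))
	for name := range policies {
		if index == 0 {
			fmt.Println("switching to", name)
			break
		}
		index--
	}
}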

schedulers/bottomHeavy.go (new file, 266 additions)

@@ -0,0 +1,266 @@
package schedulers
import (
"bitbucket.org/sunybingcloud/electron/constants"
"bitbucket.org/sunybingcloud/electron/def"
elecLogDef "bitbucket.org/sunybingcloud/electron/logging/def"
"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
"bytes"
"fmt"
mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"math/rand"
)
/*
Tasks are categorized into small and large tasks based on their watts requirements.
All the large tasks are packed into offers from agents belonging to power classes A and B, using Bin-Packing.
All the small tasks are spread among offers from agents belonging to power classes C and D, using First-Fit.
Bin-Packing has the greatest effect when it increases the co-scheduling of tasks. Large tasks typically utilize more
resources, and hence co-scheduling them has a great impact on the total power utilization.
*/
func (s *BottomHeavy) takeOfferBinPack(spc SchedPolicyContext, offer *mesos.Offer, totalCPU, totalRAM, totalWatts,
wattsToConsider float64, task def.Task) bool {
baseSchedRef := spc.(*baseScheduler)
offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
if (!baseSchedRef.wattsAsAResource || (offerWatts >= (totalWatts + wattsToConsider))) &&
(offerCPU >= (totalCPU + task.CPU)) &&
(offerRAM >= (totalRAM + task.RAM)) {
return true
}
return false
}
func (s *BottomHeavy) takeOfferFirstFit(spc SchedPolicyContext, offer *mesos.Offer,
wattsConsideration float64, task def.Task) bool {
baseSchedRef := spc.(*baseScheduler)
offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
if (!baseSchedRef.wattsAsAResource || (offerWatts >= wattsConsideration)) &&
(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
return true
}
return false
}
type BottomHeavy struct {
SchedPolicyState
smallTasks, largeTasks []def.Task
}
// Shut down scheduler if no more tasks to schedule
func (s *BottomHeavy) shutDownIfNecessary(spc SchedPolicyContext) {
baseSchedRef := spc.(*baseScheduler)
if len(s.smallTasks) <= 0 && len(s.largeTasks) <= 0 {
baseSchedRef.LogTerminateScheduler()
close(baseSchedRef.Shutdown)
}
}
// create TaskInfo and log scheduling trace
func (s *BottomHeavy) createTaskInfoAndLogSchedTrace(spc SchedPolicyContext, offer *mesos.Offer,
task def.Task) *mesos.TaskInfo {
baseSchedRef := spc.(*baseScheduler)
baseSchedRef.LogCoLocatedTasks(offer.GetSlaveId().GoString())
taskToSchedule := baseSchedRef.newTask(offer, task)
baseSchedRef.LogSchedTrace(taskToSchedule, offer)
*task.Instances--
return taskToSchedule
}
// Using BinPacking to pack large tasks into the given offers.
func (s *BottomHeavy) pack(spc SchedPolicyContext, offers []*mesos.Offer, driver sched.SchedulerDriver) {
baseSchedRef := spc.(*baseScheduler)
for _, offer := range offers {
select {
case <-baseSchedRef.Shutdown:
baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
baseSchedRef.LogNumberOfRunningTasks()
continue
default:
}
tasks := []*mesos.TaskInfo{}
totalWatts := 0.0
totalCPU := 0.0
totalRAM := 0.0
offerTaken := false
for i := 0; i < len(s.largeTasks); i++ {
task := s.largeTasks[i]
wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
log.Fatal(err)
}
for *task.Instances > 0 {
// Does the task fit
// OR lazy evaluation. If ignore watts is set to true, second statement won't
// be evaluated.
if s.takeOfferBinPack(spc, offer, totalCPU, totalRAM, totalWatts,
wattsConsideration, task) {
offerTaken = true
totalWatts += wattsConsideration
totalCPU += task.CPU
totalRAM += task.RAM
tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(spc, offer, task))
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
s.largeTasks = append(s.largeTasks[:i], s.largeTasks[i+1:]...)
s.shutDownIfNecessary(spc)
}
} else {
break // Continue on to next task
}
}
}
if offerTaken {
baseSchedRef.LogTaskStarting(nil, offer)
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
} else {
// If there was no match for the task
cpus, mem, watts := offerUtils.OfferAgg(offer)
baseSchedRef.LogInsufficientResourcesDeclineOffer(offer, cpus, mem, watts)
driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
}
}
}
// Using First-Fit to spread small tasks among the given offers.
func (s *BottomHeavy) spread(spc SchedPolicyContext, offers []*mesos.Offer, driver sched.SchedulerDriver) {
baseSchedRef := spc.(*baseScheduler)
for _, offer := range offers {
select {
case <-baseSchedRef.Shutdown:
baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
baseSchedRef.LogNumberOfRunningTasks()
continue
default:
}
tasks := []*mesos.TaskInfo{}
taken := false
for i := 0; i < len(s.smallTasks); i++ {
task := s.smallTasks[i]
wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
log.Fatal(err)
}
// Decision to take the offer or not
if s.takeOfferFirstFit(spc, offer, wattsConsideration, task) {
taken = true
tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(spc, offer, task))
baseSchedRef.LogTaskStarting(&s.smallTasks[i], offer)
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
s.smallTasks = append(s.smallTasks[:i], s.smallTasks[i+1:]...)
s.shutDownIfNecessary(spc)
}
break // Offer taken, move on
}
}
if !taken {
// If there was no match for the task
cpus, mem, watts := offerUtils.OfferAgg(offer)
baseSchedRef.LogInsufficientResourcesDeclineOffer(offer, cpus, mem, watts)
driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
}
}
}
func (s *BottomHeavy) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver, offers []*mesos.Offer) {
fmt.Println("BottomHeavy scheduling...")
baseSchedRef := spc.(*baseScheduler)
baseSchedRef.LogOffersReceived(offers)
// Sorting tasks based on Watts
def.SortTasks(baseSchedRef.tasks, def.SortByWatts)
// Classification done based on MMPU watts requirements, into 2 clusters.
classifiedTasks := def.ClassifyTasks(baseSchedRef.tasks, 2)
// Separating small tasks from large tasks.
s.smallTasks = classifiedTasks[0].Tasks
s.largeTasks = classifiedTasks[1].Tasks
// We need to separate the offers into
// offers from ClassA and ClassB and offers from ClassC and ClassD.
// Nodes in ClassA and ClassB will be packed with the large tasks.
// Small tasks will be spread out among the nodes in ClassC and ClassD.
offersHeavyPowerClasses := []*mesos.Offer{}
offersLightPowerClasses := []*mesos.Offer{}
for _, offer := range offers {
offerUtils.UpdateEnvironment(offer)
select {
case <-baseSchedRef.Shutdown:
baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
baseSchedRef.LogNumberOfRunningTasks()
continue
default:
}
if _, ok := constants.PowerClasses["A"][*offer.Hostname]; ok {
offersHeavyPowerClasses = append(offersHeavyPowerClasses, offer)
}
if _, ok := constants.PowerClasses["B"][*offer.Hostname]; ok {
offersHeavyPowerClasses = append(offersHeavyPowerClasses, offer)
}
if _, ok := constants.PowerClasses["C"][*offer.Hostname]; ok {
offersLightPowerClasses = append(offersLightPowerClasses, offer)
}
if _, ok := constants.PowerClasses["D"][*offer.Hostname]; ok {
offersLightPowerClasses = append(offersLightPowerClasses, offer)
}
}
buffer := bytes.Buffer{}
buffer.WriteString(fmt.Sprintln("Packing Large tasks into ClassAB offers:"))
for _, o := range offersHeavyPowerClasses {
buffer.WriteString(fmt.Sprintln(*o.Hostname))
}
baseSchedRef.Log(elecLogDef.GENERAL, buffer.String())
buffer.Reset()
// Packing tasks into offersHeavyPowerClasses
s.pack(spc, offersHeavyPowerClasses, driver)
buffer.WriteString(fmt.Sprintln("Packing Small tasks among ClassCD offers:"))
for _, o := range offersLightPowerClasses {
buffer.WriteString(fmt.Sprintln(*o.Hostname))
}
baseSchedRef.Log(elecLogDef.GENERAL, buffer.String())
// Spreading tasks among offersLightPowerClasses
s.spread(spc, offersLightPowerClasses, driver)
// Switch scheduling policy only if feature enabled from CLI
if baseSchedRef.schedPolSwitchEnabled {
// Switching to a random scheduling policy.
// TODO: Switch based on some criteria.
index := rand.Intn(len(SchedPolicies))
for _, v := range SchedPolicies {
if index == 0 {
spc.SwitchSchedPol(v)
break
}
index--
}
}
}


@@ -3,8 +3,8 @@ package schedulers
import (
"bitbucket.org/sunybingcloud/elektron/def"
elecLogDef "bitbucket.org/sunybingcloud/elektron/logging/def"
-mesos "github.com/mesos/mesos-go/mesosproto"
-sched "github.com/mesos/mesos-go/scheduler"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
+sched "github.com/mesos/mesos-go/api/v0/scheduler"
)
// Implements mesos scheduler.


@@ -5,8 +5,8 @@ import (
"bitbucket.org/sunybingcloud/elektron/utilities/mesosUtils"
"bitbucket.org/sunybingcloud/elektron/utilities/offerUtils"
"fmt"
-mesos "github.com/mesos/mesos-go/mesosproto"
-sched "github.com/mesos/mesos-go/scheduler"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
+sched "github.com/mesos/mesos-go/api/v0/scheduler"
"math/rand"
)


@@ -0,0 +1,136 @@
package schedulers
import (
"bitbucket.org/sunybingcloud/electron/def"
elecLogDef "bitbucket.org/sunybingcloud/electron/logging/def"
"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
"bytes"
"fmt"
mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"math/rand"
"sort"
)
// Decides whether to take an offer or not
func (s *FirstFitSortedOffers) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.Task) bool {
baseSchedRef := spc.(*baseScheduler)
cpus, mem, watts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
log.Fatal(err)
}
if cpus >= task.CPU && mem >= task.RAM && (!baseSchedRef.wattsAsAResource || watts >= wattsConsideration) {
return true
}
return false
}
// FirstFitSortedOffers implements the Scheduler interface
type FirstFitSortedOffers struct {
SchedPolicyState
}
func (s *FirstFitSortedOffers) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver,
offers []*mesos.Offer) {
fmt.Println("FFSO scheduling...")
baseSchedRef := spc.(*baseScheduler)
baseSchedRef.LogOffersReceived(offers)
// Sorting the offers
sort.Sort(offerUtils.OffersSorter(offers))
// Printing the sorted offers and the corresponding CPU resource availability
buffer := bytes.Buffer{}
buffer.WriteString(fmt.Sprintln("Sorted Offers:"))
for i := 0; i < len(offers); i++ {
offer := offers[i]
offerUtils.UpdateEnvironment(offer)
offerCPU, _, _ := offerUtils.OfferAgg(offer)
buffer.WriteString(fmt.Sprintf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU))
}
baseSchedRef.Log(elecLogDef.GENERAL, buffer.String())
for _, offer := range offers {
select {
case <-baseSchedRef.Shutdown:
baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
baseSchedRef.LogNumberOfRunningTasks()
continue
default:
}
tasks := []*mesos.TaskInfo{}
// First fit strategy
offerTaken := false
for i := 0; i < len(baseSchedRef.tasks); i++ {
task := baseSchedRef.tasks[i]
// Don't take offer if it doesn't match our task's host requirement
if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
continue
}
// Decision to take the offer or not
if s.takeOffer(spc, offer, task) {
baseSchedRef.LogCoLocatedTasks(offer.GetSlaveId().GoString())
taskToSchedule := baseSchedRef.newTask(offer, task)
tasks = append(tasks, taskToSchedule)
baseSchedRef.LogTaskStarting(&task, offer)
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
offerTaken = true
baseSchedRef.LogSchedTrace(taskToSchedule, offer)
*task.Instances--
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
baseSchedRef.tasks[i] = baseSchedRef.tasks[len(baseSchedRef.tasks)-1]
baseSchedRef.tasks = baseSchedRef.tasks[:len(baseSchedRef.tasks)-1]
if len(baseSchedRef.tasks) <= 0 {
baseSchedRef.LogTerminateScheduler()
close(baseSchedRef.Shutdown)
}
}
break // Offer taken, move on
}
}
// If there was no match for the task
if !offerTaken {
cpus, mem, watts := offerUtils.OfferAgg(offer)
baseSchedRef.LogInsufficientResourcesDeclineOffer(offer, cpus, mem, watts)
driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
}
}
// Switch scheduling policy only if feature enabled from CLI
if baseSchedRef.schedPolSwitchEnabled {
// Switching to a random scheduling policy.
// TODO: Switch based on some criteria.
index := rand.Intn(len(SchedPolicies))
for _, v := range SchedPolicies {
if index == 0 {
spc.SwitchSchedPol(v)
break
}
index--
}
}
}


@@ -0,0 +1,134 @@
package schedulers
import (
"bitbucket.org/sunybingcloud/electron/def"
elecLogDef "bitbucket.org/sunybingcloud/electron/logging/def"
"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
"bytes"
"fmt"
mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"math/rand"
"sort"
)
// Decides whether to take an offer or not
func (s *FirstFitSortedWattsSortedOffers) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.Task) bool {
baseSchedRef := spc.(*baseScheduler)
cpus, mem, watts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
log.Fatal(err)
}
if cpus >= task.CPU && mem >= task.RAM && (!baseSchedRef.wattsAsAResource || watts >= wattsConsideration) {
return true
}
return false
}
// FirstFitSortedWattsSortedOffers implements the Scheduler interface
type FirstFitSortedWattsSortedOffers struct {
SchedPolicyState
}
func (s *FirstFitSortedWattsSortedOffers) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver,
offers []*mesos.Offer) {
fmt.Println("FFSWSO scheduling...")
baseSchedRef := spc.(*baseScheduler)
def.SortTasks(baseSchedRef.tasks, def.SortByWatts)
baseSchedRef.LogOffersReceived(offers)
// Sorting the offers
sort.Sort(offerUtils.OffersSorter(offers))
// Printing the sorted offers and the corresponding CPU resource availability
buffer := bytes.Buffer{}
buffer.WriteString(fmt.Sprintln("Sorted Offers:"))
for i := 0; i < len(offers); i++ {
offer := offers[i]
offerUtils.UpdateEnvironment(offer)
offerCPU, _, _ := offerUtils.OfferAgg(offer)
buffer.WriteString(fmt.Sprintf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU))
}
baseSchedRef.Log(elecLogDef.GENERAL, buffer.String())
for _, offer := range offers {
select {
case <-baseSchedRef.Shutdown:
baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
baseSchedRef.LogNumberOfRunningTasks()
continue
default:
}
tasks := []*mesos.TaskInfo{}
// First fit strategy
offerTaken := false
for i := 0; i < len(baseSchedRef.tasks); i++ {
task := baseSchedRef.tasks[i]
// Don't take offer if it doesn't match our task's host requirement
if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
continue
}
// Decision to take the offer or not
if s.takeOffer(spc, offer, task) {
baseSchedRef.LogCoLocatedTasks(offer.GetSlaveId().GoString())
taskToSchedule := baseSchedRef.newTask(offer, task)
tasks = append(tasks, taskToSchedule)
baseSchedRef.LogTaskStarting(&task, offer)
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
offerTaken = true
baseSchedRef.LogSchedTrace(taskToSchedule, offer)
*task.Instances--
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
baseSchedRef.tasks = append(baseSchedRef.tasks[:i],
baseSchedRef.tasks[i+1:]...)
if len(baseSchedRef.tasks) <= 0 {
baseSchedRef.LogTerminateScheduler()
close(baseSchedRef.Shutdown)
}
}
break // Offer taken, move on
}
}
// If there was no match for the task
if !offerTaken {
cpus, mem, watts := offerUtils.OfferAgg(offer)
baseSchedRef.LogInsufficientResourcesDeclineOffer(offer, cpus, mem, watts)
driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
}
}
if baseSchedRef.schedPolSwitchEnabled {
// Switching to a random scheduling policy.
// TODO: Switch based on some criteria.
index := rand.Intn(len(SchedPolicies))
for _, v := range SchedPolicies {
if index == 0 {
spc.SwitchSchedPol(v)
break
}
index--
}
}
}


@@ -0,0 +1,122 @@
package schedulers
import (
"bitbucket.org/sunybingcloud/electron/def"
"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
"fmt"
mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"math/rand"
)
// Decides whether to take an offer or not
func (s *FirstFitSortedWatts) takeOffer(spc SchedPolicyContext, offer *mesos.Offer, task def.Task) bool {
baseSchedRef := spc.(*baseScheduler)
cpus, mem, watts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
log.Fatal(err)
}
if cpus >= task.CPU && mem >= task.RAM && (!baseSchedRef.wattsAsAResource || watts >= wattsConsideration) {
return true
}
return false
}
// FirstFitSortedWatts implements the Scheduler interface
type FirstFitSortedWatts struct {
SchedPolicyState
}
func (s *FirstFitSortedWatts) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver,
offers []*mesos.Offer) {
fmt.Println("FFSW scheduling...")
baseSchedRef := spc.(*baseScheduler)
def.SortTasks(baseSchedRef.tasks, def.SortByWatts)
baseSchedRef.LogOffersReceived(offers)
for _, offer := range offers {
offerUtils.UpdateEnvironment(offer)
select {
case <-baseSchedRef.Shutdown:
baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
baseSchedRef.LogNumberOfRunningTasks()
continue
default:
}
tasks := []*mesos.TaskInfo{}
// First fit strategy
offerTaken := false
for i := 0; i < len(baseSchedRef.tasks); i++ {
task := baseSchedRef.tasks[i]
// Don't take offer if it doesn't match our task's host requirement
if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
continue
}
// Decision to take the offer or not
if s.takeOffer(spc, offer, task) {
baseSchedRef.LogCoLocatedTasks(offer.GetSlaveId().GoString())
taskToSchedule := baseSchedRef.newTask(offer, task)
tasks = append(tasks, taskToSchedule)
baseSchedRef.LogTaskStarting(&task, offer)
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
offerTaken = true
baseSchedRef.LogSchedTrace(taskToSchedule, offer)
*task.Instances--
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
baseSchedRef.tasks = append(baseSchedRef.tasks[:i],
baseSchedRef.tasks[i+1:]...)
if len(baseSchedRef.tasks) <= 0 {
baseSchedRef.LogTerminateScheduler()
close(baseSchedRef.Shutdown)
}
}
break // Offer taken, move on
}
}
// If there was no match for the task
if !offerTaken {
cpus, mem, watts := offerUtils.OfferAgg(offer)
baseSchedRef.LogInsufficientResourcesDeclineOffer(offer, cpus, mem, watts)
driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
}
}
// Switch scheduling policy only if feature enabled from CLI
if baseSchedRef.schedPolSwitchEnabled {
// Switching to a random scheduling policy.
// TODO: Switch based on some criteria.
index := rand.Intn(len(SchedPolicies))
for _, v := range SchedPolicies {
if index == 0 {
spc.SwitchSchedPol(v)
break
}
index--
}
}
}


@@ -1,8 +1,8 @@
package schedulers
import (
-mesos "github.com/mesos/mesos-go/mesosproto"
-sched "github.com/mesos/mesos-go/scheduler"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
+sched "github.com/mesos/mesos-go/api/v0/scheduler"
)
type SchedPolicyContext interface {


@@ -2,7 +2,7 @@ package schedulers
import (
"fmt"
-mesos "github.com/mesos/mesos-go/mesosproto"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
)
// NameFor returns the string name for a TaskState.


@@ -1,7 +1,7 @@
package schedulers
import (
-sched "github.com/mesos/mesos-go/scheduler"
+sched "github.com/mesos/mesos-go/api/v0/scheduler"
)
// Names of different scheduling policies.

schedulers/topHeavy.go (new file, 262 additions)

@@ -0,0 +1,262 @@
package schedulers
import (
"bitbucket.org/sunybingcloud/electron/constants"
"bitbucket.org/sunybingcloud/electron/def"
elecLogDef "bitbucket.org/sunybingcloud/electron/logging/def"
"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
"bytes"
"fmt"
mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
sched "github.com/mesos/mesos-go/api/v0/scheduler"
"log"
"math/rand"
)
/*
Tasks are categorized into small and large tasks based on their watts requirements.
All the small tasks are packed into offers from agents belonging to power classes C and D, using Bin-Packing.
All the large tasks are spread among the offers from agents belonging to power classes A and B, using First-Fit.
This gives the large (power-intensive) tasks a little more room for execution and reduces the possibility of
their starvation.
*/
func (s *TopHeavy) takeOfferBinPack(spc SchedPolicyContext, offer *mesos.Offer, totalCPU, totalRAM, totalWatts,
wattsToConsider float64, task def.Task) bool {
baseSchedRef := spc.(*baseScheduler)
offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
if (!baseSchedRef.wattsAsAResource || (offerWatts >= (totalWatts + wattsToConsider))) &&
(offerCPU >= (totalCPU + task.CPU)) &&
(offerRAM >= (totalRAM + task.RAM)) {
return true
}
return false
}
func (s *TopHeavy) takeOfferFirstFit(spc SchedPolicyContext, offer *mesos.Offer, wattsConsideration float64, task def.Task) bool {
baseSchedRef := spc.(*baseScheduler)
offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
//TODO: Insert watts calculation here instead of taking them as a parameter
if (!baseSchedRef.wattsAsAResource || (offerWatts >= wattsConsideration)) &&
(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
return true
}
return false
}
// TopHeavy implements the Scheduler interface
type TopHeavy struct {
SchedPolicyState
smallTasks, largeTasks []def.Task
}
// Shut down scheduler if no more tasks to schedule
func (s *TopHeavy) shutDownIfNecessary(spc SchedPolicyContext) {
baseSchedRef := spc.(*baseScheduler)
if len(s.smallTasks) <= 0 && len(s.largeTasks) <= 0 {
baseSchedRef.LogTerminateScheduler()
close(baseSchedRef.Shutdown)
}
}
// create TaskInfo and log scheduling trace
func (s *TopHeavy) createTaskInfoAndLogSchedTrace(spc SchedPolicyContext, offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
baseSchedRef := spc.(*baseScheduler)
baseSchedRef.LogCoLocatedTasks(offer.GetSlaveId().GoString())
taskToSchedule := baseSchedRef.newTask(offer, task)
baseSchedRef.LogSchedTrace(taskToSchedule, offer)
*task.Instances--
return taskToSchedule
}
// Using BinPacking to pack small tasks into this offer.
func (s *TopHeavy) pack(spc SchedPolicyContext, offers []*mesos.Offer, driver sched.SchedulerDriver) {
baseSchedRef := spc.(*baseScheduler)
for _, offer := range offers {
select {
case <-baseSchedRef.Shutdown:
baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
baseSchedRef.LogNumberOfRunningTasks()
continue
default:
}
tasks := []*mesos.TaskInfo{}
totalWatts := 0.0
totalCPU := 0.0
totalRAM := 0.0
taken := false
for i := 0; i < len(s.smallTasks); i++ {
task := s.smallTasks[i]
wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
log.Fatal(err)
}
for *task.Instances > 0 {
// Does the task fit
// OR lazy evaluation. If ignore watts is set to true, second statement won't
// be evaluated.
if s.takeOfferBinPack(spc, offer, totalCPU, totalRAM, totalWatts, wattsConsideration, task) {
taken = true
totalWatts += wattsConsideration
totalCPU += task.CPU
totalRAM += task.RAM
tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(spc, offer, task))
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
s.smallTasks = append(s.smallTasks[:i], s.smallTasks[i+1:]...)
s.shutDownIfNecessary(spc)
}
} else {
break // Continue on to next task
}
}
}
if taken {
baseSchedRef.LogTaskStarting(nil, offer)
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
} else {
// If there was no match for the task
cpus, mem, watts := offerUtils.OfferAgg(offer)
baseSchedRef.LogInsufficientResourcesDeclineOffer(offer, cpus, mem, watts)
driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
}
}
}
// Using first fit to spread large tasks into these offers.
func (s *TopHeavy) spread(spc SchedPolicyContext, offers []*mesos.Offer, driver sched.SchedulerDriver) {
baseSchedRef := spc.(*baseScheduler)
for _, offer := range offers {
select {
case <-baseSchedRef.Shutdown:
baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
baseSchedRef.LogNumberOfRunningTasks()
continue
default:
}
tasks := []*mesos.TaskInfo{}
offerTaken := false
for i := 0; i < len(s.largeTasks); i++ {
task := s.largeTasks[i]
wattsConsideration, err := def.WattsToConsider(task, baseSchedRef.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
log.Fatal(err)
}
// Decision to take the offer or not
if s.takeOfferFirstFit(spc, offer, wattsConsideration, task) {
offerTaken = true
tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(spc, offer, task))
baseSchedRef.LogTaskStarting(&s.largeTasks[i], offer)
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
s.largeTasks = append(s.largeTasks[:i], s.largeTasks[i+1:]...)
s.shutDownIfNecessary(spc)
}
break // Offer taken, move on
}
}
if !offerTaken {
// If there was no match for the task
cpus, mem, watts := offerUtils.OfferAgg(offer)
baseSchedRef.LogInsufficientResourcesDeclineOffer(offer, cpus, mem, watts)
driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
}
}
}
func (s *TopHeavy) ConsumeOffers(spc SchedPolicyContext, driver sched.SchedulerDriver, offers []*mesos.Offer) {
fmt.Println("TopHeavy scheduling...")
baseSchedRef := spc.(*baseScheduler)
baseSchedRef.LogOffersReceived(offers)
// Sorting tasks based on Watts
def.SortTasks(baseSchedRef.tasks, def.SortByWatts)
// Classification done based on MMPU watts requirements, into 2 clusters.
classifiedTasks := def.ClassifyTasks(baseSchedRef.tasks, 2)
// Separating small tasks from large tasks.
s.smallTasks = classifiedTasks[0].Tasks
s.largeTasks = classifiedTasks[1].Tasks
baseSchedRef.LogOffersReceived(offers)
// We need to separate the offers into
// offers from ClassA and ClassB and offers from ClassC and ClassD.
// Offers from ClassA and ClassB would execute the large tasks.
// Offers from ClassC and ClassD would execute the small tasks.
offersHeavyPowerClasses := []*mesos.Offer{}
offersLightPowerClasses := []*mesos.Offer{}
for _, offer := range offers {
offerUtils.UpdateEnvironment(offer)
select {
case <-baseSchedRef.Shutdown:
baseSchedRef.LogNoPendingTasksDeclineOffers(offer)
driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
baseSchedRef.LogNumberOfRunningTasks()
continue
default:
}
if _, ok := constants.PowerClasses["A"][*offer.Hostname]; ok {
offersHeavyPowerClasses = append(offersHeavyPowerClasses, offer)
}
if _, ok := constants.PowerClasses["B"][*offer.Hostname]; ok {
offersHeavyPowerClasses = append(offersHeavyPowerClasses, offer)
}
if _, ok := constants.PowerClasses["C"][*offer.Hostname]; ok {
offersLightPowerClasses = append(offersLightPowerClasses, offer)
}
if _, ok := constants.PowerClasses["D"][*offer.Hostname]; ok {
offersLightPowerClasses = append(offersLightPowerClasses, offer)
}
}
buffer := bytes.Buffer{}
buffer.WriteString(fmt.Sprintln("Spreading Large tasks into ClassAB Offers:"))
for _, o := range offersHeavyPowerClasses {
buffer.WriteString(fmt.Sprintln(*o.Hostname))
}
baseSchedRef.Log(elecLogDef.GENERAL, buffer.String())
buffer.Reset()
buffer.WriteString(fmt.Sprintln("Packing Small tasks into ClassCD Offers:"))
for _, o := range offersLightPowerClasses {
buffer.WriteString(fmt.Sprintln(*o.Hostname))
}
baseSchedRef.Log(elecLogDef.GENERAL, buffer.String())
// Packing tasks into offersLightPowerClasses
s.pack(spc, offersLightPowerClasses, driver)
// Spreading tasks among offersHeavyPowerClasses
s.spread(spc, offersHeavyPowerClasses, driver)
if baseSchedRef.schedPolSwitchEnabled {
// Switching to a random scheduling policy.
// TODO: Switch based on some criteria.
index := rand.Intn(len(SchedPolicies))
for _, v := range SchedPolicies {
if index == 0 {
spc.SwitchSchedPol(v)
break
}
index--
}
}
}


@@ -2,7 +2,7 @@ package mesosUtils
import (
"github.com/golang/protobuf/proto"
-mesos "github.com/mesos/mesos-go/mesosproto"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
)
var (


@@ -2,7 +2,7 @@ package offerUtils
import (
"bitbucket.org/sunybingcloud/elektron/constants"
-mesos "github.com/mesos/mesos-go/mesosproto"
+mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
"log"
"strings"
)