Changed command-line argument descriptions. Renamed the scheduling policies and their structs.
This commit is contained in:
parent 59266d207d
commit 3fa4f3d0e6

3 changed files with 31 additions and 28 deletions

scheduler.go (29 lines changed):
@@ -15,23 +15,26 @@ import (
 	"time"
 )
 
-var master = flag.String("master", "xavier:5050", "Location of leading Mesos master")
+var master = flag.String("master", "<mesos-master>:5050", "Location of leading Mesos master")
 var tasksFile = flag.String("workload", "", "JSON file containing task definitions")
-var wattsAsAResource = flag.Bool("wattsAsAResource", false, "Enable Watts as a Resource")
-var pcplogPrefix = flag.String("logPrefix", "", "Prefix for pcplog")
-var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for when we should start capping")
-var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for when we should start uncapping")
-var classMapWatts = flag.Bool("classMapWatts", false, "Enable mapping of watts to power class of node")
+var wattsAsAResource = flag.Bool("wattsAsAResource", false, "Enable Watts as a Resource. This allows the usage of the Watts attribute (if present) in the workload definition during offer matching.")
+var pcplogPrefix = flag.String("logPrefix", "", "Prefix for PCP log file")
+var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for Cluster average historical power consumption, beyond which extrema/progressive-extrema would start power-capping")
+var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for Cluster average historical power consumption, below which extrema/progressive-extrema would stop power-capping")
+var classMapWatts = flag.Bool("classMapWatts", false, "Enable mapping of watts to powerClass of node")
 
 // Short hand args
 func init() {
-	flag.StringVar(master, "m", "xavier:5050", "Location of leading Mesos master (shorthand)")
+	flag.StringVar(master, "m", "<mesos-master>:5050", "Location of leading Mesos master (shorthand)")
 	flag.StringVar(tasksFile, "w", "", "JSON file containing task definitions (shorthand)")
-	flag.BoolVar(wattsAsAResource, "waar", false, "Enable Watts as a Resource (shorthand)")
-	flag.StringVar(pcplogPrefix, "p", "", "Prefix for pcplog (shorthand)")
-	flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for when we should start capping (shorthand)")
-	flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for when we should start uncapping (shorthand)")
-	flag.BoolVar(classMapWatts, "cmw", false, "Enable mapping of watts to power class of node (shorthand)")
+	flag.BoolVar(wattsAsAResource, "waar", false, "Enable Watts as a Resource. " +
+		"This allows the usage of the Watts attribute (if present) in the workload definition during offer matching. (shorthand)")
+	flag.StringVar(pcplogPrefix, "p", "", "Prefix for PCP log file (shorthand)")
+	flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for Cluster average historical power consumption, " +
+		"beyond which extrema/progressive-extrema would start power-capping (shorthand)")
+	flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for Cluster average historical power consumption, " +
+		"below which extrema/progressive-extrema would stop power-capping (shorthand)")
+	flag.BoolVar(classMapWatts, "cmw", false, "Enable mapping of watts to powerClass of node (shorthand)")
 }
 
 func main() {
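The long/short pairs above bind two flag names to one variable. One side effect worth noting: because init() runs after the package-level flag.Float64 calls, the defaults passed to the shorthand registrations are assigned last, so hiThreshold and loThreshold effectively default to 700.0 and 400.0 rather than 0.0 unless overridden on the command line. A minimal, self-contained sketch of the pattern (names here are illustrative, not from the repo):

package main

import (
	"flag"
	"fmt"
)

// Illustrative only: the same long/short pattern scheduler.go uses for -master/-m.
var addr = flag.String("master", "<mesos-master>:5050", "Location of leading Mesos master")

func init() {
	// Bind the shorthand to the same variable; the default passed here is
	// (re)assigned at registration time, so it is the one that sticks.
	flag.StringVar(addr, "m", "<mesos-master>:5050", "Location of leading Mesos master (shorthand)")
}

func main() {
	flag.Parse()
	fmt.Println("master:", *addr)
}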
@@ -64,7 +67,7 @@ func main() {
 	driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{
 		Master: *master,
 		Framework: &mesos.FrameworkInfo{
-			Name: proto.String("Electron"),
+			Name: proto.String("Elektron"),
 			User: proto.String(""),
 		},
 		Scheduler: scheduler,
Second changed file (BinPackSortedWatts → BinPacking):

@@ -15,7 +15,7 @@ import (
 )
 
 // Decides if to take an offer or not
-func (s *BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task, totalCPU, totalRAM, totalWatts float64) bool {
+func (s *BinPacking) takeOffer(offer *mesos.Offer, task def.Task, totalCPU, totalRAM, totalWatts float64) bool {
 
 	cpus, mem, watts := offerUtils.OfferAgg(offer)
 
@@ -33,12 +33,12 @@ func (s *BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task, totalC
 	return false
 }
 
-type BinPackSortedWatts struct {
+type BinPacking struct {
 	base // Type embedded to inherit common functions
 }
 
 // New elektron scheduler
-func NewBinPackSortedWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BinPackSortedWatts {
+func NewBinPacking(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BinPacking {
 	def.SortTasks(tasks, def.SortByWatts)
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
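Each policy embeds the unexported base type, so renaming the outer struct leaves the shared bookkeeping untouched. A self-contained sketch of that embedding pattern (base's fields below are reduced from what the diff shows; the method and types are assumptions):

package main

import "fmt"

// base carries state common to all scheduling policies; tasks and
// wattsAsAResource mirror fields visible in the diff, countTask is illustrative.
type base struct {
	tasks            []string
	wattsAsAResource bool
	tasksCreated     int
}

func (b *base) countTask() { b.tasksCreated++ }

// BinPacking inherits base's fields and methods through embedding.
type BinPacking struct {
	base // Type embedded to inherit common functions
}

func main() {
	s := BinPacking{base: base{tasks: []string{"t1"}, wattsAsAResource: true}}
	s.countTask() // promoted from the embedded base
	fmt.Println(s.tasksCreated)
}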
@@ -46,7 +46,7 @@ func NewBinPackSortedWatts(tasks []def.Task, wattsAsAResource bool, schedTracePr
 		log.Fatal(err)
 	}
 
-	s := &BinPackSortedWatts{
+	s := &BinPacking{
 		base: base{
 			tasks: tasks,
 			wattsAsAResource: wattsAsAResource,
@@ -62,7 +62,7 @@ func NewBinPackSortedWatts(tasks []def.Task, wattsAsAResource bool, schedTracePr
 	return s
 }
 
-func (s *BinPackSortedWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
+func (s *BinPacking) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
 	s.tasksCreated++
 
@@ -115,7 +115,7 @@ func (s *BinPackSortedWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.T
 	}
 }
 
-func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
+func (s *BinPacking) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
 	log.Printf("Received %d resource offers", len(offers))
 
 	for _, offer := range offers {
@@ -196,7 +196,7 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers
 	}
 }
 
-func (s *BinPackSortedWatts) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
+func (s *BinPacking) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
 	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
 
 	if *status.State == mesos.TaskState_TASK_RUNNING {
Third changed file (BPSWMaxMinWatts → MaxGreedyMins):

@@ -15,7 +15,7 @@ import (
 )
 
 // Decides if to take an offer or not
-func (s *BPSWMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task,
+func (s *MaxGreedyMins) takeOffer(offer *mesos.Offer, task def.Task,
 	totalCPU, totalRAM, totalWatts float64) bool {
 
 	cpus, mem, watts := offerUtils.OfferAgg(offer)
@@ -34,12 +34,12 @@ func (s *BPSWMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task,
 	return false
 }
 
-type BPSWMaxMinWatts struct {
+type MaxGreedyMins struct {
 	base //Type embedding to inherit common functions
 }
 
 // New elektron scheduler
-func NewBPSWMaxMinWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinWatts {
+func NewMaxGreedyMins(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *MaxGreedyMins {
 	def.SortTasks(tasks, def.SortByWatts)
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
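Both constructors sort the workload by watts before packing. def.SortTasks(tasks, def.SortByWatts) is the repo's helper; a standalone stand-in using only the standard library (Task here is a hypothetical reduction of def.Task, and ascending order is an assumption — the repo's comparator defines the real ordering):

package main

import (
	"fmt"
	"sort"
)

// Task is a hypothetical reduction of def.Task; only Watts matters here.
type Task struct {
	Name  string
	Watts float64
}

// sortTasksByWatts mirrors the spirit of def.SortTasks(tasks, def.SortByWatts).
func sortTasksByWatts(tasks []Task) {
	sort.Slice(tasks, func(i, j int) bool { return tasks[i].Watts < tasks[j].Watts })
}

func main() {
	tasks := []Task{{Name: "b", Watts: 75}, {Name: "a", Watts: 50}}
	sortTasksByWatts(tasks)
	fmt.Println(tasks) // [{a 50} {b 75}]
}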
@@ -47,7 +47,7 @@ func NewBPSWMaxMinWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefi
 		log.Fatal(err)
 	}
 
-	s := &BPSWMaxMinWatts{
+	s := &MaxGreedyMins{
 		base: base{
 			tasks: tasks,
 			wattsAsAResource: wattsAsAResource,
@@ -63,7 +63,7 @@ func NewBPSWMaxMinWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefi
 	return s
 }
 
-func (s *BPSWMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
+func (s *MaxGreedyMins) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
 	s.tasksCreated++
 
@@ -119,7 +119,7 @@ func (s *BPSWMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.Task
 
 // Determine if the remaining space inside of the offer is enough for this
 // the task we need to create. If it is, create a TaskInfo and return it.
-func (s *BPSWMaxMinWatts) CheckFit(
+func (s *MaxGreedyMins) CheckFit(
 	i int,
 	task def.Task,
 	wattsConsideration float64,
@@ -159,7 +159,7 @@ func (s *BPSWMaxMinWatts) CheckFit(
 	return false, nil
 }
 
-func (s *BPSWMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
+func (s *MaxGreedyMins) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
 	log.Printf("Received %d resource offers", len(offers))
 
 	for _, offer := range offers {
@@ -253,7 +253,7 @@ func (s *BPSWMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []
 	}
 }
 
-func (s *BPSWMaxMinWatts) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
+func (s *MaxGreedyMins) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
 	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
 
 	if *status.State == mesos.TaskState_TASK_RUNNING {