Fixed the comments to be capitalized at the start and to terminate with a period.

Pradyumna Kaushik 2017-09-28 15:36:47 -04:00
parent 577120ae7c
commit b807625b78
19 changed files with 194 additions and 201 deletions
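
For reference, the convention applied throughout this commit matches the usual Go style for comments: they read as sentences that start with a capital letter and end with a period. Below is a minimal sketch of the result, reusing two fields from the base struct edited in this diff; the package name is an assumption and the snippet is illustrative, not a verbatim copy of any file.

package schedulers

// base is an abridged, hypothetical version of the embedded scheduler type
// touched in the first hunks of this commit.
type base struct {
	// Controls when to shutdown pcp logging.
	PCPLog chan struct{}
	// This channel is closed when the program receives an interrupt,
	// signalling that the program should shut down.
	Shutdown chan struct{}
}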


@@ -24,17 +24,17 @@ type base struct {
classMapWatts bool
// First set of PCP values are garbage values, signal to logger to start recording when we're
// about to schedule a new task
// about to schedule a new task.
RecordPCP *bool
// This channel is closed when the program receives an interrupt,
// signalling that the program should shut down.
Shutdown chan struct{}
// This channel is closed after shutdown is closed, and only when all
// outstanding tasks have been cleaned up
// outstanding tasks have been cleaned up.
Done chan struct{}
// Controls when to shutdown pcp logging
// Controls when to shutdown pcp logging.
PCPLog chan struct{}
schedTrace *log.Logger
@@ -42,7 +42,7 @@ type base struct {
func (s *base) init(opts ...schedPolicyOption) {
for _, opt := range opts {
// applying options
// Applying options.
if err := opt(s); err != nil {
log.Fatal(err)
}
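
As an aside on the fields documented above: Shutdown and Done follow the standard close-a-channel broadcast pattern, where closing Shutdown tells every listener to stop and closing Done signals that cleanup finished. A minimal self-contained sketch of that pattern (the worker body and timings are hypothetical):

package main

import (
	"fmt"
	"time"
)

func main() {
	shutdown := make(chan struct{}) // Closed when the program receives an interrupt.
	done := make(chan struct{})     // Closed after shutdown, once outstanding tasks are cleaned up.

	go func() {
		defer close(done)
		for {
			select {
			case <-shutdown:
				fmt.Println("cleaning up outstanding tasks")
				return
			default:
				time.Sleep(10 * time.Millisecond) // Stand-in for scheduling work.
			}
		}
	}()

	close(shutdown) // Simulate the interrupt.
	<-done          // Wait for cleanup before exiting.
}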


@@ -13,7 +13,7 @@ import (
"time"
)
// Decides if to take an offer or not
// Decides if to take an offer or not.
func (s *BinPacking) takeOffer(offer *mesos.Offer, task def.Task, totalCPU, totalRAM, totalWatts float64) bool {
cpus, mem, watts := offerUtils.OfferAgg(offer)
@@ -22,7 +22,7 @@ func (s *BinPacking) takeOffer(offer *mesos.Offer, task def.Task, totalCPU, tota
wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
if (cpus >= (totalCPU + task.CPU)) && (mem >= (totalRAM + task.RAM)) &&
@@ -33,13 +33,13 @@ func (s *BinPacking) takeOffer(offer *mesos.Offer, task def.Task, totalCPU, tota
}
type BinPacking struct {
base // Type embedded to inherit common functions
base // Type embedded to inherit common functions.
}
// Initialization
// Initialization.
func (s *BinPacking) init(opts ...schedPolicyOption) {
s.base.init(opts...)
// sorting the tasks based on watts
// Sorting the tasks based on watts.
def.SortTasks(s.tasks, def.SortByWatts)
}
@@ -48,17 +48,17 @@ func (s *BinPacking) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo
s.tasksCreated++
if !*s.RecordPCP {
// Turn on logging
// Turn on logging.
*s.RecordPCP = true
time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
}
// If this is our first time running into this Agent
// If this is our first time running into this Agent.
if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
}
// Add task to list of tasks running on node
// Add task to list of tasks running on node.
s.running[offer.GetSlaveId().GoString()][taskName] = true
resources := []*mesos.Resource{
@@ -71,7 +71,7 @@ func (s *BinPacking) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
} else {
// Error in determining wattsToConsider
// Error in determining wattsToConsider.
log.Fatal(err)
}
}
@@ -121,17 +121,17 @@ func (s *BinPacking) ResourceOffers(driver sched.SchedulerDriver, offers []*meso
task := s.tasks[i]
wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
// Don't take offer if it doesn't match our task's host requirement
// Don't take offer if it doesn't match our task's host requirement.
if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
continue
}
for *task.Instances > 0 {
// Does the task fit
// Does the task fit.
if s.takeOffer(offer, task, totalCPU, totalRAM, totalWatts) {
offerTaken = true
@@ -148,7 +148,7 @@ func (s *BinPacking) ResourceOffers(driver sched.SchedulerDriver, offers []*meso
*task.Instances--
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
// All instances of task have been scheduled, remove it.
s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
if len(s.tasks) <= 0 {
@@ -157,7 +157,7 @@ func (s *BinPacking) ResourceOffers(driver sched.SchedulerDriver, offers []*meso
}
}
} else {
break // Continue on to next offer
break // Continue on to next offer.
}
}
}
@@ -167,7 +167,7 @@ func (s *BinPacking) ResourceOffers(driver sched.SchedulerDriver, offers []*meso
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
} else {
// If there was no match for the task
// If there was no match for the task.
fmt.Println("There is not enough resources to launch a task:")
cpus, mem, watts := offerUtils.OfferAgg(offer)
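
To summarize the BinPacking flow shown in this file: tasks are sorted by watts, and each offer is filled with as many instances as the running totals allow, using the cumulative check in takeOffer. A simplified, self-contained sketch of that check (the capacities and task sizes are made up):

package main

import (
	"fmt"
	"sort"
)

// task is a hypothetical stand-in holding only the fields used for the fit check.
type task struct {
	cpu, ram, watts float64
	instances       int
}

func main() {
	// Offer aggregates, standing in for offerUtils.OfferAgg(offer).
	cpus, mem, watts := 8.0, 16.0, 100.0

	tasks := []task{{1, 2, 40, 1}, {0.5, 1, 10, 3}, {2, 4, 25, 2}}
	// Sorting the tasks based on watts, as def.SortTasks(s.tasks, def.SortByWatts) does.
	sort.Slice(tasks, func(i, j int) bool { return tasks[i].watts < tasks[j].watts })

	totalCPU, totalRAM, totalWatts := 0.0, 0.0, 0.0
	scheduled := 0
	for i := range tasks {
		for tasks[i].instances > 0 {
			// Mirrors takeOffer: the offer must cover the running totals plus this instance.
			if cpus >= totalCPU+tasks[i].cpu && mem >= totalRAM+tasks[i].ram && watts >= totalWatts+tasks[i].watts {
				totalCPU += tasks[i].cpu
				totalRAM += tasks[i].ram
				totalWatts += tasks[i].watts
				tasks[i].instances--
				scheduled++
			} else {
				break // Continue on to the next task.
			}
		}
	}
	fmt.Println(scheduled, totalCPU, totalRAM, totalWatts) // 5 5.5 11 80 with these made-up numbers.
}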


@@ -13,7 +13,7 @@ import (
"time"
)
// Decides if to take an offer or not
// Decides if to take an offer or not.
func (s *FirstFit) takeOffer(offer *mesos.Offer, task def.Task) bool {
cpus, mem, watts := offerUtils.OfferAgg(offer)
@@ -22,7 +22,7 @@ func (s *FirstFit) takeOffer(offer *mesos.Offer, task def.Task) bool {
wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || watts >= wattsConsideration) {
@@ -32,12 +32,12 @@ func (s *FirstFit) takeOffer(offer *mesos.Offer, task def.Task) bool {
return false
}
// elektronScheduler implements the Scheduler interface
// Elektron scheduler implements the Scheduler interface.
type FirstFit struct {
base // Type embedded to inherit common functions
}
// Initialization
// Initialization.
func (s *FirstFit) init(opts ...schedPolicyOption) {
s.base.init(opts...)
}
@@ -47,17 +47,17 @@ func (s *FirstFit) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
s.tasksCreated++
if !*s.RecordPCP {
// Turn on logging
// Turn on logging.
*s.RecordPCP = true
time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts.
}
// If this is our first time running into this Agent
// If this is our first time running into this Agent.
if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
}
// Add task to list of tasks running on node
// Add task to list of tasks running on node.
s.running[offer.GetSlaveId().GoString()][taskName] = true
resources := []*mesos.Resource{
@@ -70,7 +70,7 @@ func (s *FirstFit) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
} else {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
}
@@ -112,18 +112,17 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.
tasks := []*mesos.TaskInfo{}
// First fit strategy
// First fit strategy.
offerTaken := false
for i := 0; i < len(s.tasks); i++ {
task := s.tasks[i]
// Don't take offer if it doesn't match our task's host requirement
// Don't take offer if it doesn't match our task's host requirement.
if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
continue
}
// Decision to take the offer or not
// Decision to take the offer or not.
if s.takeOffer(offer, task) {
log.Println("Co-Located with: ")
@@ -142,7 +141,7 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.
*task.Instances--
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
// All instances of task have been scheduled, remove it.
s.tasks[i] = s.tasks[len(s.tasks)-1]
s.tasks = s.tasks[:len(s.tasks)-1]
@@ -151,11 +150,11 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.
close(s.Shutdown)
}
}
break // Offer taken, move on
break // Offer taken, move on.
}
}
// If there was no match for the task
// If there was no match for the task.
if !offerTaken {
fmt.Println("There is not enough resources to launch a task:")
cpus, mem, watts := offerUtils.OfferAgg(offer)


@@ -18,7 +18,7 @@ func coLocated(tasks map[string]bool) {
fmt.Println("---------------------")
}
// Get the powerClass of the given hostname
// Get the powerClass of the given hostname.
func hostToPowerClass(hostName string) string {
for powerClass, hosts := range constants.PowerClasses {
if _, ok := hosts[hostName]; ok {
@@ -28,7 +28,7 @@ func coLocated(tasks map[string]bool) {
return ""
}
// scheduler policy options to help initialize schedulers
// Scheduler policy options to help initialize schedulers.
type schedPolicyOption func(e ElectronScheduler) error
func WithTasks(ts []def.Task) schedPolicyOption {
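
schedPolicyOption above is a functional option that base.init (shown at the top of this commit) applies in a loop. The diff truncates WithTasks, so rather than guess its body, here is a minimal self-contained sketch of the same pattern with hypothetical names (Config, WithRecordPCP) standing in for the repository's types:

package main

import (
	"fmt"
	"log"
)

// Config is a hypothetical stand-in for the scheduler's base type.
type Config struct {
	RecordPCP bool
}

// option mirrors the shape of schedPolicyOption: a function that mutates its
// target and may return an error.
type option func(c *Config) error

// WithRecordPCP is a hypothetical option written in the style of WithTasks.
func WithRecordPCP(record bool) option {
	return func(c *Config) error {
		c.RecordPCP = record
		return nil
	}
}

// initConfig mirrors base.init: apply each option and abort on error.
func initConfig(c *Config, opts ...option) {
	for _, opt := range opts {
		// Applying options.
		if err := opt(c); err != nil {
			log.Fatal(err)
		}
	}
}

func main() {
	c := &Config{}
	initConfig(c, WithRecordPCP(true))
	fmt.Println(c.RecordPCP) // true
}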


@@ -13,7 +13,7 @@ import (
"time"
)
// Decides if to take an offer or not
// Decides if to take an offer or not.
func (s *MaxGreedyMins) takeOffer(offer *mesos.Offer, task def.Task,
totalCPU, totalRAM, totalWatts float64) bool {
@@ -23,7 +23,7 @@ func (s *MaxGreedyMins) takeOffer(offer *mesos.Offer, task def.Task,
wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
if (cpus >= (totalCPU + task.CPU)) && (mem >= (totalRAM + task.RAM)) &&
@@ -34,10 +34,10 @@ func (s *MaxGreedyMins) takeOffer(offer *mesos.Offer, task def.Task,
}
type MaxGreedyMins struct {
base //Type embedding to inherit common functions
base //Type embedding to inherit common functions.
}
// Initialization
// Initialization.
func (s *MaxGreedyMins) init(opts ...schedPolicyOption) {
s.base.init(opts...)
}
@@ -46,19 +46,19 @@ func (s *MaxGreedyMins) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskIn
taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
s.tasksCreated++
// Start recording only when we're creating the first task
// Start recording only when we're creating the first task.
if !*s.RecordPCP {
// Turn on logging
*s.RecordPCP = true
time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts.
}
// If this is our first time running into this Agent
// If this is our first time running into this Agent.
if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
}
// Add task to list of tasks running on node
// Add task to list of tasks running on node.
s.running[offer.GetSlaveId().GoString()][taskName] = true
resources := []*mesos.Resource{
@@ -71,7 +71,7 @@ func (s *MaxGreedyMins) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskIn
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
} else {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
}
@@ -90,7 +90,7 @@ func (s *MaxGreedyMins) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskIn
Type: mesos.ContainerInfo_DOCKER.Enum(),
Docker: &mesos.ContainerInfo_DockerInfo{
Image: proto.String(task.Image),
Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated.
},
},
}
@@ -107,7 +107,7 @@ func (s *MaxGreedyMins) CheckFit(
totalRAM *float64,
totalWatts *float64) (bool, *mesos.TaskInfo) {
// Does the task fit
// Does the task fit.
if s.takeOffer(offer, task, *totalCPU, *totalRAM, *totalWatts) {
*totalWatts += wattsConsideration
@@ -123,7 +123,7 @@ func (s *MaxGreedyMins) CheckFit(
*task.Instances--
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
// All instances of task have been scheduled, remove it.
s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
if len(s.tasks) <= 0 {
@@ -160,20 +160,20 @@ func (s *MaxGreedyMins) ResourceOffers(driver sched.SchedulerDriver, offers []*m
totalCPU := 0.0
totalRAM := 0.0
// Assumes s.tasks is ordered in non-decreasing median max peak order
// Assumes s.tasks is ordered in non-decreasing median max peak order.
// Attempt to schedule a single instance of the heaviest workload available first
// Start from the back until one fits
// Attempt to schedule a single instance of the heaviest workload available first.
// Start from the back until one fits.
for i := len(s.tasks) - 1; i >= 0; i-- {
task := s.tasks[i]
wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
// Don't take offer if it doesn't match our task's host requirement
// Don't take offer if it doesn't match our task's host requirement.
if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
continue
}
@@ -189,16 +189,16 @@ func (s *MaxGreedyMins) ResourceOffers(driver sched.SchedulerDriver, offers []*m
}
}
// Pack the rest of the offer with the smallest tasks
// Pack the rest of the offer with the smallest tasks.
for i := 0; i < len(s.tasks); i++ {
task := s.tasks[i]
wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
// Don't take offer if it doesn't match our task's host requirement
// Don't take offer if it doesn't match our task's host requirement.
if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
continue
}
@@ -212,7 +212,7 @@ func (s *MaxGreedyMins) ResourceOffers(driver sched.SchedulerDriver, offers []*m
offerTaken = true
tasks = append(tasks, taskToSchedule)
} else {
break // Continue on to next task
break // Continue on to next task.
}
}
}
@@ -222,7 +222,7 @@ func (s *MaxGreedyMins) ResourceOffers(driver sched.SchedulerDriver, offers []*m
driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
} else {
// If there was no match for the task
// If there was no match for the task.
fmt.Println("There is not enough resources to launch a task:")
cpus, mem, watts := offerUtils.OfferAgg(offer)
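
The two loops in MaxGreedyMins.ResourceOffers above implement the strategy the comments describe: over a watts-sorted task list, place one instance of the heaviest task that fits (walking from the back), then pack the remainder of the offer with the smallest tasks (walking from the front). A simplified, self-contained sketch of that ordering; fits stands in for CheckFit/takeOffer and the numbers are made up:

package main

import "fmt"

// task is a hypothetical stand-in holding only the watts estimate used for ordering.
type task struct {
	name  string
	watts float64
}

// fits stands in for CheckFit/takeOffer: accept while the cumulative watts stay within capacity.
func fits(used, want, capacity float64) bool {
	return used+want <= capacity
}

func main() {
	// Assumes non-decreasing watts order, as produced by def.SortTasks(..., def.SortByWatts).
	tasks := []task{{"tiny", 10}, {"small", 20}, {"medium", 40}, {"large", 80}}
	capacity, used := 100.0, 0.0
	var scheduled []string

	// Attempt to schedule a single instance of the heaviest workload first: start from the back until one fits.
	for i := len(tasks) - 1; i >= 0; i-- {
		if fits(used, tasks[i].watts, capacity) {
			used += tasks[i].watts
			scheduled = append(scheduled, tasks[i].name)
			break
		}
	}

	// Pack the rest of the offer with the smallest tasks.
	// (The instance placed above is not removed in this sketch; it simply no longer fits.)
	for i := 0; i < len(tasks); i++ {
		if fits(used, tasks[i].watts, capacity) {
			used += tasks[i].watts
			scheduled = append(scheduled, tasks[i].name)
		}
	}

	fmt.Println(scheduled, used) // [large tiny] 90 with these made-up numbers.
}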


@@ -13,7 +13,7 @@ import (
"time"
)
// Decides if to take an offer or not
// Decides if to take an offer or not.
func (s *MaxMin) takeOffer(offer *mesos.Offer, task def.Task,
totalCPU, totalRAM, totalWatts float64) bool {
@@ -23,7 +23,7 @@ func (s *MaxMin) takeOffer(offer *mesos.Offer, task def.Task,
wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
if (cpus >= (totalCPU + task.CPU)) && (mem >= (totalRAM + task.RAM)) &&
@@ -34,10 +34,10 @@ func (s *MaxMin) takeOffer(offer *mesos.Offer, task def.Task,
}
type MaxMin struct {
base //Type embedding to inherit common functions
base //Type embedding to inherit common functions.
}
// Initialization
// Initialization.
func (s *MaxMin) init(opts ...schedPolicyOption) {
s.base.init(opts...)
}
@@ -46,19 +46,19 @@ func (s *MaxMin) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
s.tasksCreated++
// Start recording only when we're creating the first task
// Start recording only when we're creating the first task.
if !*s.RecordPCP {
// Turn on logging
// Turn on logging.
*s.RecordPCP = true
time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts.
}
// If this is our first time running into this Agent
// If this is our first time running into this Agent.
if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
}
// Add task to list of tasks running on node
// Add task to list of tasks running on node.
s.running[offer.GetSlaveId().GoString()][taskName] = true
resources := []*mesos.Resource{
@@ -71,7 +71,7 @@ func (s *MaxMin) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
} else {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
}
@@ -90,7 +90,7 @@ func (s *MaxMin) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
Type: mesos.ContainerInfo_DOCKER.Enum(),
Docker: &mesos.ContainerInfo_DockerInfo{
Image: proto.String(task.Image),
Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated.
},
},
}
@@ -107,7 +107,7 @@ func (s *MaxMin) CheckFit(
totalRAM *float64,
totalWatts *float64) (bool, *mesos.TaskInfo) {
// Does the task fit
// Does the task fit.
if s.takeOffer(offer, task, *totalCPU, *totalRAM, *totalWatts) {
*totalWatts += wattsConsideration
@@ -123,7 +123,7 @@ func (s *MaxMin) CheckFit(
*task.Instances--
if *task.Instances <= 0 {
// All instances of task have been scheduled, remove it
// All instances of task have been scheduled, remove it.
s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
if len(s.tasks) <= 0 {
@@ -160,17 +160,17 @@ func (s *MaxMin) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Of
totalCPU := 0.0
totalRAM := 0.0
// Assumes s.tasks is ordered in non-decreasing median max peak order
// Assumes s.tasks is ordered in non-decreasing median max peak order.
// Attempt to schedule a single instance of the heaviest workload available first
// Start from the back until one fits
// Attempt to schedule a single instance of the heaviest workload available first.
// Start from the back until one fits.
direction := false // True = Min Max, False = Max Min
direction := false // True = Min Max, False = Max Min.
var index int
start := true // if false then index has changed and need to keep it that way
start := true // If false then index has changed and need to keep it that way.
for i := 0; i < len(s.tasks); i++ {
// we need to pick a min task or a max task
// depending on the value of direction
// We need to pick a min task or a max task
// depending on the value of direction.
if direction && start {
index = 0
} else if start {
@@ -180,11 +180,11 @@ func (s *MaxMin) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Of
wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
if err != nil {
// Error in determining wattsConsideration
// Error in determining wattsConsideration.
log.Fatal(err)
}
// Don't take offer it is doesn't match our task's host requirement
// Don't take offer it is doesn't match our task's host requirement.
if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
continue
}
@@ -196,13 +196,13 @@ func (s *MaxMin) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Of
if taken {
offerTaken = true
tasks = append(tasks, taskToSchedule)
// Need to change direction and set start to true
// Setting start to true would ensure that index be set accurately again
// Need to change direction and set start to true.
// Setting start to true would ensure that index be set accurately again.
direction = !direction
start = true
i--
} else {
// Need to move index depending on the value of direction
// Need to move index depending on the value of direction.
if direction {
index++
start = false
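
The direction, index, and start bookkeeping above alternates between attempting the heaviest remaining task and the lightest remaining one. The sketch below captures that alternation with a simpler two-pointer walk over a watts-sorted slice; it is not the repository's exact control flow, and the fit check and capacity are hypothetical:

package main

import "fmt"

func main() {
	// Assumes watts estimates in non-decreasing order, as MaxMin expects.
	watts := []float64{10, 20, 40, 80}
	capacity, used := 100.0, 0.0
	var order []float64

	lo, hi := 0, len(watts)-1
	pickMax := true // Mirrors direction: alternate between the max end and the min end.
	for lo <= hi {
		i := lo
		if pickMax {
			i = hi
		}
		if used+watts[i] <= capacity { // Stand-in for CheckFit.
			used += watts[i]
			order = append(order, watts[i])
			pickMax = !pickMax // Change direction after a successful placement.
		}
		// Move past the entry we just tried, whether or not it fit.
		if i == hi {
			hi--
		} else {
			lo++
		}
	}
	fmt.Println(order, used) // [80 10] 90 with these made-up numbers.
}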


@@ -10,7 +10,7 @@ const (
mm = "max-min"
)
// Scheduler class factory
// Scheduler class factory.
var Schedulers map[string]scheduler.Scheduler = map[string]scheduler.Scheduler{
ff: &FirstFit{base: base{}},
bp: &BinPacking{base: base{}},
@@ -18,7 +18,7 @@ var Schedulers map[string]scheduler.Scheduler = map[string]scheduler.Scheduler{
mm: &MaxMin{base: base{}},
}
// build the scheduling policy with the options being applied
// Build the scheduling policy with the options being applied.
func BuildSchedPolicy(s scheduler.Scheduler, opts ...schedPolicyOption) {
s.(ElectronScheduler).init(opts...)
}
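
Taken together, the Schedulers map and BuildSchedPolicy above act as a small factory: look a policy up by name, then apply functional options before handing the scheduler to the Mesos driver. A hedged usage sketch; newScheduler is a hypothetical helper assumed to live in the same package (with log imported), and only Schedulers, BuildSchedPolicy, WithTasks, "max-min", def.Task, and scheduler.Scheduler come from this diff:

// newScheduler is a hypothetical call site, not part of this commit.
func newScheduler(policyName string, tasks []def.Task) scheduler.Scheduler {
	s, ok := Schedulers[policyName]
	if !ok {
		log.Fatalf("unknown scheduling policy: %s", policyName)
	}
	// Build the scheduling policy with the options being applied.
	BuildSchedPolicy(s, WithTasks(tasks))
	return s
}

// Example: newScheduler("max-min", tasks) selects &MaxMin{} from the factory map.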