Resolved merge conflicts with master. Also added a TODO in the README to use Go 1.8 techniques.

Pradyumna Kaushik 2017-04-21 17:17:22 -04:00
commit 1b15bb14e7
24 changed files with 122 additions and 101 deletions
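A note on the Go 1.8 TODO: the commit message does not say which techniques are meant, but one plausible candidate (an assumption, not stated anywhere in this diff) is replacing hand-rolled sort wrappers in the schedulers that log "Sorted Offers:" below with sort.Slice, which was added in Go 1.8. A minimal sketch of that style, using an illustrative stand-in type instead of *mesos.Offer:

package main

import (
	"fmt"
	"sort"
)

// offer is a stand-in for *mesos.Offer; only the fields needed for sorting are modeled.
type offer struct {
	hostname string
	cpu      float64
}

func main() {
	offers := []offer{{"host1", 4.0}, {"host2", 2.0}, {"host3", 8.0}}

	// sort.Slice (Go 1.8+) avoids defining a named type that implements sort.Interface.
	sort.Slice(offers, func(i, j int) bool {
		return offers[i].cpu < offers[j].cpu
	})

	for _, o := range offers {
		fmt.Printf("Offer[%s].CPU = %f\n", o.hostname, o.cpu)
	}
}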


@ -127,6 +127,7 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr
log.Println("Sorted Offers:")
for i := 0; i < len(offers); i++ {
offer := offers[i]
+ offerUtils.UpdateEnvironment(offer)
offerCPU, _, _ := offerUtils.OfferAgg(offer)
log.Printf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU)
}
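The line marked + above is the recurring change in this commit: every ResourceOffers loop now calls offerUtils.UpdateEnvironment(offer) before inspecting the offer. The helper's body is not part of these hunks; below is a self-contained sketch of the kind of bookkeeping it appears to perform, registering a host and its power class in set-like maps so that the comma-ok lookups used elsewhere in this commit work. The names, value types, and data source are assumptions, not the actual offerUtils code:

package main

import "fmt"

var (
	// Assumed shapes, mirroring the membership checks in the hunks below.
	hosts        = map[string]struct{}{}
	powerClasses = map[string]map[string]struct{}{}
)

// updateEnvironment is a hypothetical stand-in for offerUtils.UpdateEnvironment:
// record the offer's host and its power class the first time they are seen.
func updateEnvironment(hostname, class string) {
	if _, ok := hosts[hostname]; !ok {
		hosts[hostname] = struct{}{}
	}
	if _, ok := powerClasses[class]; !ok {
		powerClasses[class] = map[string]struct{}{}
	}
	powerClasses[class][hostname] = struct{}{}
}

func main() {
	updateEnvironment("host1", "A")
	updateEnvironment("host2", "C")
	if _, ok := powerClasses["A"]["host1"]; ok {
		fmt.Println("host1 registered as a class A host")
	}
}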


@ -210,6 +210,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
// retrieving the total power for each host in the offers
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
if _, ok := s.totalPower[*offer.Hostname]; !ok {
_, _, offerWatts := offerUtils.OfferAgg(offer)
s.totalPower[*offer.Hostname] = offerWatts
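offerUtils.OfferAgg is used throughout these hunks as a three-value aggregate of an offer's resources (CPU, memory, and watts, judging by how the results are consumed). A small illustrative sketch of that kind of aggregation over stand-in types; this is assumed behavior, not the actual offerUtils implementation:

package main

import "fmt"

// resource is a stand-in for a Mesos resource entry.
type resource struct {
	name  string
	value float64
}

// offerAgg sums cpus, mem, and watts from an offer's resources,
// in the spirit of the offerUtils.OfferAgg calls above (assumed behavior).
func offerAgg(resources []resource) (cpu, mem, watts float64) {
	for _, r := range resources {
		switch r.name {
		case "cpus":
			cpu += r.value
		case "mem":
			mem += r.value
		case "watts":
			watts += r.value
		}
	}
	return
}

func main() {
	_, _, offerWatts := offerAgg([]resource{{"cpus", 8}, {"mem", 16384}, {"watts", 250}})
	fmt.Println("total power for this host:", offerWatts)
}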


@ -120,6 +120,7 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers
log.Printf("Received %d resource offers", len(offers))
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
select {
case <-s.Shutdown:
log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
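The select with an empty default, seen in this and most of the following hunks, is a non-blocking poll of the scheduler's Shutdown channel: if the channel has been closed, the offer is declined; otherwise scheduling continues immediately. A standalone sketch of the idiom, assuming Shutdown is a channel that is closed to signal completion:

package main

import "fmt"

func main() {
	shutdown := make(chan struct{})
	offers := []string{"host1", "host2", "host3"}

	for i, offer := range offers {
		// Simulate the framework finishing its workload partway through.
		if i == 1 {
			close(shutdown)
		}
		select {
		case <-shutdown:
			// Non-blocking check: once shutdown is closed, every receive succeeds.
			fmt.Println("Done scheduling tasks: declining offer on [", offer, "]")
			continue
		default:
		}
		fmt.Println("considering offer from", offer)
	}
}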


@ -17,12 +17,12 @@ import (
)
/*
- Tasks are categorized into small and large tasks based on the watts requirement.
- All the small tasks are packed into offers from agents belonging to power class C and power class D, using BinPacking.
- All the large tasks are spread among the offers from agents belonging to power class A and power class B, using FirstFit.
+ Tasks are categorized into small and large tasks based on watts requirements.
+ All the large tasks are packed into offers from agents belonging to power classes A and B, using Bin-Packing.
+ All the small tasks are spread among offers from agents belonging to power class C and D, using First-Fit.
- BinPacking has the most effect when co-scheduling of tasks is increased. Large tasks typically utilize more resources and hence,
- co-scheduling them has a great impact on the total power utilization.
+ Bin-Packing has the most effect when co-scheduling of tasks is increased. Large tasks typically utilize more resources and hence,
+ co-scheduling them has a great impact on the total power utilization.
*/
func (s *BottomHeavy) takeOfferBinPack(offer *mesos.Offer, totalCPU, totalRAM, totalWatts,
@ -174,7 +174,7 @@ func (s *BottomHeavy) createTaskInfoAndLogSchedTrace(offer *mesos.Offer, task de
return taskToSchedule
}
- // Using BinPacking to pack small tasks into this offer.
+ // Using BinPacking to pack large tasks into the given offers.
func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) {
for _, offer := range offers {
select {
@ -236,7 +236,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver)
}
}
- // Using first fit to spread large tasks into these offers.
+ // Using First-Fit to spread small tasks among the given offers.
func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) {
for _, offer := range offers {
select {
@ -297,6 +297,7 @@ func (s *BottomHeavy) ResourceOffers(driver sched.SchedulerDriver, offers []*mes
offersLightPowerClasses := []*mesos.Offer{}
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
select {
case <-s.Shutdown:
log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
@ -307,13 +308,19 @@ func (s *BottomHeavy) ResourceOffers(driver sched.SchedulerDriver, offers []*mes
default:
}
- if constants.PowerClasses["A"][*offer.Hostname] ||
- constants.PowerClasses["B"][*offer.Hostname] {
+ if _, ok := constants.PowerClasses["A"][*offer.Hostname]; ok {
offersHeavyPowerClasses = append(offersHeavyPowerClasses, offer)
- } else if constants.PowerClasses["C"][*offer.Hostname] ||
- constants.PowerClasses["D"][*offer.Hostname] {
+ }
+ if _, ok := constants.PowerClasses["B"][*offer.Hostname]; ok {
+ offersHeavyPowerClasses = append(offersHeavyPowerClasses, offer)
+ }
+ if _, ok := constants.PowerClasses["C"][*offer.Hostname]; ok {
offersLightPowerClasses = append(offersLightPowerClasses, offer)
}
+ if _, ok := constants.PowerClasses["D"][*offer.Hostname]; ok {
+ offersLightPowerClasses = append(offersLightPowerClasses, offer)
+ }
}
log.Println("Packing Large tasks into ClassAB offers:")
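Two things change in BottomHeavy above. The comment now matches the code (large tasks are bin-packed onto class A/B offers, small tasks are first-fit onto class C/D offers), and the power-class test moves from indexing a bool-valued map to a comma-ok membership check, which works for any map value type. A compact, self-contained sketch of the small/large split and the BottomHeavy dispatch; the threshold, task names, and types are illustrative assumptions, not the scheduler's actual definitions:

package main

import "fmt"

type task struct {
	name  string
	watts float64
}

// classifyTasks splits tasks into small and large around a watts threshold,
// mirroring the categorization described in the comment above.
func classifyTasks(tasks []task, threshold float64) (small, large []task) {
	for _, t := range tasks {
		if t.watts <= threshold {
			small = append(small, t)
		} else {
			large = append(large, t)
		}
	}
	return
}

func main() {
	tasks := []task{{"t1", 30}, {"t2", 85}, {"t3", 45}, {"t4", 120}}
	small, large := classifyTasks(tasks, 50)

	// BottomHeavy: bin-pack the large tasks onto class A/B offers and
	// first-fit the small tasks onto class C/D offers.
	fmt.Println("pack onto A/B offers:", large)
	fmt.Println("spread over C/D offers:", small)
}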


@ -164,6 +164,7 @@ func (s *BPSWMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []
log.Printf("Received %d resource offers", len(offers))
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
select {
case <-s.Shutdown:
log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")


@ -261,6 +261,7 @@ func (s *BPSWMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, o
log.Printf("Received %d resource offers", len(offers))
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
select {
case <-s.Shutdown:
log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")


@ -164,7 +164,7 @@ func (s *BPSWMaxMinProacCC) startCapping() {
// updating cap value
bpMaxMinProacCCCapValue = bpMaxMinProacCCNewCapValue
if bpMaxMinProacCCCapValue > 0.0 {
- for _, host := range constants.Hosts {
+ for host, _ := range constants.Hosts {
// Rounding cap value to nearest int
if err := rapl.Cap(host, "rapl", float64(int(math.Floor(bpMaxMinProacCCCapValue+0.5)))); err != nil {
log.Println(err)
@ -190,7 +190,7 @@ func (s *BPSWMaxMinProacCC) startRecapping() {
bpMaxMinProacCCMutex.Lock()
// If stopped performing cluster-wide capping, then we need to recap.
if s.isRecapping && bpMaxMinProacCCRecapValue > 0.0 {
- for _, host := range constants.Hosts {
+ for host, _ := range constants.Hosts {
// Rounding the recap value to the nearest int
if err := rapl.Cap(host, "rapl", float64(int(math.Floor(bpMaxMinProacCCRecapValue+0.5)))); err != nil {
log.Println(err)
@ -300,6 +300,7 @@ func (s *BPSWMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers
// retrieving the available power for all the hosts in the offers.
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
_, _, offerWatts := offerUtils.OfferAgg(offer)
s.availablePower[*offer.Hostname] = offerWatts
// setting total power if the first time
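The range loops over constants.Hosts above switch from iterating slice elements to iterating map keys, which implies (an inference from this diff; the constants package itself is not shown) that Hosts is now a map keyed by hostname. Ranging over map keys in Go looks like this; note that the explicit blank identifier kept in the diff is redundant, and `for host := range hosts` is the more idiomatic spelling:

package main

import "fmt"

func main() {
	// Assumed shape: a set of hostnames keyed by name (the value type is illustrative).
	hosts := map[string]struct{}{
		"host1": {},
		"host2": {},
	}

	// Equivalent to the diff's `for host, _ := range constants.Hosts`.
	for host := range hosts {
		fmt.Println("capping", host)
	}
}

Map iteration order is not deterministic, which is fine here since every host gets capped either way.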


@ -119,6 +119,7 @@ func (s *FirstFit) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.
log.Printf("Received %d resource offers", len(offers))
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
select {
case <-s.Shutdown:
log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")


@ -164,7 +164,7 @@ func (s *FirstFitProacCC) startCapping() {
// Need to cap the cluster to the fcfsCurrentCapValue.
fcfsMutex.Lock()
if fcfsCurrentCapValue > 0.0 {
- for _, host := range constants.Hosts {
+ for host, _ := range constants.Hosts {
// Rounding curreCapValue to the nearest int.
if err := rapl.Cap(host, "rapl", float64(int(math.Floor(fcfsCurrentCapValue+0.5)))); err != nil {
log.Println(err)
@ -188,7 +188,7 @@ func (s *FirstFitProacCC) startRecapping() {
fcfsMutex.Lock()
// If stopped performing cluster wide capping then we need to explicitly cap the entire cluster.
if s.isRecapping && fcfsRecapValue > 0.0 {
- for _, host := range constants.Hosts {
+ for host, _ := range constants.Hosts {
// Rounding curreCapValue to the nearest int.
if err := rapl.Cap(host, "rapl", float64(int(math.Floor(fcfsRecapValue+0.5)))); err != nil {
log.Println(err)
@ -233,6 +233,7 @@ func (s *FirstFitProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []
// retrieving the available power for all the hosts in the offers.
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
_, _, offer_watts := offerUtils.OfferAgg(offer)
s.availablePower[*offer.Hostname] = offer_watts
// setting total power if the first time.
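The rapl.Cap calls in the capping loops round the cap value to the nearest integer via float64(int(math.Floor(value+0.5))). That is the portable spelling for this codebase: math.Round only exists from Go 1.10 onward, and the values being capped are non-negative, so adding 0.5 and flooring rounds half up as intended. A small worked illustration:

package main

import (
	"fmt"
	"math"
)

func main() {
	caps := []float64{93.4, 93.5, 94.0}
	for _, c := range caps {
		// Round to the nearest integer without math.Round (Go 1.10+ only).
		rounded := float64(int(math.Floor(c + 0.5)))
		fmt.Printf("cap value %.1f is sent to RAPL as %.0f\n", c, rounded)
	}
}

Here 93.4 rounds to 93, while 93.5 and 94.0 both come out as 94.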


@ -126,6 +126,7 @@ func (s *FirstFitSortedOffers) ResourceOffers(driver sched.SchedulerDriver, offe
log.Println("Sorted Offers:")
for i := 0; i < len(offers); i++ {
offer := offers[i]
+ offerUtils.UpdateEnvironment(offer)
offerCPU, _, _ := offerUtils.OfferAgg(offer)
log.Printf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU)
}


@ -177,7 +177,7 @@ func (s *FirstFitSortedWattsProacCC) startCapping() {
// Need to cap the cluster to the rankedCurrentCapValue.
rankedMutex.Lock()
if rankedCurrentCapValue > 0.0 {
- for _, host := range constants.Hosts {
+ for host, _ := range constants.Hosts {
// Rounding currentCapValue to the nearest int.
if err := rapl.Cap(host, "rapl", float64(int(math.Floor(rankedCurrentCapValue+0.5)))); err != nil {
log.Println(err)
@ -201,7 +201,7 @@ func (s *FirstFitSortedWattsProacCC) startRecapping() {
rankedMutex.Lock()
// If stopped performing cluster wide capping then we need to explicitly cap the entire cluster.
if s.isRecapping && rankedRecapValue > 0.0 {
- for _, host := range constants.Hosts {
+ for host, _ := range constants.Hosts {
// Rounding currentCapValue to the nearest int.
if err := rapl.Cap(host, "rapl", float64(int(math.Floor(rankedRecapValue+0.5)))); err != nil {
log.Println(err)
@ -246,6 +246,7 @@ func (s *FirstFitSortedWattsProacCC) ResourceOffers(driver sched.SchedulerDriver
// retrieving the available power for all the hosts in the offers.
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
_, _, offer_watts := offerUtils.OfferAgg(offer)
s.availablePower[*offer.Hostname] = offer_watts
// setting total power if the first time.


@ -128,6 +128,7 @@ func (s *FirstFitSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerD
log.Println("Sorted Offers:")
for i := 0; i < len(offers); i++ {
offer := offers[i]
+ offerUtils.UpdateEnvironment(offer)
offerCPU, _, _ := offerUtils.OfferAgg(offer)
log.Printf("Offer[%s].CPU = %f\n", offer.GetHostname(), offerCPU)
}


@ -122,6 +122,7 @@ func (s *FirstFitSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offer
log.Printf("Received %d resource offers", len(offers))
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
select {
case <-s.Shutdown:
log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")


@ -112,6 +112,7 @@ func (s *FirstFitWattsOnly) ResourceOffers(driver sched.SchedulerDriver, offers
log.Printf("Received %d resource offers", len(offers))
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
select {
case <-s.Shutdown:
log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")


@ -18,7 +18,7 @@ func coLocated(tasks map[string]bool) {
// Get the powerClass of the given hostname
func hostToPowerClass(hostName string) string {
for powerClass, hosts := range constants.PowerClasses {
- if ok := hosts[hostName]; ok {
+ if _, ok := hosts[hostName]; ok {
return powerClass
}
}
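The one-line change in hostToPowerClass is forced by the map's value type: `if ok := hosts[hostName]; ok` only compiles when the values are bool, while the comma-ok form tests key presence regardless of value type. A standalone sketch of the same reverse lookup over a set-valued map; the value type is assumed, since the diff only shows the membership test:

package main

import "fmt"

// powerClasses maps a class label to the set of hostnames in that class.
var powerClasses = map[string]map[string]struct{}{
	"A": {"host1": {}},
	"C": {"host2": {}, "host3": {}},
}

// hostToPowerClass mirrors the helper above: return the class whose host set
// contains hostName, or "" when the host is unknown (the fallthrough return
// is not visible in this hunk and is assumed here).
func hostToPowerClass(hostName string) string {
	for powerClass, hosts := range powerClasses {
		if _, ok := hosts[hostName]; ok {
			return powerClass
		}
	}
	return ""
}

func main() {
	fmt.Println(hostToPowerClass("host2")) // C
	fmt.Println(hostToPowerClass("hostX")) // empty string: unknown host
}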


@ -18,8 +18,8 @@ import (
/*
Tasks are categorized into small and large tasks based on the watts requirement.
- All the large tasks are packed into offers from agents belonging to power class A and power class B, using BinPacking.
- All the small tasks are spread among the offers from agents belonging to power class C and power class D, using FirstFit.
+ All the small tasks are packed into offers from agents belonging to power class C and power class D, using BinPacking.
+ All the large tasks are spread among the offers from agents belonging to power class A and power class B, using FirstFit.
This was done to give a little more room for the large tasks (power intensive) for execution and reduce the possibility of
starvation of power intensive tasks.
@ -296,6 +296,7 @@ func (s *TopHeavy) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.
offersLightPowerClasses := []*mesos.Offer{}
for _, offer := range offers {
+ offerUtils.UpdateEnvironment(offer)
select {
case <-s.Shutdown:
log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
@ -306,11 +307,16 @@ func (s *TopHeavy) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.
default:
}
- if constants.PowerClasses["A"][*offer.Hostname] ||
- constants.PowerClasses["B"][*offer.Hostname] {
+ if _, ok := constants.PowerClasses["A"][*offer.Hostname]; ok {
offersHeavyPowerClasses = append(offersHeavyPowerClasses, offer)
- } else if constants.PowerClasses["C"][*offer.Hostname] ||
- constants.PowerClasses["D"][*offer.Hostname] {
+ }
+ if _, ok := constants.PowerClasses["B"][*offer.Hostname]; ok {
+ offersHeavyPowerClasses = append(offersHeavyPowerClasses, offer)
+ }
+ if _, ok := constants.PowerClasses["C"][*offer.Hostname]; ok {
offersLightPowerClasses = append(offersLightPowerClasses, offer)
}
+ if _, ok := constants.PowerClasses["D"][*offer.Hostname]; ok {
+ offersLightPowerClasses = append(offersLightPowerClasses, offer)
+ }
}
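TopHeavy mirrors BottomHeavy with the roles reversed: the class C/D offers receive the bin-packed small tasks and the class A/B offers receive the first-fit large tasks, which is what the corrected comment above now says. The four independent if-blocks could equally be written as a single switch over the host's power class; a hedged sketch of that alternative bucketing, with illustrative data and assuming a hostname-to-class lookup like the hostToPowerClass helper shown earlier:

package main

import "fmt"

func main() {
	// hostname -> power class, illustrative only.
	offerClasses := map[string]string{
		"host1": "A", "host2": "B", "host3": "C", "host4": "D",
	}

	var heavy, light []string
	for host, class := range offerClasses {
		switch class {
		case "A", "B":
			heavy = append(heavy, host)
		case "C", "D":
			light = append(light, host)
		}
	}

	// TopHeavy: spread large tasks over the heavy (A/B) offers,
	// pack small tasks onto the light (C/D) offers.
	fmt.Println("heavy power-class offers:", heavy)
	fmt.Println("light power-class offers:", light)
}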