From 395917a97e2d546ebb9e88fbb345dce90629a70d Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Sun, 5 Feb 2017 01:08:48 -0500
Subject: [PATCH 01/15] changed pcplog-and-extrema back to plain pcplog

---
 scheduler.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/scheduler.go b/scheduler.go
index a6b11de..8512315 100644
--- a/scheduler.go
+++ b/scheduler.go
@@ -72,8 +72,8 @@ func main() {
 		return
 	}
 
-	//go pcp.Start(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix)
-	go pcp.StartPCPLogAndExtremaDynamicCap(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix, *hiThreshold, *loThreshold)
+	go pcp.Start(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix)
+	//go pcp.StartPCPLogAndExtremaDynamicCap(scheduler.PCPLog, &scheduler.RecordPCP, logPrefix, *hiThreshold, *loThreshold)
 	time.Sleep(1 * time.Second) // Take a second between starting PCP log and continuing
 
 	// Attempt to handle signint to not leave pmdumptext running

From b838c53c6d57896b75f46c41be2ce17c0f62874a Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Sun, 5 Feb 2017 14:49:07 -0500
Subject: [PATCH 02/15] added CPUSorter to def/task.go so tasks can be sorted by
 CPU requirement.

---
 def/task.go | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/def/task.go b/def/task.go
index 62ab993..ce59cd4 100644
--- a/def/task.go
+++ b/def/task.go
@@ -65,6 +65,7 @@ func (tsk *Task) SetTaskID(taskID string) bool {
 	}
 }
 
+// WattsSorter implements sort.Interface to sort tasks by Watts requirement.
 type WattsSorter []Task
 
 func (slice WattsSorter) Len() int {
@@ -79,6 +80,21 @@ func (slice WattsSorter) Swap(i, j int) {
 	slice[i], slice[j] = slice[j], slice[i]
 }
 
+// CPUSorter implements sort.Interface to sort tasks by CPU requirement.
+type CPUSorter []Task
+
+func (slice CPUSorter) Len() int {
+	return len(slice)
+}
+
+func (slice CPUSorter) Less(i, j int) bool {
+	return slice[i].CPU < slice[j].CPU
+}
+
+func (slice CPUSorter) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
 // Compare two tasks.
 func Compare(task1 *Task, task2 *Task) bool {
 	// If comparing the same pointers (checking the addresses).

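CPUSorter, like WattsSorter, satisfies sort.Interface through its Len/Less/Swap methods, so the standard library's sort.Sort can order a workload by ascending CPU requirement. A minimal usage sketch (the task values below are hypothetical; only the Name and CPU fields of def.Task are used):

	package main

	import (
		"fmt"
		"sort"

		"bitbucket.org/sunybingcloud/electron/def"
	)

	func main() {
		// Hypothetical workload; only the CPU field matters for this ordering.
		tasks := []def.Task{
			{Name: "minife", CPU: 3.0},
			{Name: "dgemm", CPU: 1.0},
			{Name: "stream", CPU: 2.0},
		}
		// CPUSorter implements sort.Interface, so sort.Sort leaves the slice
		// in ascending order of CPU requirement.
		sort.Sort(def.CPUSorter(tasks))
		for _, t := range tasks {
			fmt.Println(t.Name, t.CPU)
		}
	}
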
From 0d79079bf64bb878df802e676da2a21e8b1b375d Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Sun, 5 Feb 2017 14:59:34 -0500
Subject: [PATCH 03/15] changed scheduler to binPacked

---
 scheduler.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scheduler.go b/scheduler.go
index 8512315..0d1c325 100644
--- a/scheduler.go
+++ b/scheduler.go
@@ -58,7 +58,7 @@ func main() {
 	startTime := time.Now().Format("20060102150405")
 	logPrefix := *pcplogPrefix + "_" + startTime
 
-	scheduler := schedulers.NewBinPackSortedWatts(tasks, *ignoreWatts, logPrefix)
+	scheduler := schedulers.NewBinPacked(tasks, *ignoreWatts, logPrefix)
 	driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{
 		Master: *master,
 		Framework: &mesos.FrameworkInfo{

From a2b50dd313437403d3719282db0597a9b2e89c8e Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Wed, 8 Feb 2017 14:58:57 -0500
Subject: [PATCH 04/15] Added TODO for making def.Task an interface where we
 can define different types of Task configuration. This increases Electron's
 flexibility in the workload format

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 769efaa..9153432 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,7 @@ To Do:
     longer any tasks to schedule.
  * Have a centralised logFile that can be filtered by identifier. All electron logs should go into this file.
  * Make ClassMapWatts to commandLine arguments so Electron can be run with ClassMapWatts enabled/disabled.
+ * Make def.Task an interface for further modularization and flexibility.
 
 
 **Requires [Performance Co-Pilot](http://pcp.io/) tool pmdumptext to be installed on the

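The TODO added above only records the idea; the refactor itself is not part of this series. A purely hypothetical sketch of the direction it describes (the name TaskConfig and its method set are illustrative and do not exist in the codebase):

	// Hypothetical sketch only: one possible shape for a task interface in
	// package def, letting different task configuration formats sit behind
	// a common method set. The current def.Task struct would be one
	// implementation of it.
	package def

	type TaskConfig interface {
		Name() string
		CPU() float64
		RAM() float64
		Watts() float64
		Instances() *int
	}
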
From fdcb401447ac837629ed831316106307c69db7b5 Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Thu, 9 Feb 2017 18:05:38 -0500
Subject: [PATCH 05/15] Made classMapWatts a commandLine option where one can
 enable and disable mapping of watts to powerclasses when accepting offers
 from Mesos. Removed the schedulers that were created solely for the
 classMapWatts feature. Retrofitted all schedulers to use the powerClass
 mapped watts attribute for a task, if classMapWatts has been enabled. Removed
 unnecessary functions and variables from constants.go. Removed unnecessary
 functions from utilities/utils.go. Fixed operator precedence issue with
 takeOffer(...) in some of the schedulers. Added TODO to decouple capping
 strategies from the schedulers completely. Added TODO to move all the common
 struct attributes in the schedulers into base.go.

---
 README.md                                     |   1 -
 constants/constants.go                        |  72 +--
 def/task.go                                   |  29 ++
 powerCapping/proactiveclusterwidecappers.go   |   5 +-
 scheduler.go                                  |   4 +-
 schedulers/README.md                          |   8 +-
 schedulers/binPackSortedWattsSortedOffers.go  |  61 ++-
 schedulers/binpackedpistoncapping.go          |  97 +++--
 schedulers/binpacksortedwatts.go              |  60 ++-
 schedulers/bottomHeavy.go                     |  76 ++--
 schedulers/bpswClassMapWatts.go               | 231 ----------
 schedulers/bpswClassMapWattsPistonCapping.go  | 391 -----------------
 schedulers/bpswClassMapWattsProacCC.go        | 410 ------------------
 schedulers/{bpMaxMin.go => bpswMaxMin.go}     |  88 ++--
 ...nCapping.go => bpswMaxMinPistonCapping.go} | 124 ++++--
 ...pMaxMinProacCC.go => bpswMaxMinProacCC.go} |  65 ++-
 schedulers/firstfit.go                        |  49 ++-
 ...rwidecappingfcfs.go => firstfitProacCC.go} |  44 +-
 schedulers/firstfitSortedOffers.go            |  49 ++-
 .../firstfitSortedWattsClassMapWatts.go       | 203 ---------
 ...firstfitSortedWattsClassMapWattsProacCC.go | 386 -----------------
 ...anked.go => firstfitSortedWattsProacCC.go} |  67 +--
 schedulers/firstfitSortedWattsSortedOffers.go |  50 ++-
 schedulers/firstfitsortedwatts.go             |  49 ++-
 schedulers/firstfitwattsonly.go               |  50 ++-
 schedulers/helpers.go                         |  26 +-
 schedulers/topHeavy.go                        |  73 ++--
 utilities/utils.go                            |  12 +-
 28 files changed, 686 insertions(+), 2094 deletions(-)
 delete mode 100644 schedulers/bpswClassMapWatts.go
 delete mode 100644 schedulers/bpswClassMapWattsPistonCapping.go
 delete mode 100644 schedulers/bpswClassMapWattsProacCC.go
 rename schedulers/{bpMaxMin.go => bpswMaxMin.go} (71%)
 rename schedulers/{bpMaxMinPistonCapping.go => bpswMaxMinPistonCapping.go} (74%)
 rename schedulers/{bpMaxMinProacCC.go => bpswMaxMinProacCC.go} (84%)
 rename schedulers/{proactiveclusterwidecappingfcfs.go => firstfitProacCC.go} (88%)
 delete mode 100644 schedulers/firstfitSortedWattsClassMapWatts.go
 delete mode 100644 schedulers/firstfitSortedWattsClassMapWattsProacCC.go
 rename schedulers/{proactiveclusterwidecappingranked.go => firstfitSortedWattsProacCC.go} (86%)

diff --git a/README.md b/README.md
index 9153432..96226e8 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,6 @@ To Do:
  * Make ClassMapWatts to commandLine arguments so Electron can be run with ClassMapWatts enabled/disabled.
  * Make def.Task an interface for further modularization and flexibility.
 
-
 **Requires [Performance Co-Pilot](http://pcp.io/) tool pmdumptext to be installed on the
 machine on which electron is launched for logging to work and PCP collector agents installed
 on the Mesos Agents**
diff --git a/constants/constants.go b/constants/constants.go
index 045c1a2..834d65f 100644
--- a/constants/constants.go
+++ b/constants/constants.go
@@ -1,11 +1,8 @@
 /*
 Constants that are used across scripts
 1. The available hosts = stratos-00x (x varies from 1 to 8)
-2. cap_margin = percentage of the requested power to allocate
-3. power_threshold = overloading factor
-5. window_size = number of tasks to consider for computation of the dynamic cap.
-
-Also, exposing functions to update or initialize some of the constants.
+2. CapMargin = percentage of the requested power to allocate
+3. ConsiderationWindowSize = number of tasks to consider for computation of the dynamic cap.
 */
 package constants
 
@@ -16,15 +13,15 @@ var Hosts = []string{"stratos-001.cs.binghamton.edu", "stratos-002.cs.binghamton
 
 // Classification of the nodes in the cluster based on their power consumption.
 var PowerClasses = map[string]map[string]bool{
-	"ClassA": map[string]bool{
+	"A": map[string]bool{
 		"stratos-005.cs.binghamton.edu": true,
 		"stratos-006.cs.binghamton.edu": true,
 	},
-	"ClassB": map[string]bool{
+	"B": map[string]bool{
 		"stratos-007.cs.binghamton.edu": true,
 		"stratos-008.cs.binghamton.edu": true,
 	},
-	"ClassC": map[string]bool{
+	"C": map[string]bool{
 		"stratos-001.cs.binghamton.edu": true,
 		"stratos-002.cs.binghamton.edu": true,
 		"stratos-003.cs.binghamton.edu": true,
@@ -32,69 +29,12 @@ var PowerClasses = map[string]map[string]bool{
 	},
 }
 
-// Add a new host to the slice of hosts.
-func AddNewHost(newHost string) bool {
-	// Validation
-	if newHost == "" {
-		return false
-	} else {
-		Hosts = append(Hosts, newHost)
-		return true
-	}
-}
-
-/*
-	Lower bound of the percentage of requested power, that can be allocated to a task.
-
-	Note: This constant is not used for the proactive cluster wide capping schemes.
-*/
-var PowerThreshold = 0.6 // Right now saying that a task will never be given lesser than 60% of the power it requested.
-
 /*
   Margin with respect to the required power for a job.
-  So, if power required = 10W, the node would be capped to 75%*10W.
+  So, if power required = 10W, the node would be capped to CapMargin * 10W.
   This value can be changed upon convenience.
 */
 var CapMargin = 0.70
 
-// Modify the cap margin.
-func UpdateCapMargin(newCapMargin float64) bool {
-	// Checking if the new_cap_margin is less than the power threshold.
-	if newCapMargin < StarvationFactor {
-		return false
-	} else {
-		CapMargin = newCapMargin
-		return true
-	}
-}
-
-/*
-	The factor, that when multiplied with (task.Watts * CapMargin) results in (task.Watts * PowerThreshold).
-	This is used to check whether available power, for a host in an offer, is not less than (PowerThreshold * task.Watts),
-		which is assumed to result in starvation of the task.
-	Here is an example,
-		Suppose a task requires 100W of power. Assuming CapMargin = 0.75 and PowerThreshold = 0.6.
-		So, the assumed allocated watts is 75W.
-		Now, when we get an offer, we need to check whether the available power, for the host in that offer, is
-			not less than 60% (the PowerTreshold) of the requested power (100W).
-		To put it in other words,
-			availablePower >= 100W * 0.75 * X
-		where X is the StarvationFactor (80% in this case)
-
-	Note: This constant is not used for the proactive cluster wide capping schemes.
-*/
-var StarvationFactor = PowerThreshold / CapMargin
-
 // Window size for running average
 var ConsiderationWindowSize = 20
-
-// Update the window size.
-func UpdateWindowSize(newWindowSize int) bool {
-	// Validation
-	if newWindowSize == 0 {
-		return false
-	} else {
-		ConsiderationWindowSize = newWindowSize
-		return true
-	}
-}
diff --git a/def/task.go b/def/task.go
index ce59cd4..1b4af97 100644
--- a/def/task.go
+++ b/def/task.go
@@ -2,7 +2,9 @@ package def
 
 import (
 	"bitbucket.org/sunybingcloud/electron/constants"
+	"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
 	"encoding/json"
+	mesos "github.com/mesos/mesos-go/mesosproto"
 	"github.com/pkg/errors"
 	"os"
 )
@@ -65,6 +67,33 @@ func (tsk *Task) SetTaskID(taskID string) bool {
 	}
 }
 
+/*
+ Determine the watts value to consider for each task.
+
+ This value could either be task.Watts or task.ClassToWatts[<power class>]
+ If task.ClassToWatts is not present, then return task.Watts (this would be for workloads which don't have classMapWatts)
+*/
+func WattsToConsider(task Task, classMapWatts bool, offer *mesos.Offer) (float64, error) {
+	if classMapWatts {
+		// checking if ClassToWatts was present in the workload.
+		if task.ClassToWatts != nil {
+			return task.ClassToWatts[offerUtils.PowerClass(offer)], nil
+		} else {
+			// Checking whether task.Watts is 0.0. If yes, then throwing an error.
+			if task.Watts == 0.0 {
+				return task.Watts, errors.New("Configuration error in task. Watts attribute is 0 for " + task.Name)
+			}
+			return task.Watts, nil
+		}
+	} else {
+		// Checking whether task.Watts is 0.0. If yes, then throwing an error.
+		if task.Watts == 0.0 {
+			return task.Watts, errors.New("Configuration error in task. Watts attribute is 0 for " + task.Name)
+		}
+		return task.Watts, nil
+	}
+}
+
 // WattsSorter implements sort.Interface to sort tasks by Watts requirement.
 type WattsSorter []Task
 
diff --git a/powerCapping/proactiveclusterwidecappers.go b/powerCapping/proactiveclusterwidecappers.go
index b3cc84a..d1f8009 100644
--- a/powerCapping/proactiveclusterwidecappers.go
+++ b/powerCapping/proactiveclusterwidecappers.go
@@ -2,6 +2,8 @@
 Cluster wide dynamic capping
 
 This is a capping strategy that can be used with schedulers to improve the power consumption.
+
+Note: This capping strategy does not currently consider the task.Watts to power class mapping when classMapWatts is enabled.
 */
 package powerCapping
 
@@ -244,8 +246,7 @@ func (capper ClusterwideCapper) TaskFinished(taskID string) {
 }
 
 // First come first serve scheduling.
-func (capper ClusterwideCapper) FCFSDeterminedCap(totalPower map[string]float64,
-	newTask *def.Task) (float64, error) {
+func (capper ClusterwideCapper) FCFSDeterminedCap(totalPower map[string]float64, newTask *def.Task) (float64, error) {
 	// Validation
 	if totalPower == nil {
 		return 100, errors.New("Invalid argument: totalPower")
diff --git a/scheduler.go b/scheduler.go
index 0d1c325..82d7dba 100644
--- a/scheduler.go
+++ b/scheduler.go
@@ -21,6 +21,7 @@ var ignoreWatts = flag.Bool("ignoreWatts", false, "Ignore watts in offers")
 var pcplogPrefix = flag.String("logPrefix", "", "Prefix for pcplog")
 var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for when we should start capping")
 var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for when we should start uncapping")
+var classMapWatts = flag.Bool("classMapWatts", false, "Enable mapping of watts to power class of node")
 
 // Short hand args
 func init() {
@@ -30,6 +31,7 @@ func init() {
 	flag.StringVar(pcplogPrefix, "p", "", "Prefix for pcplog (shorthand)")
 	flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for when we should start capping (shorthand)")
 	flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for when we should start uncapping (shorthand)")
+	flag.BoolVar(classMapWatts, "cmw", false, "Enable mapping of watts to power class of node")
 }
 
 func main() {
@@ -58,7 +60,7 @@ func main() {
 	startTime := time.Now().Format("20060102150405")
 	logPrefix := *pcplogPrefix + "_" + startTime
 
-	scheduler := schedulers.NewBinPacked(tasks, *ignoreWatts, logPrefix)
+	scheduler := schedulers.NewBinPackedPistonCapper(tasks, *ignoreWatts, logPrefix, *classMapWatts)
 	driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{
 		Master: *master,
 		Framework: &mesos.FrameworkInfo{
diff --git a/schedulers/README.md b/schedulers/README.md
index 275798b..cec4efc 100644
--- a/schedulers/README.md
+++ b/schedulers/README.md
@@ -5,18 +5,16 @@ To Do:
 
  * Design changes -- Possible to have one scheduler with different scheduling schemes?
  * Fix the race condition on 'tasksRunning' in proactiveclusterwidecappingfcfs.go and proactiveclusterwidecappingranked.go
- * Separate the capping strategies from the scheduling algorithms and make it possible to use any capping strategy with any scheduler.
- * Make newTask(...) variadic where the newTaskClass argument can either be given or not. If not give, then pick task.Watts as the watts attribute, else pick task.ClassToWatts[newTaskClass].
- * Retrofit pcp/proactiveclusterwidecappers.go to include the power capping go routines and to cap only when necessary.
+ * **Critical**: Separate the capping strategies from the scheduling algorithms and make it possible to use any capping strategy with any scheduler.
  * Create a package that would contain routines to perform various logging and move helpers.coLocated(...) into that.
- * Retrofit schedulers to be able to run either using ClassMapWatts enabled or disabled.
+ * Move all the common struct members from all schedulers into base.go.
 
 Scheduling Algorithms:
 
  * First Fit
  * First Fit with sorted watts
  * Bin-packing with sorted watts
- * ClassMapWatts -- Bin-packing and First Fit that now use Watts per power class.
+ * BinPacked-MaxMin -- Packing one large task with a bunch of small tasks.
  * Top Heavy -- Hybrid scheduler that packs small tasks (less power intensive) using Bin-packing and spreads large tasks (power intensive) using First Fit.
  * Bottom Heavy -- Hybrid scheduler that packs large tasks (power intensive) using Bin-packing and spreads small tasks (less power intensive) using First Fit. 
  
diff --git a/schedulers/binPackSortedWattsSortedOffers.go b/schedulers/binPackSortedWattsSortedOffers.go
index 1cf2191..0eae312 100644
--- a/schedulers/binPackSortedWattsSortedOffers.go
+++ b/schedulers/binPackSortedWattsSortedOffers.go
@@ -17,13 +17,18 @@ import (
 )
 
 // Decides if to take an offer or not
-func (*BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool {
+func (s *BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool {
 
 	cpus, mem, watts := offerUtils.OfferAgg(offer)
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
 
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) {
 		return true
 	}
 
@@ -31,13 +36,14 @@ func (*BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.Ta
 }
 
 type BinPackSortedWattsSortedOffers struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	ignoreWatts  bool
+	base          // Type embedded to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	ignoreWatts   bool
+	classMapWatts bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -57,7 +63,8 @@ type BinPackSortedWattsSortedOffers struct {
 }
 
 // New electron scheduler
-func NewBinPackSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BinPackSortedWattsSortedOffers {
+func NewBinPackSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+	classMapWatts bool) *BinPackSortedWattsSortedOffers {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -66,14 +73,15 @@ func NewBinPackSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, sched
 	}
 
 	s := &BinPackSortedWattsSortedOffers{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		RecordPCP:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -102,7 +110,13 @@ func (s *BinPackSortedWattsSortedOffers) newTask(offer *mesos.Offer, task def.Ta
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
@@ -160,6 +174,11 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr
 		totalRAM := 0.0
 		for i := 0; i < len(s.tasks); i++ {
 			task := s.tasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
 
 			// Check host if it exists
 			if task.Host != "" {
@@ -171,12 +190,12 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr
 
 			for *task.Instances > 0 {
 				// Does the task fit
-				if (s.ignoreWatts || offer_watts >= (totalWatts+task.Watts)) &&
+				if (s.ignoreWatts || (offer_watts >= (totalWatts + wattsConsideration))) &&
 					(offer_cpu >= (totalCPU + task.CPU)) &&
 					(offer_ram >= (totalRAM + task.RAM)) {
 
 					offerTaken = true
-					totalWatts += task.Watts
+					totalWatts += wattsConsideration
 					totalCPU += task.CPU
 					totalRAM += task.RAM
 					log.Println("Co-Located with: ")
diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go
index 7f413f9..b77a89e 100644
--- a/schedulers/binpackedpistoncapping.go
+++ b/schedulers/binpackedpistoncapping.go
@@ -27,17 +27,18 @@ import (
   corresponding to the load on that node.
 */
 type BinPackedPistonCapper struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	taskMonitor  map[string][]def.Task
-	totalPower   map[string]float64
-	ignoreWatts  bool
-	ticker       *time.Ticker
-	isCapping    bool
+	base          // Type embedded to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	taskMonitor   map[string][]def.Task
+	totalPower    map[string]float64
+	ignoreWatts   bool
+	classMapWatts bool
+	ticker        *time.Ticker
+	isCapping     bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule the new task.
@@ -58,7 +59,8 @@ type BinPackedPistonCapper struct {
 }
 
 // New electron scheduler.
-func NewBinPackedPistonCapper(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BinPackedPistonCapper {
+func NewBinPackedPistonCapper(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+	classMapWatts bool) *BinPackedPistonCapper {
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
 	if err != nil {
@@ -66,26 +68,32 @@ func NewBinPackedPistonCapper(tasks []def.Task, ignoreWatts bool, schedTracePref
 	}
 
 	s := &BinPackedPistonCapper{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		taskMonitor: make(map[string][]def.Task),
-		totalPower:  make(map[string]float64),
-		RecordPCP:   false,
-		ticker:      time.NewTicker(5 * time.Second),
-		isCapping:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		taskMonitor:   make(map[string][]def.Task),
+		totalPower:    make(map[string]float64),
+		RecordPCP:     false,
+		ticker:        time.NewTicker(5 * time.Second),
+		isCapping:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
 
 // check whether task fits the offer or not.
-func (s *BinPackedPistonCapper) takeOffer(offerWatts float64, offerCPU float64, offerRAM float64,
+func (s *BinPackedPistonCapper) takeOffer(offer *mesos.Offer, offerWatts float64, offerCPU float64, offerRAM float64,
 	totalWatts float64, totalCPU float64, totalRAM float64, task def.Task) bool {
-	if (s.ignoreWatts || (offerWatts >= (totalWatts + task.Watts))) &&
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsToConsider
+		log.Fatal(err)
+	}
+	if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsConsideration))) &&
 		(offerCPU >= (totalCPU + task.CPU)) &&
 		(offerRAM >= (totalRAM + task.RAM)) {
 		return true
@@ -130,7 +138,13 @@ func (s *BinPackedPistonCapper) newTask(offer *mesos.Offer, task def.Task) *meso
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
@@ -183,7 +197,8 @@ func (s *BinPackedPistonCapper) startCapping() {
 							if err := rapl.Cap(host, "rapl", roundedCapValue); err != nil {
 								log.Println(err)
 							} else {
-								log.Printf("Capped [%s] at %d", host, int(math.Floor(capValue+0.5)))
+								log.Printf("Capped [%s] at %d", host,
+									int(math.Floor(capValue+0.5)))
 							}
 							bpPistonPreviousRoundedCapValues[host] = roundedCapValue
 						}
@@ -219,8 +234,8 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 	// retrieving the total power for each host in the offers
 	for _, offer := range offers {
 		if _, ok := s.totalPower[*offer.Hostname]; !ok {
-			_, _, offer_watts := offerUtils.OfferAgg(offer)
-			s.totalPower[*offer.Hostname] = offer_watts
+			_, _, offerWatts := offerUtils.OfferAgg(offer)
+			s.totalPower[*offer.Hostname] = offerWatts
 		}
 	}
 
@@ -258,6 +273,12 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 		partialLoad := 0.0
 		for i := 0; i < len(s.tasks); i++ {
 			task := s.tasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
+
 			// Check host if it exists
 			if task.Host != "" {
 				// Don't take offer if it doens't match our task's host requirement.
@@ -268,7 +289,8 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 
 			for *task.Instances > 0 {
 				// Does the task fit
-				if s.takeOffer(offerWatts, offerCPU, offerRAM, totalWatts, totalCPU, totalRAM, task) {
+				if s.takeOffer(offer, offerWatts, offerCPU, offerRAM,
+					totalWatts, totalCPU, totalRAM, task) {
 
 					// Start piston capping if haven't started yet
 					if !s.isCapping {
@@ -277,7 +299,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 					}
 
 					offerTaken = true
-					totalWatts += task.Watts
+					totalWatts += wattsConsideration
 					totalCPU += task.CPU
 					totalRAM += task.RAM
 					log.Println("Co-Located with: ")
@@ -289,7 +311,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 					s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
 					*task.Instances--
 					// updating the cap value for offer.Hostname
-					partialLoad += ((task.Watts * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
+					partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
 
 					if *task.Instances <= 0 {
 						// All instances of task have been scheduled. Remove it
@@ -370,9 +392,16 @@ func (s *BinPackedPistonCapper) StatusUpdate(driver sched.SchedulerDriver, statu
 			log.Println(err)
 		}
 
+		// Need to determine the watts consideration for the finishedTask
+		var wattsConsideration float64
+		if s.classMapWatts {
+			wattsConsideration = finishedTask.ClassToWatts[hostToPowerClass(hostOfFinishedTask)]
+		} else {
+			wattsConsideration = finishedTask.Watts
+		}
 		// Need to update the cap values for host of the finishedTask
 		bpPistonMutex.Lock()
-		bpPistonCapValues[hostOfFinishedTask] -= ((finishedTask.Watts * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
+		bpPistonCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
 		// Checking to see if the cap value has become 0, in which case we uncap the host.
 		if int(math.Floor(bpPistonCapValues[hostOfFinishedTask]+0.5)) == 0 {
 			bpPistonCapValues[hostOfFinishedTask] = 100
diff --git a/schedulers/binpacksortedwatts.go b/schedulers/binpacksortedwatts.go
index cf8162f..215341a 100644
--- a/schedulers/binpacksortedwatts.go
+++ b/schedulers/binpacksortedwatts.go
@@ -17,13 +17,18 @@ import (
 )
 
 // Decides if to take an offer or not
-func (*BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
+func (s *BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
 
 	cpus, mem, watts := offerUtils.OfferAgg(offer)
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
 
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if cpus >= task.CPU && mem >= task.RAM && watts >= wattsConsideration {
 		return true
 	}
 
@@ -31,13 +36,14 @@ func (*BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
 }
 
 type BinPackSortedWatts struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	ignoreWatts  bool
+	base          // Type embedded to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	ignoreWatts   bool
+	classMapWatts bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -57,7 +63,7 @@ type BinPackSortedWatts struct {
 }
 
 // New electron scheduler
-func NewBinPackSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BinPackSortedWatts {
+func NewBinPackSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BinPackSortedWatts {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -66,14 +72,15 @@ func NewBinPackSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix
 	}
 
 	s := &BinPackSortedWatts{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		RecordPCP:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -102,7 +109,13 @@ func (s *BinPackSortedWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.T
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsToConsider
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
@@ -149,6 +162,11 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers
 		totalRAM := 0.0
 		for i := 0; i < len(s.tasks); i++ {
 			task := s.tasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
 
 			// Check host if it exists
 			if task.Host != "" {
@@ -160,12 +178,12 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers
 
 			for *task.Instances > 0 {
 				// Does the task fit
-				if (s.ignoreWatts || offer_watts >= (totalWatts+task.Watts)) &&
+				if (s.ignoreWatts || (offer_watts >= (totalWatts + wattsConsideration))) &&
 					(offer_cpu >= (totalCPU + task.CPU)) &&
 					(offer_ram >= (totalRAM + task.RAM)) {
 
 					offerTaken = true
-					totalWatts += task.Watts
+					totalWatts += wattsConsideration
 					totalCPU += task.CPU
 					totalRAM += task.RAM
 					log.Println("Co-Located with: ")
diff --git a/schedulers/bottomHeavy.go b/schedulers/bottomHeavy.go
index b108827..a0bf3b4 100644
--- a/schedulers/bottomHeavy.go
+++ b/schedulers/bottomHeavy.go
@@ -35,6 +35,7 @@ type BottomHeavy struct {
 	metrics                map[string]def.Metric
 	running                map[string]map[string]bool
 	ignoreWatts            bool
+	classMapWatts          bool
 	smallTasks, largeTasks []def.Task
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
@@ -55,7 +56,7 @@ type BottomHeavy struct {
 }
 
 // New electron scheduler
-func NewBottomHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BottomHeavy {
+func NewBottomHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BottomHeavy {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -67,20 +68,21 @@ func NewBottomHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string)
 	// Classification done based on MMPU watts requirements.
 	mid := int(math.Floor((float64(len(tasks)) / 2.0) + 0.5))
 	s := &BottomHeavy{
-		smallTasks:  tasks[:mid],
-		largeTasks:  tasks[mid+1:],
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		smallTasks:    tasks[:mid],
+		largeTasks:    tasks[mid+1:],
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		RecordPCP:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
 
-func (s *BottomHeavy) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo {
+func (s *BottomHeavy) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
 	s.tasksCreated++
 
@@ -104,7 +106,13 @@ func (s *BottomHeavy) newTask(offer *mesos.Offer, task def.Task, newTaskClass st
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass]))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
@@ -136,11 +144,10 @@ func (s *BottomHeavy) shutDownIfNecessary() {
 }
 
 // create TaskInfo and log scheduling trace
-func (s *BottomHeavy) createTaskInfoAndLogSchedTrace(offer *mesos.Offer,
-	powerClass string, task def.Task) *mesos.TaskInfo {
+func (s *BottomHeavy) createTaskInfoAndLogSchedTrace(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	log.Println("Co-Located with:")
 	coLocated(s.running[offer.GetSlaveId().GoString()])
-	taskToSchedule := s.newTask(offer, task, powerClass)
+	taskToSchedule := s.newTask(offer, task)
 
 	fmt.Println("Inst: ", *task.Instances)
 	s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
@@ -169,24 +176,24 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver)
 		offerTaken := false
 		for i := 0; i < len(s.largeTasks); i++ {
 			task := s.largeTasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
 
 			for *task.Instances > 0 {
-				powerClass := offerUtils.PowerClass(offer)
 				// Does the task fit
 				// OR lazy evaluation. If ignore watts is set to true, second statement won't
 				// be evaluated.
-				wattsToConsider := task.Watts
-				if !s.ignoreWatts {
-					wattsToConsider = task.ClassToWatts[powerClass]
-				}
-				if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsToConsider))) &&
+				if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsConsideration))) &&
 					(offerCPU >= (totalCPU + task.CPU)) &&
 					(offerRAM >= (totalRAM + task.RAM)) {
 					offerTaken = true
-					totalWatts += wattsToConsider
+					totalWatts += wattsConsideration
 					totalCPU += task.CPU
 					totalRAM += task.RAM
-					tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task))
+					tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task))
 
 					if *task.Instances <= 0 {
 						// All instances of task have been scheduled, remove it
@@ -231,17 +238,20 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver
 		taken := false
 		for i := 0; i < len(s.smallTasks); i++ {
 			task := s.smallTasks[i]
-			powerClass := offerUtils.PowerClass(offer)
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			} else {
+				// Logging the watts consideration
+				log.Printf("Watts Considered for host[%s], task[%s] = %f\n", *offer.Hostname, task.Name, wattsConsideration)
+			}
 
 			// Decision to take the offer or not
-			wattsToConsider := task.Watts
-			if !s.ignoreWatts {
-				wattsToConsider = task.ClassToWatts[powerClass]
-			}
-			if (s.ignoreWatts || (offerWatts >= wattsToConsider)) &&
+			if (s.ignoreWatts || (offerWatts >= wattsConsideration)) &&
 				(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
 				taken = true
-				tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task))
+				tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task))
 				log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname())
 				driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
 
@@ -286,10 +296,10 @@ func (s *BottomHeavy) ResourceOffers(driver sched.SchedulerDriver, offers []*mes
 		default:
 		}
 
-		if constants.PowerClasses["ClassA"][*offer.Hostname] ||
-			constants.PowerClasses["ClassB"][*offer.Hostname] {
+		if constants.PowerClasses["A"][*offer.Hostname] ||
+			constants.PowerClasses["B"][*offer.Hostname] {
 			offersClassAB = append(offersClassAB, offer)
-		} else if constants.PowerClasses["ClassC"][*offer.Hostname] {
+		} else if constants.PowerClasses["C"][*offer.Hostname] {
 			offersClassC = append(offersClassC, offer)
 		}
 	}
diff --git a/schedulers/bpswClassMapWatts.go b/schedulers/bpswClassMapWatts.go
deleted file mode 100644
index 1464df8..0000000
--- a/schedulers/bpswClassMapWatts.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package schedulers
-
-import (
-	"bitbucket.org/sunybingcloud/electron/def"
-	"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
-	"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
-	"fmt"
-	"github.com/golang/protobuf/proto"
-	mesos "github.com/mesos/mesos-go/mesosproto"
-	"github.com/mesos/mesos-go/mesosutil"
-	sched "github.com/mesos/mesos-go/scheduler"
-	"log"
-	"os"
-	"sort"
-	"strings"
-	"time"
-)
-
-// Decides if to take an offer or not
-func (*BPSWClassMapWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
-
-	cpus, mem, watts := offerUtils.OfferAgg(offer)
-
-	//TODO: Insert watts calculation here instead of taking them as a parameter
-
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
-		return true
-	}
-
-	return false
-}
-
-type BPSWClassMapWatts struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	ignoreWatts  bool
-
-	// First set of PCP values are garbage values, signal to logger to start recording when we're
-	// about to schedule a new task
-	RecordPCP bool
-
-	// This channel is closed when the program receives an interrupt,
-	// signalling that the program should shut down.
-	Shutdown chan struct{}
-	// This channel is closed after shutdown is closed, and only when all
-	// outstanding tasks have been cleaned up
-	Done chan struct{}
-
-	// Controls when to shutdown pcp logging
-	PCPLog chan struct{}
-
-	schedTrace *log.Logger
-}
-
-// New electron scheduler
-func NewBPSWClassMapWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BPSWClassMapWatts {
-	sort.Sort(def.WattsSorter(tasks))
-
-	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	s := &BPSWClassMapWatts{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
-	}
-	return s
-}
-
-func (s *BPSWClassMapWatts) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo {
-	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
-	s.tasksCreated++
-
-	if !s.RecordPCP {
-		// Turn on logging
-		s.RecordPCP = true
-		time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
-	}
-
-	// If this is our first time running into this Agent
-	if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
-		s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
-	}
-
-	// Add task to list of tasks running on node
-	s.running[offer.GetSlaveId().GoString()][taskName] = true
-
-	resources := []*mesos.Resource{
-		mesosutil.NewScalarResource("cpus", task.CPU),
-		mesosutil.NewScalarResource("mem", task.RAM),
-	}
-
-	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass]))
-	}
-
-	return &mesos.TaskInfo{
-		Name: proto.String(taskName),
-		TaskId: &mesos.TaskID{
-			Value: proto.String("electron-" + taskName),
-		},
-		SlaveId:   offer.SlaveId,
-		Resources: resources,
-		Command: &mesos.CommandInfo{
-			Value: proto.String(task.CMD),
-		},
-		Container: &mesos.ContainerInfo{
-			Type: mesos.ContainerInfo_DOCKER.Enum(),
-			Docker: &mesos.ContainerInfo_DockerInfo{
-				Image:   proto.String(task.Image),
-				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
-			},
-		},
-	}
-}
-
-func (s *BPSWClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
-	log.Printf("Received %d resource offers", len(offers))
-
-	for _, offer := range offers {
-		select {
-		case <-s.Shutdown:
-			log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
-			driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
-
-			log.Println("Number of tasks still running: ", s.tasksRunning)
-			continue
-		default:
-		}
-
-		tasks := []*mesos.TaskInfo{}
-
-		offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
-
-		offerTaken := false
-		totalWatts := 0.0
-		totalCPU := 0.0
-		totalRAM := 0.0
-		for i := 0; i < len(s.tasks); i++ {
-			task := s.tasks[i]
-
-			// Check host if it exists
-			if task.Host != "" {
-				// Don't take offer if it doesn't match our task's host requirement
-				if !strings.HasPrefix(*offer.Hostname, task.Host) {
-					continue
-				}
-			}
-
-			for *task.Instances > 0 {
-				powerClass := offerUtils.PowerClass(offer)
-				// Does the task fit
-				// OR lazy evaluation. If ignore watts is set to true, second statement won't
-				// be evaluated.
-				if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) &&
-					(offerCPU >= (totalCPU + task.CPU)) &&
-					(offerRAM >= (totalRAM + task.RAM)) {
-
-					fmt.Println("Watts being used: ", task.ClassToWatts[powerClass])
-					offerTaken = true
-					totalWatts += task.ClassToWatts[powerClass]
-					totalCPU += task.CPU
-					totalRAM += task.RAM
-					log.Println("Co-Located with: ")
-					coLocated(s.running[offer.GetSlaveId().GoString()])
-					taskToSchedule := s.newTask(offer, task, powerClass)
-					tasks = append(tasks, taskToSchedule)
-
-					fmt.Println("Inst: ", *task.Instances)
-					s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
-					*task.Instances--
-
-					if *task.Instances <= 0 {
-						// All instances of task have been scheduled, remove it
-						s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
-
-						if len(s.tasks) <= 0 {
-							log.Println("Done scheduling all tasks")
-							close(s.Shutdown)
-						}
-					}
-				} else {
-					break // Continue on to next task
-				}
-			}
-		}
-
-		if offerTaken {
-			log.Printf("Starting on [%s]\n", offer.GetHostname())
-			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
-		} else {
-
-			// If there was no match for the task
-			fmt.Println("There is not enough resources to launch a task:")
-			cpus, mem, watts := offerUtils.OfferAgg(offer)
-
-			log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
-			driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
-		}
-	}
-}
-
-func (s *BPSWClassMapWatts) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
-	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
-
-	if *status.State == mesos.TaskState_TASK_RUNNING {
-		s.tasksRunning++
-	} else if IsTerminal(status.State) {
-		delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
-		s.tasksRunning--
-		if s.tasksRunning == 0 {
-			select {
-			case <-s.Shutdown:
-				close(s.Done)
-			default:
-			}
-		}
-	}
-	log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
-}
diff --git a/schedulers/bpswClassMapWattsPistonCapping.go b/schedulers/bpswClassMapWattsPistonCapping.go
deleted file mode 100644
index a80c599..0000000
--- a/schedulers/bpswClassMapWattsPistonCapping.go
+++ /dev/null
@@ -1,391 +0,0 @@
-package schedulers
-
-import (
-	"bitbucket.org/sunybingcloud/electron/constants"
-	"bitbucket.org/sunybingcloud/electron/def"
-	"bitbucket.org/sunybingcloud/electron/rapl"
-	"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
-	"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
-	"errors"
-	"fmt"
-	"github.com/golang/protobuf/proto"
-	mesos "github.com/mesos/mesos-go/mesosproto"
-	"github.com/mesos/mesos-go/mesosutil"
-	sched "github.com/mesos/mesos-go/scheduler"
-	"log"
-	"math"
-	"os"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-)
-
-// Decides if to take offer or not
-func (s *BPSWClassMapWattsPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) bool {
-	cpus, mem, watts := offerUtils.OfferAgg(offer)
-
-	//TODO: Insert watts calculation here instead of taking them as a parameter
-
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
-		return true
-	}
-
-	return false
-}
-
-type BPSWClassMapWattsPistonCapping struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	taskMonitor  map[string][]def.Task
-	totalPower   map[string]float64
-	ignoreWatts  bool
-	ticker       *time.Ticker
-	isCapping    bool
-
-	// First set of PCP values are garbage values, signal to logger to start recording when we're
-	// about to schedule the new task
-	RecordPCP bool
-
-	// This channel is closed when the program receives an interrupt,
-	// signalling that program should shutdown
-	Shutdown chan struct{}
-
-	// This channel is closed after shutdown is closed, and only when all
-	// outstanding tasks have been cleaned up
-	Done chan struct{}
-
-	// Controls when to shutdown pcp logging
-	PCPLog chan struct{}
-
-	schedTrace *log.Logger
-}
-
-// New electron scheduler
-func NewBPSWClassMapWattsPistonCapping(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BPSWClassMapWattsPistonCapping {
-	sort.Sort(def.WattsSorter(tasks))
-
-	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	s := &BPSWClassMapWattsPistonCapping{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		taskMonitor: make(map[string][]def.Task),
-		totalPower:  make(map[string]float64),
-		RecordPCP:   false,
-		ticker:      time.NewTicker(5 * time.Second),
-		isCapping:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
-	}
-	return s
-}
-
-func (s *BPSWClassMapWattsPistonCapping) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo {
-	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
-	s.tasksCreated++
-
-	if !s.RecordPCP {
-		// Turn on logging
-		s.RecordPCP = true
-		time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
-	}
-
-	// If this is our first time running into this Agent
-	if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
-		s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
-	}
-
-	// Add task to list of tasks running on node
-	s.running[offer.GetSlaveId().GoString()][taskName] = true
-
-	// Setting the task ID to the task. This is done so that we can consider each task to be different
-	// even though they have the same parameters.
-	task.SetTaskID(*proto.String("electron-" + taskName))
-	// Add task to list of tasks running on node
-	if len(s.taskMonitor[*offer.Hostname]) == 0 {
-		s.taskMonitor[*offer.Hostname] = []def.Task{task}
-	} else {
-		s.taskMonitor[*offer.Hostname] = append(s.taskMonitor[*offer.Hostname], task)
-	}
-
-	resources := []*mesos.Resource{
-		mesosutil.NewScalarResource("cpus", task.CPU),
-		mesosutil.NewScalarResource("mem", task.RAM),
-	}
-
-	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass]))
-	}
-
-	return &mesos.TaskInfo{
-		Name: proto.String(taskName),
-		TaskId: &mesos.TaskID{
-			Value: proto.String("electron-" + taskName),
-		},
-		SlaveId:   offer.SlaveId,
-		Resources: resources,
-		Command: &mesos.CommandInfo{
-			Value: proto.String(task.CMD),
-		},
-		Container: &mesos.ContainerInfo{
-			Type: mesos.ContainerInfo_DOCKER.Enum(),
-			Docker: &mesos.ContainerInfo_DockerInfo{
-				Image:   proto.String(task.Image),
-				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
-			},
-		},
-	}
-}
-
-func (s *BPSWClassMapWattsPistonCapping) Disconnected(sched.SchedulerDriver) {
-	// Need to stop the capping process
-	s.ticker.Stop()
-	bpswClassMapWattsPistonMutex.Lock()
-	s.isCapping = false
-	bpswClassMapWattsPistonMutex.Unlock()
-	log.Println("Framework disconnected with master")
-}
-
-// mutex
-var bpswClassMapWattsPistonMutex sync.Mutex
-
-// go routine to cap each node in the cluster at regular intervals of time
-var bpswClassMapWattsPistonCapValues = make(map[string]float64)
-
-// Storing the previous cap value for each host so as to not repeatedly cap the nodes to the same value. (reduces overhead)
-var bpswClassMapWattsPistonPreviousRoundedCapValues = make(map[string]int)
-
-func (s *BPSWClassMapWattsPistonCapping) startCapping() {
-	go func() {
-		for {
-			select {
-			case <-s.ticker.C:
-				// Need to cap each node
-				bpswClassMapWattsPistonMutex.Lock()
-				for host, capValue := range bpswClassMapWattsPistonCapValues {
-					roundedCapValue := int(math.Floor(capValue + 0.5))
-					// has the cap value changed
-					if previousRoundedCap, ok := bpswClassMapWattsPistonPreviousRoundedCapValues[host]; ok {
-						if previousRoundedCap != roundedCapValue {
-							if err := rapl.Cap(host, "rapl", roundedCapValue); err != nil {
-								log.Println(err)
-							} else {
-								log.Printf("Capped [%s] at %d", host, roundedCapValue)
-							}
-							bpswClassMapWattsPistonPreviousRoundedCapValues[host] = roundedCapValue
-						}
-					} else {
-						if err := rapl.Cap(host, "rapl", roundedCapValue); err != nil {
-							log.Println(err)
-						} else {
-							log.Printf("Capped [%s] at %d", host, roundedCapValue)
-						}
-						bpswClassMapWattsPistonPreviousRoundedCapValues[host] = roundedCapValue
-					}
-				}
-				bpswClassMapWattsPistonMutex.Unlock()
-			}
-		}
-	}()
-}
-
-// Stop the capping
-func (s *BPSWClassMapWattsPistonCapping) stopCapping() {
-	if s.isCapping {
-		log.Println("Stopping the capping.")
-		s.ticker.Stop()
-		bpswClassMapWattsPistonMutex.Lock()
-		s.isCapping = false
-		bpswClassMapWattsPistonMutex.Unlock()
-	}
-}
-
-func (s *BPSWClassMapWattsPistonCapping) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
-	log.Printf("Received %d resource offers", len(offers))
-
-	// retrieving the total power for each host in the offers.
-	for _, offer := range offers {
-		if _, ok := s.totalPower[*offer.Hostname]; !ok {
-			_, _, offerWatts := offerUtils.OfferAgg(offer)
-			s.totalPower[*offer.Hostname] = offerWatts
-		}
-	}
-
-	// Displaying the totalPower
-	for host, tpower := range s.totalPower {
-		log.Printf("TotalPower[%s] = %f", host, tpower)
-	}
-
-	for _, offer := range offers {
-		select {
-		case <-s.Shutdown:
-			log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
-			driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
-
-			log.Println("Number of tasks still running: ", s.tasksRunning)
-			continue
-		default:
-		}
-
-		tasks := []*mesos.TaskInfo{}
-
-		offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
-
-		offerTaken := false
-		totalWatts := 0.0
-		totalCPU := 0.0
-		totalRAM := 0.0
-		// Store the partialLoad for host corresponding to this offer
-		// Once we can't fit any more tasks, we update the capValue for this host with partialLoad and then launch the fitted tasks.
-		partialLoad := 0.0
-		for i := 0; i < len(s.tasks); i++ {
-			task := s.tasks[i]
-			// Check host if it exists
-			if task.Host != "" {
-				// Don't take offer if it doesn't match our task's host requirement
-				if !strings.HasPrefix(*offer.Hostname, task.Host) {
-					continue
-				}
-			}
-
-			for *task.Instances > 0 {
-				powerClass := offerUtils.PowerClass(offer)
-				// Does the task fit
-				// OR lazy evaluation. If ignoreWatts is set to true, second statement won't
-				// be evaluated
-				if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) &&
-					(offerCPU >= (totalCPU + task.CPU)) &&
-					(offerRAM >= (totalRAM + task.RAM)) {
-
-					// Start piston capping if haven't started yet
-					if !s.isCapping {
-						s.isCapping = true
-						s.startCapping()
-					}
-
-					fmt.Println("Watts being used: ", task.ClassToWatts[powerClass])
-					offerTaken = true
-					totalWatts += task.ClassToWatts[powerClass]
-					totalCPU += task.CPU
-					totalRAM += task.RAM
-					log.Println("Co-Located with: ")
-					coLocated(s.running[offer.GetSlaveId().GoString()])
-					taskToSchedule := s.newTask(offer, task, powerClass)
-					tasks = append(tasks, taskToSchedule)
-
-					fmt.Println("Inst: ", *task.Instances)
-					s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
-					*task.Instances--
-					partialLoad += ((task.Watts * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
-
-					if *task.Instances <= 0 {
-						// All instances of task have been scheduled. Remove it
-						s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
-						if len(s.tasks) <= 0 {
-							log.Println("Done scheduling all tasks")
-							close(s.Shutdown)
-						}
-					}
-				} else {
-					break // Continue on to the next task
-				}
-			}
-		}
-
-		if offerTaken {
-			// Updating the cap value for offer.Hostname
-			bpswClassMapWattsPistonMutex.Lock()
-			bpswClassMapWattsPistonCapValues[*offer.Hostname] += partialLoad
-			bpswClassMapWattsPistonMutex.Unlock()
-			log.Printf("Starting on [%s]\n", offer.GetHostname())
-			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
-		} else {
-			// If there was no match for task
-			log.Println("There are not enough resources to launch the task: ")
-			cpus, mem, watts := offerUtils.OfferAgg(offer)
-
-			log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
-			driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
-		}
-	}
-}
-
-// Remove finished task from the taskMonitor
-func (s *BPSWClassMapWattsPistonCapping) deleteFromTaskMonitor(finishedTaskID string) (def.Task, string, error) {
-	hostOfFinishedTask := ""
-	indexOfFinishedTask := -1
-	found := false
-	var finishedTask def.Task
-
-	for host, tasks := range s.taskMonitor {
-		for i, task := range tasks {
-			if task.TaskID == finishedTaskID {
-				hostOfFinishedTask = host
-				indexOfFinishedTask = i
-				found = true
-			}
-		}
-		if found {
-			break
-		}
-	}
-
-	if hostOfFinishedTask != "" && indexOfFinishedTask != -1 {
-		finishedTask = s.taskMonitor[hostOfFinishedTask][indexOfFinishedTask]
-		log.Printf("Removing task with TaskID [%s] from the list of running tasks\n",
-			s.taskMonitor[hostOfFinishedTask][indexOfFinishedTask].TaskID)
-		s.taskMonitor[hostOfFinishedTask] = append(s.taskMonitor[hostOfFinishedTask][:indexOfFinishedTask],
-			s.taskMonitor[hostOfFinishedTask][indexOfFinishedTask+1:]...)
-	} else {
-		return finishedTask, hostOfFinishedTask, errors.New("Finished Task not present in TaskMonitor")
-	}
-	return finishedTask, hostOfFinishedTask, nil
-}
-
-func (s *BPSWClassMapWattsPistonCapping) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
-	log.Printf("Received task status [%s] for task [%s]\n", NameFor(status.State), *status.TaskId.Value)
-
-	if *status.State == mesos.TaskState_TASK_RUNNING {
-		bpswClassMapWattsPistonMutex.Lock()
-		s.tasksRunning++
-		bpswClassMapWattsPistonMutex.Unlock()
-	} else if IsTerminal(status.State) {
-		delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
-		// Deleting the task from the taskMonitor
-		finishedTask, hostOfFinishedTask, err := s.deleteFromTaskMonitor(*status.TaskId.Value)
-		if err != nil {
-			log.Println(err)
-		}
-
-		// Need to update the cap values for host of the finishedTask
-		bpswClassMapWattsPistonMutex.Lock()
-		bpswClassMapWattsPistonCapValues[hostOfFinishedTask] -= ((finishedTask.Watts * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
-		// Checking to see if the cap value has become 0, in which case we uncap the host.
-		if int(math.Floor(bpswClassMapWattsPistonCapValues[hostOfFinishedTask]+0.5)) == 0 {
-			bpswClassMapWattsPistonCapValues[hostOfFinishedTask] = 100
-		}
-		s.tasksRunning--
-		bpswClassMapWattsPistonMutex.Unlock()
-
-		if s.tasksRunning == 0 {
-			select {
-			case <-s.Shutdown:
-				s.stopCapping()
-				close(s.Done)
-			default:
-			}
-		}
-	}
-	log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
-}
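
The per-host piston-capping arithmetic deleted here is the same one the renamed bpswMaxMinPistonCapping.go below keeps using: each task placed on a host raises that host's cap by the task's share of the host's total power (scaled by constants.CapMargin), each finished task lowers it by the same amount, and a host whose cap rounds down to zero is reset to 100%, i.e. uncapped. A minimal stand-alone sketch of just that arithmetic, with a made-up cap margin and power budget:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        const capMargin = 0.7 // assumed stand-in for constants.CapMargin
        totalPower := 220.0   // assumed total power (watts) of the host
        capValue := 0.0       // percentage cap currently tracked for the host

        // Placing a task raises the host's cap by its share of total power.
        place := func(taskWatts float64) {
            capValue += ((taskWatts * capMargin) / totalPower) * 100
        }
        // A finished task lowers it again; a cap that rounds to zero means
        // nothing relevant is left running, so the host is uncapped (100%).
        finish := func(taskWatts float64) {
            capValue -= ((taskWatts * capMargin) / totalPower) * 100
            if int(math.Floor(capValue+0.5)) == 0 {
                capValue = 100
            }
        }

        place(50)
        place(30)
        fmt.Println("cap after placing two tasks:", int(math.Floor(capValue+0.5))) // 25
        finish(50)
        finish(30)
        fmt.Println("cap after both finish:", int(math.Floor(capValue+0.5))) // 100
    }

Rounding to the nearest integer before comparing against the previously applied value is what keeps rapl.Cap from being re-issued for sub-watt changes.
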
diff --git a/schedulers/bpswClassMapWattsProacCC.go b/schedulers/bpswClassMapWattsProacCC.go
deleted file mode 100644
index b250e67..0000000
--- a/schedulers/bpswClassMapWattsProacCC.go
+++ /dev/null
@@ -1,410 +0,0 @@
-package schedulers
-
-import (
-	"bitbucket.org/sunybingcloud/electron/constants"
-	"bitbucket.org/sunybingcloud/electron/def"
-	powCap "bitbucket.org/sunybingcloud/electron/powerCapping"
-	"bitbucket.org/sunybingcloud/electron/rapl"
-	"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
-	"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
-	"fmt"
-	"github.com/golang/protobuf/proto"
-	mesos "github.com/mesos/mesos-go/mesosproto"
-	"github.com/mesos/mesos-go/mesosutil"
-	sched "github.com/mesos/mesos-go/scheduler"
-	"log"
-	"math"
-	"os"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-)
-
-// Decides if to take an offer or not
-func (*BPSWClassMapWattsProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
-	cpus, mem, watts := offerUtils.OfferAgg(offer)
-
-	// TODO: Insert watts calculation here instead of taking them as parameter
-
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
-		return true
-	}
-
-	return false
-}
-
-type BPSWClassMapWattsProacCC struct {
-	base           // Type embedding to inherit common functions
-	tasksCreated   int
-	tasksRunning   int
-	tasks          []def.Task
-	metrics        map[string]def.Metric
-	running        map[string]map[string]bool
-	taskMonitor    map[string][]def.Task
-	availablePower map[string]float64
-	totalPower     map[string]float64
-	ignoreWatts    bool
-	capper         *powCap.ClusterwideCapper
-	ticker         *time.Ticker
-	recapTicker    *time.Ticker
-	isCapping      bool // indicate whether we are currently performing cluster-wide capping.
-	isRecapping    bool // indicate whether we are currently performing cluster-wide recapping.
-
-	// First set of PCP values are garbage values, signal to logger to start recording when we're
-	// about to schedule a new task
-	RecordPCP bool
-
-	// This channel is closed when the program receives an interrupt,
-	// signalling that the program should shut down
-	Shutdown chan struct{}
-	// This channel is closed after shutdown is closed, and only when all
-	// outstanding tasks have been cleaned up
-	Done chan struct{}
-
-	// Controls when to shutdown pcp logging
-	PCPLog chan struct{}
-
-	schedTrace *log.Logger
-}
-
-// New electron scheduler
-func NewBPSWClassMapWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BPSWClassMapWattsProacCC {
-	sort.Sort(def.WattsSorter(tasks))
-
-	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	s := &BPSWClassMapWattsProacCC{
-		tasks:          tasks,
-		ignoreWatts:    ignoreWatts,
-		Shutdown:       make(chan struct{}),
-		Done:           make(chan struct{}),
-		PCPLog:         make(chan struct{}),
-		running:        make(map[string]map[string]bool),
-		taskMonitor:    make(map[string][]def.Task),
-		availablePower: make(map[string]float64),
-		totalPower:     make(map[string]float64),
-		RecordPCP:      false,
-		capper:         powCap.GetClusterwideCapperInstance(),
-		ticker:         time.NewTicker(10 * time.Second),
-		recapTicker:    time.NewTicker(20 * time.Second),
-		isCapping:      false,
-		isRecapping:    false,
-		schedTrace:     log.New(logFile, "", log.LstdFlags),
-	}
-	return s
-}
-
-// mutex
-var bpswClassMapWattsProacCCMutex sync.Mutex
-
-func (s *BPSWClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo {
-	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
-	s.tasksCreated++
-
-	if !s.RecordPCP {
-		// Turn on logging.
-		s.RecordPCP = true
-		time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
-	}
-
-	// If this is our first time running into this Agent
-	if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
-		s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
-	}
-
-	// Setting the task ID to the task. This is done so that we can consider each task to be different,
-	// even though they have the same parameters.
-	task.SetTaskID(*proto.String("electron-" + taskName))
-	// Add task to the list of tasks running on the node.
-	s.running[offer.GetSlaveId().GoString()][taskName] = true
-	if len(s.taskMonitor[*offer.Hostname]) == 0 {
-		s.taskMonitor[*offer.Hostname] = []def.Task{task}
-	} else {
-		s.taskMonitor[*offer.Hostname] = append(s.taskMonitor[*offer.Hostname], task)
-	}
-
-	resources := []*mesos.Resource{
-		mesosutil.NewScalarResource("cpus", task.CPU),
-		mesosutil.NewScalarResource("mem", task.RAM),
-	}
-
-	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass]))
-	}
-
-	return &mesos.TaskInfo{
-		Name: proto.String(taskName),
-		TaskId: &mesos.TaskID{
-			Value: proto.String("electron-" + taskName),
-		},
-		SlaveId:   offer.SlaveId,
-		Resources: resources,
-		Command: &mesos.CommandInfo{
-			Value: proto.String(task.CMD),
-		},
-		Container: &mesos.ContainerInfo{
-			Type: mesos.ContainerInfo_DOCKER.Enum(),
-			Docker: &mesos.ContainerInfo_DockerInfo{
-				Image:   proto.String(task.Image),
-				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
-			},
-		},
-	}
-}
-
-func (s *BPSWClassMapWattsProacCC) Disconnected(sched.SchedulerDriver) {
-	// Need to stop the capping process
-	s.ticker.Stop()
-	s.recapTicker.Stop()
-	bpswClassMapWattsProacCCMutex.Lock()
-	s.isCapping = false
-	bpswClassMapWattsProacCCMutex.Unlock()
-	log.Println("Framework disconnected with master")
-}
-
-// go routine to cap the entire cluster in regular intervals of time.
-var bpswClassMapWattsProacCCCapValue = 0.0    // initial value to indicate that we haven't capped the cluster yet.
-var bpswClassMapWattsProacCCNewCapValue = 0.0 // newly computed cap value
-func (s *BPSWClassMapWattsProacCC) startCapping() {
-	go func() {
-		for {
-			select {
-			case <-s.ticker.C:
-				// Need to cap the cluster only if new cap value different from old cap value.
-				// This way we don't unnecessarily cap the cluster.
-				bpswClassMapWattsProacCCMutex.Lock()
-				if s.isCapping {
-					if int(math.Floor(bpswClassMapWattsProacCCNewCapValue+0.5)) != int(math.Floor(bpswClassMapWattsProacCCCapValue+0.5)) {
-						// updating cap value
-						bpswClassMapWattsProacCCCapValue = bpswClassMapWattsProacCCNewCapValue
-						if bpswClassMapWattsProacCCCapValue > 0.0 {
-							for _, host := range constants.Hosts {
-								// Rounding cap value to nearest int
-								if err := rapl.Cap(host, "rapl", int(math.Floor(bpswClassMapWattsProacCCCapValue+0.5))); err != nil {
-									log.Println(err)
-								}
-							}
-							log.Printf("Capped the cluster to %d", int(math.Floor(bpswClassMapWattsProacCCCapValue+0.5)))
-						}
-					}
-				}
-				bpswClassMapWattsProacCCMutex.Unlock()
-			}
-		}
-	}()
-}
-
-// go routine to recap the entire cluster in regular intervals of time.
-var bpswClassMapWattsProacCCRecapValue = 0.0 // The cluster-wide cap value when recapping
-func (s *BPSWClassMapWattsProacCC) startRecapping() {
-	go func() {
-		for {
-			select {
-			case <-s.recapTicker.C:
-				bpswClassMapWattsProacCCMutex.Lock()
-				// If stopped performing cluster wide capping, then we need to recap
-				if s.isRecapping && bpswClassMapWattsProacCCRecapValue > 0.0 {
-					for _, host := range constants.Hosts {
-						// Rounding capValue to the nearest int
-						if err := rapl.Cap(host, "rapl", int(math.Floor(bpswClassMapWattsProacCCRecapValue+0.5))); err != nil {
-							log.Println(err)
-						}
-					}
-					log.Printf("Recapping the cluster to %d", int(math.Floor(bpswClassMapWattsProacCCRecapValue+0.5)))
-				}
-				// Setting recapping to false
-				s.isRecapping = false
-				bpswClassMapWattsProacCCMutex.Unlock()
-			}
-		}
-	}()
-}
-
-// Stop cluster wide capping
-func (s *BPSWClassMapWattsProacCC) stopCapping() {
-	if s.isCapping {
-		log.Println("Stopping the cluster-wide capping.")
-		s.ticker.Stop()
-		bpswClassMapWattsProacCCMutex.Lock()
-		s.isCapping = false
-		s.isRecapping = true
-		bpswClassMapWattsProacCCMutex.Unlock()
-	}
-}
-
-// Stop the cluster wide recapping
-func (s *BPSWClassMapWattsProacCC) stopRecapping() {
-	// If not capping, then definitely recapping.
-	if !s.isCapping && s.isRecapping {
-		log.Println("Stopping the cluster-wide re-capping.")
-		s.recapTicker.Stop()
-		bpswClassMapWattsProacCCMutex.Lock()
-		s.isRecapping = false
-		bpswClassMapWattsProacCCMutex.Unlock()
-	}
-}
-
-func (s *BPSWClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
-	log.Printf("Received %d resource offers", len(offers))
-
-	// retrieving the available power for all the hosts in the offers.
-	for _, offer := range offers {
-		_, _, offerWatts := offerUtils.OfferAgg(offer)
-		s.availablePower[*offer.Hostname] = offerWatts
-		// setting total power if the first time
-		if _, ok := s.totalPower[*offer.Hostname]; !ok {
-			s.totalPower[*offer.Hostname] = offerWatts
-		}
-	}
-
-	for host, tpower := range s.totalPower {
-		log.Printf("TotalPower[%s] = %f", host, tpower)
-	}
-
-	for _, offer := range offers {
-		select {
-		case <-s.Shutdown:
-			log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
-			driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
-
-			log.Println("Number of tasks still running: ", s.tasksRunning)
-			continue
-		default:
-		}
-
-		tasks := []*mesos.TaskInfo{}
-
-		offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
-
-		offerTaken := false
-		totalWatts := 0.0
-		totalCPU := 0.0
-		totalRAM := 0.0
-		for i := 0; i < len(s.tasks); i++ {
-			task := s.tasks[i]
-			// Check host if it exists
-			if task.Host != "" {
-				// Don't take offer if it doesn't match our task's host requirement.
-				if !strings.HasPrefix(*offer.Hostname, task.Host) {
-					continue
-				}
-			}
-
-			for *task.Instances > 0 {
-				powerClass := offerUtils.PowerClass(offer)
-				// Does the task fit
-				// OR Lazy evaluation. If ignore watts is set to true, second statement won't
-				// be evaluated.
-				if (s.ignoreWatts || (offerWatts >= (totalWatts + task.ClassToWatts[powerClass]))) &&
-					(offerCPU >= (totalCPU + task.CPU)) &&
-					(offerRAM >= (totalRAM + task.RAM)) {
-
-					// Capping the cluster if haven't yet started
-					if !s.isCapping {
-						bpswClassMapWattsProacCCMutex.Lock()
-						s.isCapping = true
-						bpswClassMapWattsProacCCMutex.Unlock()
-						s.startCapping()
-					}
-
-					fmt.Println("Watts being used: ", task.ClassToWatts[powerClass])
-					tempCap, err := s.capper.FCFSDeterminedCap(s.totalPower, &task)
-					if err == nil {
-						bpswClassMapWattsProacCCMutex.Lock()
-						bpswClassMapWattsProacCCNewCapValue = tempCap
-						bpswClassMapWattsProacCCMutex.Unlock()
-					} else {
-						log.Println("Failed to determine new cluster-wide cap:")
-						log.Println(err)
-					}
-					offerTaken = true
-					totalWatts += task.ClassToWatts[powerClass]
-					totalCPU += task.CPU
-					totalRAM += task.RAM
-					log.Println("Co-Located with: ")
-					coLocated(s.running[offer.GetSlaveId().GoString()])
-					taskToSchedule := s.newTask(offer, task, powerClass)
-					tasks = append(tasks, taskToSchedule)
-
-					fmt.Println("Inst: ", *task.Instances)
-					s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
-					*task.Instances--
-
-					if *task.Instances <= 0 {
-						// All instances of task have been scheduled, remove it
-						s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
-
-						if len(s.tasks) == 0 {
-							log.Println("Done scheduling all tasks.")
-							// Need to stop the cluster wide capping as there aren't any more tasks to schedule.
-							s.stopCapping()
-							s.startRecapping() // Load changes after every task finishes and hence, we need to change the capping of the cluster.
-							close(s.Shutdown)
-						}
-					}
-				} else {
-					break // Continue on to the next task
-				}
-			}
-		}
-
-		if offerTaken {
-			log.Printf("Starting on [%s]\n", offer.GetHostname())
-			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
-		} else {
-			// If there was no match for the task
-			fmt.Println("There are not enough resources to launch a task:")
-			cpus, mem, watts := offerUtils.OfferAgg(offer)
-
-			log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
-			driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
-		}
-	}
-}
-
-func (s *BPSWClassMapWattsProacCC) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
-	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
-
-	if *status.State == mesos.TaskState_TASK_RUNNING {
-		s.tasksRunning++
-	} else if IsTerminal(status.State) {
-		delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
-		// Need to remove the task from the window
-		s.capper.TaskFinished(*status.TaskId.Value)
-		// Determining the new cluster wide recap value
-		//tempCap, err := s.capper.NaiveRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
-		tempCap, err := s.capper.CleverRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
-		if err == nil {
-			// If new determined cap value is different from the current recap value, then we need to recap
-			if int(math.Floor(tempCap+0.5)) != int(math.Floor(bpswClassMapWattsProacCCRecapValue+0.5)) {
-				bpswClassMapWattsProacCCRecapValue = tempCap
-				bpswClassMapWattsProacCCMutex.Lock()
-				s.isRecapping = true
-				bpswClassMapWattsProacCCMutex.Unlock()
-				log.Printf("Determined re-cap value: %f\n", bpswClassMapWattsProacCCRecapValue)
-			} else {
-				bpswClassMapWattsProacCCMutex.Lock()
-				s.isRecapping = false
-				bpswClassMapWattsProacCCMutex.Unlock()
-			}
-		} else {
-			log.Println(err)
-		}
-
-		s.tasksRunning--
-		if s.tasksRunning == 0 {
-			select {
-			case <-s.Shutdown:
-				// Need to stop the cluster-wide recapping
-				s.stopRecapping()
-				close(s.Done)
-			default:
-			}
-		}
-	}
-	log.Printf("DONE: Task status [%s] for task[%s]", NameFor(status.State), *status.TaskId.Value)
-}
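
The cluster-wide capping deleted here follows the same ticker pattern as the ProacCC schedulers that survive the renames below: a goroutine wakes on every tick and re-issues rapl.Cap across constants.Hosts only when the rounded cap value has actually changed. A compact, self-contained sketch of that pattern; the host names, tick interval, and cap values are placeholders:

    package main

    import (
        "fmt"
        "math"
        "sync"
        "time"
    )

    func main() {
        var mu sync.Mutex
        current, proposed := 0.0, 0.0
        // capNode stands in for rapl.Cap(host, "rapl", percent).
        capNode := func(host string, percent int) {
            fmt.Printf("capping %s to %d%%\n", host, percent)
        }

        ticker := time.NewTicker(50 * time.Millisecond)
        done := make(chan struct{})
        go func() {
            for {
                select {
                case <-done:
                    return
                case <-ticker.C:
                    mu.Lock()
                    // Re-cap only when the rounded value differs from the one in force.
                    if int(math.Floor(proposed+0.5)) != int(math.Floor(current+0.5)) {
                        current = proposed
                        if current > 0.0 {
                            for _, host := range []string{"host-1", "host-2"} { // placeholder for constants.Hosts
                                capNode(host, int(math.Floor(current+0.5)))
                            }
                        }
                    }
                    mu.Unlock()
                }
            }
        }()

        // Newly determined cap values (e.g. from FCFSDeterminedCap) trickle in here.
        for _, v := range []float64{72.4, 72.6, 65.0} {
            mu.Lock()
            proposed = v
            mu.Unlock()
            time.Sleep(60 * time.Millisecond)
        }
        ticker.Stop()
        close(done)
    }

Comparing rounded values is what keeps the cluster from being re-capped on every tick when the newly computed cap barely moves.
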
diff --git a/schedulers/bpMaxMin.go b/schedulers/bpswMaxMin.go
similarity index 71%
rename from schedulers/bpMaxMin.go
rename to schedulers/bpswMaxMin.go
index d5e791a..f6d4f3b 100644
--- a/schedulers/bpMaxMin.go
+++ b/schedulers/bpswMaxMin.go
@@ -17,27 +17,33 @@ import (
 )
 
 // Decides if to take an offer or not
-func (*BPMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
+func (s *BPSWMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
 
 	cpus, mem, watts := offerUtils.OfferAgg(offer)
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
 
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) {
 		return true
 	}
 
 	return false
 }
 
-type BPMaxMinWatts struct {
-	base         //Type embedding to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	ignoreWatts  bool
+type BPSWMaxMinWatts struct {
+	base          //Type embedding to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	ignoreWatts   bool
+	classMapWatts bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -57,7 +63,7 @@ type BPMaxMinWatts struct {
 }
 
 // New electron scheduler
-func NewBPMaxMinWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BPMaxMinWatts {
+func NewBPMaxMinWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinWatts {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -65,20 +71,21 @@ func NewBPMaxMinWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix strin
 		log.Fatal(err)
 	}
 
-	s := &BPMaxMinWatts{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+	s := &BPSWMaxMinWatts{
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		RecordPCP:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
 
-func (s *BPMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
+func (s *BPSWMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
 	s.tasksCreated++
 
@@ -103,7 +110,13 @@ func (s *BPMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskIn
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
@@ -128,8 +141,9 @@ func (s *BPMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskIn
 
 // Determine if the remaining space inside of the offer is enough for
 // the task we need to create. If it is, create a TaskInfo and return it.
-func (s *BPMaxMinWatts) CheckFit(i int,
+func (s *BPSWMaxMinWatts) CheckFit(i int,
 	task def.Task,
+	wattsConsideration float64,
 	offer *mesos.Offer,
 	totalCPU *float64,
 	totalRAM *float64,
@@ -138,11 +152,11 @@ func (s *BPMaxMinWatts) CheckFit(i int,
 	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 
 	// Does the task fit
-	if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) &&
+	if (s.ignoreWatts || (offerWatts >= (*totalWatts + wattsConsideration))) &&
 		(offerCPU >= (*totalCPU + task.CPU)) &&
 		(offerRAM >= (*totalRAM + task.RAM)) {
 
-		*totalWatts += task.Watts
+		*totalWatts += wattsConsideration
 		*totalCPU += task.CPU
 		*totalRAM += task.RAM
 		log.Println("Co-Located with: ")
@@ -170,7 +184,7 @@ func (s *BPMaxMinWatts) CheckFit(i int,
 	return false, nil
 }
 
-func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
+func (s *BPSWMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
 	log.Printf("Received %d resource offers", len(offers))
 
 	for _, offer := range offers {
@@ -198,6 +212,12 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m
 		for i := len(s.tasks) - 1; i >= 0; i-- {
 
 			task := s.tasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
+
 			// Check host if it exists
 			if task.Host != "" {
 				// Don't take offer if it doesn't match our task's host requirement
@@ -207,7 +227,8 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m
 			}
 
 			// TODO: Fix this so index doesn't need to be passed
-			taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts)
+			taken, taskToSchedule := s.CheckFit(i, task, wattsConsideration, offer,
+				&totalCPU, &totalRAM, &totalWatts)
 
 			if taken {
 				offerTaken = true
@@ -217,7 +238,13 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m
 		}
 
 		// Pack the rest of the offer with the smallest tasks
-		for i, task := range s.tasks {
+		for i := 0; i < len(s.tasks); i++ {
+			task := s.tasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
 
 			// Check host if it exists
 			if task.Host != "" {
@@ -229,7 +256,8 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m
 
 			for *task.Instances > 0 {
 				// TODO: Fix this so index doesn't need to be passed
-				taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts)
+				taken, taskToSchedule := s.CheckFit(i, task, wattsConsideration, offer,
+					&totalCPU, &totalRAM, &totalWatts)
 
 				if taken {
 					offerTaken = true
@@ -255,7 +283,7 @@ func (s *BPMaxMinWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*m
 	}
 }
 
-func (s *BPMaxMinWatts) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
+func (s *BPSWMaxMinWatts) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
 	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
 
 	if *status.State == mesos.TaskState_TASK_RUNNING {
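
From this file onward, every takeOffer, newTask and CheckFit funnels its watts requirement through def.WattsToConsider(task, s.classMapWatts, offer) instead of reading task.Watts directly. The helper itself is not part of these hunks, so the sketch below is only an inference from its call sites and from the ClassToWatts lookups in the deleted class-map schedulers: with classMapWatts enabled it would return the task's watts entry for the offer's power class, otherwise it falls back to task.Watts. The Task and Offer types here are trimmed stand-ins, not the real def/mesosproto types.

    package main

    import (
        "errors"
        "fmt"
    )

    // Trimmed stand-ins for the real def.Task and mesos.Offer.
    type Task struct {
        Watts        float64
        ClassToWatts map[string]float64
    }

    type Offer struct {
        PowerClass string // the real code derives this from offer attributes via offerUtils.PowerClass
    }

    // wattsToConsider mirrors the assumed behaviour of def.WattsToConsider.
    func wattsToConsider(task Task, classMapWatts bool, offer Offer) (float64, error) {
        if classMapWatts {
            if w, ok := task.ClassToWatts[offer.PowerClass]; ok {
                return w, nil
            }
            return 0, errors.New("no watts entry for power class " + offer.PowerClass)
        }
        return task.Watts, nil
    }

    func main() {
        task := Task{Watts: 75, ClassToWatts: map[string]float64{"A": 60, "B": 90}}
        w, _ := wattsToConsider(task, true, Offer{PowerClass: "B"})
        fmt.Println("classMapWatts on:", w) // 90
        w, _ = wattsToConsider(task, false, Offer{PowerClass: "B"})
        fmt.Println("classMapWatts off:", w) // 75
    }
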
diff --git a/schedulers/bpMaxMinPistonCapping.go b/schedulers/bpswMaxMinPistonCapping.go
similarity index 74%
rename from schedulers/bpMaxMinPistonCapping.go
rename to schedulers/bpswMaxMinPistonCapping.go
index b4d4e3c..6214e0e 100644
--- a/schedulers/bpMaxMinPistonCapping.go
+++ b/schedulers/bpswMaxMinPistonCapping.go
@@ -22,31 +22,37 @@ import (
 )
 
 // Decides if to take an offer or not
-func (s *BPMaxMinPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) bool {
+func (s *BPSWMaxMinPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) bool {
 
 	cpus, mem, watts := offerUtils.OfferAgg(offer)
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
 
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) {
 		return true
 	}
 
 	return false
 }
 
-type BPMaxMinPistonCapping struct {
-	base         //Type embedding to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	taskMonitor  map[string][]def.Task
-	totalPower   map[string]float64
-	ignoreWatts  bool
-	ticker       *time.Ticker
-	isCapping    bool
+type BPSWMaxMinPistonCapping struct {
+	base          //Type embedding to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	taskMonitor   map[string][]def.Task
+	totalPower    map[string]float64
+	ignoreWatts   bool
+	classMapWatts bool
+	ticker        *time.Ticker
+	isCapping     bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -66,7 +72,8 @@ type BPMaxMinPistonCapping struct {
 }
 
 // New electron scheduler
-func NewBPMaxMinPistonCapping(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BPMaxMinPistonCapping {
+func NewBPSWMaxMinPistonCapping(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+	classMapWatts bool) *BPSWMaxMinPistonCapping {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -74,25 +81,26 @@ func NewBPMaxMinPistonCapping(tasks []def.Task, ignoreWatts bool, schedTracePref
 		log.Fatal(err)
 	}
 
-	s := &BPMaxMinPistonCapping{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		taskMonitor: make(map[string][]def.Task),
-		totalPower:  make(map[string]float64),
-		RecordPCP:   false,
-		ticker:      time.NewTicker(5 * time.Second),
-		isCapping:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+	s := &BPSWMaxMinPistonCapping{
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		taskMonitor:   make(map[string][]def.Task),
+		totalPower:    make(map[string]float64),
+		RecordPCP:     false,
+		ticker:        time.NewTicker(5 * time.Second),
+		isCapping:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 
 }
 
-func (s *BPMaxMinPistonCapping) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
+func (s *BPSWMaxMinPistonCapping) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
 	s.tasksCreated++
 
@@ -127,7 +135,13 @@ func (s *BPMaxMinPistonCapping) newTask(offer *mesos.Offer, task def.Task) *meso
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
@@ -150,7 +164,7 @@ func (s *BPMaxMinPistonCapping) newTask(offer *mesos.Offer, task def.Task) *meso
 	}
 }
 
-func (s *BPMaxMinPistonCapping) Disconnected(sched.SchedulerDriver) {
+func (s *BPSWMaxMinPistonCapping) Disconnected(sched.SchedulerDriver) {
 	// Need to stop the capping process
 	s.ticker.Stop()
 	bpMaxMinPistonCappingMutex.Lock()
@@ -168,7 +182,7 @@ var bpMaxMinPistonCappingCapValues = make(map[string]float64)
 // Storing the previous cap value for each host so as to not repeatedly cap the nodes to the same value. (reduces overhead)
 var bpMaxMinPistonCappingPreviousRoundedCapValues = make(map[string]int)
 
-func (s *BPMaxMinPistonCapping) startCapping() {
+func (s *BPSWMaxMinPistonCapping) startCapping() {
 	go func() {
 		for {
 			select {
@@ -204,7 +218,7 @@ func (s *BPMaxMinPistonCapping) startCapping() {
 }
 
 // Stop the capping
-func (s *BPMaxMinPistonCapping) stopCapping() {
+func (s *BPSWMaxMinPistonCapping) stopCapping() {
 	if s.isCapping {
 		log.Println("Stopping the capping.")
 		s.ticker.Stop()
@@ -216,8 +230,9 @@ func (s *BPMaxMinPistonCapping) stopCapping() {
 
 // Determine if the remaining space inside of the offer is enough for
 // the task we need to create. If it is, create a TaskInfo and return it.
-func (s *BPMaxMinPistonCapping) CheckFit(i int,
+func (s *BPSWMaxMinPistonCapping) CheckFit(i int,
 	task def.Task,
+	wattsConsideration float64,
 	offer *mesos.Offer,
 	totalCPU *float64,
 	totalRAM *float64,
@@ -227,7 +242,7 @@ func (s *BPMaxMinPistonCapping) CheckFit(i int,
 	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 
 	// Does the task fit
-	if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) &&
+	if (s.ignoreWatts || (offerWatts >= (*totalWatts + wattsConsideration))) &&
 		(offerCPU >= (*totalCPU + task.CPU)) &&
 		(offerRAM >= (*totalRAM + task.RAM)) {
 
@@ -237,7 +252,7 @@ func (s *BPMaxMinPistonCapping) CheckFit(i int,
 			s.startCapping()
 		}
 
-		*totalWatts += task.Watts
+		*totalWatts += wattsConsideration
 		*totalCPU += task.CPU
 		*totalRAM += task.RAM
 		log.Println("Co-Located with: ")
@@ -248,7 +263,7 @@ func (s *BPMaxMinPistonCapping) CheckFit(i int,
 		fmt.Println("Inst: ", *task.Instances)
 		s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
 		*task.Instances--
-		*partialLoad += ((task.Watts * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
+		*partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
 
 		if *task.Instances <= 0 {
 			// All instances of task have been scheduled, remove it
@@ -266,7 +281,7 @@ func (s *BPMaxMinPistonCapping) CheckFit(i int,
 	return false, nil
 }
 
-func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
+func (s *BPSWMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
 	log.Printf("Received %d resource offers", len(offers))
 
 	for _, offer := range offers {
@@ -297,6 +312,12 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off
 		for i := len(s.tasks) - 1; i >= 0; i-- {
 
 			task := s.tasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
+
 			// Check host if it exists
 			if task.Host != "" {
 				// Don't take offer if it doesn't match our task's host requirement
@@ -306,7 +327,8 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off
 			}
 
 			// TODO: Fix this so index doesn't need to be passed
-			taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts, &partialLoad)
+			taken, taskToSchedule := s.CheckFit(i, task, wattsConsideration, offer,
+				&totalCPU, &totalRAM, &totalWatts, &partialLoad)
 
 			if taken {
 				offerTaken = true
@@ -316,7 +338,13 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off
 		}
 
 		// Pack the rest of the offer with the smallest tasks
-		for i, task := range s.tasks {
+		for i := 0; i < len(s.tasks); i++ {
+			task := s.tasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
 
 			// Check host if it exists
 			if task.Host != "" {
@@ -328,7 +356,8 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off
 
 			for *task.Instances > 0 {
 				// TODO: Fix this so index doesn't need to be passed
-				taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts, &partialLoad)
+				taken, taskToSchedule := s.CheckFit(i, task, wattsConsideration, offer,
+					&totalCPU, &totalRAM, &totalWatts, &partialLoad)
 
 				if taken {
 					offerTaken = true
@@ -359,7 +388,7 @@ func (s *BPMaxMinPistonCapping) ResourceOffers(driver sched.SchedulerDriver, off
 }
 
 // Remove finished task from the taskMonitor
-func (s *BPMaxMinPistonCapping) deleteFromTaskMonitor(finishedTaskID string) (def.Task, string, error) {
+func (s *BPSWMaxMinPistonCapping) deleteFromTaskMonitor(finishedTaskID string) (def.Task, string, error) {
 	hostOfFinishedTask := ""
 	indexOfFinishedTask := -1
 	found := false
@@ -390,7 +419,7 @@ func (s *BPMaxMinPistonCapping) deleteFromTaskMonitor(finishedTaskID string) (de
 	return finishedTask, hostOfFinishedTask, nil
 }
 
-func (s *BPMaxMinPistonCapping) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
+func (s *BPSWMaxMinPistonCapping) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
 	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
 
 	if *status.State == mesos.TaskState_TASK_RUNNING {
@@ -405,9 +434,16 @@ func (s *BPMaxMinPistonCapping) StatusUpdate(driver sched.SchedulerDriver, statu
 			log.Println(err)
 		}
 
+		// Need to determine the watts consideration for the finishedTask
+		var wattsConsideration float64
+		if s.classMapWatts {
+			wattsConsideration = finishedTask.ClassToWatts[hostToPowerClass(hostOfFinishedTask)]
+		} else {
+			wattsConsideration = finishedTask.Watts
+		}
 		// Need to update the cap values for host of the finishedTask
 		bpMaxMinPistonCappingMutex.Lock()
-		bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((finishedTask.Watts * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
+		bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
 		// Checking to see if the cap value has become 0, in which case we uncap the host.
 		if int(math.Floor(bpMaxMinPistonCappingCapValues[hostOfFinishedTask]+0.5)) == 0 {
 			bpMaxMinPistonCappingCapValues[hostOfFinishedTask] = 100
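
Both MaxMin variants keep the same two-pass packing order after the rename: the task queue stays sorted by watts (def.WattsSorter), each offer first tries to take one of the largest remaining tasks, and whatever room is left is then filled with the smallest ones. A toy version of that ordering against a single watts budget and a made-up task mix; the real CheckFit also tracks CPU, RAM, instance counts and (for the piston variant) the partial load:

    package main

    import (
        "fmt"
        "sort"
    )

    type task struct {
        name  string
        watts float64
    }

    // packOffer applies the max-min order: one big task first, then the smallest.
    func packOffer(capacity float64, tasks []task) []string {
        sort.Slice(tasks, func(i, j int) bool { return tasks[i].watts < tasks[j].watts })
        used := 0.0
        placed := []string{}
        taken := make([]bool, len(tasks))

        // First pass: walk from the largest task down and place the first one that fits.
        for i := len(tasks) - 1; i >= 0; i-- {
            if used+tasks[i].watts <= capacity {
                used += tasks[i].watts
                placed = append(placed, tasks[i].name)
                taken[i] = true
                break
            }
        }
        // Second pass: fill the remaining room with the smallest tasks.
        for i := 0; i < len(tasks); i++ {
            if !taken[i] && used+tasks[i].watts <= capacity {
                used += tasks[i].watts
                placed = append(placed, tasks[i].name)
                taken[i] = true
            }
        }
        return placed
    }

    func main() {
        tasks := []task{{"taskA", 90}, {"taskB", 30}, {"taskC", 45}, {"taskD", 20}}
        fmt.Println(packOffer(150, tasks)) // [taskA taskD taskB]
    }
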
diff --git a/schedulers/bpMaxMinProacCC.go b/schedulers/bpswMaxMinProacCC.go
similarity index 84%
rename from schedulers/bpMaxMinProacCC.go
rename to schedulers/bpswMaxMinProacCC.go
index fe44f60..3ec4d6a 100644
--- a/schedulers/bpMaxMinProacCC.go
+++ b/schedulers/bpswMaxMinProacCC.go
@@ -22,19 +22,24 @@ import (
 )
 
 // Decides if to take an offer or not
-func (s *BPMaxMinProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
+func (s *BPSWMaxMinProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
 	cpus, mem, watts := offerUtils.OfferAgg(offer)
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
 
-	if cpus >= task.CPU && mem >= task.RAM && watts >= task.Watts {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) {
 		return true
 	}
 
 	return false
 }
 
-type BPMaxMinProacCC struct {
+type BPSWMaxMinProacCC struct {
 	base           // Type embedding to inherit common functions
 	tasksCreated   int
 	tasksRunning   int
@@ -45,6 +50,7 @@ type BPMaxMinProacCC struct {
 	availablePower map[string]float64
 	totalPower     map[string]float64
 	ignoreWatts    bool
+	classMapWatts  bool
 	capper         *powCap.ClusterwideCapper
 	ticker         *time.Ticker
 	recapTicker    *time.Ticker
@@ -69,7 +75,7 @@ type BPMaxMinProacCC struct {
 }
 
 // New electron scheduler
-func NewBPMaxMinProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *BPMaxMinProacCC {
+func NewBPSWMaxMinProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinProacCC {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -77,9 +83,10 @@ func NewBPMaxMinProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix str
 		log.Fatal(err)
 	}
 
-	s := &BPMaxMinProacCC{
+	s := &BPSWMaxMinProacCC{
 		tasks:          tasks,
 		ignoreWatts:    ignoreWatts,
+		classMapWatts:  classMapWatts,
 		Shutdown:       make(chan struct{}),
 		Done:           make(chan struct{}),
 		PCPLog:         make(chan struct{}),
@@ -101,7 +108,7 @@ func NewBPMaxMinProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix str
 // mutex
 var bpMaxMinProacCCMutex sync.Mutex
 
-func (s *BPMaxMinProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
+func (s *BPSWMaxMinProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
 	s.tasksCreated++
 
@@ -133,7 +140,13 @@ func (s *BPMaxMinProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.Task
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
@@ -159,7 +172,7 @@ func (s *BPMaxMinProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.Task
 // go routine to cap the entire cluster in regular intervals of time.
 var bpMaxMinProacCCCapValue = 0.0    // initial value to indicate that we haven't capped the cluster yet.
 var bpMaxMinProacCCNewCapValue = 0.0 // newly computed cap value
-func (s *BPMaxMinProacCC) startCapping() {
+func (s *BPSWMaxMinProacCC) startCapping() {
 	go func() {
 		for {
 			select {
@@ -190,7 +203,7 @@ func (s *BPMaxMinProacCC) startCapping() {
 
 // go routine to recap the entire cluster in regular intervals of time.
 var bpMaxMinProacCCRecapValue = 0.0 // The cluster-wide cap value when recapping.
-func (s *BPMaxMinProacCC) startRecapping() {
+func (s *BPSWMaxMinProacCC) startRecapping() {
 	go func() {
 		for {
 			select {
@@ -216,7 +229,7 @@ func (s *BPMaxMinProacCC) startRecapping() {
 }
 
 // Stop cluster-wide capping
-func (s *BPMaxMinProacCC) stopCapping() {
+func (s *BPSWMaxMinProacCC) stopCapping() {
 	if s.isCapping {
 		log.Println("Stopping the cluster-wide capping.")
 		s.ticker.Stop()
@@ -228,7 +241,7 @@ func (s *BPMaxMinProacCC) stopCapping() {
 }
 
 // Stop the cluster-wide recapping
-func (s *BPMaxMinProacCC) stopRecapping() {
+func (s *BPSWMaxMinProacCC) stopRecapping() {
 	// If not capping, then definitely recapping.
 	if !s.isCapping && s.isRecapping {
 		log.Println("Stopping the cluster-wide re-capping.")
@@ -241,8 +254,9 @@ func (s *BPMaxMinProacCC) stopRecapping() {
 
 // Determine if the remaining space inside of the offer is enough for
 // the task we need to create. If it is, create TaskInfo and return it.
-func (s *BPMaxMinProacCC) CheckFit(i int,
+func (s *BPSWMaxMinProacCC) CheckFit(i int,
 	task def.Task,
+	wattsConsideration float64,
 	offer *mesos.Offer,
 	totalCPU *float64,
 	totalRAM *float64,
@@ -251,7 +265,7 @@ func (s *BPMaxMinProacCC) CheckFit(i int,
 	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 
 	// Does the task fit
-	if (s.ignoreWatts || (offerWatts >= (*totalWatts + task.Watts))) &&
+	if (s.ignoreWatts || (offerWatts >= (*totalWatts + wattsConsideration))) &&
 		(offerCPU >= (*totalCPU + task.CPU)) &&
 		(offerRAM >= (*totalRAM + task.RAM)) {
 
@@ -273,7 +287,7 @@ func (s *BPMaxMinProacCC) CheckFit(i int,
 			log.Println(err)
 		}
 
-		*totalWatts += task.Watts
+		*totalWatts += wattsConsideration
 		*totalCPU += task.CPU
 		*totalRAM += task.RAM
 		log.Println("Co-Located with: ")
@@ -305,7 +319,7 @@ func (s *BPMaxMinProacCC) CheckFit(i int,
 
 }
 
-func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
+func (s *BPSWMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
 	log.Printf("Received %d resource offers", len(offers))
 
 	// retrieving the available power for all the hosts in the offers.
@@ -347,6 +361,11 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []
 		for i := len(s.tasks) - 1; i >= 0; i-- {
 
 			task := s.tasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
 			// Check host if it exists
 			if task.Host != "" {
 				// Don't take offer if it doesn't match our task's host requirement
@@ -356,7 +375,8 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []
 			}
 
 			// TODO: Fix this so index doesn't need to be passed
-			taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts)
+			taken, taskToSchedule := s.CheckFit(i, task, wattsConsideration, offer,
+				&totalCPU, &totalRAM, &totalWatts)
 
 			if taken {
 				offerTaken = true
@@ -366,7 +386,13 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []
 		}
 
 		// Pack the rest of the offer with the smallest tasks
-		for i, task := range s.tasks {
+		for i := 0; i < len(s.tasks); i++ {
+			task := s.tasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
 
 			// Check host if it exists
 			if task.Host != "" {
@@ -378,7 +404,8 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []
 
 			for *task.Instances > 0 {
 				// TODO: Fix this so index doesn't need to be passed
-				taken, taskToSchedule := s.CheckFit(i, task, offer, &totalCPU, &totalRAM, &totalWatts)
+				taken, taskToSchedule := s.CheckFit(i, task, wattsConsideration, offer,
+					&totalCPU, &totalRAM, &totalWatts)
 
 				if taken {
 					offerTaken = true
@@ -404,7 +431,7 @@ func (s *BPMaxMinProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []
 	}
 }
 
-func (s *BPMaxMinProacCC) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
+func (s *BPSWMaxMinProacCC) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
 	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
 
 	if *status.State == mesos.TaskState_TASK_RUNNING {
diff --git a/schedulers/firstfit.go b/schedulers/firstfit.go
index 4317a91..09d1c2f 100644
--- a/schedulers/firstfit.go
+++ b/schedulers/firstfit.go
@@ -22,7 +22,12 @@ func (s *FirstFit) takeOffer(offer *mesos.Offer, task def.Task) bool {
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
 
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= task.Watts) {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) {
 		return true
 	}
 
@@ -31,13 +36,14 @@ func (s *FirstFit) takeOffer(offer *mesos.Offer, task def.Task) bool {
 
 // electronScheduler implements the Scheduler interface
 type FirstFit struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	ignoreWatts  bool
+	base          // Type embedded to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	ignoreWatts   bool
+	classMapWatts bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -57,7 +63,7 @@ type FirstFit struct {
 }
 
 // New electron scheduler
-func NewFirstFit(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *FirstFit {
+func NewFirstFit(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFit {
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
 	if err != nil {
@@ -65,14 +71,15 @@ func NewFirstFit(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *F
 	}
 
 	s := &FirstFit{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		RecordPCP:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -101,7 +108,13 @@ func (s *FirstFit) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
diff --git a/schedulers/proactiveclusterwidecappingfcfs.go b/schedulers/firstfitProacCC.go
similarity index 88%
rename from schedulers/proactiveclusterwidecappingfcfs.go
rename to schedulers/firstfitProacCC.go
index b7d7c8c..1766447 100644
--- a/schedulers/proactiveclusterwidecappingfcfs.go
+++ b/schedulers/firstfitProacCC.go
@@ -21,17 +21,22 @@ import (
 )
 
 // Decides if to take an offer or not
-func (_ *ProactiveClusterwideCapFCFS) takeOffer(offer *mesos.Offer, task def.Task) bool {
+func (s *FirstFitProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
 	offer_cpu, offer_mem, offer_watts := offerUtils.OfferAgg(offer)
 
-	if offer_cpu >= task.CPU && offer_mem >= task.RAM && offer_watts >= task.Watts {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || (offer_watts >= wattsConsideration)) {
 		return true
 	}
 	return false
 }
 
 // electronScheduler implements the Scheduler interface.
-type ProactiveClusterwideCapFCFS struct {
+type FirstFitProacCC struct {
 	base           // Type embedded to inherit common functions
 	tasksCreated   int
 	tasksRunning   int
@@ -42,6 +47,7 @@ type ProactiveClusterwideCapFCFS struct {
 	availablePower map[string]float64    // available power for each node in the cluster.
 	totalPower     map[string]float64    // total power for each node in the cluster.
 	ignoreWatts    bool
+	classMapWatts  bool
 	capper         *powCap.ClusterwideCapper
 	ticker         *time.Ticker
 	recapTicker    *time.Ticker
@@ -67,16 +73,18 @@ type ProactiveClusterwideCapFCFS struct {
 }
 
 // New electron scheduler.
-func NewProactiveClusterwideCapFCFS(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *ProactiveClusterwideCapFCFS {
+func NewFirstFitProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+	classMapWatts bool) *FirstFitProacCC {
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	s := &ProactiveClusterwideCapFCFS{
+	s := &FirstFitProacCC{
 		tasks:          tasks,
 		ignoreWatts:    ignoreWatts,
+		classMapWatts:  classMapWatts,
 		Shutdown:       make(chan struct{}),
 		Done:           make(chan struct{}),
 		PCPLog:         make(chan struct{}),
@@ -98,7 +106,7 @@ func NewProactiveClusterwideCapFCFS(tasks []def.Task, ignoreWatts bool, schedTra
 // mutex
 var fcfsMutex sync.Mutex
 
-func (s *ProactiveClusterwideCapFCFS) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
+func (s *FirstFitProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
 	s.tasksCreated++
 
@@ -130,7 +138,13 @@ func (s *ProactiveClusterwideCapFCFS) newTask(offer *mesos.Offer, task def.Task)
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
@@ -153,7 +167,7 @@ func (s *ProactiveClusterwideCapFCFS) newTask(offer *mesos.Offer, task def.Task)
 	}
 }
 
-func (s *ProactiveClusterwideCapFCFS) Disconnected(sched.SchedulerDriver) {
+func (s *FirstFitProacCC) Disconnected(sched.SchedulerDriver) {
 	// Need to stop the capping process.
 	s.ticker.Stop()
 	s.recapTicker.Stop()
@@ -165,7 +179,7 @@ func (s *ProactiveClusterwideCapFCFS) Disconnected(sched.SchedulerDriver) {
 
 // go routine to cap the entire cluster in regular intervals of time.
 var fcfsCurrentCapValue = 0.0 // initial value to indicate that we haven't capped the cluster yet.
-func (s *ProactiveClusterwideCapFCFS) startCapping() {
+func (s *FirstFitProacCC) startCapping() {
 	go func() {
 		for {
 			select {
@@ -189,7 +203,7 @@ func (s *ProactiveClusterwideCapFCFS) startCapping() {
 
 // go routine to recap the entire cluster in regular intervals of time.
 var fcfsRecapValue = 0.0 // The cluster wide cap value when recapping.
-func (s *ProactiveClusterwideCapFCFS) startRecapping() {
+func (s *FirstFitProacCC) startRecapping() {
 	go func() {
 		for {
 			select {
@@ -214,7 +228,7 @@ func (s *ProactiveClusterwideCapFCFS) startRecapping() {
 }
 
 // Stop cluster wide capping
-func (s *ProactiveClusterwideCapFCFS) stopCapping() {
+func (s *FirstFitProacCC) stopCapping() {
 	if s.isCapping {
 		log.Println("Stopping the cluster wide capping.")
 		s.ticker.Stop()
@@ -226,7 +240,7 @@ func (s *ProactiveClusterwideCapFCFS) stopCapping() {
 }
 
 // Stop cluster wide Recapping
-func (s *ProactiveClusterwideCapFCFS) stopRecapping() {
+func (s *FirstFitProacCC) stopRecapping() {
 	// If not capping, then definitely recapping.
 	if !s.isCapping && s.isRecapping {
 		log.Println("Stopping the cluster wide re-capping.")
@@ -237,7 +251,7 @@ func (s *ProactiveClusterwideCapFCFS) stopRecapping() {
 	}
 }
 
-func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
+func (s *FirstFitProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
 	log.Printf("Received %d resource offers", len(offers))
 
 	// retrieving the available power for all the hosts in the offers.
@@ -301,7 +315,7 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive
 					fcfsCurrentCapValue = tempCap
 					fcfsMutex.Unlock()
 				} else {
-					log.Printf("Failed to determine new cluster wide cap: ")
+					log.Println("Failed to determine new cluster wide cap: ")
 					log.Println(err)
 				}
 				log.Printf("Starting on [%s]\n", offer.GetHostname())
@@ -341,7 +355,7 @@ func (s *ProactiveClusterwideCapFCFS) ResourceOffers(driver sched.SchedulerDrive
 	}
 }
 
-func (s *ProactiveClusterwideCapFCFS) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
+func (s *FirstFitProacCC) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
 	log.Printf("Received task status [%s] for task [%s]\n", NameFor(status.State), *status.TaskId.Value)
 
 	if *status.State == mesos.TaskState_TASK_RUNNING {
diff --git a/schedulers/firstfitSortedOffers.go b/schedulers/firstfitSortedOffers.go
index 0611581..0d519d5 100644
--- a/schedulers/firstfitSortedOffers.go
+++ b/schedulers/firstfitSortedOffers.go
@@ -23,7 +23,12 @@ func (s *FirstFitSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
 
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= task.Watts) {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) {
 		return true
 	}
 
@@ -32,13 +37,14 @@ func (s *FirstFitSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool
 
 // electronScheduler implements the Scheduler interface
 type FirstFitSortedOffers struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	ignoreWatts  bool
+	base          // Type embedded to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	ignoreWatts   bool
+	classMapWatts bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -58,7 +64,7 @@ type FirstFitSortedOffers struct {
 }
 
 // New electron scheduler
-func NewFirstFitSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *FirstFitSortedOffers {
+func NewFirstFitSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFitSortedOffers {
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
 	if err != nil {
@@ -66,14 +72,15 @@ func NewFirstFitSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefi
 	}
 
 	s := &FirstFitSortedOffers{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		RecordPCP:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -102,7 +109,13 @@ func (s *FirstFitSortedOffers) newTask(offer *mesos.Offer, task def.Task) *mesos
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
diff --git a/schedulers/firstfitSortedWattsClassMapWatts.go b/schedulers/firstfitSortedWattsClassMapWatts.go
deleted file mode 100644
index a7f5448..0000000
--- a/schedulers/firstfitSortedWattsClassMapWatts.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package schedulers
-
-import (
-	"bitbucket.org/sunybingcloud/electron/def"
-	"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
-	"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
-	"fmt"
-	"github.com/golang/protobuf/proto"
-	mesos "github.com/mesos/mesos-go/mesosproto"
-	"github.com/mesos/mesos-go/mesosutil"
-	sched "github.com/mesos/mesos-go/scheduler"
-	"log"
-	"os"
-	"sort"
-	"strings"
-	"time"
-)
-
-// electron scheduler implements the Scheduler interface
-type FirstFitSortedWattsClassMapWatts struct {
-	base         // Type embedded to inherit common features.
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	ignoreWatts  bool
-
-	// First set of PCP values are garbage values, signal to logger to start recording when we're
-	// about to schedule a new task
-	RecordPCP bool
-
-	// This channel is closed when the program receives an interrupt,
-	// signalling that the program should shut down.
-	Shutdown chan struct{}
-	// This channel is closed after shutdown is closed, and only when all
-	// outstanding tasks have been cleaned up
-	Done chan struct{}
-
-	// Controls when to shutdown pcp logging
-	PCPLog chan struct{}
-
-	schedTrace *log.Logger
-}
-
-// New electorn scheduler
-func NewFirstFitSortedWattsClassMapWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *FirstFitSortedWattsClassMapWatts {
-	sort.Sort(def.WattsSorter(tasks))
-
-	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	s := &FirstFitSortedWattsClassMapWatts{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
-	}
-	return s
-}
-
-func (s *FirstFitSortedWattsClassMapWatts) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo {
-	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
-	s.tasksCreated++
-
-	if !s.RecordPCP {
-		// Turn on logging
-		s.RecordPCP = true
-		time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
-	}
-
-	// If this is our first time running into this Agent
-	if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
-		s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
-	}
-
-	// Add task to list of tasks running on node
-	s.running[offer.GetSlaveId().GoString()][taskName] = true
-
-	resources := []*mesos.Resource{
-		mesosutil.NewScalarResource("cpus", task.CPU),
-		mesosutil.NewScalarResource("mem", task.RAM),
-	}
-
-	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass]))
-	}
-
-	return &mesos.TaskInfo{
-		Name: proto.String(taskName),
-		TaskId: &mesos.TaskID{
-			Value: proto.String("electron-" + taskName),
-		},
-		SlaveId:   offer.SlaveId,
-		Resources: resources,
-		Command: &mesos.CommandInfo{
-			Value: proto.String(task.CMD),
-		},
-		Container: &mesos.ContainerInfo{
-			Type: mesos.ContainerInfo_DOCKER.Enum(),
-			Docker: &mesos.ContainerInfo_DockerInfo{
-				Image:   proto.String(task.Image),
-				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
-			},
-		},
-	}
-}
-
-func (s *FirstFitSortedWattsClassMapWatts) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
-	log.Printf("Received %d resource offers", len(offers))
-
-	for _, offer := range offers {
-		select {
-		case <-s.Shutdown:
-			log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
-			driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
-
-			log.Println("Number of tasks still running: ", s.tasksRunning)
-			continue
-		default:
-		}
-
-		offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
-
-		// First fit strategy
-		offerTaken := false
-		for i := 0; i < len(s.tasks); i++ {
-			task := s.tasks[i]
-			// Check host if it exists
-			if task.Host != "" {
-				// Don't take offer if it doens't match our task's host requirement.
-				if !strings.HasPrefix(*offer.Hostname, task.Host) {
-					continue
-				}
-			}
-
-			// retrieving the powerClass from the offer
-			powerClass := offerUtils.PowerClass(offer)
-
-			// Decision to take the offer or not
-			if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) &&
-				(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
-				fmt.Println("Watts being used: ", task.ClassToWatts[powerClass])
-				log.Println("Co-Located with: ")
-				coLocated(s.running[offer.GetSlaveId().GoString()])
-
-				taskToSchedule := s.newTask(offer, task, powerClass)
-				s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
-				log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname())
-				driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, mesosUtils.DefaultFilter)
-
-				offerTaken = true
-				fmt.Println("Inst: ", *task.Instances)
-				*task.Instances--
-				if *task.Instances <= 0 {
-					// All instances of task have been scheduled, remove it
-					s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
-
-					if len(s.tasks) == 0 {
-						log.Println("Done scheduling all tasks")
-						close(s.Shutdown)
-					}
-				}
-				break // Offer taken, move on
-			}
-		}
-
-		// If there was no match for the task
-		if !offerTaken {
-			fmt.Println("There is not enough resources to launch a task:")
-			cpus, mem, watts := offerUtils.OfferAgg(offer)
-
-			log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
-			driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
-
-		}
-	}
-}
-
-func (s *FirstFitSortedWattsClassMapWatts) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
-	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
-
-	if *status.State == mesos.TaskState_TASK_RUNNING {
-		s.tasksRunning++
-	} else if IsTerminal(status.State) {
-		delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
-		s.tasksRunning--
-		if s.tasksRunning == 0 {
-			select {
-			case <-s.Shutdown:
-				close(s.Done)
-			default:
-			}
-		}
-	}
-	log.Printf("DONE: Task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
-}
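
The deleted FirstFitSortedWattsClassMapWatts scheduler read the watts requirement straight out of task.ClassToWatts[powerClass]; after this series that selection is routed through def.WattsToConsider, guarded by the classMapWatts flag. A minimal, self-contained sketch of that selection logic (field names follow the diff; the type and the example values are simplified stand-ins, not the actual electron API):

package main

import "fmt"

// task is a stand-in for def.Task; only the fields used in this sketch are kept.
type task struct {
	Name         string
	Watts        float64
	ClassToWatts map[string]float64
}

// wattsToConsider mirrors the choice the deleted scheduler made inline: with
// classMapWatts enabled and a per-class entry present, use the class-specific
// value; otherwise fall back to the flat Watts estimate.
func wattsToConsider(t task, classMapWatts bool, powerClass string) (float64, error) {
	if classMapWatts && t.ClassToWatts != nil {
		if w, ok := t.ClassToWatts[powerClass]; ok {
			return w, nil
		}
		return 0, fmt.Errorf("no watts entry for power class %q in task %s", powerClass, t.Name)
	}
	return t.Watts, nil
}

func main() {
	t := task{Name: "example", Watts: 50.0, ClassToWatts: map[string]float64{"A": 30.0, "B": 40.0}}
	if w, err := wattsToConsider(t, true, "A"); err == nil {
		fmt.Println("watts considered:", w) // 30
	}
}
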
diff --git a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go b/schedulers/firstfitSortedWattsClassMapWattsProacCC.go
deleted file mode 100644
index a896468..0000000
--- a/schedulers/firstfitSortedWattsClassMapWattsProacCC.go
+++ /dev/null
@@ -1,386 +0,0 @@
-package schedulers
-
-import (
-	"bitbucket.org/sunybingcloud/electron/constants"
-	"bitbucket.org/sunybingcloud/electron/def"
-	powCap "bitbucket.org/sunybingcloud/electron/powerCapping"
-	"bitbucket.org/sunybingcloud/electron/rapl"
-	"bitbucket.org/sunybingcloud/electron/utilities/mesosUtils"
-	"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
-	"fmt"
-	"github.com/golang/protobuf/proto"
-	mesos "github.com/mesos/mesos-go/mesosproto"
-	"github.com/mesos/mesos-go/mesosutil"
-	sched "github.com/mesos/mesos-go/scheduler"
-	"log"
-	"math"
-	"os"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-)
-
-// electron scheduler implements the Scheduler interface
-type FirstFitSortedWattsClassMapWattsProacCC struct {
-	base           // Type embedded to inherit common features.
-	tasksCreated   int
-	tasksRunning   int
-	tasks          []def.Task
-	metrics        map[string]def.Metric
-	running        map[string]map[string]bool
-	taskMonitor    map[string][]def.Task
-	availablePower map[string]float64
-	totalPower     map[string]float64
-	ignoreWatts    bool
-	capper         *powCap.ClusterwideCapper
-	ticker         *time.Ticker
-	recapTicker    *time.Ticker
-	isCapping      bool // indicate whether we are currently performing cluster-wide capping.
-	isRecapping    bool // indicate whether we are currently performing cluster-wide recapping.
-
-	// First set of PCP values are garbage values, signal to logger to start recording when we're
-	// about to schedule a new task
-	RecordPCP bool
-
-	// This channel is closed when the program receives an interrupt,
-	// signalling that the program should shut down.
-	Shutdown chan struct{}
-	// This channel is closed after shutdown is closed, and only when all
-	// outstanding tasks have been cleaned up
-	Done chan struct{}
-
-	// Controls when to shutdown pcp logging
-	PCPLog chan struct{}
-
-	schedTrace *log.Logger
-}
-
-// New electron scheduler
-func NewFirstFitSortedWattsClassMapWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *FirstFitSortedWattsClassMapWattsProacCC {
-	sort.Sort(def.WattsSorter(tasks))
-
-	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	s := &FirstFitSortedWattsClassMapWattsProacCC{
-		tasks:          tasks,
-		ignoreWatts:    ignoreWatts,
-		Shutdown:       make(chan struct{}),
-		Done:           make(chan struct{}),
-		PCPLog:         make(chan struct{}),
-		running:        make(map[string]map[string]bool),
-		taskMonitor:    make(map[string][]def.Task),
-		availablePower: make(map[string]float64),
-		totalPower:     make(map[string]float64),
-		RecordPCP:      false,
-		capper:         powCap.GetClusterwideCapperInstance(),
-		ticker:         time.NewTicker(10 * time.Second),
-		recapTicker:    time.NewTicker(20 * time.Second),
-		isCapping:      false,
-		isRecapping:    false,
-		schedTrace:     log.New(logFile, "", log.LstdFlags),
-	}
-	return s
-}
-
-// mutex
-var ffswClassMapWattsProacCCMutex sync.Mutex
-
-func (s *FirstFitSortedWattsClassMapWattsProacCC) newTask(offer *mesos.Offer, task def.Task, powerClass string) *mesos.TaskInfo {
-	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
-	s.tasksCreated++
-
-	if !s.RecordPCP {
-		// Turn on logging.
-		s.RecordPCP = true
-		time.Sleep(1 * time.Second) // Make sure we're recording by the time the first task starts
-	}
-
-	// If this is our first time running into this Agent
-	if _, ok := s.running[offer.GetSlaveId().GoString()]; !ok {
-		s.running[offer.GetSlaveId().GoString()] = make(map[string]bool)
-	}
-
-	// Setting the task ID to the task. This is done so that we can consider each task to be different,
-	// even though they have the same parameters.
-	task.SetTaskID(*proto.String("electron-" + taskName))
-	// Add task to the list of tasks running on the node.
-	s.running[offer.GetSlaveId().GoString()][taskName] = true
-	if len(s.taskMonitor[*offer.Hostname]) == 0 {
-		s.taskMonitor[*offer.Hostname] = []def.Task{task}
-	} else {
-		s.taskMonitor[*offer.Hostname] = append(s.taskMonitor[*offer.Hostname], task)
-	}
-
-	resources := []*mesos.Resource{
-		mesosutil.NewScalarResource("cpus", task.CPU),
-		mesosutil.NewScalarResource("mem", task.RAM),
-	}
-
-	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[powerClass]))
-	}
-
-	return &mesos.TaskInfo{
-		Name: proto.String(taskName),
-		TaskId: &mesos.TaskID{
-			Value: proto.String("electron-" + taskName),
-		},
-		SlaveId:   offer.SlaveId,
-		Resources: resources,
-		Command: &mesos.CommandInfo{
-			Value: proto.String(task.CMD),
-		},
-		Container: &mesos.ContainerInfo{
-			Type: mesos.ContainerInfo_DOCKER.Enum(),
-			Docker: &mesos.ContainerInfo_DockerInfo{
-				Image:   proto.String(task.Image),
-				Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(), // Run everything isolated
-			},
-		},
-	}
-}
-
-func (s *FirstFitSortedWattsClassMapWattsProacCC) Disconnected(sched.SchedulerDriver) {
-	// Need to stop the capping process
-	s.ticker.Stop()
-	s.recapTicker.Stop()
-	ffswClassMapWattsProacCCMutex.Lock()
-	s.isCapping = false
-	ffswClassMapWattsProacCCMutex.Unlock()
-	log.Println("Framework disconnected with master")
-}
-
-// go routine to cap the entire cluster in regular intervals of time
-var ffswClassMapWattsProacCCCapValue = 0.0    // initial value to indicate that we haven't capped the cluster yet.
-var ffswClassMapWattsProacCCNewCapValue = 0.0 // newly computed cap value
-func (s *FirstFitSortedWattsClassMapWattsProacCC) startCapping() {
-	go func() {
-		for {
-			select {
-			case <-s.ticker.C:
-				// Need to cap the cluster only if new cap value different from the old cap value.
-				// This way we don't unnecessarily cap the cluster.
-				ffswClassMapWattsProacCCMutex.Lock()
-				if s.isCapping {
-					if int(math.Floor(ffswClassMapWattsProacCCNewCapValue+0.5)) != int(math.Floor(ffswClassMapWattsProacCCCapValue+0.5)) {
-						// updating cap value
-						ffswClassMapWattsProacCCCapValue = ffswClassMapWattsProacCCNewCapValue
-						if ffswClassMapWattsProacCCCapValue > 0.0 {
-							for _, host := range constants.Hosts {
-								// Rounding cap value to the nearest int
-								if err := rapl.Cap(host, "rapl", int(math.Floor(ffswClassMapWattsProacCCCapValue+0.5))); err != nil {
-									log.Println(err)
-								}
-							}
-							log.Printf("Capped the cluster to %d", int(math.Floor(ffswClassMapWattsProacCCCapValue+0.5)))
-						}
-					}
-				}
-				ffswClassMapWattsProacCCMutex.Unlock()
-			}
-		}
-	}()
-}
-
-// go routine to recap the entire cluster in regular intervals of time.
-var ffswClassMapWattsProacCCRecapValue = 0.0 // The cluster-wide cap value when recapping.
-func (s *FirstFitSortedWattsClassMapWattsProacCC) startRecapping() {
-	go func() {
-		for {
-			select {
-			case <-s.recapTicker.C:
-				ffswClassMapWattsProacCCMutex.Lock()
-				// If stopped performing cluster wide capping, then we need to recap
-				if s.isRecapping && ffswClassMapWattsProacCCRecapValue > 0.0 {
-					for _, host := range constants.Hosts {
-						// Rounding the cap value to the nearest int
-						if err := rapl.Cap(host, "rapl", int(math.Floor(ffswClassMapWattsProacCCRecapValue+0.5))); err != nil {
-							log.Println(err)
-						}
-					}
-					log.Printf("Recapping the cluster to %d", int(math.Floor(ffswClassMapWattsProacCCRecapValue+0.5)))
-				}
-				// Setting recapping to false
-				s.isRecapping = false
-				ffswClassMapWattsProacCCMutex.Unlock()
-			}
-		}
-	}()
-}
-
-// Stop the cluster wide capping
-func (s *FirstFitSortedWattsClassMapWattsProacCC) stopCapping() {
-	if s.isCapping {
-		log.Println("Stopping the cluster-wide capping.")
-		s.ticker.Stop()
-		ffswClassMapWattsProacCCMutex.Lock()
-		s.isCapping = false
-		s.isRecapping = true
-		ffswClassMapWattsProacCCMutex.Unlock()
-	}
-}
-
-// Stop the cluster wide recapping
-func (s *FirstFitSortedWattsClassMapWattsProacCC) stopRecapping() {
-	// If not capping, then definitely recapping.
-	if !s.isCapping && s.isRecapping {
-		log.Println("Stopping the cluster-wide re-capping.")
-		s.recapTicker.Stop()
-		ffswClassMapWattsProacCCMutex.Lock()
-		s.isRecapping = false
-		ffswClassMapWattsProacCCMutex.Unlock()
-	}
-}
-
-func (s *FirstFitSortedWattsClassMapWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
-	log.Printf("Received %d resource offers", len(offers))
-
-	// retrieving the available power for all the hosts in the offers.
-	for _, offer := range offers {
-		_, _, offerWatts := offerUtils.OfferAgg(offer)
-		s.availablePower[*offer.Hostname] = offerWatts
-		// setting total power if the first time
-		if _, ok := s.totalPower[*offer.Hostname]; !ok {
-			s.totalPower[*offer.Hostname] = offerWatts
-		}
-	}
-
-	for host, tpower := range s.totalPower {
-		log.Printf("TotalPower[%s] = %f", host, tpower)
-	}
-
-	for _, offer := range offers {
-		select {
-		case <-s.Shutdown:
-			log.Println("Done scheduling tasks: declining offer on [", offer.GetHostname(), "]")
-			driver.DeclineOffer(offer.Id, mesosUtils.LongFilter)
-
-			log.Println("Number of tasks still running: ", s.tasksRunning)
-			continue
-		default:
-		}
-
-		offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
-
-		// First fit strategy
-		offerTaken := false
-		for i := 0; i < len(s.tasks); i++ {
-			task := s.tasks[i]
-			// Check host if it exists
-			if task.Host != "" {
-				// Don't take offer if it doens't match our task's host requirement.
-				if !strings.HasPrefix(*offer.Hostname, task.Host) {
-					continue
-				}
-			}
-
-			// retrieving the powerClass for the offer
-			powerClass := offerUtils.PowerClass(offer)
-
-			// Decision to take the offer or not
-			if (s.ignoreWatts || (offerWatts >= task.ClassToWatts[powerClass])) &&
-				(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
-
-				// Capping the cluster if haven't yet started
-				if !s.isCapping {
-					ffswClassMapWattsProacCCMutex.Lock()
-					s.isCapping = true
-					ffswClassMapWattsProacCCMutex.Unlock()
-					s.startCapping()
-				}
-
-				fmt.Println("Watts being used: ", task.ClassToWatts[powerClass])
-				tempCap, err := s.capper.FCFSDeterminedCap(s.totalPower, &task)
-				if err == nil {
-					ffswClassMapWattsProacCCMutex.Lock()
-					ffswClassMapWattsProacCCNewCapValue = tempCap
-					ffswClassMapWattsProacCCMutex.Unlock()
-				} else {
-					log.Println("Failed to determine new cluster-wide cap: ")
-					log.Println(err)
-				}
-
-				log.Println("Co-Located with: ")
-				coLocated(s.running[offer.GetSlaveId().GoString()])
-
-				taskToSchedule := s.newTask(offer, task, powerClass)
-				s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
-				log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname())
-				driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{taskToSchedule}, mesosUtils.DefaultFilter)
-
-				offerTaken = true
-				fmt.Println("Inst: ", *task.Instances)
-				*task.Instances--
-				if *task.Instances <= 0 {
-					// All instances of task have been scheduled, remove it
-					s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
-
-					if len(s.tasks) == 0 {
-						log.Println("Done scheduling all tasks")
-						// Need to stop the cluster-wide capping as there aren't any more tasks to schedule
-						s.stopCapping()
-						s.startRecapping() // Load changes after every task finishes and hence, we need to change the capping of the cluster
-						close(s.Shutdown)
-					}
-				}
-				break // Offer taken, move on
-			}
-		}
-
-		// If there was no match for the task
-		if !offerTaken {
-			fmt.Println("There is not enough resources to launch a task:")
-			cpus, mem, watts := offerUtils.OfferAgg(offer)
-
-			log.Printf("<CPU: %f, RAM: %f, Watts: %f>\n", cpus, mem, watts)
-			driver.DeclineOffer(offer.Id, mesosUtils.DefaultFilter)
-		}
-	}
-}
-
-func (s *FirstFitSortedWattsClassMapWattsProacCC) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
-	log.Printf("Received task status [%s] for task [%s]", NameFor(status.State), *status.TaskId.Value)
-
-	if *status.State == mesos.TaskState_TASK_RUNNING {
-		s.tasksRunning++
-	} else if IsTerminal(status.State) {
-		delete(s.running[status.GetSlaveId().GoString()], *status.TaskId.Value)
-		// Need to remove the task from the window
-		s.capper.TaskFinished(*status.TaskId.Value)
-		// Determining the new cluster wide recap value
-		//tempCap, err := s.capper.NaiveRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
-		tempCap, err := s.capper.CleverRecap(s.totalPower, s.taskMonitor, *status.TaskId.Value)
-		if err == nil {
-			// If new determined cap value is different from the current recap value, then we need to recap
-			if int(math.Floor(tempCap+0.5)) != int(math.Floor(ffswClassMapWattsProacCCRecapValue+0.5)) {
-				ffswClassMapWattsProacCCRecapValue = tempCap
-				ffswClassMapWattsProacCCMutex.Lock()
-				s.isRecapping = true
-				ffswClassMapWattsProacCCMutex.Unlock()
-				log.Printf("Determined re-cap value: %f\n", ffswClassMapWattsProacCCRecapValue)
-			} else {
-				ffswClassMapWattsProacCCMutex.Lock()
-				s.isRecapping = false
-				ffswClassMapWattsProacCCMutex.Unlock()
-			}
-		} else {
-			log.Println(err)
-		}
-
-		s.tasksRunning--
-		if s.tasksRunning == 0 {
-			select {
-			case <-s.Shutdown:
-				// Need to stop the cluster-wide recapping
-				s.stopRecapping()
-				close(s.Done)
-			default:
-			}
-		}
-	}
-	log.Printf("DONE: Task status [%s] for task[%s]", NameFor(status.State), *status.TaskId.Value)
-}
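
Every capping path in these schedulers rounds the fractional cluster-wide cap to the nearest integer before handing it to rapl.Cap, via int(math.Floor(v + 0.5)). A tiny self-contained sketch of that idiom:

package main

import (
	"fmt"
	"math"
)

// roundCap rounds a fractional cap value to the nearest integer, the same
// int(math.Floor(v + 0.5)) idiom used before every rapl.Cap call above.
func roundCap(v float64) int {
	return int(math.Floor(v + 0.5))
}

func main() {
	fmt.Println(roundCap(93.4)) // 93
	fmt.Println(roundCap(93.5)) // 94
}
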
diff --git a/schedulers/proactiveclusterwidecappingranked.go b/schedulers/firstfitSortedWattsProacCC.go
similarity index 86%
rename from schedulers/proactiveclusterwidecappingranked.go
rename to schedulers/firstfitSortedWattsProacCC.go
index 52118db..e47cb14 100644
--- a/schedulers/proactiveclusterwidecappingranked.go
+++ b/schedulers/firstfitSortedWattsProacCC.go
@@ -32,17 +32,22 @@ import (
 )
 
 // Decides whether to take an offer or not
-func (_ *ProactiveClusterwideCapRanked) takeOffer(offer *mesos.Offer, task def.Task) bool {
+func (s *FirstFitSortedWattsProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
 	offer_cpu, offer_mem, offer_watts := offerUtils.OfferAgg(offer)
 
-	if offer_cpu >= task.CPU && offer_mem >= task.RAM && offer_watts >= task.Watts {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsToConsider
+		log.Fatal(err)
+	}
+	if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || offer_watts >= wattsConsideration) {
 		return true
 	}
 	return false
 }
 
 // electronScheduler implements the Scheduler interface
-type ProactiveClusterwideCapRanked struct {
+type FirstFitSortedWattsProacCC struct {
 	base           // Type embedded to inherit common functions
 	tasksCreated   int
 	tasksRunning   int
@@ -53,6 +58,7 @@ type ProactiveClusterwideCapRanked struct {
 	availablePower map[string]float64    // available power for each node in the cluster.
 	totalPower     map[string]float64    // total power for each node in the cluster.
 	ignoreWatts    bool
+	classMapWatts  bool
 	capper         *powCap.ClusterwideCapper
 	ticker         *time.Ticker
 	recapTicker    *time.Ticker
@@ -78,16 +84,21 @@ type ProactiveClusterwideCapRanked struct {
 }
 
 // New electron scheduler.
-func NewProactiveClusterwideCapRanked(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *ProactiveClusterwideCapRanked {
+func NewFirstFitSortedWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+	classMapWatts bool) *FirstFitSortedWattsProacCC {
+
+	// Sorting tasks in ascending order of watts
+	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	s := &ProactiveClusterwideCapRanked{
+	s := &FirstFitSortedWattsProacCC{
 		tasks:          tasks,
 		ignoreWatts:    ignoreWatts,
+		classMapWatts:  classMapWatts,
 		Shutdown:       make(chan struct{}),
 		Done:           make(chan struct{}),
 		PCPLog:         make(chan struct{}),
@@ -109,7 +120,7 @@ func NewProactiveClusterwideCapRanked(tasks []def.Task, ignoreWatts bool, schedT
 // mutex
 var rankedMutex sync.Mutex
 
-func (s *ProactiveClusterwideCapRanked) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
+func (s *FirstFitSortedWattsProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
 	s.tasksCreated++
 
@@ -141,7 +152,12 @@ func (s *ProactiveClusterwideCapRanked) newTask(offer *mesos.Offer, task def.Tas
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsToConsider
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
@@ -164,7 +180,7 @@ func (s *ProactiveClusterwideCapRanked) newTask(offer *mesos.Offer, task def.Tas
 	}
 }
 
-func (s *ProactiveClusterwideCapRanked) Disconnected(sched.SchedulerDriver) {
+func (s *FirstFitSortedWattsProacCC) Disconnected(sched.SchedulerDriver) {
 	// Need to stop the capping process.
 	s.ticker.Stop()
 	s.recapTicker.Stop()
@@ -176,7 +192,7 @@ func (s *ProactiveClusterwideCapRanked) Disconnected(sched.SchedulerDriver) {
 
 // go routine to cap the entire cluster in regular intervals of time.
 var rankedCurrentCapValue = 0.0 // initial value to indicate that we haven't capped the cluster yet.
-func (s *ProactiveClusterwideCapRanked) startCapping() {
+func (s *FirstFitSortedWattsProacCC) startCapping() {
 	go func() {
 		for {
 			select {
@@ -185,12 +201,12 @@ func (s *ProactiveClusterwideCapRanked) startCapping() {
 				rankedMutex.Lock()
 				if rankedCurrentCapValue > 0.0 {
 					for _, host := range constants.Hosts {
-						// Rounding curreCapValue to the nearest int.
-						if err := rapl.Cap(host, "rapl", int(math.Floor(rankedCurrentCapValue+0.5))); err != nil {
+						// Rounding currentCapValue to the nearest int.
+						if err := rapl.Cap(host, "rapl", int(math.Floor(rankedCurrentCapValue + 0.5))); err != nil {
 							log.Println(err)
 						}
 					}
-					log.Printf("Capped the cluster to %d", int(math.Floor(rankedCurrentCapValue+0.5)))
+					log.Printf("Capped the cluster to %d", int(math.Floor(rankedCurrentCapValue + 0.5)))
 				}
 				rankedMutex.Unlock()
 			}
@@ -200,7 +216,7 @@ func (s *ProactiveClusterwideCapRanked) startCapping() {
 
 // go routine to cap the entire cluster in regular intervals of time.
 var rankedRecapValue = 0.0 // The cluster wide cap value when recapping.
-func (s *ProactiveClusterwideCapRanked) startRecapping() {
+func (s *FirstFitSortedWattsProacCC) startRecapping() {
 	go func() {
 		for {
 			select {
@@ -209,12 +225,12 @@ func (s *ProactiveClusterwideCapRanked) startRecapping() {
 				// If stopped performing cluster wide capping then we need to explicitly cap the entire cluster.
 				if s.isRecapping && rankedRecapValue > 0.0 {
 					for _, host := range constants.Hosts {
-						// Rounding curreCapValue to the nearest int.
-						if err := rapl.Cap(host, "rapl", int(math.Floor(rankedRecapValue+0.5))); err != nil {
+						// Rounding currentCapValue to the nearest int.
+						if err := rapl.Cap(host, "rapl", int(math.Floor(rankedRecapValue + 0.5))); err != nil {
 							log.Println(err)
 						}
 					}
-					log.Printf("Recapped the cluster to %d", int(math.Floor(rankedRecapValue+0.5)))
+					log.Printf("Recapped the cluster to %d", int(math.Floor(rankedRecapValue + 0.5)))
 				}
 				// setting recapping to false
 				s.isRecapping = false
@@ -225,7 +241,7 @@ func (s *ProactiveClusterwideCapRanked) startRecapping() {
 }
 
 // Stop cluster wide capping
-func (s *ProactiveClusterwideCapRanked) stopCapping() {
+func (s *FirstFitSortedWattsProacCC) stopCapping() {
 	if s.isCapping {
 		log.Println("Stopping the cluster wide capping.")
 		s.ticker.Stop()
@@ -237,7 +253,7 @@ func (s *ProactiveClusterwideCapRanked) stopCapping() {
 }
 
 // Stop cluster wide Recapping
-func (s *ProactiveClusterwideCapRanked) stopRecapping() {
+func (s *FirstFitSortedWattsProacCC) stopRecapping() {
 	// If not capping, then definitely recapping.
 	if !s.isCapping && s.isRecapping {
 		log.Println("Stopping the cluster wide re-capping.")
@@ -248,7 +264,7 @@ func (s *ProactiveClusterwideCapRanked) stopRecapping() {
 	}
 }
 
-func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
+func (s *FirstFitSortedWattsProacCC) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
 	log.Printf("Received %d resource offers", len(offers))
 
 	// retrieving the available power for all the hosts in the offers.
@@ -265,16 +281,6 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri
 		log.Printf("TotalPower[%s] = %f", host, tpower)
 	}
 
-	// sorting the tasks in ascending order of watts.
-	if len(s.tasks) > 0 {
-		sort.Sort(def.WattsSorter(s.tasks))
-		// calculating the total number of tasks ranked.
-		numberOfRankedTasks := 0
-		for _, task := range s.tasks {
-			numberOfRankedTasks += *task.Instances
-		}
-		log.Printf("Ranked %d tasks in ascending order of tasks.", numberOfRankedTasks)
-	}
 	for _, offer := range offers {
 		select {
 		case <-s.Shutdown:
@@ -303,6 +309,7 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri
 
 		for i := 0; i < len(s.tasks); i++ {
 			task := s.tasks[i]
+
 			// Don't take offer if it doesn't match our task's host requirement.
 			if !strings.HasPrefix(*offer.Hostname, task.Host) {
 				continue
@@ -364,7 +371,7 @@ func (s *ProactiveClusterwideCapRanked) ResourceOffers(driver sched.SchedulerDri
 	}
 }
 
-func (s *ProactiveClusterwideCapRanked) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
+func (s *FirstFitSortedWattsProacCC) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
 	log.Printf("Received task status [%s] for task [%s]\n", NameFor(status.State), *status.TaskId.Value)
 
 	if *status.State == mesos.TaskState_TASK_RUNNING {
diff --git a/schedulers/firstfitSortedWattsSortedOffers.go b/schedulers/firstfitSortedWattsSortedOffers.go
index 9ceb095..1047bff 100644
--- a/schedulers/firstfitSortedWattsSortedOffers.go
+++ b/schedulers/firstfitSortedWattsSortedOffers.go
@@ -23,7 +23,12 @@ func (s *FirstFitSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
 
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= task.Watts) {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) {
 		return true
 	}
 
@@ -32,13 +37,14 @@ func (s *FirstFitSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def
 
 // electronScheduler implements the Scheduler interface
 type FirstFitSortedWattsSortedOffers struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	ignoreWatts  bool
+	base          // Type embedded to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	ignoreWatts   bool
+	classMapWatts bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -58,7 +64,8 @@ type FirstFitSortedWattsSortedOffers struct {
 }
 
 // New electron scheduler
-func NewFirstFitSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *FirstFitSortedWattsSortedOffers {
+func NewFirstFitSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+	classMapWatts bool) *FirstFitSortedWattsSortedOffers {
 
 	// Sorting the tasks in increasing order of watts requirement.
 	sort.Sort(def.WattsSorter(tasks))
@@ -69,14 +76,15 @@ func NewFirstFitSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, sche
 	}
 
 	s := &FirstFitSortedWattsSortedOffers{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		RecordPCP:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -105,7 +113,13 @@ func (s *FirstFitSortedWattsSortedOffers) newTask(offer *mesos.Offer, task def.T
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
diff --git a/schedulers/firstfitsortedwatts.go b/schedulers/firstfitsortedwatts.go
index b62d5b3..6dc6ee9 100644
--- a/schedulers/firstfitsortedwatts.go
+++ b/schedulers/firstfitsortedwatts.go
@@ -23,7 +23,12 @@ func (s *FirstFitSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
 
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= task.Watts) {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) {
 		return true
 	}
 
@@ -32,13 +37,14 @@ func (s *FirstFitSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool
 
 // electronScheduler implements the Scheduler interface
 type FirstFitSortedWatts struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	ignoreWatts  bool
+	base          // Type embedded to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	ignoreWatts   bool
+	classMapWatts bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -58,7 +64,7 @@ type FirstFitSortedWatts struct {
 }
 
 // New electron scheduler
-func NewFirstFitSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *FirstFitSortedWatts {
+func NewFirstFitSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFitSortedWatts {
 
 	sort.Sort(def.WattsSorter(tasks))
 
@@ -68,14 +74,15 @@ func NewFirstFitSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix
 	}
 
 	s := &FirstFitSortedWatts{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		RecordPCP:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -104,7 +111,13 @@ func (s *FirstFitSortedWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.Watts))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
diff --git a/schedulers/firstfitwattsonly.go b/schedulers/firstfitwattsonly.go
index 12f6331..a349b57 100644
--- a/schedulers/firstfitwattsonly.go
+++ b/schedulers/firstfitwattsonly.go
@@ -16,13 +16,18 @@ import (
 )
 
 // Decides if to take an offer or not
-func (*FirstFitWattsOnly) takeOffer(offer *mesos.Offer, task def.Task) bool {
+func (s *FirstFitWattsOnly) takeOffer(offer *mesos.Offer, task def.Task) bool {
 
 	_, _, watts := offerUtils.OfferAgg(offer)
 
 	//TODO: Insert watts calculation here instead of taking them as a parameter
 
-	if watts >= task.Watts {
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
+	if watts >= wattsConsideration {
 		return true
 	}
 
@@ -30,13 +35,14 @@ func (*FirstFitWattsOnly) takeOffer(offer *mesos.Offer, task def.Task) bool {
 }
 
 type FirstFitWattsOnly struct {
-	base         // Type embedded to inherit common functions
-	tasksCreated int
-	tasksRunning int
-	tasks        []def.Task
-	metrics      map[string]def.Metric
-	running      map[string]map[string]bool
-	ignoreWatts  bool
+	base          // Type embedded to inherit common functions
+	tasksCreated  int
+	tasksRunning  int
+	tasks         []def.Task
+	metrics       map[string]def.Metric
+	running       map[string]map[string]bool
+	ignoreWatts   bool
+	classMapWatts bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -56,7 +62,7 @@ type FirstFitWattsOnly struct {
 }
 
 // New electron scheduler
-func NewFirstFitWattsOnly(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *FirstFitWattsOnly {
+func NewFirstFitWattsOnly(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFitWattsOnly {
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
 	if err != nil {
@@ -64,14 +70,15 @@ func NewFirstFitWattsOnly(tasks []def.Task, ignoreWatts bool, schedTracePrefix s
 	}
 
 	s := &FirstFitWattsOnly{
-		tasks:       tasks,
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		tasks:         tasks,
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		RecordPCP:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -94,8 +101,13 @@ func (s *FirstFitWattsOnly) newTask(offer *mesos.Offer, task def.Task) *mesos.Ta
 	// Add task to list of tasks running on node
 	s.running[offer.GetSlaveId().GoString()][taskName] = true
 
+	wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+	if err != nil {
+		// Error in determining wattsConsideration
+		log.Fatal(err)
+	}
 	resources := []*mesos.Resource{
-		mesosutil.NewScalarResource("watts", task.Watts),
+		mesosutil.NewScalarResource("watts", wattsConsideration),
 	}
 
 	return &mesos.TaskInfo{
diff --git a/schedulers/helpers.go b/schedulers/helpers.go
index 1891808..23d1441 100644
--- a/schedulers/helpers.go
+++ b/schedulers/helpers.go
@@ -3,9 +3,7 @@ package schedulers
 import (
 	"fmt"
 	"log"
-	"bitbucket.org/sunybingcloud/electron/def"
-	mesos "github.com/mesos/mesos-go/mesosproto"
-	"bitbucket.org/sunybingcloud/electron/utilities/offerUtils"
+	"bitbucket.org/sunybingcloud/electron/constants"
 )
 
 func coLocated(tasks map[string]bool) {
@@ -17,21 +15,13 @@ func coLocated(tasks map[string]bool) {
 	fmt.Println("---------------------")
 }
 
-/*
- Determine the watts value to consider for each task.
-
- This value could either be task.Watts or task.ClassToWatts[<power class>]
- If task.ClassToWatts is not present, then return task.Watts (this would be for workloads which don't have classMapWatts)
-*/
-func wattsToConsider(task def.Task, classMapWatts bool, offer *mesos.Offer) float64 {
-	if classMapWatts {
-		// checking if ClassToWatts was present in the workload.
-		if task.ClassToWatts != nil {
-			return task.ClassToWatts[offerUtils.PowerClass(offer)]
-		} else {
-			return task.Watts
+// Get the powerClass of the given hostname
+func hostToPowerClass(hostName string) string {
+	for powerClass, hosts := range constants.PowerClasses {
+		if ok := hosts[hostName]; ok {
+			return powerClass
 		}
-	} else {
-		return task.Watts
 	}
+	return ""
 }
+
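
The new hostToPowerClass helper reverse-maps a hostname onto its power class through constants.PowerClasses. A small usage sketch; the map shape (power class -> set of hostnames) is inferred from the lookups in this patch, and the hostnames are made up:

package main

import "fmt"

// PowerClasses stands in for constants.PowerClasses: power class -> set of hosts.
var PowerClasses = map[string]map[string]bool{
	"A": {"host-a1": true},
	"B": {"host-b1": true},
	"C": {"host-c1": true},
}

// hostToPowerClass returns the power class a host belongs to, or "" if unknown,
// mirroring the helper added to schedulers/helpers.go.
func hostToPowerClass(hostName string) string {
	for powerClass, hosts := range PowerClasses {
		if hosts[hostName] {
			return powerClass
		}
	}
	return ""
}

func main() {
	fmt.Println(hostToPowerClass("host-b1"))      // B
	fmt.Println(hostToPowerClass("unknown-host")) // ""
}
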
diff --git a/schedulers/topHeavy.go b/schedulers/topHeavy.go
index ab4fdd6..61be09c 100644
--- a/schedulers/topHeavy.go
+++ b/schedulers/topHeavy.go
@@ -35,6 +35,7 @@ type TopHeavy struct {
 	metrics                map[string]def.Metric
 	running                map[string]map[string]bool
 	ignoreWatts            bool
+	classMapWatts          bool
 	smallTasks, largeTasks []def.Task
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
@@ -55,7 +56,7 @@ type TopHeavy struct {
 }
 
 // New electron scheduler
-func NewPackSmallSpreadBig(tasks []def.Task, ignoreWatts bool, schedTracePrefix string) *TopHeavy {
+func NewTopHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *TopHeavy {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -67,20 +68,21 @@ func NewPackSmallSpreadBig(tasks []def.Task, ignoreWatts bool, schedTracePrefix
 	// Classification done based on MMPU watts requirements.
 	mid := int(math.Floor((float64(len(tasks)) / 2.0) + 0.5))
 	s := &TopHeavy{
-		smallTasks:  tasks[:mid],
-		largeTasks:  tasks[mid+1:],
-		ignoreWatts: ignoreWatts,
-		Shutdown:    make(chan struct{}),
-		Done:        make(chan struct{}),
-		PCPLog:      make(chan struct{}),
-		running:     make(map[string]map[string]bool),
-		RecordPCP:   false,
-		schedTrace:  log.New(logFile, "", log.LstdFlags),
+		smallTasks:    tasks[:mid],
+		largeTasks:    tasks[mid+1:],
+		ignoreWatts:   ignoreWatts,
+		classMapWatts: classMapWatts,
+		Shutdown:      make(chan struct{}),
+		Done:          make(chan struct{}),
+		PCPLog:        make(chan struct{}),
+		running:       make(map[string]map[string]bool),
+		RecordPCP:     false,
+		schedTrace:    log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
 
-func (s *TopHeavy) newTask(offer *mesos.Offer, task def.Task, newTaskClass string) *mesos.TaskInfo {
+func (s *TopHeavy) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	taskName := fmt.Sprintf("%s-%d", task.Name, *task.Instances)
 	s.tasksCreated++
 
@@ -104,7 +106,13 @@ func (s *TopHeavy) newTask(offer *mesos.Offer, task def.Task, newTaskClass strin
 	}
 
 	if !s.ignoreWatts {
-		resources = append(resources, mesosutil.NewScalarResource("watts", task.ClassToWatts[newTaskClass]))
+		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
+			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
+			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
+		} else {
+			// Error in determining wattsConsideration
+			log.Fatal(err)
+		}
 	}
 
 	return &mesos.TaskInfo{
@@ -136,11 +144,10 @@ func (s *TopHeavy) shutDownIfNecessary() {
 }
 
 // create TaskInfo and log scheduling trace
-func (s *TopHeavy) createTaskInfoAndLogSchedTrace(offer *mesos.Offer,
-	powerClass string, task def.Task) *mesos.TaskInfo {
+func (s *TopHeavy) createTaskInfoAndLogSchedTrace(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	log.Println("Co-Located with:")
 	coLocated(s.running[offer.GetSlaveId().GoString()])
-	taskToSchedule := s.newTask(offer, task, powerClass)
+	taskToSchedule := s.newTask(offer, task)
 
 	fmt.Println("Inst: ", *task.Instances)
 	s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
@@ -169,24 +176,24 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) {
 		taken := false
 		for i := 0; i < len(s.smallTasks); i++ {
 			task := s.smallTasks[i]
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
 
 			for *task.Instances > 0 {
-				powerClass := offerUtils.PowerClass(offer)
 				// Does the task fit
 				// OR lazy evaluation. If ignore watts is set to true, second statement won't
 				// be evaluated.
-				wattsToConsider := task.Watts
-				if !s.ignoreWatts {
-					wattsToConsider = task.ClassToWatts[powerClass]
-				}
-				if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsToConsider))) &&
+				if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsConsideration))) &&
 					(offerCPU >= (totalCPU + task.CPU)) &&
 					(offerRAM >= (totalRAM + task.RAM)) {
 					taken = true
-					totalWatts += wattsToConsider
+					totalWatts += wattsConsideration
 					totalCPU += task.CPU
 					totalRAM += task.RAM
-					tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task))
+					tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task))
 
 					if *task.Instances <= 0 {
 						// All instances of task have been scheduled, remove it
@@ -231,17 +238,17 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) {
 		offerTaken := false
 		for i := 0; i < len(s.largeTasks); i++ {
 			task := s.largeTasks[i]
-			powerClass := offerUtils.PowerClass(offer)
+			wattsConsideration, err := def.WattsToConsider(task, s.classMapWatts, offer)
+			if err != nil {
+				// Error in determining wattsConsideration
+				log.Fatal(err)
+			}
 
 			// Decision to take the offer or not
-			wattsToConsider := task.Watts
-			if !s.ignoreWatts {
-				wattsToConsider = task.ClassToWatts[powerClass]
-			}
-			if (s.ignoreWatts || (offerWatts >= wattsToConsider)) &&
+			if (s.ignoreWatts || (offerWatts >= wattsConsideration)) &&
 				(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
 				offerTaken = true
-				tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, powerClass, task))
+				tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task))
 				log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname())
 				driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, mesosUtils.DefaultFilter)
 
@@ -286,10 +293,10 @@ func (s *TopHeavy) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.
 		default:
 		}
 
-		if constants.PowerClasses["ClassA"][*offer.Hostname] ||
-			constants.PowerClasses["ClassB"][*offer.Hostname] {
+		if constants.PowerClasses["A"][*offer.Hostname] ||
+			constants.PowerClasses["B"][*offer.Hostname] {
 			offersClassAB = append(offersClassAB, offer)
-		} else if constants.PowerClasses["ClassC"][*offer.Hostname] {
+		} else if constants.PowerClasses["C"][*offer.Hostname] {
 			offersClassC = append(offersClassC, offer)
 		}
 	}
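
TopHeavy's pack phase admits another instance of a small task only while the offer can still cover the CPU, RAM, and watts already committed to it. A minimal sketch of that accumulation check as it stands in this patch (ignoreWatts is flipped to wattsAsAResource in the next patch); the function and the values are simplified stand-ins:

package main

import "fmt"

// fits reports whether one more task instance fits in what remains of an offer
// once the resources already committed to it (total*) are accounted for,
// mirroring the guard in TopHeavy.pack.
func fits(ignoreWatts bool,
	offerCPU, offerRAM, offerWatts float64,
	totalCPU, totalRAM, totalWatts float64,
	taskCPU, taskRAM, taskWatts float64) bool {
	return (ignoreWatts || offerWatts >= totalWatts+taskWatts) &&
		offerCPU >= totalCPU+taskCPU &&
		offerRAM >= totalRAM+taskRAM
}

func main() {
	fmt.Println(fits(false, 8, 16384, 200, 4, 8192, 120, 2, 4096, 60))  // true
	fmt.Println(fits(false, 8, 16384, 200, 4, 8192, 120, 2, 4096, 100)) // false: watts would exceed the offer
	fmt.Println(fits(true, 8, 16384, 200, 4, 8192, 120, 2, 4096, 100))  // true: watts check skipped
}
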
diff --git a/utilities/utils.go b/utilities/utils.go
index 4c4444e..6662c59 100644
--- a/utilities/utils.go
+++ b/utilities/utils.go
@@ -1,6 +1,8 @@
 package utilities
 
-import "errors"
+import (
+	"errors"
+)
 
 /*
 The Pair and PairList have been taken from google groups forum,
@@ -44,11 +46,3 @@ func OrderedKeys(plist PairList) ([]string, error) {
 	return orderedKeys, nil
 }
 
-// determine the max value
-func Max(a, b float64) float64 {
-	if a > b {
-		return a
-	} else {
-		return b
-	}
-}
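
The utilities.Max helper is dropped here (it appears to have no remaining callers); any future float64 maximum can rely on the standard library instead, e.g.:

package main

import (
	"fmt"
	"math"
)

func main() {
	// math.Max covers what the removed utilities.Max helper did.
	fmt.Println(math.Max(3.2, 7.9)) // 7.9
}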

From 57512ac2ddcc98b10f8c045048e8231e6624d958 Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Thu, 9 Feb 2017 18:41:40 -0500
Subject: [PATCH 06/15] Changed ignoreWatts to wattsAsAResource. This required
 flipping the condition checks that use these variables.

---
 scheduler.go                                  |  6 +-
 schedulers/binPackSortedWattsSortedOffers.go  | 42 +++++-----
 schedulers/binpackedpistoncapping.go          | 56 ++++++-------
 schedulers/binpacksortedwatts.go              | 42 +++++-----
 schedulers/bottomHeavy.go                     | 30 +++----
 schedulers/bpswMaxMin.go                      | 12 +--
 schedulers/bpswMaxMinPistonCapping.go         | 58 +++++++-------
 schedulers/bpswMaxMinProacCC.go               | 74 ++++++++---------
 schedulers/firstfit.go                        | 40 +++++-----
 schedulers/firstfitProacCC.go                 | 72 ++++++++---------
 schedulers/firstfitSortedOffers.go            | 40 +++++-----
 schedulers/firstfitSortedWattsProacCC.go      | 80 +++++++++----------
 schedulers/firstfitSortedWattsSortedOffers.go | 40 +++++-----
 schedulers/firstfitsortedwatts.go             | 40 +++++-----
 schedulers/firstfitwattsonly.go               | 36 ++++-----
 schedulers/topHeavy.go                        | 30 +++----
 16 files changed, 349 insertions(+), 349 deletions(-)
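
Renaming ignoreWatts to wattsAsAResource inverts the flag's meaning, so guards of the form s.ignoreWatts || ... become !s.wattsAsAResource || ..., and if !s.ignoreWatts blocks become if s.wattsAsAResource. A minimal sketch of the flipped takeOffer-style guard (the receiver type is a simplified stand-in for the schedulers touched below):

package main

import "fmt"

type scheduler struct {
	wattsAsAResource bool
}

// takeOffer mirrors the flipped guard: when watts is not treated as a resource,
// the watts comparison is skipped entirely.
func (s scheduler) takeOffer(offerWatts, wattsConsideration float64) bool {
	return !s.wattsAsAResource || offerWatts >= wattsConsideration
}

func main() {
	on := scheduler{wattsAsAResource: true}
	off := scheduler{wattsAsAResource: false}
	fmt.Println(on.takeOffer(100, 75)) // true
	fmt.Println(on.takeOffer(50, 75))  // false
	fmt.Println(off.takeOffer(50, 75)) // true: watts not treated as a resource
}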

diff --git a/scheduler.go b/scheduler.go
index 82d7dba..9b6dc45 100644
--- a/scheduler.go
+++ b/scheduler.go
@@ -17,7 +17,7 @@ import (
 
 var master = flag.String("master", "xavier:5050", "Location of leading Mesos master")
 var tasksFile = flag.String("workload", "", "JSON file containing task definitions")
-var ignoreWatts = flag.Bool("ignoreWatts", false, "Ignore watts in offers")
+var wattsAsAResource = flag.Bool("wattsAsAResource", false, "Enable Watts as a Resource")
 var pcplogPrefix = flag.String("logPrefix", "", "Prefix for pcplog")
 var hiThreshold = flag.Float64("hiThreshold", 0.0, "Upperbound for when we should start capping")
 var loThreshold = flag.Float64("loThreshold", 0.0, "Lowerbound for when we should start uncapping")
@@ -27,7 +27,7 @@ var classMapWatts = flag.Bool("classMapWatts", false, "Enable mapping of watts t
 func init() {
 	flag.StringVar(master, "m", "xavier:5050", "Location of leading Mesos master (shorthand)")
 	flag.StringVar(tasksFile, "w", "", "JSON file containing task definitions (shorthand)")
-	flag.BoolVar(ignoreWatts, "i", false, "Ignore watts in offers (shorthand)")
+	flag.BoolVar(wattsAsAResource, "waar", false, "Enable Watts as a Resource")
 	flag.StringVar(pcplogPrefix, "p", "", "Prefix for pcplog (shorthand)")
 	flag.Float64Var(hiThreshold, "ht", 700.0, "Upperbound for when we should start capping (shorthand)")
 	flag.Float64Var(loThreshold, "lt", 400.0, "Lowerbound for when we should start uncapping (shorthand)")
@@ -60,7 +60,7 @@ func main() {
 	startTime := time.Now().Format("20060102150405")
 	logPrefix := *pcplogPrefix + "_" + startTime
 
-	scheduler := schedulers.NewBinPackedPistonCapper(tasks, *ignoreWatts, logPrefix, *classMapWatts)
+	scheduler := schedulers.NewBinPackedPistonCapper(tasks, *wattsAsAResource, logPrefix, *classMapWatts)
 	driver, err := sched.NewMesosSchedulerDriver(sched.DriverConfig{
 		Master: *master,
 		Framework: &mesos.FrameworkInfo{
diff --git a/schedulers/binPackSortedWattsSortedOffers.go b/schedulers/binPackSortedWattsSortedOffers.go
index 0eae312..9c27aad 100644
--- a/schedulers/binPackSortedWattsSortedOffers.go
+++ b/schedulers/binPackSortedWattsSortedOffers.go
@@ -28,7 +28,7 @@ func (s *BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.
 		// Error in determining wattsConsideration
 		log.Fatal(err)
 	}
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) {
+	if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || (watts >= wattsConsideration)) {
 		return true
 	}
 
@@ -36,14 +36,14 @@ func (s *BinPackSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def.
 }
 
 type BinPackSortedWattsSortedOffers struct {
-	base          // Type embedded to inherit common functions
-	tasksCreated  int
-	tasksRunning  int
-	tasks         []def.Task
-	metrics       map[string]def.Metric
-	running       map[string]map[string]bool
-	ignoreWatts   bool
-	classMapWatts bool
+	base             // Type embedded to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	wattsAsAResource bool
+	classMapWatts    bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -63,7 +63,7 @@ type BinPackSortedWattsSortedOffers struct {
 }
 
 // New electron scheduler
-func NewBinPackSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+func NewBinPackSortedWattsSortedOffers(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
 	classMapWatts bool) *BinPackSortedWattsSortedOffers {
 	sort.Sort(def.WattsSorter(tasks))
 
@@ -73,15 +73,15 @@ func NewBinPackSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, sched
 	}
 
 	s := &BinPackSortedWattsSortedOffers{
-		tasks:         tasks,
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		RecordPCP:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		RecordPCP:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -109,7 +109,7 @@ func (s *BinPackSortedWattsSortedOffers) newTask(offer *mesos.Offer, task def.Ta
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@@ -190,7 +190,7 @@ func (s *BinPackSortedWattsSortedOffers) ResourceOffers(driver sched.SchedulerDr
 
 			for *task.Instances > 0 {
 				// Does the task fit
-				if (s.ignoreWatts || (offer_watts >= (totalWatts + wattsConsideration))) &&
+				if (!s.wattsAsAResource || (offer_watts >= (totalWatts + wattsConsideration))) &&
 					(offer_cpu >= (totalCPU + task.CPU)) &&
 					(offer_ram >= (totalRAM + task.RAM)) {
 
diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go
index b77a89e..ca5ab5a 100644
--- a/schedulers/binpackedpistoncapping.go
+++ b/schedulers/binpackedpistoncapping.go
@@ -27,18 +27,18 @@ import (
   corresponding to the load on that node.
 */
 type BinPackedPistonCapper struct {
-	base          // Type embedded to inherit common functions
-	tasksCreated  int
-	tasksRunning  int
-	tasks         []def.Task
-	metrics       map[string]def.Metric
-	running       map[string]map[string]bool
-	taskMonitor   map[string][]def.Task
-	totalPower    map[string]float64
-	ignoreWatts   bool
-	classMapWatts bool
-	ticker        *time.Ticker
-	isCapping     bool
+	base             // Type embedded to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	taskMonitor      map[string][]def.Task
+	totalPower       map[string]float64
+	wattsAsAResource bool
+	classMapWatts    bool
+	ticker           *time.Ticker
+	isCapping        bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule the new task.
@@ -59,7 +59,7 @@ type BinPackedPistonCapper struct {
 }
 
 // New electron scheduler.
-func NewBinPackedPistonCapper(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+func NewBinPackedPistonCapper(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
 	classMapWatts bool) *BinPackedPistonCapper {
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -68,19 +68,19 @@ func NewBinPackedPistonCapper(tasks []def.Task, ignoreWatts bool, schedTracePref
 	}
 
 	s := &BinPackedPistonCapper{
-		tasks:         tasks,
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		taskMonitor:   make(map[string][]def.Task),
-		totalPower:    make(map[string]float64),
-		RecordPCP:     false,
-		ticker:        time.NewTicker(5 * time.Second),
-		isCapping:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		taskMonitor:      make(map[string][]def.Task),
+		totalPower:       make(map[string]float64),
+		RecordPCP:        false,
+		ticker:           time.NewTicker(5 * time.Second),
+		isCapping:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -93,7 +93,7 @@ func (s *BinPackedPistonCapper) takeOffer(offer *mesos.Offer, offerWatts float64
 		// Error in determining wattsToConsider
 		log.Fatal(err)
 	}
-	if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsConsideration))) &&
+	if (!s.wattsAsAResource || (offerWatts >= (totalWatts + wattsConsideration))) &&
 		(offerCPU >= (totalCPU + task.CPU)) &&
 		(offerRAM >= (totalRAM + task.RAM)) {
 		return true
@@ -137,7 +137,7 @@ func (s *BinPackedPistonCapper) newTask(offer *mesos.Offer, task def.Task) *meso
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
diff --git a/schedulers/binpacksortedwatts.go b/schedulers/binpacksortedwatts.go
index 215341a..936f7f6 100644
--- a/schedulers/binpacksortedwatts.go
+++ b/schedulers/binpacksortedwatts.go
@@ -28,7 +28,7 @@ func (s *BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
 		// Error in determining wattsConsideration
 		log.Fatal(err)
 	}
-	if cpus >= task.CPU && mem >= task.RAM && watts >= wattsConsideration {
+	if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || (watts >= wattsConsideration)) {
 		return true
 	}
 
@@ -36,14 +36,14 @@ func (s *BinPackSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
 }
 
 type BinPackSortedWatts struct {
-	base          // Type embedded to inherit common functions
-	tasksCreated  int
-	tasksRunning  int
-	tasks         []def.Task
-	metrics       map[string]def.Metric
-	running       map[string]map[string]bool
-	ignoreWatts   bool
-	classMapWatts bool
+	base             // Type embedded to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	wattsAsAResource bool
+	classMapWatts    bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -63,7 +63,7 @@ type BinPackSortedWatts struct {
 }
 
 // New electron scheduler
-func NewBinPackSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BinPackSortedWatts {
+func NewBinPackSortedWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BinPackSortedWatts {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -72,15 +72,15 @@ func NewBinPackSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix
 	}
 
 	s := &BinPackSortedWatts{
-		tasks:         tasks,
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		RecordPCP:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		RecordPCP:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -108,7 +108,7 @@ func (s *BinPackSortedWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.T
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@@ -178,7 +178,7 @@ func (s *BinPackSortedWatts) ResourceOffers(driver sched.SchedulerDriver, offers
 
 			for *task.Instances > 0 {
 				// Does the task fit
-				if (s.ignoreWatts || (offer_watts >= (totalWatts + wattsConsideration))) &&
+				if (!s.wattsAsAResource || (offer_watts >= (totalWatts + wattsConsideration))) &&
 					(offer_cpu >= (totalCPU + task.CPU)) &&
 					(offer_ram >= (totalRAM + task.RAM)) {
 
diff --git a/schedulers/bottomHeavy.go b/schedulers/bottomHeavy.go
index a0bf3b4..7ee8fca 100644
--- a/schedulers/bottomHeavy.go
+++ b/schedulers/bottomHeavy.go
@@ -34,7 +34,7 @@ type BottomHeavy struct {
 	tasks                  []def.Task
 	metrics                map[string]def.Metric
 	running                map[string]map[string]bool
-	ignoreWatts            bool
+	wattsAsAResource       bool
 	classMapWatts          bool
 	smallTasks, largeTasks []def.Task
 
@@ -56,7 +56,7 @@ type BottomHeavy struct {
 }
 
 // New electron scheduler
-func NewBottomHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BottomHeavy {
+func NewBottomHeavy(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BottomHeavy {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -68,16 +68,16 @@ func NewBottomHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
 	// Classification done based on MMPU watts requirements.
 	mid := int(math.Floor((float64(len(tasks)) / 2.0) + 0.5))
 	s := &BottomHeavy{
-		smallTasks:    tasks[:mid],
-		largeTasks:    tasks[mid+1:],
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		RecordPCP:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		smallTasks:       tasks[:mid],
+		largeTasks:       tasks[mid+1:],
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		RecordPCP:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -105,7 +105,7 @@ func (s *BottomHeavy) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@@ -186,7 +186,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver)
 				// Does the task fit
 				// OR lazy evaluation. If ignore watts is set to true, second statement won't
 				// be evaluated.
-				if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsConsideration))) &&
+				if (!s.wattsAsAResource || (offerWatts >= (totalWatts + wattsConsideration))) &&
 					(offerCPU >= (totalCPU + task.CPU)) &&
 					(offerRAM >= (totalRAM + task.RAM)) {
 					offerTaken = true
@@ -248,7 +248,7 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver
 			}
 
 			// Decision to take the offer or not
-			if (s.ignoreWatts || (offerWatts >= wattsConsideration)) &&
+			if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) &&
 				(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
 				taken = true
 				tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task))
diff --git a/schedulers/bpswMaxMin.go b/schedulers/bpswMaxMin.go
index f6d4f3b..ae47645 100644
--- a/schedulers/bpswMaxMin.go
+++ b/schedulers/bpswMaxMin.go
@@ -28,7 +28,7 @@ func (s *BPSWMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
 		// Error in determining wattsConsideration
 		log.Fatal(err)
 	}
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) {
+	if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || (watts >= wattsConsideration)) {
 		return true
 	}
 
@@ -42,7 +42,7 @@ type BPSWMaxMinWatts struct {
 	tasks         []def.Task
 	metrics       map[string]def.Metric
 	running       map[string]map[string]bool
-	ignoreWatts   bool
+	wattsAsAResource   bool
 	classMapWatts bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
@@ -63,7 +63,7 @@ type BPSWMaxMinWatts struct {
 }
 
 // New electron scheduler
-func NewBPMaxMinWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinWatts {
+func NewBPMaxMinWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinWatts {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -73,7 +73,7 @@ func NewBPMaxMinWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix strin
 
 	s := &BPSWMaxMinWatts{
 		tasks:         tasks,
-		ignoreWatts:   ignoreWatts,
+		wattsAsAResource:   wattsAsAResource,
 		classMapWatts: classMapWatts,
 		Shutdown:      make(chan struct{}),
 		Done:          make(chan struct{}),
@@ -109,7 +109,7 @@ func (s *BPSWMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.Task
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@@ -152,7 +152,7 @@ func (s *BPSWMaxMinWatts) CheckFit(i int,
 	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 
 	// Does the task fit
-	if (s.ignoreWatts || (offerWatts >= (*totalWatts + wattsConsideration))) &&
+	if (!s.wattsAsAResource || (offerWatts >= (*totalWatts + wattsConsideration))) &&
 		(offerCPU >= (*totalCPU + task.CPU)) &&
 		(offerRAM >= (*totalRAM + task.RAM)) {
 
diff --git a/schedulers/bpswMaxMinPistonCapping.go b/schedulers/bpswMaxMinPistonCapping.go
index 6214e0e..d2f63e1 100644
--- a/schedulers/bpswMaxMinPistonCapping.go
+++ b/schedulers/bpswMaxMinPistonCapping.go
@@ -33,7 +33,7 @@ func (s *BPSWMaxMinPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) b
 		// Error in determining wattsConsideration
 		log.Fatal(err)
 	}
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) {
+	if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || (watts >= wattsConsideration)) {
 		return true
 	}
 
@@ -41,18 +41,18 @@ func (s *BPSWMaxMinPistonCapping) takeOffer(offer *mesos.Offer, task def.Task) b
 }
 
 type BPSWMaxMinPistonCapping struct {
-	base          //Type embedding to inherit common functions
-	tasksCreated  int
-	tasksRunning  int
-	tasks         []def.Task
-	metrics       map[string]def.Metric
-	running       map[string]map[string]bool
-	taskMonitor   map[string][]def.Task
-	totalPower    map[string]float64
-	ignoreWatts   bool
-	classMapWatts bool
-	ticker        *time.Ticker
-	isCapping     bool
+	base             //Type embedding to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	taskMonitor      map[string][]def.Task
+	totalPower       map[string]float64
+	wattsAsAResource bool
+	classMapWatts    bool
+	ticker           *time.Ticker
+	isCapping        bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -72,7 +72,7 @@ type BPSWMaxMinPistonCapping struct {
 }
 
 // New electron scheduler
-func NewBPSWMaxMinPistonCapping(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+func NewBPSWMaxMinPistonCapping(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
 	classMapWatts bool) *BPSWMaxMinPistonCapping {
 	sort.Sort(def.WattsSorter(tasks))
 
@@ -82,19 +82,19 @@ func NewBPSWMaxMinPistonCapping(tasks []def.Task, ignoreWatts bool, schedTracePr
 	}
 
 	s := &BPSWMaxMinPistonCapping{
-		tasks:         tasks,
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		taskMonitor:   make(map[string][]def.Task),
-		totalPower:    make(map[string]float64),
-		RecordPCP:     false,
-		ticker:        time.NewTicker(5 * time.Second),
-		isCapping:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		taskMonitor:      make(map[string][]def.Task),
+		totalPower:       make(map[string]float64),
+		RecordPCP:        false,
+		ticker:           time.NewTicker(5 * time.Second),
+		isCapping:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 
@@ -134,7 +134,7 @@ func (s *BPSWMaxMinPistonCapping) newTask(offer *mesos.Offer, task def.Task) *me
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@@ -242,7 +242,7 @@ func (s *BPSWMaxMinPistonCapping) CheckFit(i int,
 	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 
 	// Does the task fit
-	if (s.ignoreWatts || (offerWatts >= (*totalWatts + wattsConsideration))) &&
+	if (!s.wattsAsAResource || (offerWatts >= (*totalWatts + wattsConsideration))) &&
 		(offerCPU >= (*totalCPU + task.CPU)) &&
 		(offerRAM >= (*totalRAM + task.RAM)) {
 
diff --git a/schedulers/bpswMaxMinProacCC.go b/schedulers/bpswMaxMinProacCC.go
index 3ec4d6a..129b030 100644
--- a/schedulers/bpswMaxMinProacCC.go
+++ b/schedulers/bpswMaxMinProacCC.go
@@ -32,7 +32,7 @@ func (s *BPSWMaxMinProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
 		// Error in determining wattsConsideration
 		log.Fatal(err)
 	}
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || (watts >= wattsConsideration)) {
+	if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || (watts >= wattsConsideration)) {
 		return true
 	}
 
@@ -40,22 +40,22 @@ func (s *BPSWMaxMinProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
 }
 
 type BPSWMaxMinProacCC struct {
-	base           // Type embedding to inherit common functions
-	tasksCreated   int
-	tasksRunning   int
-	tasks          []def.Task
-	metrics        map[string]def.Metric
-	running        map[string]map[string]bool
-	taskMonitor    map[string][]def.Task
-	availablePower map[string]float64
-	totalPower     map[string]float64
-	ignoreWatts    bool
-	classMapWatts  bool
-	capper         *powCap.ClusterwideCapper
-	ticker         *time.Ticker
-	recapTicker    *time.Ticker
-	isCapping      bool // indicate whether we are currently performing cluster-wide capping.
-	isRecapping    bool // indicate whether we are currently performing cluster-wide recapping.
+	base             // Type embedding to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	taskMonitor      map[string][]def.Task
+	availablePower   map[string]float64
+	totalPower       map[string]float64
+	wattsAsAResource bool
+	classMapWatts    bool
+	capper           *powCap.ClusterwideCapper
+	ticker           *time.Ticker
+	recapTicker      *time.Ticker
+	isCapping        bool // indicate whether we are currently performing cluster-wide capping.
+	isRecapping      bool // indicate whether we are currently performing cluster-wide recapping.
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -75,7 +75,7 @@ type BPSWMaxMinProacCC struct {
 }
 
 // New electron scheduler
-func NewBPSWMaxMinProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinProacCC {
+func NewBPSWMaxMinProacCC(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *BPSWMaxMinProacCC {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -84,23 +84,23 @@ func NewBPSWMaxMinProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix s
 	}
 
 	s := &BPSWMaxMinProacCC{
-		tasks:          tasks,
-		ignoreWatts:    ignoreWatts,
-		classMapWatts:  classMapWatts,
-		Shutdown:       make(chan struct{}),
-		Done:           make(chan struct{}),
-		PCPLog:         make(chan struct{}),
-		running:        make(map[string]map[string]bool),
-		taskMonitor:    make(map[string][]def.Task),
-		availablePower: make(map[string]float64),
-		totalPower:     make(map[string]float64),
-		RecordPCP:      false,
-		capper:         powCap.GetClusterwideCapperInstance(),
-		ticker:         time.NewTicker(10 * time.Second),
-		recapTicker:    time.NewTicker(20 * time.Second),
-		isCapping:      false,
-		isRecapping:    false,
-		schedTrace:     log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		taskMonitor:      make(map[string][]def.Task),
+		availablePower:   make(map[string]float64),
+		totalPower:       make(map[string]float64),
+		RecordPCP:        false,
+		capper:           powCap.GetClusterwideCapperInstance(),
+		ticker:           time.NewTicker(10 * time.Second),
+		recapTicker:      time.NewTicker(20 * time.Second),
+		isCapping:        false,
+		isRecapping:      false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -139,7 +139,7 @@ func (s *BPSWMaxMinProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.Ta
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@@ -265,7 +265,7 @@ func (s *BPSWMaxMinProacCC) CheckFit(i int,
 	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 
 	// Does the task fit
-	if (s.ignoreWatts || (offerWatts >= (*totalWatts + wattsConsideration))) &&
+	if (!s.wattsAsAResource || (offerWatts >= (*totalWatts + wattsConsideration))) &&
 		(offerCPU >= (*totalCPU + task.CPU)) &&
 		(offerRAM >= (*totalRAM + task.RAM)) {
 
diff --git a/schedulers/firstfit.go b/schedulers/firstfit.go
index 09d1c2f..9992721 100644
--- a/schedulers/firstfit.go
+++ b/schedulers/firstfit.go
@@ -27,7 +27,7 @@ func (s *FirstFit) takeOffer(offer *mesos.Offer, task def.Task) bool {
 		// Error in determining wattsConsideration
 		log.Fatal(err)
 	}
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) {
+	if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || watts >= wattsConsideration) {
 		return true
 	}
 
@@ -36,14 +36,14 @@ func (s *FirstFit) takeOffer(offer *mesos.Offer, task def.Task) bool {
 
 // electronScheduler implements the Scheduler interface
 type FirstFit struct {
-	base          // Type embedded to inherit common functions
-	tasksCreated  int
-	tasksRunning  int
-	tasks         []def.Task
-	metrics       map[string]def.Metric
-	running       map[string]map[string]bool
-	ignoreWatts   bool
-	classMapWatts bool
+	base             // Type embedded to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	wattsAsAResource bool
+	classMapWatts    bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -63,7 +63,7 @@ type FirstFit struct {
 }
 
 // New electron scheduler
-func NewFirstFit(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFit {
+func NewFirstFit(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *FirstFit {
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
 	if err != nil {
@@ -71,15 +71,15 @@ func NewFirstFit(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, cl
 	}
 
 	s := &FirstFit{
-		tasks:         tasks,
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		RecordPCP:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		RecordPCP:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -107,7 +107,7 @@ func (s *FirstFit) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
diff --git a/schedulers/firstfitProacCC.go b/schedulers/firstfitProacCC.go
index 1766447..ba06be6 100644
--- a/schedulers/firstfitProacCC.go
+++ b/schedulers/firstfitProacCC.go
@@ -29,7 +29,7 @@ func (s *FirstFitProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
 		// Error in determining wattsConsideration
 		log.Fatal(err)
 	}
-	if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || (offer_watts >= wattsConsideration)) {
+	if offer_cpu >= task.CPU && offer_mem >= task.RAM && (!s.wattsAsAResource || (offer_watts >= wattsConsideration)) {
 		return true
 	}
 	return false
@@ -37,22 +37,22 @@ func (s *FirstFitProacCC) takeOffer(offer *mesos.Offer, task def.Task) bool {
 
 // electronScheduler implements the Scheduler interface.
 type FirstFitProacCC struct {
-	base           // Type embedded to inherit common functions
-	tasksCreated   int
-	tasksRunning   int
-	tasks          []def.Task
-	metrics        map[string]def.Metric
-	running        map[string]map[string]bool
-	taskMonitor    map[string][]def.Task // store tasks that are currently running.
-	availablePower map[string]float64    // available power for each node in the cluster.
-	totalPower     map[string]float64    // total power for each node in the cluster.
-	ignoreWatts    bool
-	classMapWatts  bool
-	capper         *powCap.ClusterwideCapper
-	ticker         *time.Ticker
-	recapTicker    *time.Ticker
-	isCapping      bool // indicate whether we are currently performing cluster wide capping.
-	isRecapping    bool // indicate whether we are currently performing cluster wide re-capping.
+	base             // Type embedded to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	taskMonitor      map[string][]def.Task // store tasks that are currently running.
+	availablePower   map[string]float64    // available power for each node in the cluster.
+	totalPower       map[string]float64    // total power for each node in the cluster.
+	wattsAsAResource bool
+	classMapWatts    bool
+	capper           *powCap.ClusterwideCapper
+	ticker           *time.Ticker
+	recapTicker      *time.Ticker
+	isCapping        bool // indicate whether we are currently performing cluster wide capping.
+	isRecapping      bool // indicate whether we are currently performing cluster wide re-capping.
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule the new task.
@@ -73,7 +73,7 @@ type FirstFitProacCC struct {
 }
 
 // New electron scheduler.
-func NewFirstFitProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+func NewFirstFitProacCC(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
 	classMapWatts bool) *FirstFitProacCC {
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -82,23 +82,23 @@ func NewFirstFitProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix str
 	}
 
 	s := &FirstFitProacCC{
-		tasks:          tasks,
-		ignoreWatts:    ignoreWatts,
-		classMapWatts:  classMapWatts,
-		Shutdown:       make(chan struct{}),
-		Done:           make(chan struct{}),
-		PCPLog:         make(chan struct{}),
-		running:        make(map[string]map[string]bool),
-		taskMonitor:    make(map[string][]def.Task),
-		availablePower: make(map[string]float64),
-		totalPower:     make(map[string]float64),
-		RecordPCP:      false,
-		capper:         powCap.GetClusterwideCapperInstance(),
-		ticker:         time.NewTicker(10 * time.Second),
-		recapTicker:    time.NewTicker(20 * time.Second),
-		isCapping:      false,
-		isRecapping:    false,
-		schedTrace:     log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		taskMonitor:      make(map[string][]def.Task),
+		availablePower:   make(map[string]float64),
+		totalPower:       make(map[string]float64),
+		RecordPCP:        false,
+		capper:           powCap.GetClusterwideCapperInstance(),
+		ticker:           time.NewTicker(10 * time.Second),
+		recapTicker:      time.NewTicker(20 * time.Second),
+		isCapping:        false,
+		isRecapping:      false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -137,7 +137,7 @@ func (s *FirstFitProacCC) newTask(offer *mesos.Offer, task def.Task) *mesos.Task
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
diff --git a/schedulers/firstfitSortedOffers.go b/schedulers/firstfitSortedOffers.go
index 0d519d5..3e4fabe 100644
--- a/schedulers/firstfitSortedOffers.go
+++ b/schedulers/firstfitSortedOffers.go
@@ -28,7 +28,7 @@ func (s *FirstFitSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool
 		// Error in determining wattsConsideration
 		log.Fatal(err)
 	}
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) {
+	if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || watts >= wattsConsideration) {
 		return true
 	}
 
@@ -37,14 +37,14 @@ func (s *FirstFitSortedOffers) takeOffer(offer *mesos.Offer, task def.Task) bool
 
 // electronScheduler implements the Scheduler interface
 type FirstFitSortedOffers struct {
-	base          // Type embedded to inherit common functions
-	tasksCreated  int
-	tasksRunning  int
-	tasks         []def.Task
-	metrics       map[string]def.Metric
-	running       map[string]map[string]bool
-	ignoreWatts   bool
-	classMapWatts bool
+	base             // Type embedded to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	wattsAsAResource bool
+	classMapWatts    bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -64,7 +64,7 @@ type FirstFitSortedOffers struct {
 }
 
 // New electron scheduler
-func NewFirstFitSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFitSortedOffers {
+func NewFirstFitSortedOffers(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *FirstFitSortedOffers {
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
 	if err != nil {
@@ -72,15 +72,15 @@ func NewFirstFitSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefi
 	}
 
 	s := &FirstFitSortedOffers{
-		tasks:         tasks,
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		RecordPCP:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		RecordPCP:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -108,7 +108,7 @@ func (s *FirstFitSortedOffers) newTask(offer *mesos.Offer, task def.Task) *mesos
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
diff --git a/schedulers/firstfitSortedWattsProacCC.go b/schedulers/firstfitSortedWattsProacCC.go
index e47cb14..bf4964e 100644
--- a/schedulers/firstfitSortedWattsProacCC.go
+++ b/schedulers/firstfitSortedWattsProacCC.go
@@ -40,7 +40,7 @@ func (s *FirstFitSortedWattsProacCC) takeOffer(offer *mesos.Offer, task def.Task
 		// Error in determining wattsToConsider
 		log.Fatal(err)
 	}
-	if offer_cpu >= task.CPU && offer_mem >= task.RAM && (s.ignoreWatts || offer_watts >= wattsConsideration) {
+	if offer_cpu >= task.CPU && offer_mem >= task.RAM && (!s.wattsAsAResource || offer_watts >= wattsConsideration) {
 		return true
 	}
 	return false
@@ -48,22 +48,22 @@ func (s *FirstFitSortedWattsProacCC) takeOffer(offer *mesos.Offer, task def.Task
 
 // electronScheduler implements the Scheduler interface
 type FirstFitSortedWattsProacCC struct {
-	base           // Type embedded to inherit common functions
-	tasksCreated   int
-	tasksRunning   int
-	tasks          []def.Task
-	metrics        map[string]def.Metric
-	running        map[string]map[string]bool
-	taskMonitor    map[string][]def.Task // store tasks that are currently running.
-	availablePower map[string]float64    // available power for each node in the cluster.
-	totalPower     map[string]float64    // total power for each node in the cluster.
-	ignoreWatts    bool
-	classMapWatts  bool
-	capper         *powCap.ClusterwideCapper
-	ticker         *time.Ticker
-	recapTicker    *time.Ticker
-	isCapping      bool // indicate whether we are currently performing cluster wide capping.
-	isRecapping    bool // indicate whether we are currently performing cluster wide re-capping.
+	base             // Type embedded to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	taskMonitor      map[string][]def.Task // store tasks that are currently running.
+	availablePower   map[string]float64    // available power for each node in the cluster.
+	totalPower       map[string]float64    // total power for each node in the cluster.
+	wattsAsAResource bool
+	classMapWatts    bool
+	capper           *powCap.ClusterwideCapper
+	ticker           *time.Ticker
+	recapTicker      *time.Ticker
+	isCapping        bool // indicate whether we are currently performing cluster wide capping.
+	isRecapping      bool // indicate whether we are currently performing cluster wide re-capping.
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule the new task.
@@ -84,7 +84,7 @@ type FirstFitSortedWattsProacCC struct {
 }
 
 // New electron scheduler.
-func NewFirstFitSortedWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+func NewFirstFitSortedWattsProacCC(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
 	classMapWatts bool) *FirstFitSortedWattsProacCC {
 
 	// Sorting tasks in ascending order of watts
@@ -96,23 +96,23 @@ func NewFirstFitSortedWattsProacCC(tasks []def.Task, ignoreWatts bool, schedTrac
 	}
 
 	s := &FirstFitSortedWattsProacCC{
-		tasks:          tasks,
-		ignoreWatts:    ignoreWatts,
-		classMapWatts:  classMapWatts,
-		Shutdown:       make(chan struct{}),
-		Done:           make(chan struct{}),
-		PCPLog:         make(chan struct{}),
-		running:        make(map[string]map[string]bool),
-		taskMonitor:    make(map[string][]def.Task),
-		availablePower: make(map[string]float64),
-		totalPower:     make(map[string]float64),
-		RecordPCP:      false,
-		capper:         powCap.GetClusterwideCapperInstance(),
-		ticker:         time.NewTicker(10 * time.Second),
-		recapTicker:    time.NewTicker(20 * time.Second),
-		isCapping:      false,
-		isRecapping:    false,
-		schedTrace:     log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		taskMonitor:      make(map[string][]def.Task),
+		availablePower:   make(map[string]float64),
+		totalPower:       make(map[string]float64),
+		RecordPCP:        false,
+		capper:           powCap.GetClusterwideCapperInstance(),
+		ticker:           time.NewTicker(10 * time.Second),
+		recapTicker:      time.NewTicker(20 * time.Second),
+		isCapping:        false,
+		isRecapping:      false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -151,7 +151,7 @@ func (s *FirstFitSortedWattsProacCC) newTask(offer *mesos.Offer, task def.Task)
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
 		} else {
@@ -202,11 +202,11 @@ func (s *FirstFitSortedWattsProacCC) startCapping() {
 				if rankedCurrentCapValue > 0.0 {
 					for _, host := range constants.Hosts {
 						// Rounding currentCapValue to the nearest int.
-						if err := rapl.Cap(host, "rapl", int(math.Floor(rankedCurrentCapValue + 0.5))); err != nil {
+						if err := rapl.Cap(host, "rapl", int(math.Floor(rankedCurrentCapValue+0.5))); err != nil {
 							log.Println(err)
 						}
 					}
-					log.Printf("Capped the cluster to %d", int(math.Floor(rankedCurrentCapValue + 0.5)))
+					log.Printf("Capped the cluster to %d", int(math.Floor(rankedCurrentCapValue+0.5)))
 				}
 				rankedMutex.Unlock()
 			}
@@ -226,11 +226,11 @@ func (s *FirstFitSortedWattsProacCC) startRecapping() {
 				if s.isRecapping && rankedRecapValue > 0.0 {
 					for _, host := range constants.Hosts {
 						// Rounding currentCapValue to the nearest int.
-						if err := rapl.Cap(host, "rapl", int(math.Floor(rankedRecapValue + 0.5))); err != nil {
+						if err := rapl.Cap(host, "rapl", int(math.Floor(rankedRecapValue+0.5))); err != nil {
 							log.Println(err)
 						}
 					}
-					log.Printf("Recapped the cluster to %d", int(math.Floor(rankedRecapValue + 0.5)))
+					log.Printf("Recapped the cluster to %d", int(math.Floor(rankedRecapValue+0.5)))
 				}
 				// setting recapping to false
 				s.isRecapping = false
diff --git a/schedulers/firstfitSortedWattsSortedOffers.go b/schedulers/firstfitSortedWattsSortedOffers.go
index 1047bff..3742db2 100644
--- a/schedulers/firstfitSortedWattsSortedOffers.go
+++ b/schedulers/firstfitSortedWattsSortedOffers.go
@@ -28,7 +28,7 @@ func (s *FirstFitSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def
 		// Error in determining wattsConsideration
 		log.Fatal(err)
 	}
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) {
+	if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || watts >= wattsConsideration) {
 		return true
 	}
 
@@ -37,14 +37,14 @@ func (s *FirstFitSortedWattsSortedOffers) takeOffer(offer *mesos.Offer, task def
 
 // electronScheduler implements the Scheduler interface
 type FirstFitSortedWattsSortedOffers struct {
-	base          // Type embedded to inherit common functions
-	tasksCreated  int
-	tasksRunning  int
-	tasks         []def.Task
-	metrics       map[string]def.Metric
-	running       map[string]map[string]bool
-	ignoreWatts   bool
-	classMapWatts bool
+	base             // Type embedded to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	wattsAsAResource bool
+	classMapWatts    bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -64,7 +64,7 @@ type FirstFitSortedWattsSortedOffers struct {
 }
 
 // New electron scheduler
-func NewFirstFitSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, schedTracePrefix string,
+func NewFirstFitSortedWattsSortedOffers(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string,
 	classMapWatts bool) *FirstFitSortedWattsSortedOffers {
 
 	// Sorting the tasks in increasing order of watts requirement.
@@ -76,15 +76,15 @@ func NewFirstFitSortedWattsSortedOffers(tasks []def.Task, ignoreWatts bool, sche
 	}
 
 	s := &FirstFitSortedWattsSortedOffers{
-		tasks:         tasks,
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		RecordPCP:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		RecordPCP:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -112,7 +112,7 @@ func (s *FirstFitSortedWattsSortedOffers) newTask(offer *mesos.Offer, task def.T
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
diff --git a/schedulers/firstfitsortedwatts.go b/schedulers/firstfitsortedwatts.go
index 6dc6ee9..5d624cf 100644
--- a/schedulers/firstfitsortedwatts.go
+++ b/schedulers/firstfitsortedwatts.go
@@ -28,7 +28,7 @@ func (s *FirstFitSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool
 		// Error in determining wattsConsideration
 		log.Fatal(err)
 	}
-	if cpus >= task.CPU && mem >= task.RAM && (s.ignoreWatts || watts >= wattsConsideration) {
+	if cpus >= task.CPU && mem >= task.RAM && (!s.wattsAsAResource || watts >= wattsConsideration) {
 		return true
 	}
 
@@ -37,14 +37,14 @@ func (s *FirstFitSortedWatts) takeOffer(offer *mesos.Offer, task def.Task) bool
 
 // electronScheduler implements the Scheduler interface
 type FirstFitSortedWatts struct {
-	base          // Type embedded to inherit common functions
-	tasksCreated  int
-	tasksRunning  int
-	tasks         []def.Task
-	metrics       map[string]def.Metric
-	running       map[string]map[string]bool
-	ignoreWatts   bool
-	classMapWatts bool
+	base             // Type embedded to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	wattsAsAResource bool
+	classMapWatts    bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -64,7 +64,7 @@ type FirstFitSortedWatts struct {
 }
 
 // New electron scheduler
-func NewFirstFitSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFitSortedWatts {
+func NewFirstFitSortedWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *FirstFitSortedWatts {
 
 	sort.Sort(def.WattsSorter(tasks))
 
@@ -74,15 +74,15 @@ func NewFirstFitSortedWatts(tasks []def.Task, ignoreWatts bool, schedTracePrefix
 	}
 
 	s := &FirstFitSortedWatts{
-		tasks:         tasks,
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		RecordPCP:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		RecordPCP:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -110,7 +110,7 @@ func (s *FirstFitSortedWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
diff --git a/schedulers/firstfitwattsonly.go b/schedulers/firstfitwattsonly.go
index a349b57..2d531c9 100644
--- a/schedulers/firstfitwattsonly.go
+++ b/schedulers/firstfitwattsonly.go
@@ -35,14 +35,14 @@ func (s *FirstFitWattsOnly) takeOffer(offer *mesos.Offer, task def.Task) bool {
 }
 
 type FirstFitWattsOnly struct {
-	base          // Type embedded to inherit common functions
-	tasksCreated  int
-	tasksRunning  int
-	tasks         []def.Task
-	metrics       map[string]def.Metric
-	running       map[string]map[string]bool
-	ignoreWatts   bool
-	classMapWatts bool
+	base             // Type embedded to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	wattsAsAResource bool
+	classMapWatts    bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -62,7 +62,7 @@ type FirstFitWattsOnly struct {
 }
 
 // New electron scheduler
-func NewFirstFitWattsOnly(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *FirstFitWattsOnly {
+func NewFirstFitWattsOnly(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *FirstFitWattsOnly {
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
 	if err != nil {
@@ -70,15 +70,15 @@ func NewFirstFitWattsOnly(tasks []def.Task, ignoreWatts bool, schedTracePrefix s
 	}
 
 	s := &FirstFitWattsOnly{
-		tasks:         tasks,
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		RecordPCP:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		RecordPCP:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
diff --git a/schedulers/topHeavy.go b/schedulers/topHeavy.go
index 61be09c..39ffe03 100644
--- a/schedulers/topHeavy.go
+++ b/schedulers/topHeavy.go
@@ -34,7 +34,7 @@ type TopHeavy struct {
 	tasks                  []def.Task
 	metrics                map[string]def.Metric
 	running                map[string]map[string]bool
-	ignoreWatts            bool
+	wattsAsAResource       bool
 	classMapWatts          bool
 	smallTasks, largeTasks []def.Task
 
@@ -56,7 +56,7 @@ type TopHeavy struct {
 }
 
 // New electron scheduler
-func NewTopHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, classMapWatts bool) *TopHeavy {
+func NewTopHeavy(tasks []def.Task, wattsAsAResource bool, schedTracePrefix string, classMapWatts bool) *TopHeavy {
 	sort.Sort(def.WattsSorter(tasks))
 
 	logFile, err := os.Create("./" + schedTracePrefix + "_schedTrace.log")
@@ -68,16 +68,16 @@ func NewTopHeavy(tasks []def.Task, ignoreWatts bool, schedTracePrefix string, cl
 	// Classification done based on MMPU watts requirements.
 	mid := int(math.Floor((float64(len(tasks)) / 2.0) + 0.5))
 	s := &TopHeavy{
-		smallTasks:    tasks[:mid],
-		largeTasks:    tasks[mid+1:],
-		ignoreWatts:   ignoreWatts,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		RecordPCP:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		smallTasks:       tasks[:mid],
+		largeTasks:       tasks[mid+1:],
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		RecordPCP:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
@@ -105,7 +105,7 @@ func (s *TopHeavy) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 		mesosutil.NewScalarResource("mem", task.RAM),
 	}
 
-	if !s.ignoreWatts {
+	if s.wattsAsAResource {
 		if wattsToConsider, err := def.WattsToConsider(task, s.classMapWatts, offer); err == nil {
 			log.Printf("Watts considered for host[%s] and task[%s] = %f", *offer.Hostname, task.Name, wattsToConsider)
 			resources = append(resources, mesosutil.NewScalarResource("watts", wattsToConsider))
@@ -186,7 +186,7 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) {
 				// Does the task fit
 				// OR lazy evaluation. If ignore watts is set to true, second statement won't
 				// be evaluated.
-				if (s.ignoreWatts || (offerWatts >= (totalWatts + wattsConsideration))) &&
+				if (!s.wattsAsAResource || (offerWatts >= (totalWatts + wattsConsideration))) &&
 					(offerCPU >= (totalCPU + task.CPU)) &&
 					(offerRAM >= (totalRAM + task.RAM)) {
 					taken = true
@@ -245,7 +245,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) {
 			}
 
 			// Decision to take the offer or not
-			if (s.ignoreWatts || (offerWatts >= wattsConsideration)) &&
+			if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) &&
 				(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
 				offerTaken = true
 				tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task))
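
Across all of the hunks above, the boolean's sense is inverted along with the rename: code that used to short-circuit on s.ignoreWatts now short-circuits on !s.wattsAsAResource, so the watts term of the fit check is only enforced when watts is explicitly treated as a schedulable resource, and newTask only appends the "watts" scalar resource under that same condition. A minimal, self-contained sketch of that predicate, using toy structs in place of mesos.Offer aggregates and def.Task:

package main

import "fmt"

// Toy stand-ins for the offer aggregates and task requirements (illustration only).
type offerAgg struct{ cpus, mem, watts float64 }
type taskReq struct{ cpu, ram, watts float64 }

// takeOffer mirrors the renamed predicate: the watts check is skipped unless
// watts is being treated as a resource.
func takeOffer(wattsAsAResource bool, o offerAgg, t taskReq) bool {
	return o.cpus >= t.cpu && o.mem >= t.ram && (!wattsAsAResource || o.watts >= t.watts)
}

func main() {
	o := offerAgg{cpus: 4, mem: 4096, watts: 50}
	t := taskReq{cpu: 2, ram: 2048, watts: 75} // asks for more watts than the offer has

	fmt.Println(takeOffer(false, o, t)) // true: watts is not a resource, only cpu and mem matter
	fmt.Println(takeOffer(true, o, t))  // false: the watts constraint is now enforced
}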

From 814d16b54d2bfb4441d9f20d847057caccad0625 Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Fri, 10 Feb 2017 15:46:20 -0500
Subject: [PATCH 07/15] added HostMismatch function to be called by all
 schedulers

---
 utilities/offerUtils/offerUtils.go | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/utilities/offerUtils/offerUtils.go b/utilities/offerUtils/offerUtils.go
index 16144dd..e1f6817 100644
--- a/utilities/offerUtils/offerUtils.go
+++ b/utilities/offerUtils/offerUtils.go
@@ -2,6 +2,7 @@ package offerUtils
 
 import (
 	mesos "github.com/mesos/mesos-go/mesosproto"
+	"strings"
 )
 
 func OfferAgg(offer *mesos.Offer) (float64, float64, float64) {
@@ -49,3 +50,11 @@ func (offersSorter OffersSorter) Less(i, j int) bool {
 	cpu2, _, _ := OfferAgg(offersSorter[j])
 	return cpu1 <= cpu2
 }
+
+// HostMismatch returns true if there is a mismatch between the task's host requirement and the host corresponding to the offer.
+func HostMismatch(offerHost string, taskHost string) bool {
+	if taskHost != "" && !strings.HasPrefix(offerHost, taskHost) {
+		return true
+	}
+	return false
+}
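
As a standalone illustration (not part of this patch), the helper's behavior can be exercised on its own; the hostnames below are made up, and the prefix match is what lets a bare node name such as stratos-005 satisfy a fully qualified offer hostname:

package main

import (
	"fmt"
	"strings"
)

// hostMismatch mirrors offerUtils.HostMismatch from this patch.
func hostMismatch(offerHost, taskHost string) bool {
	return taskHost != "" && !strings.HasPrefix(offerHost, taskHost)
}

func main() {
	fmt.Println(hostMismatch("stratos-005.example.com", "stratos-005")) // false: offer host satisfies the task's host requirement
	fmt.Println(hostMismatch("stratos-007.example.com", "stratos-005")) // true: mismatch, so a scheduler would skip this task for this offer
	fmt.Println(hostMismatch("stratos-007.example.com", ""))            // false: the task has no host constraint
}

Presumably a scheduler would call offerUtils.HostMismatch(*offer.Hostname, taskHost) near the top of its ResourceOffers task loop and move on to the next task on a mismatch; the field that carries the task's host requirement is not shown in this patch.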

From f5ddc56f2752023add9a7ba47258ce4b144797c2 Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Fri, 10 Feb 2017 16:39:13 -0500
Subject: [PATCH 08/15] changed the name of takeOffer(...) to
 takeOfferBinPack(...) and created another function called
 takeOfferFirstFit(...). Made sure that these functions are called instead of
 inlining the code.

---
 schedulers/bottomHeavy.go | 22 ++++++++++++++--------
 schedulers/topHeavy.go    | 33 ++++++++++++++++++++++++++-------
 2 files changed, 40 insertions(+), 15 deletions(-)

diff --git a/schedulers/bottomHeavy.go b/schedulers/bottomHeavy.go
index 6ec542d..38d6b66 100644
--- a/schedulers/bottomHeavy.go
+++ b/schedulers/bottomHeavy.go
@@ -26,7 +26,7 @@ BinPacking has the most effect when co-scheduling of tasks is increased. Large t
 	co-scheduling them has a great impact on the total power utilization.
 */
 
-func (s *BottomHeavy) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWatts,
+func (s *BottomHeavy) takeOfferBinPack(offer *mesos.Offer, totalCPU, totalRAM, totalWatts,
 	wattsToConsider float64, task def.Task) bool {
 	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 
@@ -40,6 +40,17 @@ func (s *BottomHeavy) takeOffer(offer *mesos.Offer, totalCPU, totalRAM, totalWat
 
 }
 
+func (s *BottomHeavy) takeOfferFirstFit(offer *mesos.Offer, wattsConsideration float64, task def.Task) bool {
+	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
+
+	//TODO: Insert watts calculation here instead of taking them as a parameter
+	if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) &&
+		(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
+		return true
+	}
+	return false
+}
+
 // electronScheduler implements the Scheduler interface
 type BottomHeavy struct {
 	base                   // Type embedded to inherit common functions
@@ -199,7 +210,7 @@ func (s *BottomHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver)
 				// Does the task fit
 				// OR lazy evaluation. If ignore watts is set to true, second statement won't
 				// be evaluated.
-				if s.takeOffer(offer, totalCPU, totalRAM, totalWatts, wattsConsideration, task) {
+				if s.takeOfferBinPack(offer, totalCPU, totalRAM, totalWatts, wattsConsideration, task) {
 					offerTaken = true
 					totalWatts += wattsConsideration
 					totalCPU += task.CPU
@@ -245,7 +256,6 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver
 		}
 
 		tasks := []*mesos.TaskInfo{}
-		offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 		taken := false
 		for i := 0; i < len(s.smallTasks); i++ {
 			task := s.smallTasks[i]
@@ -253,14 +263,10 @@ func (s *BottomHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver
 			if err != nil {
 				// Error in determining wattsConsideration
 				log.Fatal(err)
-			} else {
-				// Logging the watts consideration
-				log.Printf("Watts Considered for host[%s], task[%s] = %f\n", *offer.Hostname, task.Name, wattsConsideration)
 			}
 
 			// Decision to take the offer or not
-			if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) &&
-				(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
+			if s.takeOfferFirstFit(offer, wattsConsideration, task) {
 				taken = true
 				tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task))
 				log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname())
diff --git a/schedulers/topHeavy.go b/schedulers/topHeavy.go
index 39ffe03..e42b527 100644
--- a/schedulers/topHeavy.go
+++ b/schedulers/topHeavy.go
@@ -26,6 +26,30 @@ This was done to give a little more room for the large tasks (power intensive) f
 starvation of power intensive tasks.
 */
 
+func (s *TopHeavy) takeOfferBinPack(offer *mesos.Offer, totalCPU, totalRAM, totalWatts,
+	wattsToConsider float64, task def.Task) bool {
+	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
+
+	//TODO: Insert watts calculation here instead of taking them as a parameter
+	if (!s.wattsAsAResource || (offerWatts >= (totalWatts + wattsToConsider))) &&
+		(offerCPU >= (totalCPU + task.CPU)) &&
+		(offerRAM >= (totalRAM + task.RAM)) {
+		return true
+	}
+	return false
+}
+
+func (s *TopHeavy) takeOfferFirstFit(offer *mesos.Offer, wattsConsideration float64, task def.Task) bool {
+	offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
+
+	//TODO: Insert watts calculation here instead of taking them as a parameter
+	if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) &&
+		(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
+		return true
+	}
+	return false
+}
+
 // electronScheduler implements the Scheduler interface
 type TopHeavy struct {
 	base                   // Type embedded to inherit common functions
@@ -169,7 +193,6 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) {
 		}
 
 		tasks := []*mesos.TaskInfo{}
-		offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 		totalWatts := 0.0
 		totalCPU := 0.0
 		totalRAM := 0.0
@@ -186,9 +209,7 @@ func (s *TopHeavy) pack(offers []*mesos.Offer, driver sched.SchedulerDriver) {
 				// Does the task fit
 				// OR lazy evaluation. If ignore watts is set to true, second statement won't
 				// be evaluated.
-				if (!s.wattsAsAResource || (offerWatts >= (totalWatts + wattsConsideration))) &&
-					(offerCPU >= (totalCPU + task.CPU)) &&
-					(offerRAM >= (totalRAM + task.RAM)) {
+				if s.takeOfferBinPack(offer, totalCPU, totalRAM, totalWatts, wattsConsideration, task) {
 					taken = true
 					totalWatts += wattsConsideration
 					totalCPU += task.CPU
@@ -234,7 +255,6 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) {
 		}
 
 		tasks := []*mesos.TaskInfo{}
-		offerCPU, offerRAM, offerWatts := offerUtils.OfferAgg(offer)
 		offerTaken := false
 		for i := 0; i < len(s.largeTasks); i++ {
 			task := s.largeTasks[i]
@@ -245,8 +265,7 @@ func (s *TopHeavy) spread(offers []*mesos.Offer, driver sched.SchedulerDriver) {
 			}
 
 			// Decision to take the offer or not
-			if (!s.wattsAsAResource || (offerWatts >= wattsConsideration)) &&
-				(offerCPU >= task.CPU) && (offerRAM >= task.RAM) {
+			if s.takeOfferFirstFit(offer, wattsConsideration, task) {
 				offerTaken = true
 				tasks = append(tasks, s.createTaskInfoAndLogSchedTrace(offer, task))
 				log.Printf("Starting %s on [%s]\n", task.Name, offer.GetHostname())

From 05f92bcfd29ffbe7c3fc420e117f9700ab19babc Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Fri, 10 Feb 2017 18:02:06 -0500
Subject: [PATCH 09/15] resolved merge conflict

---
 constants/constants.go | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/constants/constants.go b/constants/constants.go
index e1045d5..8447e6c 100644
--- a/constants/constants.go
+++ b/constants/constants.go
@@ -1,18 +1,9 @@
 /*
 Constants that are used across scripts
 1. The available hosts = stratos-00x (x varies from 1 to 8)
-<<<<<<< HEAD
 2. CapMargin = percentage of the requested power to allocate
 3. ConsiderationWindowSize = number of tasks to consider for computation of the dynamic cap.
-=======
-2. cap_margin = percentage of the requested power to allocate
-3. power_threshold = overloading factor
-5. window_size = number of tasks to consider for computation of the dynamic cap.
-
-Also, exposing functions to update or initialize some of the constants.
-
 TODO: Clean this up and use Mesos Attributes instead.
->>>>>>> a0a3e78041067e5e2f9dc9b5d1e7b6dd001ce1e9
 */
 package constants
 

From ae55790c7655e4e1a8eda98f6668e66c4c3f1919 Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Fri, 10 Feb 2017 18:11:52 -0500
Subject: [PATCH 10/15] removed TODO for adding the possibility of enabling and
 disabling classMapWatts from command-line

---
 README.md | 1 -
 1 file changed, 1 deletion(-)

diff --git a/README.md b/README.md
index 96226e8..1948b49 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,6 @@ To Do:
  * Log fix for declining offer -- different reason when insufficient resources as compared to when there are no
     longer any tasks to schedule.
  * Have a centralised logFile that can be filtered by identifier. All electron logs should go into this file.
- * Make ClassMapWatts to commandLine arguments so Electron can be run with ClassMapWatts enabled/disabled.
  * Make def.Task an interface for further modularization and flexibility.
 
 **Requires [Performance Co-Pilot](http://pcp.io/) tool pmdumptext to be installed on the

From ad925dfc8ffc057f3e2b72f92cd268db1d3ce76a Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Fri, 10 Feb 2017 20:28:06 -0500
Subject: [PATCH 11/15] Added TODO for making WattsToConsider(...) a receiver
 of def.Task and changing its name to Watts(...)

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index 1948b49..64d6fcb 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,7 @@ To Do:
     longer any tasks to schedule.
  * Have a centralised logFile that can be filtered by identifier. All electron logs should go into this file.
  * Make def.Task an interface for further modularization and flexibility.
+ * Convert def#WattsToConsider(...) into a receiver method on def.Task and rename it to Watts(...).
 
 **Requires [Performance Co-Pilot](http://pcp.io/) tool pmdumptext to be installed on the
 machine on which electron is launched for logging to work and PCP collector agents installed
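
A sketch of what that last TODO could look like once carried out. Everything below is hypothetical: the field names, the Watts(...) signature, and the classMapWatts handling are assumptions made for illustration, not code from this repository.

package main

import (
	"errors"
	"fmt"
)

// Task is a simplified, hypothetical stand-in for def.Task with only the
// fields needed to illustrate the refactor; the real struct has more fields.
type Task struct {
	RequestedWatts float64            // stand-in for the task's requested watts
	ClassToWatts   map[string]float64 // per-power-class watts, used when classMapWatts is enabled
}

// Watts is the shape the TODO asks for: the former def.WattsToConsider(...)
// free function turned into a receiver method (signature assumed).
func (t Task) Watts(classMapWatts bool, powerClass string) (float64, error) {
	if classMapWatts {
		if w, ok := t.ClassToWatts[powerClass]; ok {
			return w, nil
		}
		return 0, errors.New("no watts entry for power class " + powerClass)
	}
	return t.RequestedWatts, nil
}

func main() {
	t := Task{RequestedWatts: 50, ClassToWatts: map[string]float64{"A": 65, "B": 40}}
	w, _ := t.Watts(true, "A")
	fmt.Println(w) // 65
	w, _ = t.Watts(false, "")
	fmt.Println(w) // 50
}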

From ec7848006773fa694bb037c3baa3794c12863d0d Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Fri, 10 Feb 2017 20:53:18 -0500
Subject: [PATCH 12/15] renamed constants.CapMargin to constants.Tolerance for
 better semantics

---
 constants/constants.go                      | 6 +++---
 powerCapping/proactiveclusterwidecappers.go | 6 +++---
 schedulers/binpackedpistoncapping.go        | 4 ++--
 schedulers/bpswMaxMinPistonCapping.go       | 4 ++--
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/constants/constants.go b/constants/constants.go
index 8447e6c..16225a5 100644
--- a/constants/constants.go
+++ b/constants/constants.go
@@ -1,7 +1,7 @@
 /*
 Constants that are used across scripts
 1. The available hosts = stratos-00x (x varies from 1 to 8)
-2. CapMargin = percentage of the requested power to allocate
+2. Tolerance = fraction of the requested power to allocate; setting it too low would starve the task.
 3. ConsiderationWindowSize = number of tasks to consider for computation of the dynamic cap.
 TODO: Clean this up and use Mesos Attributes instead.
 */
@@ -32,10 +32,10 @@ var PowerClasses = map[string]map[string]bool{
 
 /*
   Margin with respect to the required power for a job.
-  So, if power required = 10W, the node would be capped to CapMargin * 10W.
+  So, if power required = 10W, the node would be capped to Tolerance * 10W.
   This value can be changed upon convenience.
 */
-var CapMargin = 0.70
+var Tolerance = 0.70
 
 // Window size for running average
 var ConsiderationWindowSize = 20
diff --git a/powerCapping/proactiveclusterwidecappers.go b/powerCapping/proactiveclusterwidecappers.go
index d1f8009..fb3a3e3 100644
--- a/powerCapping/proactiveclusterwidecappers.go
+++ b/powerCapping/proactiveclusterwidecappers.go
@@ -23,7 +23,7 @@ type taskWrapper struct {
 }
 
 func (tw taskWrapper) Val() float64 {
-	return tw.task.Watts * constants.CapMargin
+	return tw.task.Watts * constants.Tolerance
 }
 
 func (tw taskWrapper) ID() string {
@@ -121,7 +121,7 @@ func (capper ClusterwideCapper) CleverRecap(totalPower map[string]float64,
 				// Not considering this task for the computation of totalAllocatedPower and totalRunningTasks
 				continue
 			}
-			wattsUsages[host] = append(wattsUsages[host], float64(task.Watts)*constants.CapMargin)
+			wattsUsages[host] = append(wattsUsages[host], float64(task.Watts)*constants.Tolerance)
 		}
 	}
 
@@ -202,7 +202,7 @@ func (capper ClusterwideCapper) NaiveRecap(totalPower map[string]float64,
 				// Not considering this task for the computation of totalAllocatedPower and totalRunningTasks
 				continue
 			}
-			totalAllocatedPower += (float64(task.Watts) * constants.CapMargin)
+			totalAllocatedPower += (float64(task.Watts) * constants.Tolerance)
 			totalRunningTasks++
 		}
 	}
diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go
index 2b24b8a..68f8448 100644
--- a/schedulers/binpackedpistoncapping.go
+++ b/schedulers/binpackedpistoncapping.go
@@ -305,7 +305,7 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 					s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
 					*task.Instances--
 					// updating the cap value for offer.Hostname
-					partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
+					partialLoad += ((wattsConsideration * constants.Tolerance) / s.totalPower[*offer.Hostname]) * 100
 
 					if *task.Instances <= 0 {
 						// All instances of task have been scheduled. Remove it
@@ -395,7 +395,7 @@ func (s *BinPackedPistonCapper) StatusUpdate(driver sched.SchedulerDriver, statu
 		}
 		// Need to update the cap values for host of the finishedTask
 		bpPistonMutex.Lock()
-		bpPistonCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
+		bpPistonCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.Tolerance) / s.totalPower[hostOfFinishedTask]) * 100
 		// Checking to see if the cap value has become 0, in which case we uncap the host.
 		if int(math.Floor(bpPistonCapValues[hostOfFinishedTask]+0.5)) == 0 {
 			bpPistonCapValues[hostOfFinishedTask] = 100
diff --git a/schedulers/bpswMaxMinPistonCapping.go b/schedulers/bpswMaxMinPistonCapping.go
index 3bf48df..1789a8d 100644
--- a/schedulers/bpswMaxMinPistonCapping.go
+++ b/schedulers/bpswMaxMinPistonCapping.go
@@ -257,7 +257,7 @@ func (s *BPSWMaxMinPistonCapping) CheckFit(i int,
 		fmt.Println("Inst: ", *task.Instances)
 		s.schedTrace.Print(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
 		*task.Instances--
-		*partialLoad += ((wattsConsideration * constants.CapMargin) / s.totalPower[*offer.Hostname]) * 100
+		*partialLoad += ((wattsConsideration * constants.Tolerance) / s.totalPower[*offer.Hostname]) * 100
 
 		if *task.Instances <= 0 {
 			// All instances of task have been scheduled, remove it
@@ -431,7 +431,7 @@ func (s *BPSWMaxMinPistonCapping) StatusUpdate(driver sched.SchedulerDriver, sta
 		}
 		// Need to update the cap values for host of the finishedTask
 		bpMaxMinPistonCappingMutex.Lock()
-		bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
+		bpMaxMinPistonCappingCapValues[hostOfFinishedTask] -= ((wattsConsideration * constants.Tolerance) / s.totalPower[hostOfFinishedTask]) * 100
 		// Checking to see if the cap value has become 0, in which case we uncap the host.
 		if int(math.Floor(bpMaxMinPistonCappingCapValues[hostOfFinishedTask]+0.5)) == 0 {
 			bpMaxMinPistonCappingCapValues[hostOfFinishedTask] = 100
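
The rename does not change the arithmetic: a task's requested watts are scaled by Tolerance before being charged against a host, both when capping a node and when updating the piston cap's partial load. A small runnable sketch of the two uses visible in this patch; the totalPower figure is hypothetical.

package main

import "fmt"

// Tolerance mirrors constants.Tolerance from the patch above.
const Tolerance = 0.70

func main() {
	// A task asks for 10 W; per the comment in constants.go the node is
	// capped to Tolerance * 10 W = 7 W on that task's behalf.
	wattsConsideration := 10.0
	fmt.Printf("effective cap for the task: %.1f W\n", wattsConsideration*Tolerance)

	// The piston-capping schedulers express the same quantity as a percentage
	// of the host's total power, exactly like the partialLoad updates above.
	totalPower := 220.0 // hypothetical total power of one host, in W
	partialLoad := ((wattsConsideration * Tolerance) / totalPower) * 100
	fmt.Printf("partial load charged to the host cap: %.2f%%\n", partialLoad)
}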

From 13479e03a462fa9260e4287f0116f5311b226894 Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Sat, 11 Feb 2017 00:05:42 -0500
Subject: [PATCH 13/15] formatted files

---
 schedulers/binpackedpistoncapping.go |  4 +++-
 schedulers/bpswMaxMin.go             | 34 ++++++++++++++--------------
 schedulers/helpers.go                |  3 +--
 utilities/utils.go                   |  1 -
 4 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/schedulers/binpackedpistoncapping.go b/schedulers/binpackedpistoncapping.go
index 68f8448..e58f674 100644
--- a/schedulers/binpackedpistoncapping.go
+++ b/schedulers/binpackedpistoncapping.go
@@ -279,7 +279,9 @@ func (s *BinPackedPistonCapper) ResourceOffers(driver sched.SchedulerDriver, off
 			}
 
 			// Don't take offer if it doesn't match our task's host requirement
-			if offerUtils.HostMismatch(*offer.Hostname, task.Host) {continue}
+			if offerUtils.HostMismatch(*offer.Hostname, task.Host) {
+				continue
+			}
 
 			for *task.Instances > 0 {
 				// Does the task fit
diff --git a/schedulers/bpswMaxMin.go b/schedulers/bpswMaxMin.go
index bdb8f45..b98ab54 100644
--- a/schedulers/bpswMaxMin.go
+++ b/schedulers/bpswMaxMin.go
@@ -34,14 +34,14 @@ func (s *BPSWMaxMinWatts) takeOffer(offer *mesos.Offer, task def.Task) bool {
 }
 
 type BPSWMaxMinWatts struct {
-	base          //Type embedding to inherit common functions
-	tasksCreated  int
-	tasksRunning  int
-	tasks         []def.Task
-	metrics       map[string]def.Metric
-	running       map[string]map[string]bool
-	wattsAsAResource   bool
-	classMapWatts bool
+	base             //Type embedding to inherit common functions
+	tasksCreated     int
+	tasksRunning     int
+	tasks            []def.Task
+	metrics          map[string]def.Metric
+	running          map[string]map[string]bool
+	wattsAsAResource bool
+	classMapWatts    bool
 
 	// First set of PCP values are garbage values, signal to logger to start recording when we're
 	// about to schedule a new task
@@ -70,15 +70,15 @@ func NewBPMaxMinWatts(tasks []def.Task, wattsAsAResource bool, schedTracePrefix
 	}
 
 	s := &BPSWMaxMinWatts{
-		tasks:         tasks,
-		wattsAsAResource:   wattsAsAResource,
-		classMapWatts: classMapWatts,
-		Shutdown:      make(chan struct{}),
-		Done:          make(chan struct{}),
-		PCPLog:        make(chan struct{}),
-		running:       make(map[string]map[string]bool),
-		RecordPCP:     false,
-		schedTrace:    log.New(logFile, "", log.LstdFlags),
+		tasks:            tasks,
+		wattsAsAResource: wattsAsAResource,
+		classMapWatts:    classMapWatts,
+		Shutdown:         make(chan struct{}),
+		Done:             make(chan struct{}),
+		PCPLog:           make(chan struct{}),
+		running:          make(map[string]map[string]bool),
+		RecordPCP:        false,
+		schedTrace:       log.New(logFile, "", log.LstdFlags),
 	}
 	return s
 }
diff --git a/schedulers/helpers.go b/schedulers/helpers.go
index 23d1441..e6ba7fb 100644
--- a/schedulers/helpers.go
+++ b/schedulers/helpers.go
@@ -1,9 +1,9 @@
 package schedulers
 
 import (
+	"bitbucket.org/sunybingcloud/electron/constants"
 	"fmt"
 	"log"
-	"bitbucket.org/sunybingcloud/electron/constants"
 )
 
 func coLocated(tasks map[string]bool) {
@@ -24,4 +24,3 @@ func hostToPowerClass(hostName string) string {
 	}
 	return ""
 }
-
diff --git a/utilities/utils.go b/utilities/utils.go
index 6662c59..18b2400 100644
--- a/utilities/utils.go
+++ b/utilities/utils.go
@@ -45,4 +45,3 @@ func OrderedKeys(plist PairList) ([]string, error) {
 	}
 	return orderedKeys, nil
 }
-

From 6ac1b388837e11a96a9ef2928bd77c1986bbde9f Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Sat, 11 Feb 2017 01:14:02 -0500
Subject: [PATCH 14/15] Added comment to explain the classification of nodes in
 the cluster into power-classes.

---
 constants/constants.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/constants/constants.go b/constants/constants.go
index 16225a5..bbacf42 100644
--- a/constants/constants.go
+++ b/constants/constants.go
@@ -12,7 +12,11 @@ var Hosts = []string{"stratos-001.cs.binghamton.edu", "stratos-002.cs.binghamton
 	"stratos-005.cs.binghamton.edu", "stratos-006.cs.binghamton.edu",
 	"stratos-007.cs.binghamton.edu", "stratos-008.cs.binghamton.edu"}
 
-// Classification of the nodes in the cluster based on their power consumption.
+/*
+ Classification of the nodes in the cluster based on their Thermal Design Power (TDP).
+ The power classes are labelled in decreasing order of TDP, with class A nodes
+ 	having the highest TDP and class C nodes the lowest.
+*/
 var PowerClasses = map[string]map[string]bool{
 	"A": map[string]bool{
 		"stratos-005.cs.binghamton.edu": true,

From ceff625d322082f2cf6758a82eaff4f0792259fd Mon Sep 17 00:00:00 2001
From: Pradyumna Kaushik <pkaushi1@binghamton.edu>
Date: Sat, 11 Feb 2017 01:23:07 -0500
Subject: [PATCH 15/15] fixed formatting of function arguments for
 CheckFit(...)

---
 schedulers/bpswMaxMin.go              | 3 ++-
 schedulers/bpswMaxMinPistonCapping.go | 3 ++-
 schedulers/bpswMaxMinProacCC.go       | 3 ++-
 3 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/schedulers/bpswMaxMin.go b/schedulers/bpswMaxMin.go
index b98ab54..41356e2 100644
--- a/schedulers/bpswMaxMin.go
+++ b/schedulers/bpswMaxMin.go
@@ -139,7 +139,8 @@ func (s *BPSWMaxMinWatts) newTask(offer *mesos.Offer, task def.Task) *mesos.Task
 
 // Determine if the remaining space inside of the offer is enough for
 // the task we need to create. If it is, create a TaskInfo and return it.
-func (s *BPSWMaxMinWatts) CheckFit(i int,
+func (s *BPSWMaxMinWatts) CheckFit(
+	i int,
 	task def.Task,
 	wattsConsideration float64,
 	offer *mesos.Offer,
diff --git a/schedulers/bpswMaxMinPistonCapping.go b/schedulers/bpswMaxMinPistonCapping.go
index 1789a8d..925cbdc 100644
--- a/schedulers/bpswMaxMinPistonCapping.go
+++ b/schedulers/bpswMaxMinPistonCapping.go
@@ -228,7 +228,8 @@ func (s *BPSWMaxMinPistonCapping) stopCapping() {
 
 // Determine if the remaining space inside of the offer is enough for
 // the task we need to create. If it is, create a TaskInfo and return it.
-func (s *BPSWMaxMinPistonCapping) CheckFit(i int,
+func (s *BPSWMaxMinPistonCapping) CheckFit(
+	i int,
 	task def.Task,
 	wattsConsideration float64,
 	offer *mesos.Offer,
diff --git a/schedulers/bpswMaxMinProacCC.go b/schedulers/bpswMaxMinProacCC.go
index dc6912a..8c7b880 100644
--- a/schedulers/bpswMaxMinProacCC.go
+++ b/schedulers/bpswMaxMinProacCC.go
@@ -252,7 +252,8 @@ func (s *BPSWMaxMinProacCC) stopRecapping() {
 
 // Determine if the remaining space inside of the offer is enough for
 // the task we need to create. If it is, create TaskInfo and return it.
-func (s *BPSWMaxMinProacCC) CheckFit(i int,
+func (s *BPSWMaxMinProacCC) CheckFit(
+	i int,
 	task def.Task,
 	wattsConsideration float64,
 	offer *mesos.Offer,