formatted the code

Pradyumna Kaushik 2016-12-16 15:49:30 -05:00 committed by Renan DelValle
parent 16e25cea0f
commit bfcb254f23


@@ -4,16 +4,17 @@ import (
 "bitbucket.org/sunybingcloud/electron/constants"
 "bitbucket.org/sunybingcloud/electron/def"
 "bitbucket.org/sunybingcloud/electron/rapl"
-"fmt"
 "errors"
+"fmt"
 "github.com/golang/protobuf/proto"
 mesos "github.com/mesos/mesos-go/mesosproto"
 "github.com/mesos/mesos-go/mesosutil"
 sched "github.com/mesos/mesos-go/scheduler"
 "log"
 "math"
-"sync"
+"sort"
 "strings"
+"sync"
 "time"
 )
@@ -30,7 +31,6 @@ type PistonCapper struct {
 metrics map[string]def.Metric
 running map[string]map[string]bool
 taskMonitor map[string][]def.Task
-clusterLoad map[string]float64
 totalPower map[string]float64
 ignoreWatts bool
 ticker *time.Ticker
@@ -62,10 +62,9 @@ func NewPistonCapper(tasks []def.Task, ignoreWatts bool) *PistonCapper {
 PCPLog: make(chan struct{}),
 running: make(map[string]map[string]bool),
 taskMonitor: make(map[string][]def.Task),
-clusterLoad: make(map[string]float64),
 totalPower: make(map[string]float64),
 RecordPCP: false,
-ticker: time.NewTicker(10 * time.Second),
+ticker: time.NewTicker(5 * time.Second),
 isCapping: false,
 }
 return s
@@ -130,8 +129,6 @@ func (s *PistonCapper) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInf
 }
 }
-
-
 
 func (s *PistonCapper) Registered(
 _ sched.SchedulerDriver,
 frameworkID *mesos.FrameworkID,
@@ -149,8 +146,10 @@ func (s *PistonCapper) Disconnected(sched.SchedulerDriver) {
 // go routine to cap the each node in the cluster at regular intervals of time.
 var capValues = make(map[string]float64)
 // Storing the previous cap value for each host so as to not repeatedly cap the nodes to the same value. (reduces overhead)
 var previousRoundedCapValues = make(map[string]int)
 func (s *PistonCapper) startCapping() {
 go func() {
 for {
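The two package-level maps above drive the capping loop: capValues holds the current target cap per host, and previousRoundedCapValues remembers the last value actually applied so the scheduler does not re-issue identical RAPL caps on every tick. A minimal, self-contained sketch of that pattern follows; the capNode stub, the "node-1" host name, the starting cap value, and the three-tick loop are assumptions for illustration, not the scheduler's exact code, and the real call is rapl.Cap(host, "rapl", roundedCapValue).

package main

import (
    "log"
    "math"
    "time"
)

// capNode stands in for rapl.Cap(host, "rapl", percent) from the electron
// repository; this stub only logs and does not talk to RAPL.
func capNode(host string, percent int) error {
    log.Printf("rapl: capping [%s] at %d%%", host, percent)
    return nil
}

func main() {
    // Hypothetical host and cap value; in the scheduler these are the
    // package-level capValues / previousRoundedCapValues maps shown above.
    capValues := map[string]float64{"node-1": 87.3}
    previousRoundedCapValues := make(map[string]int)

    ticker := time.NewTicker(5 * time.Second) // same interval the constructor now uses
    defer ticker.Stop()

    for tick := 0; tick < 3; tick++ {
        <-ticker.C
        for host, capValue := range capValues {
            // Round the target cap to the nearest integer percentage.
            roundedCapValue := int(math.Floor(capValue + 0.5))
            // Skip the RAPL call if this host is already capped at that value;
            // this is the overhead the previousRoundedCapValues bookkeeping avoids.
            if prev, seen := previousRoundedCapValues[host]; seen && prev == roundedCapValue {
                continue
            }
            if err := capNode(host, roundedCapValue); err != nil {
                log.Println(err)
            } else {
                log.Printf("Capped [%s] at %d", host, roundedCapValue)
            }
            previousRoundedCapValues[host] = roundedCapValue
        }
    }
}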
@@ -166,7 +165,7 @@ func (s *PistonCapper) startCapping() {
 if err := rapl.Cap(host, "rapl", roundedCapValue); err != nil {
 log.Println(err)
 } else {
-log.Printf("Capped [%s] at %d", host, int(math.Floor(capValue + 0.5)))
+log.Printf("Capped [%s] at %d", host, int(math.Floor(capValue+0.5)))
 }
 previousRoundedCapValues[host] = roundedCapValue
 }
@@ -174,7 +173,7 @@ func (s *PistonCapper) startCapping() {
 if err := rapl.Cap(host, "rapl", roundedCapValue); err != nil {
 log.Println(err)
 } else {
-log.Printf("Capped [%s] at %d", host, int(math.Floor(capValue + 0.5)))
+log.Printf("Capped [%s] at %d", host, int(math.Floor(capValue+0.5)))
 }
 previousRoundedCapValues[host] = roundedCapValue
 }
@@ -356,7 +355,7 @@ func (s *PistonCapper) StatusUpdate(driver sched.SchedulerDriver, status *mesos.
 mutex.Lock()
 capValues[hostOfFinishedTask] -= ((finishedTask.Watts * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
 // Checking to see if the cap value has become 0, in which case we uncap the host.
-if int(math.Floor(capValues[hostOfFinishedTask] + 0.5)) == 0 {
+if int(math.Floor(capValues[hostOfFinishedTask]+0.5)) == 0 {
 capValues[hostOfFinishedTask] = 100
 }
 s.tasksRunning--
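When a task finishes, the hunk above lowers that host's cap value by the task's share of the host's total power, (finishedTask.Watts * constants.CapMargin / s.totalPower[host]) * 100, and resets the cap to 100 (effectively uncapping) once the rounded value reaches 0. A small worked example of that arithmetic, with made-up wattages and a hypothetical CapMargin; the real constant lives in the electron constants package:

package main

import (
    "fmt"
    "math"
)

func main() {
    // Hypothetical numbers: the real values come from the task definitions
    // and constants.CapMargin in the electron repository.
    const capMargin = 0.5
    totalPower := 200.0        // total measurable power of the host, in watts
    capValue := 25.0           // current cap for the host, as a percentage
    finishedTaskWatts := 100.0 // watts attributed to the task that just finished

    // Same update the scheduler performs in StatusUpdate on task completion.
    capValue -= ((finishedTaskWatts * capMargin) / totalPower) * 100
    fmt.Printf("cap after task finished: %.1f%%\n", capValue) // 0.0%

    // If the rounded cap hits 0, uncap the host by resetting it to 100%.
    if int(math.Floor(capValue+0.5)) == 0 {
        capValue = 100
    }
    fmt.Printf("cap after uncap check: %.0f%%\n", capValue) // 100%
}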