formatted the code

Pradyumna Kaushik 2016-12-16 15:49:30 -05:00 committed by Renan DelValle
parent 16e25cea0f
commit bfcb254f23


@@ -4,16 +4,17 @@ import (
 	"bitbucket.org/sunybingcloud/electron/constants"
 	"bitbucket.org/sunybingcloud/electron/def"
 	"bitbucket.org/sunybingcloud/electron/rapl"
-	"fmt"
+	"errors"
+	"fmt"
 	"github.com/golang/protobuf/proto"
 	mesos "github.com/mesos/mesos-go/mesosproto"
 	"github.com/mesos/mesos-go/mesosutil"
 	sched "github.com/mesos/mesos-go/scheduler"
 	"log"
 	"math"
-	"sync"
 	"sort"
 	"strings"
+	"sync"
 	"time"
 )
@@ -30,7 +31,6 @@ type PistonCapper struct {
 	metrics     map[string]def.Metric
 	running     map[string]map[string]bool
 	taskMonitor map[string][]def.Task
-	clusterLoad map[string]float64
 	totalPower  map[string]float64
 	ignoreWatts bool
 	ticker      *time.Ticker
@@ -62,10 +62,9 @@ func NewPistonCapper(tasks []def.Task, ignoreWatts bool) *PistonCapper {
 		PCPLog:      make(chan struct{}),
 		running:     make(map[string]map[string]bool),
 		taskMonitor: make(map[string][]def.Task),
-		clusterLoad: make(map[string]float64),
 		totalPower:  make(map[string]float64),
 		RecordPCP:   false,
-		ticker:      time.NewTicker(10 * time.Second),
+		ticker:      time.NewTicker(5 * time.Second),
 		isCapping:   false,
 	}
 	return s
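
The only behavioral change in this hunk is the capping interval, which drops from 10 seconds to 5. For readers unfamiliar with the pattern, here is a minimal, self-contained sketch (illustrative names only, not code from this repository) of how a construction-time ticker paces a periodic capping goroutine:

package main

import (
	"fmt"
	"time"
)

// startPeriodicWork runs work() every interval until stop is closed,
// mirroring how PistonCapper's ticker paces its capping goroutine.
// The scheduler above constructs its ticker with 5 * time.Second.
func startPeriodicWork(interval time.Duration, stop <-chan struct{}, work func()) {
	ticker := time.NewTicker(interval)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				work()
			case <-stop:
				return
			}
		}
	}()
}

func main() {
	stop := make(chan struct{})
	// A short interval keeps the demo quick; the scheduler uses 5s.
	startPeriodicWork(500*time.Millisecond, stop, func() { fmt.Println("cap hosts") })
	time.Sleep(2 * time.Second)
	close(stop)
}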
@@ -130,8 +129,6 @@ func (s *PistonCapper) newTask(offer *mesos.Offer, task def.Task) *mesos.TaskInfo {
 	}
 }
-
-
 
 func (s *PistonCapper) Registered(
 	_ sched.SchedulerDriver,
 	frameworkID *mesos.FrameworkID,
@@ -149,8 +146,10 @@ func (s *PistonCapper) Disconnected(sched.SchedulerDriver) {
 // goroutine to cap each node in the cluster at regular intervals of time.
 var capValues = make(map[string]float64)
+
+// Storing the previous cap value for each host so as to not repeatedly cap the nodes to the same value (reduces overhead).
+var previousRoundedCapValues = make(map[string]int)
 func (s *PistonCapper) startCapping() {
 	go func() {
 		for {
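
The new previousRoundedCapValues map is a small optimization: a host is re-capped only when its rounded cap value actually changes between ticks. Below is a hedged sketch of that check, with capHost standing in for rapl.Cap (the real call, seen in the next hunk, also takes a username argument); everything else here is illustrative:

package main

import (
	"log"
	"math"
)

// previous plays the role of previousRoundedCapValues: the last rounded
// cap value applied to each host.
var previous = make(map[string]int)

// applyCap re-caps a host only if the rounded value changed since the
// last tick; capHost is a stand-in for rapl.Cap.
func applyCap(host string, capValue float64, capHost func(string, int) error) {
	rounded := int(math.Floor(capValue + 0.5)) // round half up, as in the diff
	if prev, ok := previous[host]; ok && prev == rounded {
		return // same rounded value as last tick: skip the redundant call
	}
	if err := capHost(host, rounded); err != nil {
		log.Println(err)
	}
	previous[host] = rounded // recorded even on error, as in the diff above
}

func main() {
	fakeCap := func(host string, percent int) error {
		log.Printf("Capped [%s] at %d", host, percent)
		return nil
	}
	applyCap("host-1", 72.4, fakeCap) // caps at 72
	applyCap("host-1", 72.3, fakeCap) // also rounds to 72, so it is skipped
}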
@@ -166,7 +165,7 @@ func (s *PistonCapper) startCapping() {
 				if err := rapl.Cap(host, "rapl", roundedCapValue); err != nil {
 					log.Println(err)
 				} else {
-					log.Printf("Capped [%s] at %d", host, int(math.Floor(capValue + 0.5)))
+					log.Printf("Capped [%s] at %d", host, int(math.Floor(capValue+0.5)))
 				}
 				previousRoundedCapValues[host] = roundedCapValue
 			}
@@ -174,7 +173,7 @@
 				if err := rapl.Cap(host, "rapl", roundedCapValue); err != nil {
 					log.Println(err)
 				} else {
-					log.Printf("Capped [%s] at %d", host, int(math.Floor(capValue + 0.5)))
+					log.Printf("Capped [%s] at %d", host, int(math.Floor(capValue+0.5)))
 				}
 				previousRoundedCapValues[host] = roundedCapValue
 			}
@@ -356,7 +355,7 @@ func (s *PistonCapper) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
 		mutex.Lock()
 		capValues[hostOfFinishedTask] -= ((finishedTask.Watts * constants.CapMargin) / s.totalPower[hostOfFinishedTask]) * 100
 		// Checking to see if the cap value has become 0, in which case we uncap the host.
-		if int(math.Floor(capValues[hostOfFinishedTask] + 0.5)) == 0 {
+		if int(math.Floor(capValues[hostOfFinishedTask]+0.5)) == 0 {
 			capValues[hostOfFinishedTask] = 100
 		}
 		s.tasksRunning--
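
For context on the arithmetic in this last hunk: each running task contributes ((Watts * CapMargin) / totalPower) * 100 percentage points to its host's cap value, so the subtraction releases the finished task's share, and once the remainder rounds to zero (no tasks left) the host is uncapped by resetting the value to 100. A small worked sketch under assumed numbers; capMargin, the host name, and the wattages are illustrative, not the project's constants:

package main

import (
	"fmt"
	"math"
)

const capMargin = 0.5 // stand-in for constants.CapMargin

// releaseTask mirrors the StatusUpdate bookkeeping above: subtract the
// finished task's share of the cap, then uncap the host (100%) when the
// remaining cap value rounds to zero.
func releaseTask(capValues, totalPower map[string]float64, host string, taskWatts float64) {
	capValues[host] -= ((taskWatts * capMargin) / totalPower[host]) * 100
	if int(math.Floor(capValues[host]+0.5)) == 0 {
		capValues[host] = 100 // last task gone: lift the cap entirely
	}
}

func main() {
	capValues := map[string]float64{"host-1": 25}   // one task worth 25 points
	totalPower := map[string]float64{"host-1": 200} // assumed host power budget in watts
	releaseTask(capValues, totalPower, "host-1", 100)
	fmt.Println(capValues["host-1"]) // 100: the host is uncapped
}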