formatted the code

Author: Pradyumna Kaushik, 2016-11-25 17:42:08 -05:00; committed by Renan DelValle
parent f6ec974182
commit 81d795f197
4 changed files with 113 additions and 111 deletions


@@ -11,19 +11,19 @@ Also, exposing functions to update or initialize some of the constants.
package constants

var Hosts = []string{"stratos-001.cs.binghamton.edu", "stratos-002.cs.binghamton.edu",
	"stratos-003.cs.binghamton.edu", "stratos-004.cs.binghamton.edu",
	"stratos-005.cs.binghamton.edu", "stratos-006.cs.binghamton.edu",
	"stratos-007.cs.binghamton.edu", "stratos-008.cs.binghamton.edu"}

// Add a new host to the slice of hosts.
func AddNewHost(new_host string) bool {
	// Validation
	if new_host == "" {
		return false
	} else {
		Hosts = append(Hosts, new_host)
		return true
	}
}
// Lower bound of the percentage of requested power that can be allocated to a task.
@@ -38,16 +38,15 @@ var Cap_margin = 0.50

// Modify the cap margin.
func UpdateCapMargin(new_cap_margin float64) bool {
	// Checking if the new_cap_margin is less than the power threshold.
	if new_cap_margin < Starvation_factor {
		return false
	} else {
		Cap_margin = new_cap_margin
		return true
	}
}
// Threshold factor that would make (Cap_margin * task.Watts) equal to (60/100 * task.Watts).
var Starvation_factor = 0.8
@@ -56,32 +55,32 @@ var Total_power map[string]float64

// Initialize the total power per node. This should be done before accepting any set of tasks for scheduling.
func AddTotalPowerForHost(host string, total_power float64) bool {
	// Validation
	is_correct_host := false
	for _, existing_host := range Hosts {
		if host == existing_host {
			is_correct_host = true
		}
	}
	if !is_correct_host {
		return false
	} else {
		Total_power[host] = total_power
		return true
	}
}
// Window size for running average
-var Window_size = 160
+var Window_size = 10

// Update the window size.
func UpdateWindowSize(new_window_size int) bool {
	// Validation
	if new_window_size == 0 {
		return false
-	} else{
+	} else {
		Window_size = new_window_size
		return true
	}
}
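These setters are meant to run during scheduler startup, before any offers are accepted. A minimal usage sketch follows; the import path is hypothetical, the 240.0 W figure is made up, and note that Total_power is only declared above (var Total_power map[string]float64), so a caller has to make the map before AddTotalPowerForHost can write to it:

package main

import (
	"fmt"

	"example.com/electron/constants" // hypothetical import path
)

func main() {
	// Total_power is declared but never initialized in the package;
	// writing to a nil map panics, so create it first.
	constants.Total_power = make(map[string]float64)

	// Register a new node, then record its total power.
	if constants.AddNewHost("stratos-009.cs.binghamton.edu") {
		constants.AddTotalPowerForHost("stratos-009.cs.binghamton.edu", 240.0)
	}

	// 0.75 is below Starvation_factor (0.8), so this update is rejected.
	if !constants.UpdateCapMargin(0.75) {
		fmt.Println("cap margin update rejected; still", constants.Cap_margin)
	}
	constants.UpdateWindowSize(20) // widen the running-average window
}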


@@ -17,7 +17,7 @@ import (
	"container/list"
	"errors"
	"github.com/montanaflynn/stats"
	"log"
	"sort"
)
@@ -118,8 +118,11 @@ A recapping strategy which decides between 2 different recapping schemes.
2. A scheme based on the average of the loads on each node in the cluster.
The recap value picked is the lesser of the two.
+The cleverRecap scheme works well when the cluster is relatively idle; until then,
+the primitive recapping scheme works better.
*/
func (capper clusterwideCapper) cleverRecap(total_power map[string]float64,
	task_monitor map[string][]def.Task, finished_taskId string) (float64, error) {
	// Validation
	if total_power == nil || task_monitor == nil {
@@ -136,48 +139,48 @@ func (capper clusterwideCapper) cleverRecap(total_power map[string]float64,
	// watts usage on each node in the cluster.
	watts_usages := make(map[string][]float64)
	host_of_finished_task := ""
	index_of_finished_task := -1
	for _, host := range constants.Hosts {
		watts_usages[host] = []float64{0.0}
	}
	for host, tasks := range task_monitor {
		for i, task := range tasks {
			if task.TaskID == finished_taskId {
				host_of_finished_task = host
				index_of_finished_task = i
				// Not considering this task for the computation of total_allocated_power and total_running_tasks
				continue
			}
-			watts_usages[host] = append(watts_usages[host], float64(task.Watts) * constants.Cap_margin)
+			watts_usages[host] = append(watts_usages[host], float64(task.Watts)*constants.Cap_margin)
		}
	}
	// Updating task monitor. If recap(...) has deleted the finished task from the taskMonitor,
-	// then this will be ignored.
+	// then this will be ignored. Else (this happens only when an error occurred in recap(...)), we remove it here.
	if host_of_finished_task != "" && index_of_finished_task != -1 {
		log.Printf("Removing task with task [%s] from the list of running tasks\n",
			task_monitor[host_of_finished_task][index_of_finished_task].TaskID)
		task_monitor[host_of_finished_task] = append(task_monitor[host_of_finished_task][:index_of_finished_task],
			task_monitor[host_of_finished_task][index_of_finished_task+1:]...)
	}
	// Need to check whether there are still tasks running on the cluster. If not then we return an error.
	clusterIdle := true
	for _, tasks := range task_monitor {
		if len(tasks) > 0 {
			clusterIdle = false
		}
	}
	if !clusterIdle {
		// load on each node in the cluster.
		loads := []float64{0.0}
		for host, usages := range watts_usages {
			total_usage := 0.0
			for _, usage := range usages {
				total_usage += usage
			}
-			loads = append(loads, total_usage / total_power[host])
+			loads = append(loads, total_usage/total_power[host])
		}
		// Now need to compute the average load.
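The hunk cuts off right before the average is computed; the rest of cleverRecap is not shown in this diff. Purely as a guess at the shape of that step, here is a sketch using the stats package this file already imports (scaling the mean to a percentage is an assumption):

// Hypothetical continuation of cleverRecap, inside the !clusterIdle branch:
// average the per-node loads and turn the mean into a percentage cap.
average_load, err := stats.Mean(loads) // stats.Float64Data is just []float64
if err != nil {
	return 100.0, err
}
return average_load * 100, nil // e.g. a mean load of 0.45 would recap to 45%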
@@ -219,33 +222,33 @@ func (capper clusterwideCapper) recap(total_power map[string]float64,
	total_allocated_power := 0.0
	total_running_tasks := 0
	host_of_finished_task := ""
	index_of_finished_task := -1
	for host, tasks := range task_monitor {
		for i, task := range tasks {
			if task.TaskID == finished_taskId {
				host_of_finished_task = host
				index_of_finished_task = i
				// Not considering this task for the computation of total_allocated_power and total_running_tasks
				continue
			}
			total_allocated_power += (float64(task.Watts) * constants.Cap_margin)
			total_running_tasks++
		}
	}
	// Updating task monitor
	if host_of_finished_task != "" && index_of_finished_task != -1 {
		log.Printf("Removing task with task [%s] from the list of running tasks\n",
			task_monitor[host_of_finished_task][index_of_finished_task].TaskID)
		task_monitor[host_of_finished_task] = append(task_monitor[host_of_finished_task][:index_of_finished_task],
			task_monitor[host_of_finished_task][index_of_finished_task+1:]...)
	}
	// For the last task, total_allocated_power and total_running_tasks would be 0
	if total_allocated_power == 0 && total_running_tasks == 0 {
		return 100, errors.New("No task running on the cluster.")
	}
	average := total_allocated_power / float64(total_running_tasks)
	ratios := []float64{}
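As the doc comment above cleverRecap says, the value actually applied is the least of the two schemes' results. A sketch of how a caller inside this package might combine them (the real call site is outside this diff; assumes the math package is imported):

// Hypothetical caller: take whichever recap value is lower.
new_cap := 100.0 // default: leave nodes at 100% if both schemes fail
if primitive, err := capper.recap(total_power, task_monitor, finished_taskId); err == nil {
	new_cap = primitive
}
if clever, err := capper.cleverRecap(total_power, task_monitor, finished_taskId); err == nil {
	new_cap = math.Min(new_cap, clever)
}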


@@ -76,7 +76,7 @@ func NewProactiveClusterwideCapFCFS(tasks []def.Task, ignoreWatts bool) *Proacti
		ticker:      time.NewTicker(10 * time.Second),
		recapTicker: time.NewTicker(20 * time.Second),
		isCapping:   false,
		isRecapping: false,
	}
	return s
}
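Only the struct literal's alignment changed here, but the two tickers set the scheduler's pacing: cluster-wide capping fires every 10 seconds and recapping every 20. A hypothetical call site from within the same package (the fields are unexported), with tasks coming from the workload definition:

// Build the FCFS proactive capping scheduler; ignoreWatts=false presumably
// keeps watts accounting on. Stop the tickers on shutdown so the goroutines
// reading them can exit.
s := NewProactiveClusterwideCapFCFS(tasks, false)
defer s.ticker.Stop()
defer s.recapTicker.Stop()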


@@ -9,8 +9,8 @@ https://groups.google.com/forum/#!topic/golang-nuts/FT7cjmcL7gw

// Utility struct that helps in sorting the available power by value.
type Pair struct {
	Key   string
	Value float64
}
// A slice of pairs that implements the sort.Interface to sort by value.
@@ -18,37 +18,37 @@ type PairList []Pair

// Swap pairs in the PairList
func (plist PairList) Swap(i, j int) {
	plist[i], plist[j] = plist[j], plist[i]
}

// function to return the length of the pairlist.
func (plist PairList) Len() int {
	return len(plist)
}

// function to compare two elements in pairlist.
func (plist PairList) Less(i, j int) bool {
	return plist[i].Value < plist[j].Value
}
// return the ordered keys of a PairList.
func OrderedKeys(plist PairList) ([]string, error) {
	// Validation
	if plist == nil {
		return nil, errors.New("Invalid argument: plist")
	}
	// Allocate with zero length (capacity len(plist)); allocating with
	// length len(plist) and then appending would leave empty strings at
	// the front of the result.
	ordered_keys := make([]string, 0, len(plist))
	for _, pair := range plist {
		ordered_keys = append(ordered_keys, pair.Key)
	}
	return ordered_keys, nil
}
// determine the max value
func Max(a, b float64) float64 {
	if a > b {
		return a
	} else {
		return b
	}
}
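Because PairList implements Len, Swap, and Less, it satisfies sort.Interface and can be handed directly to sort.Sort. A self-contained example (the type definitions are copied from this file; the power values are made up):

package main

import (
	"fmt"
	"sort"
)

// Pair and PairList as defined above, repeated so the example stands alone.
type Pair struct {
	Key   string
	Value float64
}

type PairList []Pair

func (plist PairList) Swap(i, j int)      { plist[i], plist[j] = plist[j], plist[i] }
func (plist PairList) Len() int           { return len(plist) }
func (plist PairList) Less(i, j int) bool { return plist[i].Value < plist[j].Value }

func main() {
	// Available power per host, sorted ascending by value.
	available_power := PairList{
		{Key: "stratos-001.cs.binghamton.edu", Value: 120.5},
		{Key: "stratos-002.cs.binghamton.edu", Value: 95.0},
		{Key: "stratos-003.cs.binghamton.edu", Value: 210.0},
	}
	sort.Sort(available_power)
	// Prints the host with the least available power: stratos-002.cs.binghamton.edu
	fmt.Println(available_power[0].Key)
}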