Fixed the comments to be capitalized at the start and to terminate with a period.

Pradyumna Kaushik 2017-09-28 15:36:47 -04:00
parent 577120ae7c
commit b807625b78
19 changed files with 194 additions and 201 deletions


@@ -6,20 +6,20 @@ import (
"sort"
)
-// Information about a cluster of tasks
+// Information about a cluster of tasks.
type TaskCluster struct {
ClusterIndex int
Tasks []Task
SizeScore int // How many other clusters is this cluster bigger than
}
-// Classification of Tasks using KMeans clustering using the watts consumption observations
+// Classification of Tasks using KMeans clustering using the watts consumption observations.
type TasksToClassify []Task
// Basic taskObservation calculator. This returns an array consisting of the MMPU requirements of a task.
func (tc TasksToClassify) taskObservationCalculator(task Task) []float64 {
if task.ClassToWatts != nil {
-// taking the aggregate
+// Taking the aggregate.
observations := []float64{}
for _, watts := range task.ClassToWatts {
observations = append(observations, watts)
@@ -29,7 +29,7 @@ func (tc TasksToClassify) taskObservationCalculator(task Task) []float64 {
return []float64{task.Watts}
} else {
log.Fatal("Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload.")
-return []float64{0.0} // won't reach here
+return []float64{0.0} // Won't reach here.
}
}
@@ -41,9 +41,9 @@ func ClassifyTasks(tasks []Task, numberOfClusters int) []TaskCluster {
func (tc TasksToClassify) classify(numberOfClusters int, taskObservation func(task Task) []float64) []TaskCluster {
clusters := make(map[int][]Task)
observations := getObservations(tc, taskObservation)
-// TODO: Make the max number of rounds configurable based on the size of the workload
+// TODO: Make the max number of rounds configurable based on the size of the workload.
// The max number of rounds (currently defaulted to 100) is the number of iterations performed to obtain
-// distinct clusters. When the data size becomes very large, we would need more iterations for clustering.
+// distinct clusters. When the data size becomes very large, we would need more iterations for clustering.
if trained, centroids := gokmeans.Train(observations, numberOfClusters, 100); trained {
for i := 0; i < len(observations); i++ {
observation := observations[i]
@@ -58,7 +58,7 @@ func (tc TasksToClassify) classify(numberOfClusters int, taskObservation func(ta
return labelAndOrder(clusters, numberOfClusters, taskObservation)
}
-// Record observations
+// Record observations.
func getObservations(tasks []Task, taskObservation func(task Task) []float64) []gokmeans.Node {
observations := []gokmeans.Node{}
for i := 0; i < len(tasks); i++ {
@@ -67,7 +67,7 @@ func getObservations(tasks []Task, taskObservation func(task Task) []float64) []
return observations
}
-// Size tasks based on the power consumption
+// Size tasks based on the power consumption.
// TODO: Size the cluster in a better way other than just taking an aggregate of the watts resource requirement.
func clusterSize(tasks []Task, taskObservation func(task Task) []float64) float64 {
size := 0.0
@@ -79,12 +79,12 @@ func clusterSize(tasks []Task, taskObservation func(task Task) []float64) float6
return size
}
-// Order clusters in increasing order of task heaviness
+// Order clusters in increasing order of task heaviness.
func labelAndOrder(clusters map[int][]Task, numberOfClusters int, taskObservation func(task Task) []float64) []TaskCluster {
-// Determine the position of the cluster in the ordered list of clusters
+// Determine the position of the cluster in the ordered list of clusters.
sizedClusters := []TaskCluster{}
-// Initializing
+// Initializing.
for i := 0; i < numberOfClusters; i++ {
sizedClusters = append(sizedClusters, TaskCluster{
ClusterIndex: i,
@@ -94,10 +94,10 @@ func labelAndOrder(clusters map[int][]Task, numberOfClusters int, taskObservatio
}
for i := 0; i < numberOfClusters-1; i++ {
-// Sizing the current cluster
+// Sizing the current cluster.
sizeI := clusterSize(clusters[i], taskObservation)
-// Comparing with the other clusters
+// Comparing with the other clusters.
for j := i + 1; j < numberOfClusters; j++ {
sizeJ := clusterSize(clusters[j], taskObservation)
if sizeI > sizeJ {
@@ -108,7 +108,7 @@ func labelAndOrder(clusters map[int][]Task, numberOfClusters int, taskObservatio
}
}
-// Sorting the clusters based on sizeScore
+// Sorting the clusters based on sizeScore.
sort.SliceStable(sizedClusters, func(i, j int) bool {
return sizedClusters[i].SizeScore <= sizedClusters[j].SizeScore
})
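For reference, a minimal usage sketch of the classification entry point (hypothetical, not part of this commit): it assumes the snippet sits in the same package, so the existing "log" import, the ClassifyTasks function, and the Task type's Watts field shown above are available.

func exampleClassify() {
	// Tasks with only the Watts attribute set; taskObservationCalculator
	// falls back to task.Watts when ClassToWatts is nil.
	tasks := []Task{
		{Watts: 50.0},
		{Watts: 55.0},
		{Watts: 120.0},
		{Watts: 130.0},
	}
	// Cluster the workload into two groups based on watts consumption.
	clusters := ClassifyTasks(tasks, 2)
	for _, tc := range clusters {
		// SizeScore counts how many other clusters this cluster outweighs,
		// so the clusters are listed from lightest to heaviest.
		log.Printf("cluster %d: score=%d, tasks=%d", tc.ClusterIndex, tc.SizeScore, len(tc.Tasks))
	}
}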