Consolidated the ClassifyTasks(...) functions from topHeavy and bottomHeavy into a single function in def/taskUtils.go. Added TODOs for refining how the kmeans-classified clusters are sorted.

Pradyumna Kaushik 2017-08-22 13:00:40 -04:00
parent 41ef269c62
commit f4459c8cbf
4 changed files with 31 additions and 37 deletions
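The fourth changed file, def/taskUtils.go, is not shown in this view. Based on the closures this commit removes from bottomHeavy.go and topHeavy.go (see the hunks below) and the new call site def.ClassifyTasks(tasks, 2), the consolidated helper most likely looks roughly like the sketch below; the exact signature and the TaskCluster return type are assumptions, since only the call site is visible here.

```go
// Sketch of the consolidated helper assumed to live in def/taskUtils.go.
// The observation function is lifted from the closures removed from
// bottomHeavy.go and topHeavy.go; TasksToClassify and TaskCluster are
// assumed to be the package's existing types.
package def

import "log"

// ClassifyTasks clusters tasks into numberOfClusters groups with kmeans,
// using each task's watts requirement(s) as the observations.
func ClassifyTasks(tasks []Task, numberOfClusters int) []TaskCluster {
	tasksToClassify := TasksToClassify(tasks)
	return tasksToClassify.ClassifyTasks(numberOfClusters, func(task Task) []float64 {
		if task.ClassToWatts != nil {
			// Taking the aggregate of the class-to-watts values.
			observations := []float64{}
			for _, watts := range task.ClassToWatts {
				observations = append(observations, watts)
			}
			return observations
		} else if task.Watts != 0.0 {
			return []float64{task.Watts}
		} else {
			log.Fatal("Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload.")
			return []float64{0.0} // unreachable after log.Fatal
		}
	})
}
```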

View file

@@ -7,6 +7,7 @@ To Do:
* Fix the race condition on 'tasksRunning' in proactiveclusterwidecappingfcfs.go and proactiveclusterwidecappingranked.go
* **Critical**: Separate the capping strategies from the scheduling algorithms and make it possible to use any capping strategy with any scheduler.
* Create a package that would contain routines to perform various logging and move helpers.coLocated(...) into that.
* Refine the algorithm that sorts the clusters of tasks produced by the kmeans classification, and reduce its time complexity.
Scheduling Algorithms:

View file

@@ -65,24 +65,8 @@ func NewBottomHeavy(tasks []def.Task, wattsAsAResource bool, schedTracePrefix st
log.Fatal(err)
}
// Separating small tasks from large tasks.
// Classification done based on MMPU watts requirements.
tasksToClassify := def.TasksToClassify(tasks)
classifiedTasks := tasksToClassify.ClassifyTasks(2, func(task def.Task) []float64 {
if task.ClassToWatts != nil {
// taking the aggregate
observations := []float64{}
for _, watts := range task.ClassToWatts {
observations = append(observations, watts)
}
return observations
} else if task.Watts != 0.0 {
return []float64{task.Watts}
} else {
log.Fatal("Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload.")
return []float64{0.0} // won't reach here
}
})
// Classification done based on MMPU watts requirements, into 2 clusters.
classifiedTasks := def.ClassifyTasks(tasks, 2)
s := &BottomHeavy{
base: base{
@@ -95,6 +79,7 @@ func NewBottomHeavy(tasks []def.Task, wattsAsAResource bool, schedTracePrefix st
RecordPCP: false,
schedTrace: log.New(logFile, "", log.LstdFlags),
},
// Separating small tasks from large tasks.
smallTasks: classifiedTasks[0].Tasks,
largeTasks: classifiedTasks[1].Tasks,
}

View file

@@ -64,24 +64,8 @@ func NewTopHeavy(tasks []def.Task, wattsAsAResource bool, schedTracePrefix strin
log.Fatal(err)
}
// Separating small tasks from large tasks.
// Classification done based on MMPU watts requirements.
tasksToClassify := def.TasksToClassify(tasks)
classifiedTasks := tasksToClassify.ClassifyTasks(2, func(task def.Task) []float64 {
if task.ClassToWatts != nil {
// taking the aggregate
observations := []float64{}
for _, watts := range task.ClassToWatts {
observations = append(observations, watts)
}
return observations
} else if task.Watts != 0.0 {
return []float64{task.Watts}
} else {
log.Fatal("Unable to classify tasks. Missing Watts or ClassToWatts attribute in workload.")
return []float64{0.0} // won't be here
}
})
// Classification done based on MMPU watts requirements, into 2 clusters.
classifiedTasks := def.ClassifyTasks(tasks, 2)
s := &TopHeavy{
base: base{
@@ -94,6 +78,7 @@ func NewTopHeavy(tasks []def.Task, wattsAsAResource bool, schedTracePrefix strin
RecordPCP: false,
schedTrace: log.New(logFile, "", log.LstdFlags),
},
// Separating small tasks from large tasks.
smallTasks: classifiedTasks[0].Tasks,
largeTasks: classifiedTasks[1].Tasks,
}
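The new README TODO added in this commit notes that the ordering of the kmeans clusters still needs refinement; both schedulers above rely on classifiedTasks[0] holding the small tasks and classifiedTasks[1] the large ones. A minimal sketch of one possible ordering criterion follows, assuming a TaskCluster type with a Tasks field and ordering clusters by the mean Watts of their tasks; both the type name and the criterion are assumptions, not the project's confirmed approach.

```go
// Hypothetical helper: order kmeans clusters so that lower-watts clusters come
// first, making clusters[0] the "small" tasks and clusters[1] the "large" ones.
// The sort criterion (mean Watts per cluster) is only an assumption.
package def

import "sort"

func sortClustersByMeanWatts(clusters []TaskCluster) {
	meanWatts := func(c TaskCluster) float64 {
		if len(c.Tasks) == 0 {
			return 0.0
		}
		total := 0.0
		for _, task := range c.Tasks {
			total += task.Watts
		}
		return total / float64(len(c.Tasks))
	}
	// For clarity the mean is recomputed inside the comparator; with a small,
	// fixed number of clusters (here 2) the extra cost is negligible.
	sort.Slice(clusters, func(i, j int) bool {
		return meanWatts(clusters[i]) < meanWatts(clusters[j])
	})
}
```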