Fixed the comments to be capitalized at the start and also terminate with a period.
This commit is contained in:
parent
577120ae7c
commit
b807625b78
19 changed files with 194 additions and 201 deletions
|
@ -40,10 +40,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
scanner := bufio.NewScanner(pipe)
|
||||
|
||||
go func(logging *bool, hiThreshold, loThreshold float64) {
|
||||
// Get names of the columns
|
||||
// Get names of the columns.
|
||||
scanner.Scan()
|
||||
|
||||
// Write to logfile
|
||||
// Write to logfile.
|
||||
logFile.WriteString(scanner.Text() + "\n")
|
||||
|
||||
headers := strings.Split(scanner.Text(), ",")
|
||||
|
@ -54,22 +54,21 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
|
||||
for i, hostMetric := range headers {
|
||||
metricSplit := strings.Split(hostMetric, ":")
|
||||
//log.Printf("%d Host %s: Metric: %s\n", i, split[0], split[1])
|
||||
|
||||
if strings.Contains(metricSplit[1], "RAPL_ENERGY_PKG") ||
|
||||
strings.Contains(metricSplit[1], "RAPL_ENERGY_DRAM") {
|
||||
//fmt.Println("Index: ", i)
|
||||
powerIndexes = append(powerIndexes, i)
|
||||
indexToHost[i] = metricSplit[0]
|
||||
|
||||
// Only create one ring per host
|
||||
// Only create one ring per host.
|
||||
if _, ok := powerHistories[metricSplit[0]]; !ok {
|
||||
powerHistories[metricSplit[0]] = ring.New(20) // Two PKGS, two DRAM per node, 20 = 5 seconds of tracking
|
||||
// Two PKGS, two DRAM per node, 20 = 5 seconds of tracking.
|
||||
powerHistories[metricSplit[0]] = ring.New(20)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Throw away first set of results
|
||||
// Throw away first set of results.
|
||||
scanner.Scan()
|
||||
|
||||
cappedHosts := make(map[string]bool)
|
||||
|
@ -108,7 +107,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
|
||||
if clusterMean > hiThreshold {
|
||||
log.Printf("Need to cap a node")
|
||||
// Create statics for all victims and choose one to cap
|
||||
// Create statistics for all victims and choose one to cap.
|
||||
victims := make([]pcp.Victim, 0, 8)
|
||||
|
||||
// TODO: Just keep track of the largest to reduce from nlogn to n
|
||||
|
@ -116,15 +115,15 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
|
||||
histMean := pcp.AverageNodePowerHistory(history)
|
||||
|
||||
// Consider doing mean calculations using go routines if we need to speed up
|
||||
// Consider doing mean calculations using go routines if we need to speed up.
|
||||
victims = append(victims, pcp.Victim{Watts: histMean, Host: name})
|
||||
}
|
||||
|
||||
sort.Sort(pcp.VictimSorter(victims)) // Sort by average wattage
|
||||
sort.Sort(pcp.VictimSorter(victims)) // Sort by average wattage.
|
||||
|
||||
// From best victim to worst, if everyone is already capped NOOP
|
||||
// From best victim to worst, if everyone is already capped NOOP.
|
||||
for _, victim := range victims {
|
||||
// Only cap if host hasn't been capped yet
|
||||
// Only cap if host hasn't been capped yet.
|
||||
if !cappedHosts[victim.Host] {
|
||||
cappedHosts[victim.Host] = true
|
||||
orderCapped = append(orderCapped, victim.Host)
|
||||
|
@ -132,7 +131,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
|
||||
log.Print("Error capping host")
|
||||
}
|
||||
break // Only cap one machine at at time
|
||||
break // Only cap one machine at a time.
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -142,7 +141,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
host := orderCapped[len(orderCapped)-1]
|
||||
orderCapped = orderCapped[:len(orderCapped)-1]
|
||||
cappedHosts[host] = false
|
||||
// User RAPL package to send uncap
|
||||
// Use RAPL package to send uncap.
|
||||
log.Printf("Uncapping host %s", host)
|
||||
if err := rapl.Cap(host, "rapl", 100); err != nil {
|
||||
log.Print("Error uncapping host")
|
||||
|
@ -169,7 +168,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, prefix s
|
|||
time.Sleep(5 * time.Second)
|
||||
|
||||
// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
|
||||
// kill process and all children processes
|
||||
// Kill process and all children processes.
|
||||
syscall.Kill(-pgid, 15)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -54,10 +54,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
|
|||
scanner := bufio.NewScanner(pipe)
|
||||
|
||||
go func(logging *bool, hiThreshold, loThreshold float64) {
|
||||
// Get names of the columns
|
||||
// Get names of the columns.
|
||||
scanner.Scan()
|
||||
|
||||
// Write to logfile
|
||||
// Write to logfile.
|
||||
logFile.WriteString(scanner.Text() + "\n")
|
||||
|
||||
headers := strings.Split(scanner.Text(), ",")
|
||||
|
@ -68,30 +68,29 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
|
|||
|
||||
for i, hostMetric := range headers {
|
||||
metricSplit := strings.Split(hostMetric, ":")
|
||||
//log.Printf("%d Host %s: Metric: %s\n", i, split[0], split[1])
|
||||
|
||||
if strings.Contains(metricSplit[1], "RAPL_ENERGY_PKG") ||
|
||||
strings.Contains(metricSplit[1], "RAPL_ENERGY_DRAM") {
|
||||
//fmt.Println("Index: ", i)
|
||||
powerIndexes = append(powerIndexes, i)
|
||||
indexToHost[i] = metricSplit[0]
|
||||
|
||||
// Only create one ring per host
|
||||
// Only create one ring per host.
|
||||
if _, ok := powerHistories[metricSplit[0]]; !ok {
|
||||
powerHistories[metricSplit[0]] = ring.New(20) // Two PKGS, two DRAM per node, 20 = 5 seconds of tracking
|
||||
// Two PKGS, two DRAM per node, 20 = 5 seconds of tracking.
|
||||
powerHistories[metricSplit[0]] = ring.New(20)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Throw away first set of results
|
||||
// Throw away first set of results.
|
||||
scanner.Scan()
|
||||
|
||||
// To keep track of the capped states of the capped victims
|
||||
// To keep track of the capped states of the capped victims.
|
||||
cappedVictims := make(map[string]float64)
|
||||
// TODO: Come with a better name for this.
|
||||
orderCapped := make([]string, 0, 8)
|
||||
// TODO: Change this to a priority queue ordered by the cap value. This will get rid of the sorting performed in the code.
|
||||
// Parallel data structure to orderCapped to keep track of the uncapped states of the uncapped victims
|
||||
// Parallel data structure to orderCapped to keep track of the uncapped states of the uncapped victims.
|
||||
orderCappedVictims := make(map[string]float64)
|
||||
clusterPowerHist := ring.New(5)
|
||||
seconds := 0
|
||||
|
@ -128,7 +127,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
|
|||
log.Println("Need to cap a node")
|
||||
log.Printf("Cap values of capped victims: %v", cappedVictims)
|
||||
log.Printf("Cap values of victims to uncap: %v", orderCappedVictims)
|
||||
// Create statics for all victims and choose one to cap
|
||||
// Create statistics for all victims and choose one to cap.
|
||||
victims := make([]pcp.Victim, 0, 8)
|
||||
|
||||
// TODO: Just keep track of the largest to reduce from nlogn to n
|
||||
|
@ -136,46 +135,46 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
|
|||
|
||||
histMean := pcp.AverageNodePowerHistory(history)
|
||||
|
||||
// Consider doing mean calculations using go routines if we need to speed up
|
||||
// Consider doing mean calculations using go routines if we need to speed up.
|
||||
victims = append(victims, pcp.Victim{Watts: histMean, Host: name})
|
||||
}
|
||||
|
||||
sort.Sort(pcp.VictimSorter(victims)) // Sort by average wattage
|
||||
sort.Sort(pcp.VictimSorter(victims)) // Sort by average wattage.
|
||||
|
||||
// Finding the best victim to cap in a round robin manner
|
||||
// Finding the best victim to cap in a round robin manner.
|
||||
newVictimFound := false
|
||||
alreadyCappedHosts := []string{} // Host-names of victims that are already capped
|
||||
alreadyCappedHosts := []string{} // Host-names of victims that are already capped.
|
||||
for i := 0; i < len(victims); i++ {
|
||||
// Try to pick a victim that hasn't been capped yet
|
||||
// Try to pick a victim that hasn't been capped yet.
|
||||
if _, ok := cappedVictims[victims[i].Host]; !ok {
|
||||
// If this victim can't be capped further, then we move on to find another victim
|
||||
// If this victim can't be capped further, then we move on to find another victim.
|
||||
if _, ok := orderCappedVictims[victims[i].Host]; ok {
|
||||
continue
|
||||
}
|
||||
// Need to cap this victim
|
||||
// Need to cap this victim.
|
||||
if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {
|
||||
log.Printf("Error capping host %s", victims[i].Host)
|
||||
} else {
|
||||
log.Printf("Capped host[%s] at %f", victims[i].Host, 50.0)
|
||||
// Keeping track of this victim and it's cap value
|
||||
// Keeping track of this victim and it's cap value.
|
||||
cappedVictims[victims[i].Host] = 50.0
|
||||
newVictimFound = true
|
||||
// This node can be uncapped and hence adding to orderCapped
|
||||
// This node can be uncapped and hence adding to orderCapped.
|
||||
orderCapped = append(orderCapped, victims[i].Host)
|
||||
orderCappedVictims[victims[i].Host] = 50.0
|
||||
break // Breaking only on successful cap
|
||||
break // Breaking only on successful cap.
|
||||
}
|
||||
} else {
|
||||
alreadyCappedHosts = append(alreadyCappedHosts, victims[i].Host)
|
||||
}
|
||||
}
|
||||
// If no new victim found, then we need to cap the best victim among the ones that are already capped
|
||||
// If no new victim found, then we need to cap the best victim among the ones that are already capped.
|
||||
if !newVictimFound {
|
||||
canCapAlreadyCappedVictim := false
|
||||
for i := 0; i < len(alreadyCappedHosts); i++ {
|
||||
// If already capped then the host must be present in orderCappedVictims
|
||||
// If already capped then the host must be present in orderCappedVictims.
|
||||
capValue := orderCappedVictims[alreadyCappedHosts[i]]
|
||||
// If capValue is greater than the threshold then cap, else continue
|
||||
// If capValue is greater than the threshold then cap, else continue.
|
||||
if capValue > constants.LowerCapLimit {
|
||||
newCapValue := getNextCapValue(capValue, 2)
|
||||
if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {
|
||||
|
@ -183,14 +182,14 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
|
|||
} else {
|
||||
// Successful cap
|
||||
log.Printf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
|
||||
// Checking whether this victim can be capped further
|
||||
// Checking whether this victim can be capped further.
|
||||
if newCapValue <= constants.LowerCapLimit {
|
||||
// Deleting victim from cappedVictims
|
||||
// Deleting victim from cappedVictims.
|
||||
delete(cappedVictims, alreadyCappedHosts[i])
|
||||
// Updating the cap value in orderCappedVictims
|
||||
// Updating the cap value in orderCappedVictims.
|
||||
orderCappedVictims[alreadyCappedHosts[i]] = newCapValue
|
||||
} else {
|
||||
// Updating the cap value
|
||||
// Updating the cap value.
|
||||
cappedVictims[alreadyCappedHosts[i]] = newCapValue
|
||||
orderCappedVictims[alreadyCappedHosts[i]] = newCapValue
|
||||
}
|
||||
|
@ -198,9 +197,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
|
|||
break // Breaking only on successful cap.
|
||||
}
|
||||
} else {
|
||||
// Do nothing
|
||||
// Do nothing.
|
||||
// Continue to find another victim to cap.
|
||||
// If cannot find any victim, then all nodes have been capped to the maximum and we stop capping at this point.
|
||||
// If cannot find any victim, then all nodes have been
|
||||
// capped to the maximum and we stop capping at this point.
|
||||
}
|
||||
}
|
||||
if !canCapAlreadyCappedVictim {
|
||||
|
@ -213,9 +213,9 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
|
|||
log.Printf("Cap values of capped victims: %v", cappedVictims)
|
||||
log.Printf("Cap values of victims to uncap: %v", orderCappedVictims)
|
||||
if len(orderCapped) > 0 {
|
||||
// We pick the host that is capped the most to uncap
|
||||
// We pick the host that is capped the most to uncap.
|
||||
orderCappedToSort := utilities.GetPairList(orderCappedVictims)
|
||||
sort.Sort(orderCappedToSort) // Sorted hosts in non-decreasing order of capped states
|
||||
sort.Sort(orderCappedToSort) // Sorted hosts in non-decreasing order of capped states.
|
||||
hostToUncap := orderCappedToSort[0].Key
|
||||
// Uncapping the host.
|
||||
// This is a floating point operation and might suffer from precision loss.
|
||||
|
@ -223,23 +223,23 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
|
|||
if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {
|
||||
log.Printf("Error uncapping host[%s]", hostToUncap)
|
||||
} else {
|
||||
// Successful uncap
|
||||
// Successful uncap.
|
||||
log.Printf("Uncapped host[%s] to %f", hostToUncap, newUncapValue)
|
||||
// Can we uncap this host further. If not, then we remove its entry from orderCapped
|
||||
if newUncapValue >= 100.0 { // can compare using ==
|
||||
// Deleting entry from orderCapped
|
||||
// Can we uncap this host further. If not, then we remove its entry from orderCapped.
|
||||
if newUncapValue >= 100.0 { // Can compare using ==
|
||||
// Deleting entry from orderCapped.
|
||||
for i, victimHost := range orderCapped {
|
||||
if victimHost == hostToUncap {
|
||||
orderCapped = append(orderCapped[:i], orderCapped[i+1:]...)
|
||||
break // We are done removing host from orderCapped
|
||||
break // We are done removing host from orderCapped.
|
||||
}
|
||||
}
|
||||
// Removing entry for host from the parallel data structure
|
||||
// Removing entry for host from the parallel data structure.
|
||||
delete(orderCappedVictims, hostToUncap)
|
||||
// Removing entry from cappedVictims as this host is no longer capped
|
||||
// Removing entry from cappedVictims as this host is no longer capped.
|
||||
delete(cappedVictims, hostToUncap)
|
||||
} else if newUncapValue > constants.LowerCapLimit { // this check is unnecessary and can be converted to 'else'
|
||||
// Updating the cap value
|
||||
} else if newUncapValue > constants.LowerCapLimit { // This check is unnecessary and can be converted to 'else'.
|
||||
// Updating the cap value.
|
||||
orderCappedVictims[hostToUncap] = newUncapValue
|
||||
cappedVictims[hostToUncap] = newUncapValue
|
||||
}
|
||||
|
@ -268,7 +268,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, pref
|
|||
time.Sleep(5 * time.Second)
|
||||
|
||||
// http://stackoverflow.com/questions/22470193/why-wont-go-kill-a-child-process-correctly
|
||||
// kill process and all children processes
|
||||
// Kill process and all children processes.
|
||||
syscall.Kill(-pgid, 15)
|
||||
return
|
||||
}
|
||||
|
|
Reference in a new issue