Fixed import alias prefix.

Changed the prefix of the import aliases from 'elec' to 'elek'.
Pradyumna Kaushik 2018-10-04 13:45:31 -04:00
parent b06bdeba59
commit 8e87bcb439
8 changed files with 115 additions and 115 deletions
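
For readers unfamiliar with Go's import aliasing, this is why the rename fans out across all eight files: the alias chosen in the import declaration is the only name by which the package can be referenced in that file, so changing it forces an edit at every use site. A minimal, self-contained sketch of the mechanism (using a standard-library package rather than elektron's):

package main

// The local alias, not the package's own name, is what qualifies every
// reference below; renaming the alias requires touching each call site.
import (
	fmtAlias "fmt"
)

func main() {
	fmtAlias.Println("aliased import in use") // "fmt.Println" would not compile here
}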


@@ -10,11 +10,11 @@ import (
 "github.com/mesos/mesos-go/api/v0/scheduler"
 "github.com/montanaflynn/stats"
-elecLogDef "gitlab.com/spdf/elektron/logging/def"
+elekLogDef "gitlab.com/spdf/elektron/logging/def"
 "gitlab.com/spdf/elektron/schedulers"
 )
-func Start(quit chan struct{}, logging *bool, logMType chan elecLogDef.LogMessageType, logMsg chan string, s scheduler.Scheduler) {
+func Start(quit chan struct{}, logging *bool, logMType chan elekLogDef.LogMessageType, logMsg chan string, s scheduler.Scheduler) {
 baseSchedRef := s.(*schedulers.BaseScheduler)
 const pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c config"
 cmd := exec.Command("sh", "-c", pcpCommand)
@@ -33,10 +33,10 @@ func Start(quit chan struct{}, logging *bool, logMType chan elecLogDef.LogMessag
 scanner.Scan()
 // Write to logfile
-logMType <- elecLogDef.PCP
+logMType <- elekLogDef.PCP
 logMsg <- scanner.Text()
-logMType <- elecLogDef.DEG_COL
+logMType <- elekLogDef.DEG_COL
 logMsg <- "CPU Variance, CPU Task Share Variance, Memory Variance, Memory Task Share Variance"
 // Throw away first set of results
@@ -48,7 +48,7 @@ func Start(quit chan struct{}, logging *bool, logMType chan elecLogDef.LogMessag
 text := scanner.Text()
 if *logging {
-logMType <- elecLogDef.PCP
+logMType <- elekLogDef.PCP
 logMsg <- text
 }
@@ -84,12 +84,12 @@ func Start(quit chan struct{}, logging *bool, logMType chan elecLogDef.LogMessag
 memVariance, _ := stats.Variance(memUtils)
 memTaskSharesVariance, _ := stats.Variance(memTaskShares)
-logMType <- elecLogDef.DEG_COL
+logMType <- elekLogDef.DEG_COL
 logMsg <- fmt.Sprintf("%f, %f, %f, %f", cpuVariance, cpuTaskSharesVariance, memVariance, memTaskSharesVariance)
 }
 }(logging)
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "PCP logging started"
 if err := cmd.Start(); err != nil {
@@ -100,7 +100,7 @@ func Start(quit chan struct{}, logging *bool, logMType chan elecLogDef.LogMessag
 select {
 case <-quit:
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "Stopping PCP logging in 5 seconds"
 time.Sleep(5 * time.Second)
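
Throughout these files, one log record is two channel sends: the message type on logMType, then the body on logMsg, consumed by a listener goroutine (go logger.Listen(logMType, logMsg) appears in the main.go hunks further down). A minimal sketch of that two-channel protocol, with hypothetical stand-ins for elekLogDef's types:

package main

import "fmt"

// LogMessageType stands in for elekLogDef.LogMessageType: a tag sent ahead
// of every message so the listener can route it to the right destination.
type LogMessageType int

const (
	GENERAL LogMessageType = iota
	PCP
)

// listen plays the role of logger.Listen: it pairs each type received on
// logMType with the message that follows on logMsg.
func listen(logMType chan LogMessageType, logMsg chan string) {
	for mType := range logMType {
		fmt.Printf("[%d] %s\n", mType, <-logMsg)
	}
}

func main() {
	logMType := make(chan LogMessageType)
	logMsg := make(chan string)
	done := make(chan struct{})
	go func() {
		listen(logMType, logMsg)
		close(done)
	}()
	// Producer side, exactly as in Start(): type first, then message.
	logMType <- GENERAL
	logMsg <- "PCP logging started"
	close(logMType) // stop the listener...
	<-done          // ...and wait for it to drain before exiting.
}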


@@ -12,13 +12,13 @@ import (
 "syscall"
 "time"
-elecLogDef "gitlab.com/spdf/elektron/logging/def"
+elekLogDef "gitlab.com/spdf/elektron/logging/def"
 "gitlab.com/spdf/elektron/pcp"
 "gitlab.com/spdf/elektron/rapl"
 )
 func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThreshold, loThreshold float64,
-logMType chan elecLogDef.LogMessageType, logMsg chan string) {
+logMType chan elekLogDef.LogMessageType, logMsg chan string) {
 const pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c config"
 cmd := exec.Command("sh", "-c", pcpCommand)
@@ -41,7 +41,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 scanner.Scan()
 // Write to logfile
-logMType <- elecLogDef.PCP
+logMType <- elekLogDef.PCP
 logMsg <- scanner.Text()
 headers := strings.Split(scanner.Text(), ",")
@@ -77,10 +77,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 for scanner.Scan() {
 if *logging {
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "Logging PCP..."
 split := strings.Split(scanner.Text(), ",")
-logMType <- elecLogDef.PCP
+logMType <- elekLogDef.PCP
 logMsg <- scanner.Text()
 totalPower := 0.0
@@ -92,7 +92,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 powerHistories[host].Value = power
 powerHistories[host] = powerHistories[host].Next()
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Host: %s, Power: %f", indexToHost[powerIndex], (power * pcp.RAPLUnits))
 totalPower += power
@@ -104,11 +104,11 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Total power: %f, %d Sec Avg: %f", clusterPower, clusterPowerHist.Len(), clusterMean)
 if clusterMean > hiThreshold {
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "Need to cap a node"
 // Create statics for all victims and choose one to cap
 victims := make([]pcp.Victim, 0, 8)
@@ -130,10 +130,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 if !cappedHosts[victim.Host] {
 cappedHosts[victim.Host] = true
 orderCapped = append(orderCapped, victim.Host)
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Capping Victim %s Avg. Wattage: %f", victim.Host, victim.Watts*pcp.RAPLUnits)
 if err := rapl.Cap(victim.Host, "rapl", 50); err != nil {
-logMType <- elecLogDef.ERROR
+logMType <- elekLogDef.ERROR
 logMsg <- "Error capping host"
 }
 break // Only cap one machine at at time.
@@ -148,10 +148,10 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 cappedHosts[host] = false
 // User RAPL package to send uncap.
 log.Printf("Uncapping host %s", host)
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Uncapped host %s", host)
 if err := rapl.Cap(host, "rapl", 100); err != nil {
-logMType <- elecLogDef.ERROR
+logMType <- elekLogDef.ERROR
 logMsg <- "Error capping host"
 }
 }
@@ -162,7 +162,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 }
 }(logging, hiThreshold, loThreshold)
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "PCP logging started"
 if err := cmd.Start(); err != nil {
@@ -173,7 +173,7 @@ func StartPCPLogAndExtremaDynamicCap(quit chan struct{}, logging *bool, hiThresh
 select {
 case <-quit:
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "Stopping PCP logging in 5 seconds"
 time.Sleep(5 * time.Second)
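
The powerHistories[host].Value = power / powerHistories[host] = powerHistories[host].Next() pairs above are container/ring writes: each host keeps a fixed-size sliding window of recent power readings. A sketch of how such a window is written and averaged; the body of pcp.AverageClusterPowerHistory is not shown in this diff, so the averaging below is an assumption from the call sites:

package main

import (
	"container/ring"
	"fmt"
)

// averagePowerHistory mirrors what pcp.AverageClusterPowerHistory appears to
// do here (an assumption, inferred from how it is called): average the
// readings accumulated in a fixed-size ring buffer.
func averagePowerHistory(r *ring.Ring) float64 {
	total, count := 0.0, 0
	r.Do(func(v interface{}) {
		if v != nil { // slots not yet written are nil
			total += v.(float64)
			count++
		}
	})
	if count == 0 {
		return 0
	}
	return total / float64(count)
}

func main() {
	// A 5-sample sliding window, written the same way the capper writes
	// powerHistories[host]: set Value, then advance to Next().
	hist := ring.New(5)
	for _, p := range []float64{90, 95, 110, 120} {
		hist.Value = p
		hist = hist.Next()
	}
	fmt.Printf("window mean: %.2f W\n", averagePowerHistory(hist))
}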


@@ -14,7 +14,7 @@ import (
 "time"
 "gitlab.com/spdf/elektron/constants"
-elecLogDef "gitlab.com/spdf/elektron/logging/def"
+elekLogDef "gitlab.com/spdf/elektron/logging/def"
 "gitlab.com/spdf/elektron/pcp"
 "gitlab.com/spdf/elektron/rapl"
 "gitlab.com/spdf/elektron/utilities"
@@ -31,14 +31,14 @@ func getNextCapValue(curCapValue float64, precision int) float64 {
 }
 func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiThreshold, loThreshold float64,
-logMType chan elecLogDef.LogMessageType, logMsg chan string) {
+logMType chan elekLogDef.LogMessageType, logMsg chan string) {
 const pcpCommand string = "pmdumptext -m -l -f '' -t 1.0 -d , -c config"
 cmd := exec.Command("sh", "-c", pcpCommand)
 cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
 if hiThreshold < loThreshold {
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "High threshold is lower than low threshold!"
 }
@@ -55,7 +55,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 scanner.Scan()
 // Write to logfile
-logMType <- elecLogDef.PCP
+logMType <- elekLogDef.PCP
 logMsg <- scanner.Text()
 headers := strings.Split(scanner.Text(), ",")
@@ -95,10 +95,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 for scanner.Scan() {
 if *logging {
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "Logging PCP..."
 split := strings.Split(scanner.Text(), ",")
-logMType <- elecLogDef.PCP
+logMType <- elekLogDef.PCP
 logMsg <- scanner.Text()
 totalPower := 0.0
@@ -110,7 +110,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 powerHistories[host].Value = power
 powerHistories[host] = powerHistories[host].Next()
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Host: %s, Power %f",
 indexToHost[powerIndex], (power * pcp.RAPLUnits))
@@ -123,15 +123,15 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 clusterMean := pcp.AverageClusterPowerHistory(clusterPowerHist)
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Total power: %f, %d Sec Avg: %f", clusterPower, clusterPowerHist.Len(), clusterMean)
 if clusterMean >= hiThreshold {
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "Need to cap a node"
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Cap values of capped victims: %v", cappedVictims)
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Cap values of victims to uncap: %v", orderCappedVictims)
 // Create statics for all victims and choose one to cap
 victims := make([]pcp.Victim, 0, 8)
@@ -159,10 +159,10 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 }
 // Need to cap this victim.
 if err := rapl.Cap(victims[i].Host, "rapl", 50.0); err != nil {
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Error capping host %s", victims[i].Host)
 } else {
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Capped host[%s] at %f", victims[i].Host, 50.0)
 // Keeping track of this victim and it's cap value
 cappedVictims[victims[i].Host] = 50.0
@@ -186,11 +186,11 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 if capValue > constants.LowerCapLimit {
 newCapValue := getNextCapValue(capValue, 2)
 if err := rapl.Cap(alreadyCappedHosts[i], "rapl", newCapValue); err != nil {
-logMType <- elecLogDef.ERROR
+logMType <- elekLogDef.ERROR
 logMsg <- fmt.Sprintf("Error capping host[%s]", alreadyCappedHosts[i])
 } else {
 // Successful cap
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Capped host[%s] at %f", alreadyCappedHosts[i], newCapValue)
 // Checking whether this victim can be capped further
 if newCapValue <= constants.LowerCapLimit {
@@ -214,17 +214,17 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 }
 }
 if !canCapAlreadyCappedVictim {
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "No Victim left to cap."
 }
 }
 } else if clusterMean < loThreshold {
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "Need to uncap a node"
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Cap values of capped victims: %v", cappedVictims)
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Cap values of victims to uncap: %v", orderCappedVictims)
 if len(orderCapped) > 0 {
 // We pick the host that is capped the most to uncap.
@@ -235,11 +235,11 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 // This is a floating point operation and might suffer from precision loss.
 newUncapValue := orderCappedVictims[hostToUncap] * 2.0
 if err := rapl.Cap(hostToUncap, "rapl", newUncapValue); err != nil {
-logMType <- elecLogDef.ERROR
+logMType <- elekLogDef.ERROR
 logMsg <- fmt.Sprintf("Error uncapping host[%s]", hostToUncap)
 } else {
 // Successful uncap
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- fmt.Sprintf("Uncapped host[%s] to %f", hostToUncap, newUncapValue)
 // Can we uncap this host further. If not, then we remove its entry from orderCapped
 if newUncapValue >= 100.0 { // can compare using ==
@@ -261,7 +261,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 }
 }
 } else {
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "No host staged for Uncapped"
 }
 }
@@ -271,7 +271,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 }(logging, hiThreshold, loThreshold)
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "PCP logging started"
 if err := cmd.Start(); err != nil {
@@ -282,7 +282,7 @@ func StartPCPLogAndProgressiveExtremaCap(quit chan struct{}, logging *bool, hiTh
 select {
 case <-quit:
-logMType <- elecLogDef.GENERAL
+logMType <- elekLogDef.GENERAL
 logMsg <- "Stopping PCP logging in 5 seconds"
 time.Sleep(5 * time.Second)
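
getNextCapValue's body is elided by the diff; from its call sites above (capping starts at 50.0 and steps down through getNextCapValue(capValue, 2) until constants.LowerCapLimit, while uncapping doubles back up toward 100.0), a plausible reading is "halve, rounded to a fixed precision". The sketch below is a hypothetical reconstruction, not the project's actual code:

package main

import (
	"fmt"
	"math"
)

// getNextCapValueSketch is a hypothetical stand-in for getNextCapValue:
// halve the current cap and round to `precision` decimal places so that
// repeated halving/doubling does not accumulate floating-point noise.
func getNextCapValueSketch(curCapValue float64, precision int) float64 {
	shift := math.Pow(10, float64(precision))
	return math.Round((curCapValue/2.0)*shift) / shift
}

func main() {
	fmt.Println(getNextCapValueSketch(50.0, 2)) // 25
	fmt.Println(getNextCapValueSketch(12.5, 2)) // 6.25
	fmt.Println(getNextCapValueSketch(6.25, 2)) // 3.13 (rounded)
}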


@@ -13,7 +13,7 @@ import (
 mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 sched "github.com/mesos/mesos-go/api/v0/scheduler"
 "gitlab.com/spdf/elektron/def"
-elecLogDef "gitlab.com/spdf/elektron/logging/def"
+elekLogDef "gitlab.com/spdf/elektron/logging/def"
 "gitlab.com/spdf/elektron/pcp"
 "gitlab.com/spdf/elektron/schedulers"
 "gitlab.com/spdf/elektron/powerCap"
@@ -82,9 +82,9 @@ func main() {
 logPrefix := *pcplogPrefix + "_" + formattedStartTime
 // creating logger and attaching different logging platforms
-logger := elecLogDef.BuildLogger(startTime, logPrefix)
+logger := elekLogDef.BuildLogger(startTime, logPrefix)
 // logging channels
-logMType := make(chan elecLogDef.LogMessageType)
+logMType := make(chan elekLogDef.LogMessageType)
 logMsg := make(chan string)
 go logger.Listen(logMType, logMsg)
@@ -101,32 +101,32 @@ func main() {
 if *tasksFile == "" {
 //fmt.Println("No file containing tasks specifiction provided.")
-logger.WriteLog(elecLogDef.ERROR, "No file containing tasks specification provided")
+logger.WriteLog(elekLogDef.ERROR, "No file containing tasks specification provided")
 os.Exit(1)
 }
 if *hiThreshold < *loThreshold {
 //fmt.Println("High threshold is of a lower value than low threshold.")
-logger.WriteLog(elecLogDef.ERROR, "High threshold is of a lower value than low threshold")
+logger.WriteLog(elekLogDef.ERROR, "High threshold is of a lower value than low threshold")
 os.Exit(1)
 }
 tasks, err := def.TasksFromJSON(*tasksFile)
 if err != nil || len(tasks) == 0 {
 //fmt.Println("Invalid tasks specification file provided")
-logger.WriteLog(elecLogDef.ERROR, "Invalid tasks specification file provided")
+logger.WriteLog(elekLogDef.ERROR, "Invalid tasks specification file provided")
 os.Exit(1)
 }
 //log.Println("Scheduling the following tasks:")
-logger.WriteLog(elecLogDef.GENERAL, "Scheduling the following tasks:")
+logger.WriteLog(elekLogDef.GENERAL, "Scheduling the following tasks:")
 for _, task := range tasks {
 fmt.Println(task)
 }
 if *enableSchedPolicySwitch {
 if spcf := *schedPolConfigFile; spcf == "" {
-logger.WriteLog(elecLogDef.ERROR, "No file containing characteristics for scheduling policies")
+logger.WriteLog(elekLogDef.ERROR, "No file containing characteristics for scheduling policies")
 } else {
 // Initializing the characteristics of the scheduling policies.
 schedulers.InitSchedPolicyCharacteristics(spcf)
@@ -172,7 +172,7 @@ func main() {
 }
 if _, ok := powercapValues[*powerCapPolicy]; !ok {
-logger.WriteLog(elecLogDef.ERROR, "Incorrect power capping policy specified.")
+logger.WriteLog(elekLogDef.ERROR, "Incorrect power capping policy specified.")
 os.Exit(1)
 } else {
 // Indicating which power capping policy to use, if any.
@@ -188,7 +188,7 @@ func main() {
 // These values are not used to configure the scheduler.
 // hiThreshold and loThreshold are passed to the powercappers.
 if *hiThreshold < *loThreshold {
-logger.WriteLog(elecLogDef.ERROR, "High threshold is of a"+
+logger.WriteLog(elekLogDef.ERROR, "High threshold is of a"+
 " lower value than low threshold.")
 os.Exit(1)
 }


@@ -12,7 +12,7 @@ import (
 "github.com/mesos/mesos-go/api/v0/mesosutil"
 sched "github.com/mesos/mesos-go/api/v0/scheduler"
 "gitlab.com/spdf/elektron/def"
-elecLogDef "gitlab.com/spdf/elektron/logging/def"
+elekLogDef "gitlab.com/spdf/elektron/logging/def"
 "gitlab.com/spdf/elektron/utilities"
 "gitlab.com/spdf/elektron/utilities/schedUtils"
 )
@@ -51,7 +51,7 @@ type BaseScheduler struct {
 schedTrace *log.Logger
 // Send the type of the message to be logged
-logMsgType chan elecLogDef.LogMessageType
+logMsgType chan elekLogDef.LogMessageType
 // Send the message to be logged
 logMsg chan string
@@ -234,7 +234,7 @@ func (s *BaseScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos
 }
 }
-func (s *BaseScheduler) Log(lmt elecLogDef.LogMessageType, msg string) {
+func (s *BaseScheduler) Log(lmt elekLogDef.LogMessageType, msg string) {
 s.mutex.Lock()
 s.logMsgType <- lmt
 s.logMsg <- msg
@@ -242,8 +242,8 @@ func (s *BaseScheduler) Log(lmt elecLogDef.LogMessageType, msg string) {
 }
 func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
-lmt := elecLogDef.GENERAL
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.GENERAL
+msgColor := elekLogDef.LogMessageColors[lmt]
 var msg string
 if ts == nil {
 msg = msgColor.Sprintf("TASKS STARTING... host = [%s]", offer.GetHostname())
@@ -255,38 +255,38 @@ func (s *BaseScheduler) LogTaskStarting(ts *def.Task, offer *mesos.Offer) {
 }
 func (s *BaseScheduler) LogTaskWattsConsideration(ts def.Task, host string, wattsToConsider float64) {
-lmt := elecLogDef.GENERAL
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.GENERAL
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("Watts considered for task[%s] and host[%s] = %f Watts",
 ts.Name, host, wattsToConsider)
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogOffersReceived(offers []*mesos.Offer) {
-lmt := elecLogDef.GENERAL
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.GENERAL
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("Received %d resource offers", len(offers))
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogNoPendingTasksDeclineOffers(offer *mesos.Offer) {
-lmt := elecLogDef.WARNING
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.WARNING
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("DECLINING OFFER for host[%s]... "+
 "No tasks left to schedule", offer.GetHostname())
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogNumberOfRunningTasks() {
-lmt := elecLogDef.GENERAL
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.GENERAL
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("Number of tasks still Running = %d", s.tasksRunning)
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
-lmt := elecLogDef.GENERAL
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.GENERAL
+msgColor := elekLogDef.LogMessageColors[lmt]
 buffer := bytes.Buffer{}
 buffer.WriteString(fmt.Sprintln("Colocated with:"))
 s.TasksRunningMutex.Lock()
@@ -300,20 +300,20 @@ func (s *BaseScheduler) LogCoLocatedTasks(slaveID string) {
 func (s *BaseScheduler) LogSchedTrace(taskToSchedule *mesos.TaskInfo, offer *mesos.Offer) {
 msg := fmt.Sprint(offer.GetHostname() + ":" + taskToSchedule.GetTaskId().GetValue())
-s.Log(elecLogDef.SCHED_TRACE, msg)
+s.Log(elekLogDef.SCHED_TRACE, msg)
 }
 func (s *BaseScheduler) LogTerminateScheduler() {
-lmt := elecLogDef.GENERAL
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.GENERAL
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprint("Done scheduling all tasks!")
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 offerResources ...interface{}) {
-lmt := elecLogDef.WARNING
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.WARNING
+msgColor := elekLogDef.LogMessageColors[lmt]
 buffer := bytes.Buffer{}
 buffer.WriteString(fmt.Sprintln("DECLINING OFFER... Offer has insufficient resources to launch a task"))
 buffer.WriteString(fmt.Sprintf("Offer Resources <CPU: %f, RAM: %f, Watts: %f>", offerResources...))
@@ -322,91 +322,91 @@ func (s *BaseScheduler) LogInsufficientResourcesDeclineOffer(offer *mesos.Offer,
 }
 func (s *BaseScheduler) LogOfferRescinded(offerID *mesos.OfferID) {
-lmt := elecLogDef.ERROR
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.ERROR
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("OFFER RESCINDED: OfferID = %s", offerID)
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogSlaveLost(slaveID *mesos.SlaveID) {
-lmt := elecLogDef.ERROR
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.ERROR
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("SLAVE LOST: SlaveID = %s", slaveID)
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogExecutorLost(executorID *mesos.ExecutorID, slaveID *mesos.SlaveID) {
-lmt := elecLogDef.ERROR
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.ERROR
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("EXECUTOR LOST: ExecutorID = %s, SlaveID = %s", executorID, slaveID)
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogFrameworkMessage(executorID *mesos.ExecutorID,
 slaveID *mesos.SlaveID, message string) {
-lmt := elecLogDef.GENERAL
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.GENERAL
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("Received Framework message from executor [%s]: %s", executorID, message)
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogMesosError(err string) {
-lmt := elecLogDef.ERROR
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.ERROR
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("MESOS ERROR: %s", err)
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogElectronError(err error) {
-lmt := elecLogDef.ERROR
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.ERROR
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("ELECTRON ERROR: %v", err)
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogFrameworkRegistered(frameworkID *mesos.FrameworkID,
 masterInfo *mesos.MasterInfo) {
-lmt := elecLogDef.SUCCESS
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.SUCCESS
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("FRAMEWORK REGISTERED! frameworkID = %s, master = %s",
 frameworkID, masterInfo)
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogFrameworkReregistered(masterInfo *mesos.MasterInfo) {
-lmt := elecLogDef.GENERAL
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.GENERAL
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("Framework re-registered with master %s", masterInfo)
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogDisconnected() {
-lmt := elecLogDef.WARNING
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.WARNING
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprint("Framework disconnected with master")
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogTaskStatusUpdate(status *mesos.TaskStatus) {
-var lmt elecLogDef.LogMessageType
+var lmt elekLogDef.LogMessageType
 switch *status.State {
 case mesos.TaskState_TASK_ERROR, mesos.TaskState_TASK_FAILED,
 mesos.TaskState_TASK_KILLED, mesos.TaskState_TASK_LOST:
-lmt = elecLogDef.ERROR
+lmt = elekLogDef.ERROR
 case mesos.TaskState_TASK_FINISHED:
-lmt = elecLogDef.SUCCESS
+lmt = elekLogDef.SUCCESS
 default:
-lmt = elecLogDef.GENERAL
+lmt = elekLogDef.GENERAL
 }
-msgColor := elecLogDef.LogMessageColors[lmt]
-msg := elecLogDef.LogMessageColors[elecLogDef.GENERAL].Sprintf("Task Status received for task [%s] --> %s",
+msgColor := elekLogDef.LogMessageColors[lmt]
+msg := elekLogDef.LogMessageColors[elekLogDef.GENERAL].Sprintf("Task Status received for task [%s] --> %s",
 *status.TaskId.Value, msgColor.Sprint(NameFor(status.State)))
 s.Log(lmt, msg)
 }
 func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicyState) {
 logSPS := func() {
-s.Log(elecLogDef.SPS, name)
+s.Log(elekLogDef.SPS, name)
 }
 if s.hasReceivedResourceOffers && (s.curSchedPolicy != nextPolicy) {
 logSPS()
@@ -415,10 +415,10 @@ func (s *BaseScheduler) LogSchedPolicySwitch(name string, nextPolicy SchedPolicy
 }
 // Logging the size of the scheduling window and the scheduling policy
 // that is going to schedule the tasks in the scheduling window.
-s.Log(elecLogDef.SCHED_WINDOW, fmt.Sprintf("%d %s", s.schedWindowSize, name))
+s.Log(elekLogDef.SCHED_WINDOW, fmt.Sprintf("%d %s", s.schedWindowSize, name))
 }
 func (s *BaseScheduler) LogClsfnAndTaskDistOverhead(overhead time.Duration) {
 // Logging the overhead in microseconds.
-s.Log(elecLogDef.CLSFN_TASKDIST_OVERHEAD, fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0))
+s.Log(elekLogDef.CLSFN_TASKDIST_OVERHEAD, fmt.Sprintf("%f", float64(overhead.Nanoseconds())/1000.0))
 }
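
The lmt / msgColor pairs that dominate the file above follow one template: look up a color for the message type, render the message through it, and hand both to Log. The msgColor.Sprintf calls suggest github.com/fatih/color, though that import sits outside these hunks; a sketch under that assumption, with a hypothetical palette:

package main

import (
	"fmt"

	"github.com/fatih/color"
)

type LogMessageType int

const (
	GENERAL LogMessageType = iota
	WARNING
	ERROR
)

// A sketch of a LogMessageColors-style lookup table; the actual palette in
// elektron's logging/def package is not shown in this diff.
var logMessageColors = map[LogMessageType]*color.Color{
	GENERAL: color.New(color.FgWhite),
	WARNING: color.New(color.FgYellow),
	ERROR:   color.New(color.FgRed),
}

func main() {
	lmt := WARNING
	msgColor := logMessageColors[lmt]
	fmt.Println(msgColor.Sprintf("DECLINING OFFER for host[%s]...", "node-1"))
}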


@@ -6,7 +6,7 @@ import (
 mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 sched "github.com/mesos/mesos-go/api/v0/scheduler"
 "gitlab.com/spdf/elektron/def"
-elecLogDef "gitlab.com/spdf/elektron/logging/def"
+elekLogDef "gitlab.com/spdf/elektron/logging/def"
 )
 // Implements mesos scheduler.
@@ -21,7 +21,7 @@ type ElectronScheduler interface {
 // log message type, and the log message to the corresponding channels.
 // Pass the logMessageType and the logMessage to the loggers for logging.
-Log(logMType elecLogDef.LogMessageType, logMsg string)
+Log(logMType elekLogDef.LogMessageType, logMsg string)
 // To be called when about to launch a task.
 // Log message indicating that a task is about to start executing.
 // Also, log the host on which the task is going to be launched.


@@ -6,7 +6,7 @@ import (
 "github.com/pkg/errors"
 "gitlab.com/spdf/elektron/constants"
 "gitlab.com/spdf/elektron/def"
-elecLogDef "gitlab.com/spdf/elektron/logging/def"
+elekLogDef "gitlab.com/spdf/elektron/logging/def"
 "gitlab.com/spdf/elektron/utilities"
 "gitlab.com/spdf/elektron/utilities/mesosUtils"
 )
@@ -14,10 +14,10 @@ import (
 func coLocated(tasks map[string]bool, s BaseScheduler) {
 for task := range tasks {
-s.Log(elecLogDef.GENERAL, task)
+s.Log(elekLogDef.GENERAL, task)
 }
-s.Log(elecLogDef.GENERAL, "---------------------")
+s.Log(elekLogDef.GENERAL, "---------------------")
 }
 // Get the powerClass of the given hostname.
@@ -109,7 +109,7 @@ func WithPCPLog(pcpLog chan struct{}) schedulerOptions {
 }
 }
-func WithLoggingChannels(lmt chan elecLogDef.LogMessageType, msg chan string) schedulerOptions {
+func WithLoggingChannels(lmt chan elekLogDef.LogMessageType, msg chan string) schedulerOptions {
 return func(s ElectronScheduler) error {
 s.(*BaseScheduler).logMsgType = lmt
 s.(*BaseScheduler).logMsg = msg
@@ -132,8 +132,8 @@ func WithSchedPolSwitchEnabled(enableSchedPolicySwitch bool, switchingCriteria s
 func WithNameOfFirstSchedPolToFix(nameOfFirstSchedPol string) schedulerOptions {
 return func(s ElectronScheduler) error {
 if nameOfFirstSchedPol == "" {
-lmt := elecLogDef.WARNING
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.WARNING
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("First scheduling policy to deploy not mentioned. This is now going to be determined at runtime.")
 s.(*BaseScheduler).Log(lmt, msg)
 return nil
@@ -152,8 +152,8 @@ func WithFixedSchedulingWindow(toFixSchedWindow bool, fixedSchedWindowSize int)
 if fixedSchedWindowSize <= 0 {
 return errors.New("Invalid value of scheduling window size. Please provide a value > 0.")
 }
-lmt := elecLogDef.WARNING
-msgColor := elecLogDef.LogMessageColors[lmt]
+lmt := elekLogDef.WARNING
+msgColor := elekLogDef.LogMessageColors[lmt]
 msg := msgColor.Sprintf("Fixing the size of the scheduling window to %d...", fixedSchedWindowSize)
 s.(*BaseScheduler).Log(lmt, msg)
 s.(*BaseScheduler).toFixSchedWindow = toFixSchedWindow
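
The WithX constructors in the hunks above are Go's functional options pattern: each returns a closure that validates and applies one setting to the scheduler being built. A minimal, self-contained sketch of the pattern (hypothetical scheduler type, not elektron's BaseScheduler):

package main

import "fmt"

// Hypothetical stand-ins for BaseScheduler and schedulerOptions, showing the
// shape of the pattern only.
type scheduler struct {
	schedWindowSize int
}

type schedulerOption func(*scheduler) error

// WithWindowSize mirrors WithFixedSchedulingWindow: validate the argument,
// then record it on the scheduler under construction.
func WithWindowSize(n int) schedulerOption {
	return func(s *scheduler) error {
		if n <= 0 {
			return fmt.Errorf("invalid scheduling window size %d: must be > 0", n)
		}
		s.schedWindowSize = n
		return nil
	}
}

func newScheduler(opts ...schedulerOption) (*scheduler, error) {
	s := &scheduler{}
	for _, opt := range opts {
		if err := opt(s); err != nil {
			return nil, err
		}
	}
	return s, nil
}

func main() {
	s, err := newScheduler(WithWindowSize(10))
	if err != nil {
		panic(err)
	}
	fmt.Println("window size:", s.schedWindowSize)
}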


@@ -8,7 +8,7 @@ import (
 mesos "github.com/mesos/mesos-go/api/v0/mesosproto"
 sched "github.com/mesos/mesos-go/api/v0/scheduler"
 "gitlab.com/spdf/elektron/def"
-elecLogDef "gitlab.com/spdf/elektron/logging/def"
+elekLogDef "gitlab.com/spdf/elektron/logging/def"
 )
 type SchedPolicyContext interface {
@@ -71,7 +71,7 @@ func switchTaskDistBased(baseSchedRef *BaseScheduler) string {
 // Determine the distribution of tasks in the new scheduling window.
 taskDist, err := def.GetTaskDistributionInWindow(baseSchedRef.schedWindowSize, baseSchedRef.tasks)
 baseSchedRef.LogClsfnAndTaskDistOverhead(time.Now().Sub(startTime))
-baseSchedRef.Log(elecLogDef.GENERAL, fmt.Sprintf("Switching... TaskDistribution[%f]", taskDist))
+baseSchedRef.Log(elekLogDef.GENERAL, fmt.Sprintf("Switching... TaskDistribution[%f]", taskDist))
 if err != nil {
 // All the tasks in the window were only classified into 1 cluster.
 // Max-Min and Max-GreedyMins would work the same way as Bin-Packing for this situation.