Change monitors to use time.Duration, making time units explicit in code and giving callers tighter control over polling intervals and timeouts.
This commit is contained in:
parent b6effe66b7
commit 3e4590dcc0

2 changed files with 33 additions and 24 deletions
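At a call site, the change reads like this (a before/after sketch assembled from the diff below; job and monitor come from the example program being patched):

	// Before: bare ints that the monitor interpreted as seconds internally.
	ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50)

	// After: explicit time.Duration values, so the unit is visible at the
	// call site and callers can pass seconds, minutes, or anything else.
	ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5*time.Second, 50*time.Second)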
@@ -171,7 +171,7 @@ func main() {
 			log.Fatalln(err)
 		}
 
-		if ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50); !ok || mErr != nil {
+		if ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5*time.Second, 50*time.Second); !ok || mErr != nil {
 			err := r.KillJob(job.JobKey())
 			if err != nil {
 				log.Fatalln(err)
@@ -190,7 +190,7 @@ func main() {
 		}
 		fmt.Println(result.String())
 
-		if ok, mErr := monitor.JobUpdate(*result.GetKey(), 5, 180); !ok || mErr != nil {
+		if ok, mErr := monitor.JobUpdate(*result.GetKey(), 5*time.Second, 180*time.Second); !ok || mErr != nil {
 			err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out")
 			err = r.KillJob(job.JobKey())
 			if err != nil {
@@ -208,7 +208,7 @@ func main() {
 			log.Fatal(err)
 		}
 
-		if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
+		if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10*time.Second, 300*time.Second); !ok || err != nil {
 			err := r.KillJob(job.JobKey())
 			if err != nil {
 				log.Fatal(err)
@@ -224,7 +224,7 @@ func main() {
 			log.Fatal(err)
 		}
 
-		if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
+		if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10*time.Second, 300*time.Second); !ok || err != nil {
 			err := r.KillJob(job.JobKey())
 			if err != nil {
 				log.Fatal(err)
@@ -265,7 +265,7 @@ func main() {
 			log.Fatal(err)
 		}
 
-		if ok, err := monitor.Instances(job.JobKey(), 0, 5, 50); !ok || err != nil {
+		if ok, err := monitor.Instances(job.JobKey(), 0, 5*time.Second, 50*time.Second); !ok || err != nil {
 			log.Fatal("Unable to kill all instances of job")
 		}
 
@@ -321,7 +321,7 @@ func main() {
 			log.Fatal(err)
 		}
 
-		if ok, err := monitor.Instances(job.JobKey(), currInstances+numOfInstances, 5, 50); !ok || err != nil {
+		if ok, err := monitor.Instances(job.JobKey(), currInstances+numOfInstances, 5*time.Second, 50*time.Second); !ok || err != nil {
 			fmt.Println("Flexing up failed")
 		}
 
@@ -342,7 +342,7 @@ func main() {
 			log.Fatal(err)
 		}
 
-		if ok, err := monitor.Instances(job.JobKey(), currInstances-numOfInstances, 5, 100); !ok || err != nil {
+		if ok, err := monitor.Instances(job.JobKey(), currInstances-numOfInstances, 5*time.Second, 100*time.Second); !ok || err != nil {
 			fmt.Println("flexDown failed")
 		}
 
@@ -372,7 +372,7 @@ func main() {
 		}
 
 		jobUpdateKey := result.GetKey()
-		monitor.JobUpdate(*jobUpdateKey, 5, 500)
+		monitor.JobUpdate(*jobUpdateKey, 5*time.Second, 6*time.Minute)
 
 	case "pauseJobUpdate":
 		err := r.PauseJobUpdate(&aurora.JobUpdateKey{
@@ -525,8 +525,8 @@ func main() {
 		hostResult, err := monitor.HostMaintenance(
 			hosts,
 			[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
-			5,
-			10)
+			5*time.Second,
+			10*time.Second)
 		if err != nil {
 			for host, ok := range hostResult {
 				if !ok {
@@ -556,8 +556,8 @@ func main() {
 		hostResult, err := monitor.HostMaintenance(
 			hosts,
 			[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
-			5,
-			10)
+			5*time.Second,
+			10*time.Second)
 		if err != nil {
 			for host, ok := range hostResult {
 				if !ok {
@@ -584,8 +584,8 @@ func main() {
 		hostResult, err := monitor.HostMaintenance(
 			hosts,
 			[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
-			5,
-			10)
+			5*time.Second,
+			10*time.Second)
 		if err != nil {
 			for host, ok := range hostResult {
 				if !ok {
monitors.go (29 lines changed)
@@ -33,15 +33,18 @@ type Monitor struct {
 }
 
 // Polls the scheduler every certain amount of time to see if the update has succeeded
-func (m *Monitor) JobUpdate(updateKey aurora.JobUpdateKey, interval int, timeout int) (bool, error) {
+func (m *Monitor) JobUpdate(updateKey aurora.JobUpdateKey, interval, timeout time.Duration) (bool, error) {
+	if interval < 1*time.Second || timeout < 1*time.Second {
+		return false, errors.New("Interval or timeout cannot be below one second.")
+	}
+
 	updateQ := aurora.JobUpdateQuery{
 		Key:   &updateKey,
 		Limit: 1,
 	}
-	ticker := time.NewTicker(time.Second * time.Duration(interval))
+	ticker := time.NewTicker(interval)
 	defer ticker.Stop()
-	timer := time.NewTimer(time.Second * time.Duration(timeout))
+	timer := time.NewTimer(timeout)
 	defer timer.Stop()
 
 	for {
@@ -83,18 +86,21 @@ func (m *Monitor) JobUpdate(updateKey aurora.JobUpdateKey, interval int, timeout
 }
 
 // Monitor a AuroraJob until all instances enter one of the LIVE_STATES
-func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval, timeout int) (bool, error) {
+func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval, timeout time.Duration) (bool, error) {
 	return m.ScheduleStatus(key, instances, aurora.LIVE_STATES, interval, timeout)
 }
 
 // Monitor a AuroraJob until all instances enter a desired status.
 // Defaults sets of desired statuses provided by the thrift API include:
 // ACTIVE_STATES, SLAVE_ASSIGNED_STATES, LIVE_STATES, and TERMINAL_STATES
-func (m *Monitor) ScheduleStatus(key *aurora.JobKey, instanceCount int32, desiredStatuses map[aurora.ScheduleStatus]bool, interval, timeout int) (bool, error) {
+func (m *Monitor) ScheduleStatus(key *aurora.JobKey, instanceCount int32, desiredStatuses map[aurora.ScheduleStatus]bool, interval, timeout time.Duration) (bool, error) {
+	if interval < 1*time.Second || timeout < 1*time.Second {
+		return false, errors.New("Interval or timeout cannot be below one second.")
+	}
+
-	ticker := time.NewTicker(time.Second * time.Duration(interval))
+	ticker := time.NewTicker(interval)
 	defer ticker.Stop()
-	timer := time.NewTimer(time.Second * time.Duration(timeout))
+	timer := time.NewTimer(timeout)
 	defer timer.Stop()
 
 	for {
@@ -119,7 +125,10 @@ func (m *Monitor) ScheduleStatus(key *aurora.JobKey, instanceCount int32, desire
 
 // Monitor host status until all hosts match the status provided. Returns a map where the value is true if the host
 // is in one of the desired mode(s) or false if it is not as of the time when the monitor exited.
-func (m *Monitor) HostMaintenance(hosts []string, modes []aurora.MaintenanceMode, interval, timeout int) (map[string]bool, error) {
+func (m *Monitor) HostMaintenance(hosts []string, modes []aurora.MaintenanceMode, interval, timeout time.Duration) (map[string]bool, error) {
+	if interval < 1*time.Second || timeout < 1*time.Second {
+		return nil, errors.New("Interval or timeout cannot be below one second.")
+	}
+
 	// Transform modes to monitor for into a set for easy lookup
 	desiredMode := make(map[aurora.MaintenanceMode]struct{})
@@ -137,9 +146,9 @@ func (m *Monitor) HostMaintenance(hosts []string, modes []aurora.MaintenanceMode
 
 	hostResult := make(map[string]bool)
 
-	ticker := time.NewTicker(time.Second * time.Duration(interval))
+	ticker := time.NewTicker(interval)
 	defer ticker.Stop()
-	timer := time.NewTimer(time.Second * time.Duration(timeout))
+	timer := time.NewTimer(timeout)
 	defer timer.Stop()
 
 	for {
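For reference, a rough usage sketch of the new Duration-based signatures (values here are illustrative; monitor, result, and r are set up as in the example program above):

	// Poll the update every 5 seconds and give up after 3 minutes.
	if ok, mErr := monitor.JobUpdate(*result.GetKey(), 5*time.Second, 3*time.Minute); !ok || mErr != nil {
		// The new guard rejects sub-second values up front: e.g. passing
		// 500*time.Millisecond returns the "cannot be below one second" error.
		_ = r.AbortJobUpdate(*result.GetKey(), "Monitor timed out")
	}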