use exponential back off func from realis lib (#39)

* use exponential back off func from realis lib

* remove exponential backoffs from monitors

* dont compare for retry errors
Sivaram Mothiki 2017-11-04 15:06:26 -07:00 committed by Renan DelValle
parent 23430cbf30
commit 72b746e431
6 changed files with 407 additions and 557 deletions

.gitignore (1 change)

@@ -6,6 +6,7 @@
 # Folders
 _obj
 _test
+.idea

 # Architecture specific extensions/prefixes
 *.[568vq]


@@ -22,10 +22,11 @@ import (
 	"time"
+	"strings"

 	"github.com/paypal/gorealis"
 	"github.com/paypal/gorealis/gen-go/apache/aurora"
 	"github.com/paypal/gorealis/response"
-	"strings"
 )

 var cmd, executor, url, clustersConfig, clusterName, updateId, username, password, zkUrl, hostList string


@@ -19,14 +19,15 @@ import (
 	"fmt"
 	"time"

-	"github.com/pkg/errors"
 	"github.com/paypal/gorealis/gen-go/apache/aurora"
 	"github.com/paypal/gorealis/response"
+	"github.com/pkg/errors"
 )

 const (
 	UpdateFailed = "update failed"
 	RolledBack   = "update rolled back"
+	Timeout      = "timeout"
 )

 type Monitor struct {
@@ -40,117 +41,86 @@ func (m *Monitor) JobUpdate(updateKey aurora.JobUpdateKey, interval int, timeout
 		Key:   &updateKey,
 		Limit: 1,
 	}

-	defaultBackoff := m.Client.RealisConfig().backoff
-	duration := defaultBackoff.Duration //defaultBackoff.Duration
-	var err error
+	ticker := time.NewTicker(time.Second * time.Duration(interval))
+	defer ticker.Stop()
+	timer := time.NewTimer(time.Second * time.Duration(timeout))
+	defer timer.Stop()
+
+	var cliErr error
 	var respDetail *aurora.Response

-	for i := 0; i*interval <= timeout; i++ {
-		for step := 0; step < defaultBackoff.Steps; step++ {
-			if step != 0 {
-				adjusted := duration
-				if defaultBackoff.Jitter > 0.0 {
-					adjusted = Jitter(duration, defaultBackoff.Jitter)
-				}
-				fmt.Println(" sleeping for: ", adjusted)
-				time.Sleep(adjusted)
-				duration = time.Duration(float64(duration) * defaultBackoff.Factor)
-			}
-			if respDetail, err = m.Client.JobUpdateDetails(updateQ); err == nil {
-				break
-			}
-			err1 := m.Client.ReestablishConn()
-			if err1 != nil {
-				fmt.Println("error in ReestablishConn: ", err1)
-			}
-		}
-		// if error remains then return (false, err).
-		if err != nil {
-			return false, err
-		}
-		updateDetail := response.JobUpdateDetails(respDetail)
-		if len(updateDetail) == 0 {
-			fmt.Println("No update found")
-			return false, errors.New("No update found for " + updateKey.String())
-		}
-		status := updateDetail[0].Update.Summary.State.Status
-		if _, ok := aurora.ACTIVE_JOB_UPDATE_STATES[status]; !ok {
-			// Rolled forward is the only state in which an update has been successfully updated
-			// if we encounter an inactive state and it is not at rolled forward, update failed
-			switch status {
-			case aurora.JobUpdateStatus_ROLLED_FORWARD:
-				fmt.Println("Update succeded")
-				return true, nil
-			case aurora.JobUpdateStatus_FAILED:
-				fmt.Println("Update failed")
-				return false, errors.New(UpdateFailed)
-			case aurora.JobUpdateStatus_ROLLED_BACK:
-				fmt.Println("rolled back")
-				return false, errors.New(RolledBack)
-			default:
-				return false, nil
-			}
-		}
-		fmt.Println("Polling, update still active...")
-		time.Sleep(time.Duration(interval) * time.Second)
-	}
-	fmt.Println("Timed out")
-	return false, nil
+	timedout := false
+	for {
+		select {
+		case <-ticker.C:
+			respDetail, cliErr = m.Client.JobUpdateDetails(updateQ)
+			if cliErr != nil {
+				return false, cliErr
+			}
+
+			updateDetail := response.JobUpdateDetails(respDetail)
+
+			if len(updateDetail) == 0 {
+				fmt.Println("No update found")
+				return false, errors.New("No update found for " + updateKey.String())
+			}
+			status := updateDetail[0].Update.Summary.State.Status
+
+			if _, ok := aurora.ACTIVE_JOB_UPDATE_STATES[status]; !ok {
+				// Rolled forward is the only state in which an update has been successfully updated
+				// if we encounter an inactive state and it is not at rolled forward, update failed
+				switch status {
+				case aurora.JobUpdateStatus_ROLLED_FORWARD:
+					fmt.Println("Update succeded")
+					return true, nil
+				case aurora.JobUpdateStatus_FAILED:
+					fmt.Println("Update failed")
+					return false, errors.New(UpdateFailed)
+				case aurora.JobUpdateStatus_ROLLED_BACK:
+					fmt.Println("rolled back")
+					return false, errors.New(RolledBack)
+				default:
+					return false, nil
+				}
+			}
+		case <-timer.C:
+			timedout = true
+		}
+		if timedout {
+			break
+		}
+	}
+	return false, errors.New(Timeout)
 }

 func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval int, timeout int) (bool, error) {
-	defaultBackoff := m.Client.RealisConfig().backoff
-	duration := defaultBackoff.Duration
-	var err error
+	var cliErr error
 	var live map[int32]bool

-	for i := 0; i*interval < timeout; i++ {
-		for step := 0; step < defaultBackoff.Steps; step++ {
-			if step != 0 {
-				adjusted := duration
-				if defaultBackoff.Jitter > 0.0 {
-					adjusted = Jitter(duration, defaultBackoff.Jitter)
-				}
-				fmt.Println(" sleeping for: ", adjusted)
-				time.Sleep(adjusted)
-				fmt.Println(" sleeping done")
-				duration = time.Duration(float64(duration) * defaultBackoff.Factor)
-			}
-			if live, err = m.Client.GetInstanceIds(key, aurora.LIVE_STATES); err == nil {
-				fmt.Println(" live: ", live)
-				break
-			}
-			if err != nil {
-				err1 := m.Client.ReestablishConn()
-				if err1 != nil {
-					fmt.Println("error in ReestablishConn: ", err1)
-				}
-			}
-		}
-		//live, err := m.Client.GetInstanceIds(key, aurora.LIVE_STATES)
-		if err != nil {
-			return false, errors.Wrap(err, "Unable to communicate with Aurora")
-		}
-		if len(live) == int(instances) {
-			return true, nil
-		}
-		fmt.Println("Polling, instances running: ", len(live))
-		time.Sleep(time.Duration(interval) * time.Second)
-	}
-	fmt.Println("Timed out")
-	return false, nil
+	ticker := time.NewTicker(time.Second * time.Duration(interval))
+	defer ticker.Stop()
+	timer := time.NewTimer(time.Second * time.Duration(timeout))
+	defer timer.Stop()
+
+	timedout := false
+	for {
+		select {
+		case <-ticker.C:
+			live, cliErr = m.Client.GetInstanceIds(key, aurora.LIVE_STATES)
+			if cliErr != nil {
+				return false, errors.Wrap(cliErr, "Unable to communicate with Aurora")
+			}
+			if len(live) == int(instances) {
+				return true, nil
+			}
+		case <-timer.C:
+			timedout = true
+		}
+		if timedout {
+			break
+		}
+	}
+	return false, errors.New(Timeout)
 }
// Monitor host status until all hosts match the status provided. Returns a map where the value is true if the host // Monitor host status until all hosts match the status provided. Returns a map where the value is true if the host
@@ -206,5 +176,5 @@ func (m *Monitor) HostMaintenance(hosts []string, modes []aurora.MaintenanceMode
 		hostResult[host] = false
 	}
-	return hostResult, errors.New("Timed out")
+	return hostResult, errors.New(Timeout)
 }
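For orientation, a hedged caller-side sketch of how the reworked monitor is driven after this change. It assumes an already-constructed realis.Realis client (as in the repo's examples) stored in the Monitor's Client field; the helper name waitForInstances and the job key values are illustrative only, not part of this commit.

package main

import (
	"fmt"
	"log"

	"github.com/paypal/gorealis"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

// waitForInstances is a hypothetical helper showing the new interval/timeout semantics:
// poll every `interval` seconds and give up after `timeout` seconds.
func waitForInstances(client realis.Realis) error {
	monitor := &realis.Monitor{Client: client}

	// Illustrative job key; substitute your own environment/role/name.
	key := &aurora.JobKey{Environment: "prod", Role: "vagrant", Name: "hello_world"}

	// Wait for 2 instances to reach a LIVE state, checking every 5s for up to 50s.
	// On timeout the monitor now returns errors.New(Timeout) rather than (false, nil).
	ok, err := monitor.Instances(key, 2, 5, 50)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("instances did not come up in time")
	}
	log.Println("all requested instances are live")
	return nil
}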

realis.go (770 changes)
File diff suppressed because it is too large.


@@ -19,14 +19,25 @@ limitations under the License.
 package realis

 import (
-	"time"
 	"errors"
+	"time"
+
+	"github.com/paypal/gorealis/gen-go/apache/aurora"
 )

+const (
+	ConnRefusedErr   = "connection refused"
+	NoLeaderFoundErr = "No leader found"
+)
+
+var RetryConnErr = errors.New("error occured during with aurora retrying")
+
 // ConditionFunc returns true if the condition is satisfied, or an error
 // if the loop should be aborted.
 type ConditionFunc func() (done bool, err error)
+
+type AuroraThriftCall func() (resp *aurora.Response, err error)
 // ExponentialBackoff repeats a condition check with exponential backoff.
 //
 // It checks the condition up to Steps times, increasing the wait by multiplying
@@ -54,3 +65,18 @@ func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
 	}
 	return errors.New("Timed out while retrying")
 }
+
+// CheckAndRetryConn takes a realis client and a Thrift API call, invokes the call, and returns the response and error.
+// If the error from the API call is retryable, it re-establishes the connection with Aurora by getting the latest
+// Aurora master from ZooKeeper and returns the response together with RetryConnErr.
+func CheckAndRetryConn(r Realis, auroraCall AuroraThriftCall) (*aurora.Response, error) {
+	resp, cliErr := auroraCall()
+	if cliErr != nil /*&& (strings.Contains(cliErr.Error(), ConnRefusedErr) || strings.Contains(cliErr.Error(), NoLeaderFoundErr))*/ {
+		r.ReestablishConn()
+		return resp, RetryConnErr
+	}
+	if resp != nil && resp.GetResponseCode() == aurora.ResponseCode_ERROR_TRANSIENT {
+		return resp, RetryConnErr
+	}
+	return resp, cliErr
+}
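To make the new helpers concrete, here is a hedged usage sketch combining them with the ExponentialBackoff function documented above. It assumes package realis and uses the Backoff fields referenced by the removed monitor code (Duration, Factor, Jitter, Steps); the helper name fetchUpdateDetailsWithBackoff and the backoff values are illustrative, not part of this commit.

package realis

import (
	"time"

	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

// fetchUpdateDetailsWithBackoff is an illustrative helper: it retries a single
// Thrift call with exponential backoff, letting CheckAndRetryConn refresh the
// connection whenever the call fails or returns a transient error.
func fetchUpdateDetailsWithBackoff(r Realis, query aurora.JobUpdateQuery) (*aurora.Response, error) {
	backoff := Backoff{
		Duration: 1 * time.Second, // initial wait between attempts
		Factor:   2.0,             // double the wait on every step
		Jitter:   0.1,             // add up to 10% random jitter
		Steps:    5,               // give up after 5 attempts
	}

	var resp *aurora.Response
	var callErr error

	retryErr := ExponentialBackoff(backoff, func() (bool, error) {
		resp, callErr = CheckAndRetryConn(r, func() (*aurora.Response, error) {
			return r.JobUpdateDetails(query)
		})
		if callErr == RetryConnErr {
			// Retryable failure: the connection was re-established, so report
			// "not done" and let ExponentialBackoff wait and try again.
			return false, nil
		}
		if callErr != nil {
			// Any other error aborts the backoff loop immediately.
			return false, callErr
		}
		return true, nil
	})
	if retryErr != nil {
		return resp, retryErr
	}
	return resp, callErr
}

Because CheckAndRetryConn returns the package-level RetryConnErr value itself, the sketch compares errors by identity rather than by message, which is in line with the commit note "dont compare for retry errors".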