Upgrading gorealis v1 to Thrift 0.12.0 code generation. End to end tests cleanup (#96)
* Ported all code from Thrift 0.9.3 to Thrift 0.12.0 while backporting some fixes from gorealis v2 (a sketch of the recurring set-conversion pattern this introduces follows this list).
* Removing the git.apache.org dependency from the vendor folder, as this dependency has migrated to GitHub.
* Adding the Thrift dependency back, but now it points to github.com.
* Removing unnecessary files from the Thrift vendor folder and adding them to .gitignore.
* Updating dep dependencies to include Thrift 0.12.0 from github.com.
* Adding a changelog.
* End to end tests: adding coverage for KillInstances.
* End to end tests: deleting instances after the partition policy recovers them.
* End to end tests: adding more coverage of the realis API.
* End to end tests: allowing arguments to be passed to runTestMac so that '-run <test name>' can be passed in.
* End to end tests: reducing the resources used by the CreateJob test.
* End to end tests: adding coverage for Pause and Resume update.
* End to end tests: removing checks for an Aurora_OK response, as that should always be handled by the error returned by the API. Renaming tests to be less verbose and repetitive.
* End to end tests: reducing the watch time for instances to be running when creating a service, to shorten the end to end test run.
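The recurring pattern behind many of the code changes is that the regenerated client calls take Thrift sets as slices (as the monitors.go diff below shows), while gorealis keeps its own status sets as maps. A minimal sketch of the bridging conversion, assuming a map-backed set of aurora.ScheduleStatus values; the helper name is illustrative and not part of this commit:

```go
package realis

import (
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

// statusSlice is a hypothetical helper (not part of this commit) that flattens
// a map-backed status set into the slice form the regenerated client expects.
func statusSlice(set map[aurora.ScheduleStatus]bool) []aurora.ScheduleStatus {
	statuses := make([]aurora.ScheduleStatus, 0, len(set))
	for status := range set {
		statuses = append(statuses, status)
	}
	return statuses
}
```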
parent 2b7eb3a852
commit 79fa7ba16d
2150 changed files with 32523 additions and 412691 deletions
monitors.go (12 lines changed)
@@ -103,7 +103,7 @@ func (m *Monitor) JobUpdateStatus(updateKey aurora.JobUpdateKey,
 
 // Monitor a Job until all instances enter one of the LIVE_STATES
 func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval, timeout int) (bool, error) {
-	return m.ScheduleStatus(key, instances, aurora.LIVE_STATES, interval, timeout)
+	return m.ScheduleStatus(key, instances, LiveStates, interval, timeout)
 }
 
 // Monitor a Job until all instances enter a desired status.
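Instances now passes a gorealis-level LiveStates set instead of the Thrift-generated aurora.LIVE_STATES constant. A hypothetical sketch of how such a package-level set might be populated from the regenerated constant; the actual definition lives elsewhere in this commit and may differ:

```go
package realis

import (
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

// Hypothetical sketch: LIVE_STATES is assumed to be a slice-valued constant in
// the regenerated code, so a map-backed set is rebuilt from it at startup.
var LiveStates = make(map[aurora.ScheduleStatus]bool)

func init() {
	for _, status := range aurora.LIVE_STATES {
		LiveStates[status] = true
	}
}
```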
@@ -116,12 +116,18 @@ func (m *Monitor) ScheduleStatus(key *aurora.JobKey, instanceCount int32, desire
 	timer := time.NewTimer(time.Second * time.Duration(timeout))
 	defer timer.Stop()
 
+	wantedStatuses := make([]aurora.ScheduleStatus, 0)
+
+	for status := range desiredStatuses {
+		wantedStatuses = append(wantedStatuses, status)
+	}
+
 	for {
 		select {
 		case <-ticker.C:
 
 			// Query Aurora for the state of the job key ever interval
-			instCount, cliErr := m.Client.GetInstanceIds(key, desiredStatuses)
+			instCount, cliErr := m.Client.GetInstanceIds(key, wantedStatuses)
 			if cliErr != nil {
 				return false, errors.Wrap(cliErr, "Unable to communicate with Aurora")
 			}
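This hunk sits inside the monitor's polling loop, where a time.Ticker drives the periodic queries and a time.Timer bounds the overall wait. Stripped of the Aurora specifics, the loop skeleton looks roughly like the sketch below; the helper name and the timeout error text are assumptions, not code from this commit:

```go
package realis

import (
	"errors"
	"time"
)

// pollUntil is a sketch of the monitor's poll-until-timeout skeleton: check is
// run every interval seconds until it succeeds, fails, or the timeout elapses.
func pollUntil(check func() (bool, error), interval, timeout int) (bool, error) {
	ticker := time.NewTicker(time.Second * time.Duration(interval))
	defer ticker.Stop()
	timer := time.NewTimer(time.Second * time.Duration(timeout))
	defer timer.Stop()

	for {
		select {
		case <-ticker.C:
			if ok, err := check(); err != nil || ok {
				return ok, err
			}
		case <-timer.C:
			return false, errors.New("timed out while monitoring")
		}
	}
}
```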
@@ -174,7 +180,7 @@ func (m *Monitor) HostMaintenance(hosts []string, modes []aurora.MaintenanceMode
 				return hostResult, errors.Wrap(err, "client error in monitor")
 			}
 
-			for status := range result.GetStatuses() {
+			for _, status := range result.GetStatuses() {
 
 				if _, ok := desiredMode[status.GetMode()]; ok {
 					hostResult[status.GetHost()] = true
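The final hunk switches HostMaintenance to range over result.GetStatuses() as a slice. A hypothetical caller-side sketch, assuming HostMaintenance still takes an interval and timeout in seconds and returns a per-host result map alongside any error; host names and values are made up:

```go
package example

import (
	"fmt"

	realis "github.com/paypal/gorealis"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

// waitForDrained is a hypothetical caller of the maintenance monitor. The
// 5s interval / 50s timeout arguments are assumptions, not part of this commit.
func waitForDrained(monitor *realis.Monitor, hosts []string) error {
	hostResult, err := monitor.HostMaintenance(
		hosts,
		[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED},
		5,  // poll every 5 seconds
		50, // give up after 50 seconds
	)
	if err != nil {
		return err
	}

	for host, reached := range hostResult {
		fmt.Printf("%s reached DRAINED: %v\n", host, reached)
	}
	return nil
}
```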