Upgrade to Aurora 0.22.0 (#5)

* Upgrading to Thrift 0.13.1. This version is a fork of 0.13.0 with a patch on top that fixes a panic triggered when a realis call was attempted after the connection had been closed.

* Upgrading the compose setup to Mesos 1.6.2 and Aurora 0.22.0.

* Adding support for using different update strategies.

* Adding a monitor that is friendly with auto pause.

* Adding tests for new update strategies.
This commit is contained in:
Renán I. Del Valle 2020-05-05 20:55:25 -07:00 committed by GitHub
parent 1d8afcd329
commit 69ced895e2
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
15 changed files with 2013 additions and 1068 deletions

View file

@ -26,6 +26,7 @@ import (
"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var r *realis.Client
@ -698,7 +699,6 @@ func TestRealisClient_PartitionPolicy(t *testing.T) {
Environment("prod").
Role(role).
Name("create_thermos_job_partition_policy_test").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ThermosExecutor(thermosExec).
CPU(.5).
RAM(64).
@ -723,3 +723,103 @@ func TestRealisClient_PartitionPolicy(t *testing.T) {
}
}
// TestRealisClient_UpdateStrategies runs one job update per supported update
// strategy (queue, batch, variable batch) and verifies each update can be
// started and monitored to completion against a live scheduler.
func TestRealisClient_UpdateStrategies(t *testing.T) {
	// A single service job whose task config backs every strategy under test.
	job := realis.NewJob().
		Environment("prod").
		Role("vagrant").
		ThermosExecutor(thermosExec).
		CPU(.01).
		RAM(4).
		Disk(10).
		InstanceCount(6).
		IsService(true)

	// Needed to populate the task config correctly before deriving updates from it.
	assert.NoError(t, job.BuildThermosPayload())

	testCases := []struct {
		name   string
		update *realis.JobUpdate
	}{
		{
			name: "Queue",
			update: realis.JobUpdateFromAuroraTask(job.AuroraTask()).
				QueueUpdateStrategy(2).
				InstanceCount(6).
				WatchTime(1000),
		},
		{
			name: "Batch",
			update: realis.JobUpdateFromAuroraTask(job.AuroraTask()).
				BatchUpdateStrategy(false, 2).
				InstanceCount(6).
				WatchTime(1000),
		},
		{
			name: "VarBatch",
			update: realis.JobUpdateFromAuroraTask(job.AuroraTask()).
				VariableBatchStrategy(false, 1, 2, 3).
				InstanceCount(6).
				WatchTime(1000),
		},
	}

	for _, tc := range testCases {
		t.Run("TestRealisClient_UpdateStrategies_"+tc.name, func(t *testing.T) {
			// Give each update a distinct job name so runs don't collide.
			tc.update.Name("update_strategies_" + tc.name)

			result, err := r.StartJobUpdate(tc.update, "")
			require.NoError(t, err)
			assert.NotNil(t, result)

			key := *result.GetKey()
			if ok, mErr := r.MonitorJobUpdate(key, 5, 240); !ok || mErr != nil {
				// Update may already be in a terminal state so don't check for error
				assert.NoError(t, r.AbortJobUpdate(key, "Monitor timed out."))
			}

			assert.NoError(t, r.KillJob(tc.update.JobKey()))
		})
	}
}
// TestRealisClient_BatchAwareAutoPause exercises a variable batch update with
// auto-pause enabled: after each batch the update pauses, the test confirms the
// monitor reports the expected step, then resumes the update until all batches
// have rolled out.
func TestRealisClient_BatchAwareAutoPause(t *testing.T) {
	// Single service job to be updated batch by batch.
	job := realis.NewJob().
		Environment("prod").
		Role("vagrant").
		Name("BatchAwareAutoPauseTest").
		ThermosExecutor(thermosExec).
		CPU(.01).
		RAM(4).
		Disk(10).
		InstanceCount(6).
		IsService(true)

	batchSizes := []int32{1, 2, 3}
	update := realis.JobUpdateFromAuroraTask(job.AuroraTask()).
		VariableBatchStrategy(true, batchSizes...).
		InstanceCount(6).
		WatchTime(1000)

	result, err := r.StartJobUpdate(update, "")
	require.NoError(t, err)
	require.NotNil(t, result)

	key := *result.GetKey()
	for step := range batchSizes {
		curStep, mErr := r.MonitorAutoPausedUpdate(key, time.Second*5, time.Second*240)
		if mErr != nil {
			// Update may already be in a terminal state so don't check for error
			assert.NoError(t, r.AbortJobUpdate(key, "Monitor timed out."))
		}

		// The monitor should report the batch we just finished as the current step.
		assert.Equal(t, step, curStep)
		require.NoError(t, r.ResumeJobUpdate(key, "auto resuming test"))
	}

	assert.NoError(t, r.KillJob(update.JobKey()))
}