Cleaned up tests. Removed checks for the Aurora OK response code, since non-OK responses are already surfaced as errors by the API. Renamed subtests to be less verbose and repetitive.

Author: Renan DelValle
Date:   2019-02-18 12:18:58 -08:00
Parent: ee440c1789
Commit: dab2f64c51
GPG key ID: C240AD6D6F443EC9 (no known key found for this signature in database)
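
To illustrate the pattern applied throughout the diff below, here is a minimal sketch (not part of the commit) of a subtest after the cleanup. It assumes the test file's package-level realis client r, the job fixture, and the testify assert import, and it relies on the client returning an error for any non-OK Aurora response instead of asserting on resp.ResponseCode.

// Hypothetical before/after sketch; "r" and "job" are assumed package-level test fixtures.
t.Run("KillJob", func(t *testing.T) {
    // Before the cleanup the test also checked the response code:
    //   resp, err := r.KillJob(job.JobKey())
    //   assert.NoError(t, err)
    //   assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
    // After the cleanup only the returned error is checked, since a non-OK
    // response already comes back as an error from the client:
    _, err := r.KillJob(job.JobKey())
    assert.NoError(t, err)
})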


@@ -209,18 +209,18 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
     assert.NoError(t, err)

     // Test asking the scheduler to perform a Snpshot
-    t.Run("TestRealisClient_Snapshot", func(t *testing.T) {
+    t.Run("Snapshot", func(t *testing.T) {
         err := r.Snapshot()
         assert.NoError(t, err)
     })

     // Test asking the scheduler to backup a Snapshot
-    t.Run("TestRealisClient_PerformBackup", func(t *testing.T) {
+    t.Run("PerformBackup", func(t *testing.T) {
         err := r.PerformBackup()
         assert.NoError(t, err)
     })

-    t.Run("TestRealisClient_GetTaskStatus_Thermos", func(t *testing.T) {
+    t.Run("GetTaskStatus", func(t *testing.T) {
         status, err := r.GetTaskStatus(&aurora.TaskQuery{
             JobKeys:  []*aurora.JobKey{job.JobKey()},
             Statuses: []aurora.ScheduleStatus{aurora.ScheduleStatus_RUNNING}})
@@ -231,7 +231,7 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
         // TODO: Assert that assigned task matches the configuration of the task scheduled
     })

-    t.Run("TestRealisClient_AddInstances_Thermos", func(t *testing.T) {
+    t.Run("AddInstances", func(t *testing.T) {
         _, err := r.AddInstances(aurora.InstanceKey{JobKey: job.JobKey(), InstanceId: 0}, 2)
         assert.NoError(t, err)
         success, err := monitor.Instances(job.JobKey(), 4, 1, 50)
@@ -239,7 +239,7 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
         assert.NoError(t, err)
     })

-    t.Run("TestRealisClient_KillInstances_Thermos", func(t *testing.T) {
+    t.Run("KillInstances", func(t *testing.T) {
         _, err := r.KillInstances(job.JobKey(), 2, 3)
         assert.NoError(t, err)
         success, err := monitor.Instances(job.JobKey(), 2, 1, 50)
@@ -247,7 +247,7 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
         assert.NoError(t, err)
     })

-    t.Run("TestRealisClient_RestartInstances_Thermos", func(t *testing.T) {
+    t.Run("RestartInstances", func(t *testing.T) {
         _, err := r.RestartInstances(job.JobKey(), 0)
         assert.NoError(t, err)
         success, err := monitor.Instances(job.JobKey(), 2, 1, 50)
@@ -255,7 +255,7 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
         assert.NoError(t, err)
     })

-    t.Run("TestRealisClient_RestartJobs_Thermos", func(t *testing.T) {
+    t.Run("RestartJobs", func(t *testing.T) {
         _, err := r.RestartJob(job.JobKey())
         assert.NoError(t, err)
         success, err := monitor.Instances(job.JobKey(), 2, 1, 50)
@@ -264,7 +264,7 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
     })

     // Tasks must exist for it to, be killed
-    t.Run("TestRealisClient_KillJob_Thermos", func(t *testing.T) {
+    t.Run("KillJob", func(t *testing.T) {
         _, err := r.KillJob(job.JobKey())
         assert.NoError(t, err)
         success, err := monitor.Instances(job.JobKey(), 0, 1, 50)
@@ -349,15 +349,14 @@ func TestRealisClient_CreateService_WithPulse_Thermos(t *testing.T) {
         AddLabel("currentTime", time.Now().String())

     settings := realis.NewUpdateSettings()
-    settings.BlockIfNoPulsesAfterMs = thrift.Int32Ptr(30)
+    settings.BlockIfNoPulsesAfterMs = thrift.Int32Ptr(500)
     settings.UpdateGroupSize = 1
+    settings.MinWaitInInstanceRunningMs = 5000
     settings.WaitForBatchCompletion = true
     job.InstanceCount(2)

-    resp, result, err := r.CreateService(job, settings)
-    fmt.Println(result.String())
+    _, result, err := r.CreateService(job, settings)
     assert.NoError(t, err)
-    assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)

     updateQ := aurora.JobUpdateQuery{
         Key: result.GetKey(),
@@ -366,7 +365,7 @@ func TestRealisClient_CreateService_WithPulse_Thermos(t *testing.T) {
     var updateDetails []*aurora.JobUpdateDetails

-    ticker := time.NewTicker(time.Second * 3)
+    ticker := time.NewTicker(time.Second * 1)
     timer := time.NewTimer(time.Minute * 6)
     defer ticker.Stop()
     defer timer.Stop()
@@ -376,35 +375,28 @@ pulseLoop:
         select {
         case <-ticker.C:
-            fmt.Println("sending PulseJobUpdate....")
-            resp, err = r.PulseJobUpdate(result.GetKey())
-            require.NotNil(t, resp, "received a nil response from Aurora")
-            assert.Nil(t, err)
+            _, err = r.PulseJobUpdate(result.GetKey())
+            assert.Nil(t, err, "unable to pulse job update")

             respDetail, err := r.JobUpdateDetails(updateQ)
             assert.Nil(t, err)
             updateDetails = response.JobUpdateDetails(respDetail)

-            if len(updateDetails) == 0 {
-                fmt.Println("No update found")
-                assert.NotEqual(t, len(updateDetails), 0)
-            }
+            assert.Len(t, updateDetails, 1, "No update found")

             status := updateDetails[0].Update.Summary.State.Status
             if _, ok := realis.ActiveJobUpdateStates[status]; !ok {
                 // Rolled forward is the only state in which an update has been successfully updated
                 // if we encounter an inactive state and it is not at rolled forward, update failed
                 if status == aurora.JobUpdateStatus_ROLLED_FORWARD {
-                    fmt.Println("Update succeded")
+                    fmt.Println("update succeed")
                     break pulseLoop
                 } else {
-                    fmt.Println("Update failed")
+                    fmt.Println("update failed")
                     break pulseLoop
                 }
             }
-            fmt.Println("Polling, update still active...")

         case <-timer.C:
             _, err := r.AbortJobUpdate(*updateDetails[0].GetUpdate().GetSummary().GetKey(), "")
             assert.NoError(t, err)
@@ -413,9 +405,8 @@ pulseLoop:
         }
     }

-    resp, err = r.KillJob(job.JobKey())
+    _, err = r.KillJob(job.JobKey())
     assert.NoError(t, err)
-    assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
 }

 // Test configuring an executor that doesn't exist for CreateJob API
@@ -443,12 +434,12 @@ func TestRealisClient_CreateService(t *testing.T) {
     assert.NotNil(t, result)

     // Test asking the scheduler to backup a Snapshot
-    t.Run("TestRealisClient_PauseJobUpdate", func(t *testing.T) {
+    t.Run("PauseJobUpdate", func(t *testing.T) {
         _, err = r.PauseJobUpdate(result.GetKey(), "")
         assert.NoError(t, err)
     })

-    t.Run("TestRealisClient_ResumeJobUpdate", func(t *testing.T) {
+    t.Run("ResumeJobUpdate", func(t *testing.T) {
         _, err = r.ResumeJobUpdate(result.GetKey(), "")
         assert.NoError(t, err)
     })
@@ -459,9 +450,7 @@ func TestRealisClient_CreateService(t *testing.T) {
     if ok, mErr = monitor.JobUpdate(*result.GetKey(), 5, 240); !ok || mErr != nil {
         // Update may already be in a terminal state so don't check for error
         _, err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out.")

         _, err = r.KillJob(job.JobKey())
         assert.NoError(t, err)
     }
@@ -470,7 +459,6 @@ func TestRealisClient_CreateService(t *testing.T) {
     // Kill task test task after confirming it came up fine
     _, err = r.KillJob(job.JobKey())
     assert.NoError(t, err)
 }
@@ -491,8 +479,8 @@ func TestRealisClient_CreateService_ExecutorDoesNotExist(t *testing.T) {
     settings := realis.NewUpdateSettings()
     job.InstanceCount(3)

     resp, result, err := r.CreateService(job, settings)
     assert.Error(t, err)
     assert.Nil(t, result)
     assert.Equal(t, aurora.ResponseCode_INVALID_REQUEST, resp.GetResponseCode())
@@ -518,41 +506,24 @@ func TestRealisClient_ScheduleCronJob_Thermos(t *testing.T) {
         CronSchedule("* * * * *").
         IsService(false)

-    resp, err := r.ScheduleCronJob(job)
-    if err != nil {
-        fmt.Println(err)
-        os.Exit(1)
-    }
-    assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
+    _, err = r.ScheduleCronJob(job)
+    assert.NoError(t, err)

-    t.Run("TestRealisClient_StartCronJob_Thermos", func(t *testing.T) {
-        start := time.Now()
-        resp, err := r.StartCronJob(job.JobKey())
-        end := time.Now()
+    t.Run("Start", func(t *testing.T) {
+        _, err := r.StartCronJob(job.JobKey())
         assert.NoError(t, err)
-        assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
-        fmt.Printf("Schedule cron call took %d ns\n", (end.UnixNano() - start.UnixNano()))
     })

-    t.Run("TestRealisClient_DeschedulerCronJob_Thermos", func(t *testing.T) {
-        start := time.Now()
-        resp, err := r.DescheduleCronJob(job.JobKey())
-        end := time.Now()
+    t.Run("Deschedule", func(t *testing.T) {
+        _, err := r.DescheduleCronJob(job.JobKey())
         assert.NoError(t, err)
-        assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
-        fmt.Printf("Deschedule cron call took %d ns\n", (end.UnixNano() - start.UnixNano()))
     })
 }

 func TestRealisClient_StartMaintenance(t *testing.T) {
     hosts := []string{"localhost"}

     _, _, err := r.StartMaintenance(hosts...)
-    if err != nil {
-        fmt.Printf("error: %+v\n", err.Error())
-        os.Exit(1)
-    }
+    assert.NoError(t, err, "unable to start maintenance")

     // Monitor change to DRAINING and DRAINED mode
     hostResults, err := monitor.HostMaintenance(
@@ -577,22 +548,24 @@ func TestRealisClient_StartMaintenance(t *testing.T) {
 func TestRealisClient_DrainHosts(t *testing.T) {
     hosts := []string{"localhost"}

-    _, _, err := r.DrainHosts(hosts...)
-    if err != nil {
-        fmt.Printf("error: %+v\n", err.Error())
-        os.Exit(1)
-    }
-
-    // Monitor change to DRAINING and DRAINED mode
-    hostResults, err := monitor.HostMaintenance(
-        hosts,
-        []aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
-        1,
-        50)
-    assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
-    assert.NoError(t, err)
-
-    t.Run("TestRealisClient_MonitorNontransitioned", func(t *testing.T) {
+    t.Run("DrainHosts", func(t *testing.T) {
+        _, _, err := r.DrainHosts(hosts...)
+        assert.NoError(t, err, "unable to drain host")
+    })
+
+    t.Run("MonitorTransitionToDrained", func(t *testing.T) {
+        // Monitor change to DRAINING and DRAINED mode
+        hostResults, err := monitor.HostMaintenance(
+            hosts,
+            []aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
+            1,
+            50)
+        assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
+        assert.NoError(t, err)
+    })
+
+    t.Run("MonitorNonExistentHost", func(t *testing.T) {
         // Monitor change to DRAINING and DRAINED mode
         hostResults, err := monitor.HostMaintenance(
             append(hosts, "IMAGINARY_HOST"),
@@ -605,7 +578,7 @@ func TestRealisClient_DrainHosts(t *testing.T) {
         assert.Equal(t, map[string]bool{"localhost": true, "IMAGINARY_HOST": false}, hostResults)
     })

-    t.Run("TestRealisClient_EndMaintenance", func(t *testing.T) {
+    t.Run("EndMaintenance", func(t *testing.T) {
         _, _, err := r.EndMaintenance(hosts...)
         assert.NoError(t, err)
@@ -625,10 +598,7 @@ func TestRealisClient_SLADrainHosts(t *testing.T) {
     policy := aurora.SlaPolicy{PercentageSlaPolicy: &aurora.PercentageSlaPolicy{Percentage: 50.0}}

     _, err := r.SLADrainHosts(&policy, 30, hosts...)
-    if err != nil {
-        fmt.Printf("error: %+v\n", err.Error())
-        os.Exit(1)
-    }
+    assert.NoError(t, err, "unable to drain host with SLA policy")

     // Monitor change to DRAINING and DRAINED mode
     hostResults, err := monitor.HostMaintenance(
@@ -666,15 +636,13 @@ func TestRealisClient_SessionThreadSafety(t *testing.T) {
         Disk(10).
         InstanceCount(1000) // Impossible amount to go live in any sane machine

-    resp, err := r.CreateJob(job)
+    _, err := r.CreateJob(job)
     assert.NoError(t, err)
-    assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)

     wg := sync.WaitGroup{}
-    for i := 0; i < 20; i++ {
-        wg.Add(1)
+    threadCount := 20
+    wg.Add(threadCount)
+    for i := 0; i < threadCount; i++ {
         // Launch multiple monitors that will poll every second
         go func() {
@@ -685,11 +653,8 @@ func TestRealisClient_SessionThreadSafety(t *testing.T) {
             assert.False(t, success)
             assert.Error(t, err)

-            resp, err := r.KillJob(job.JobKey())
+            _, err = r.KillJob(job.JobKey())
             assert.NoError(t, err)
-            assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
         }()
     }
@@ -697,14 +662,20 @@ func TestRealisClient_SessionThreadSafety(t *testing.T) {
 }

 // Test setting and getting the quota
-func TestRealisClient_SetQuota(t *testing.T) {
-    var cpu = 3.5
-    var ram int64 = 20480
-    var disk int64 = 10240
-    resp, err := r.SetQuota("vagrant", &cpu, &ram, &disk)
-    assert.NoError(t, err)
-    assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
-    t.Run("TestRealisClient_GetQuota", func(t *testing.T) {
+func TestRealisClient_Quota(t *testing.T) {
+    var resp *aurora.Response
+    var err error
+
+    cpu := 3.5
+    ram := int64(20480)
+    disk := int64(10240)
+
+    t.Run("Set", func(t *testing.T) {
+        resp, err = r.SetQuota("vagrant", &cpu, &ram, &disk)
+        assert.NoError(t, err)
+    })
+
+    t.Run("Get", func(t *testing.T) {
         // Test GetQuota based on previously set values
         var result *aurora.GetQuotaResult_
         resp, err = r.GetQuota("vagrant")
@@ -712,7 +683,7 @@ func TestRealisClient_SetQuota(t *testing.T) {
             result = resp.GetResult_().GetQuotaResult_
         }
         assert.NoError(t, err)
-        assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
+
         for _, res := range result.Quota.GetResources() {
             switch true {
             case res.DiskMb != nil:
@@ -726,7 +697,6 @@ func TestRealisClient_SetQuota(t *testing.T) {
                 break
             }
         }
-        fmt.Print("GetQuota Result", result.String())
     })
 }
@@ -741,15 +711,13 @@ func TestRealisClient_ForceExplicitTaskReconciliation(t *testing.T) {
     assert.NoError(t, err)

     // Custom batch value
-    var batchSize int32 = 32
-    err = r.ForceExplicitTaskReconciliation(&batchSize)
+    err = r.ForceExplicitTaskReconciliation(thrift.Int32Ptr(32))
     assert.NoError(t, err)
 }

 func TestRealisClient_PartitionPolicy(t *testing.T) {
     role := "vagrant"
-    var partitionDelay int64 = 30
     job := realis.NewJob().
         Environment("prod").
         Role(role).
@@ -761,14 +729,13 @@ func TestRealisClient_PartitionPolicy(t *testing.T) {
         Disk(100).
         IsService(true).
         InstanceCount(2).
-        PartitionPolicy(&aurora.PartitionPolicy{Reschedule: true, DelaySecs: &partitionDelay})
+        PartitionPolicy(&aurora.PartitionPolicy{Reschedule: true, DelaySecs: thrift.Int64Ptr(30)})

     settings := realis.NewUpdateSettings()
     settings.UpdateGroupSize = 2
-    resp, result, err := r.CreateService(job, settings)
-    assert.NoError(t, err)
-    assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
+
+    _, result, err := r.CreateService(job, settings)
+    assert.NoError(t, err)

     var ok bool
     var mErr error
@@ -779,5 +746,6 @@ func TestRealisClient_PartitionPolicy(t *testing.T) {
         assert.NoError(t, err)
     }

+    // Clean up after finishing test
     _, err = r.KillJob(job.JobKey())
 }