Refactors the update job code to use an AuroraTask underneath it and forwards the necessary pointer-receiver methods down to that AuroraTask. Code and tests for performing a deep copy of AuroraTask are included.

This commit is contained in:
Renan DelValle 2018-12-11 17:45:23 -08:00
parent 98b4061513
commit 005980fc44
GPG key ID: C240AD6D6F443EC9
2 changed files with 203 additions and 81 deletions
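
What the deep copy buys: JobUpdateFormTask (as added in the diff below) clones the AuroraTask it is given, so mutating the resulting JobUpdate no longer reaches back into the caller's task. A minimal sketch of that behavior, assuming the realis package API shown in this commit; names and values are illustrative only, not part of the change:

package main

import (
	"github.com/paypal/gorealis"
)

func main() {
	// Illustrative values only (not part of the commit).
	original := realis.NewTask().
		Name("deep_copy_demo").
		CPU(1.0).
		RAM(128).
		Disk(64)

	// JobUpdateFormTask clones the task, so the update owns its own copy.
	update := realis.JobUpdateFormTask(original)
	update.CPU(4.0).RAM(1024) // forwarded to the update's internal clone

	// After the calls above, original should still describe 1 CPU / 128 MB RAM /
	// 64 MB disk, while the update's TaskConfig reflects 4 CPU / 1024 MB RAM.
	_ = original.TaskConfig()
	_ = update.TaskConfig()
}

The Clone tests added in task_test.go below assert that the copy is value-equal to the original task.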


@@ -20,119 +20,72 @@ import (
 // Structure to collect all information required to create job update
 type JobUpdate struct {
-	Task    *Task
+	task    *AuroraTask
 	request *aurora.JobUpdateRequest
 }
 
-// Create a default JobUpdate object.
-func NewDefaultJobUpdate(task *Task) *JobUpdate {
+// Create a default JobUpdate object with an empty task and no fields filled in.
+func NewJobUpdate(task *AuroraTask) *JobUpdate {
+	newTask := NewTask()
 	req := aurora.JobUpdateRequest{}
-	req.TaskConfig = task.task
+	req.TaskConfig = newTask.TaskConfig()
 	req.Settings = NewUpdateSettings()
 
-	// Rebuild resource map from TaskConfig
-	for _, ptr := range task.task.Resources {
-		if ptr.NumCpus != nil {
-			task.resources["cpu"].NumCpus = ptr.NumCpus
-			continue // Guard against Union violations that Go won't enforce
-		}
-		if ptr.RamMb != nil {
-			task.resources["ram"].RamMb = ptr.RamMb
-			continue
-		}
-		if ptr.DiskMb != nil {
-			task.resources["disk"].DiskMb = ptr.DiskMb
-			continue
-		}
-	}
-
-	// Mirrors defaults set by Pystachio
-	req.Settings.UpdateOnlyTheseInstances = []*aurora.Range{}
-	req.Settings.UpdateGroupSize = 1
-	req.Settings.WaitForBatchCompletion = false
-	req.Settings.MinWaitInInstanceRunningMs = 45000
-	req.Settings.MaxPerInstanceFailures = 0
-	req.Settings.MaxFailedInstances = 0
-	req.Settings.RollbackOnFailure = true
-
-	//TODO(rdelvalle): Deep copy task struct to avoid unexpected behavior
-	return &JobUpdate{Task: task, request: &req}
+	return &JobUpdate{task: newTask, request: &req}
 }
 
-func NewUpdateJob(config *aurora.TaskConfig, settings *aurora.JobUpdateSettings) *JobUpdate {
-	req := aurora.NewJobUpdateRequest()
-	req.TaskConfig = config
-	req.Settings = settings
+func JobUpdateFormTask(task *AuroraTask) *JobUpdate {
+	// Perform a deep copy to avoid unexpected behavior
+	newTask := task.Clone()
 
-	task := NewTask()
-	task.task = config
-
-	// Rebuild resource map from TaskConfig
-	for _, ptr := range config.Resources {
-		if ptr.NumCpus != nil {
-			task.resources["cpu"].NumCpus = ptr.NumCpus
-			continue // Guard against Union violations that Go won't enforce
-		}
-		if ptr.RamMb != nil {
-			task.resources["ram"].RamMb = ptr.RamMb
-			continue
-		}
-		if ptr.DiskMb != nil {
-			task.resources["disk"].DiskMb = ptr.DiskMb
-			continue
-		}
-	}
-
-	//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
-	return &JobUpdate{Task: task, request: req}
+	req := aurora.JobUpdateRequest{}
+	req.TaskConfig = newTask.TaskConfig()
+	req.Settings = NewUpdateSettings()
+
+	return &JobUpdate{task: newTask, request: &req}
 }
 
 // Set instance count the job will have after the update.
-func (u *JobUpdate) InstanceCount(inst int32) *JobUpdate {
-	u.request.InstanceCount = inst
-	return u
+func (j *JobUpdate) InstanceCount(inst int32) *JobUpdate {
+	j.request.InstanceCount = inst
+	return j
 }
 
 // Max number of instances being updated at any given moment.
-func (u *JobUpdate) BatchSize(size int32) *JobUpdate {
-	u.request.Settings.UpdateGroupSize = size
-	return u
+func (j *JobUpdate) BatchSize(size int32) *JobUpdate {
+	j.request.Settings.UpdateGroupSize = size
+	return j
 }
 
 // Minimum number of seconds a shard must remain in RUNNING state before considered a success.
-func (u *JobUpdate) WatchTime(ms int32) *JobUpdate {
-	u.request.Settings.MinWaitInInstanceRunningMs = ms
-	return u
+func (j *JobUpdate) WatchTime(ms int32) *JobUpdate {
+	j.request.Settings.MinWaitInInstanceRunningMs = ms
+	return j
 }
 
 // Wait for all instances in a group to be done before moving on.
-func (u *JobUpdate) WaitForBatchCompletion(batchWait bool) *JobUpdate {
-	u.request.Settings.WaitForBatchCompletion = batchWait
-	return u
+func (j *JobUpdate) WaitForBatchCompletion(batchWait bool) *JobUpdate {
+	j.request.Settings.WaitForBatchCompletion = batchWait
+	return j
 }
 
 // Max number of instance failures to tolerate before marking instance as FAILED.
-func (u *JobUpdate) MaxPerInstanceFailures(inst int32) *JobUpdate {
-	u.request.Settings.MaxPerInstanceFailures = inst
-	return u
+func (j *JobUpdate) MaxPerInstanceFailures(inst int32) *JobUpdate {
+	j.request.Settings.MaxPerInstanceFailures = inst
+	return j
 }
 
 // Max number of FAILED instances to tolerate before terminating the update.
-func (u *JobUpdate) MaxFailedInstances(inst int32) *JobUpdate {
-	u.request.Settings.MaxFailedInstances = inst
-	return u
+func (j *JobUpdate) MaxFailedInstances(inst int32) *JobUpdate {
+	j.request.Settings.MaxFailedInstances = inst
+	return j
 }
 
 // When False, prevents auto rollback of a failed update.
-func (u *JobUpdate) RollbackOnFail(rollback bool) *JobUpdate {
-	u.request.Settings.RollbackOnFailure = rollback
-	return u
+func (j *JobUpdate) RollbackOnFail(rollback bool) *JobUpdate {
+	j.request.Settings.RollbackOnFailure = rollback
+	return j
 }
 
 func NewUpdateSettings() *aurora.JobUpdateSettings {
@@ -149,3 +102,91 @@ func NewUpdateSettings() *aurora.JobUpdateSettings {
 	return &us
 }
+
+/*
+   AuroraTask specific API, see task.go for further documentation.
+   These functions are provided for the convenience of chaining API calls.
+*/
+
+func (j *JobUpdate) ExecutorName(name string) *JobUpdate {
+	j.task.ExecutorName(name)
+	return j
+}
+
+func (j *JobUpdate) ExecutorData(data string) *JobUpdate {
+	j.task.ExecutorData(data)
+	return j
+}
+
+func (j *JobUpdate) CPU(cpus float64) *JobUpdate {
+	j.task.CPU(cpus)
+	return j
+}
+
+func (j *JobUpdate) RAM(ram int64) *JobUpdate {
+	j.task.RAM(ram)
+	return j
+}
+
+func (j *JobUpdate) Disk(disk int64) *JobUpdate {
+	j.task.Disk(disk)
+	return j
+}
+
+func (j *JobUpdate) Tier(tier string) *JobUpdate {
+	j.task.Tier(tier)
+	return j
+}
+
+func (j *JobUpdate) MaxFailure(maxFail int32) *JobUpdate {
+	j.task.MaxFailure(maxFail)
+	return j
+}
+
+func (j *JobUpdate) IsService(isService bool) *JobUpdate {
+	j.task.IsService(isService)
+	return j
+}
+
+func (j *JobUpdate) TaskConfig() *aurora.TaskConfig {
+	return j.task.TaskConfig()
+}
+
+func (j *JobUpdate) AddURIs(extract bool, cache bool, values ...string) *JobUpdate {
+	j.task.AddURIs(extract, cache, values...)
+	return j
+}
+
+func (j *JobUpdate) AddLabel(key string, value string) *JobUpdate {
+	j.task.AddLabel(key, value)
+	return j
+}
+
+func (j *JobUpdate) AddNamedPorts(names ...string) *JobUpdate {
+	j.task.AddNamedPorts(names...)
+	return j
+}
+
+func (j *JobUpdate) AddPorts(num int) *JobUpdate {
+	j.task.AddPorts(num)
+	return j
+}
+
+func (j *JobUpdate) AddValueConstraint(name string, negated bool, values ...string) *JobUpdate {
+	j.task.AddValueConstraint(name, negated, values...)
+	return j
+}
+
+func (j *JobUpdate) AddLimitConstraint(name string, limit int32) *JobUpdate {
+	j.task.AddLimitConstraint(name, limit)
+	return j
+}
+
+func (j *JobUpdate) AddDedicatedConstraint(role, name string) *JobUpdate {
+	j.task.AddDedicatedConstraint(role, name)
+	return j
+}
+
+func (j *JobUpdate) Container(container Container) *JobUpdate {
+	j.task.Container(container)
+	return j
+}
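
The forwarded setters above let callers mix update-level and task-level configuration in a single chain. A rough usage sketch under the same assumptions (constructor name as it appears in this diff; role, environment, and job values are made up):

package main

import (
	"github.com/paypal/gorealis"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

func main() {
	// Update-level knobs (InstanceCount, BatchSize, WatchTime) and task-level
	// knobs (CPU, RAM, Disk, ...) can be mixed in one chain because the task
	// setters are forwarded to the JobUpdate's inner AuroraTask.
	update := realis.JobUpdateFormTask(realis.NewTask().
		Environment("prod").
		Role("vagrant").
		Name("hello_world").
		ExecutorName(aurora.AURORA_EXECUTOR_NAME).
		ExecutorData("{}").
		IsService(true)).
		CPU(2.0).
		RAM(512).
		Disk(1024).
		InstanceCount(3).
		BatchSize(1).
		WatchTime(30000).
		MaxFailedInstances(0)

	_ = update.TaskConfig()
}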

task_test.go (new file, 81 additions)

@@ -0,0 +1,81 @@
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package realis_test

import (
	"testing"

	"github.com/paypal/gorealis"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
	"github.com/stretchr/testify/assert"
)

func TestAuroraTask_Clone_DockerContainer(t *testing.T) {
	task0 := realis.NewTask().
		Environment("development").
		Role("ubuntu").
		Name("this_is_a_test").
		ExecutorName(aurora.AURORA_EXECUTOR_NAME).
		ExecutorData("{fake:payload}").
		CPU(10).
		RAM(643).
		Disk(1000).
		IsService(true).
		AddPorts(10).
		Tier("preferred").
		MaxFailure(23).
		AddURIs(true, true, "testURI").
		AddLabel("Test", "Value").
		AddNamedPorts("test").
		AddValueConstraint("test", false, "testing").
		AddLimitConstraint("test_limit", 1).
		AddDedicatedConstraint("ubuntu", "name").
		Container(realis.NewDockerContainer().AddParameter("hello", "world").Image("testImg"))

	task1 := task0.Clone()

	assert.EqualValues(t, task0, task1, "Clone does not return the correct deep copy of AuroraTask")
}

func TestAuroraTask_Clone_MesosContainer(t *testing.T) {
	task0 := realis.NewTask().
		Environment("development").
		Role("ubuntu").
		Name("this_is_a_test").
		ExecutorName(aurora.AURORA_EXECUTOR_NAME).
		ExecutorData("{fake:payload}").
		CPU(10).
		RAM(643).
		Disk(1000).
		IsService(true).
		AddPorts(10).
		Tier("preferred").
		MaxFailure(23).
		AddURIs(true, true, "testURI").
		AddLabel("Test", "Value").
		AddNamedPorts("test").
		AddValueConstraint("test", false, "testing").
		AddLimitConstraint("test_limit", 1).
		AddDedicatedConstraint("ubuntu", "name").
		Container(realis.NewMesosContainer().
			AppcImage("test", "testing").
			AddVolume("test", "test", aurora.Mode_RW))

	task1 := task0.Clone()

	assert.EqualValues(t, task0, task1, "Clone does not return the correct deep copy of AuroraTask")
}