Added Job interface
parent ae3657baf3, commit dba42611d7
2 changed files with 131 additions and 76 deletions
job.go: 176 lines changed
@@ -19,17 +19,39 @@ import (
     "strconv"
 )
 
+type Job interface {
+    // Set Job Key environment.
+    Environment(env string) Job
+    Role(role string) Job
+    Name(name string) Job
+    CPU(cpus float64) Job
+    Disk(disk int64) Job
+    RAM(ram int64) Job
+    ExecutorName(name string) Job
+    ExecutorData(data string) Job
+    AddPorts(num int) Job
+    AddLabel(key string, value string) Job
+    AddNamedPorts(names ...string) Job
+    AddLimitConstraint(name string, limit int32) Job
+    AddValueConstraint(name string, negated bool, values ...string) Job
+    AddURIs(extract bool, cache bool, values ...string) Job
+    JobKey() *aurora.JobKey
+    JobConfig() *aurora.JobConfiguration
+    TaskConfig() *aurora.TaskConfig
+    IsService(isService bool) Job
+    InstanceCount(instCount int32) Job
+    MaxFailure(maxFail int32) Job
+}
+
 // Structure to collect all information pertaining to an Aurora job.
-type Job struct {
+type AuroraJob struct {
     jobConfig *aurora.JobConfiguration
-    numCpus *aurora.Resource
-    ramMb *aurora.Resource
-    diskMb *aurora.Resource
+    resources map[string]*aurora.Resource
     portCount int
 }
 
 // Create a Job object with everything initialized.
-func NewJob() *Job {
+func NewJob() Job {
     jobConfig := aurora.NewJobConfiguration()
     taskConfig := aurora.NewTaskConfig()
     jobKey := aurora.NewJobKey()
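
The hunk above also sets up a change in receiver style: the setters in the next hunk move from pointer receivers on *Job to value receivers on AuroraJob, yet they still work as a builder because the fields they mutate are reference-like (the *aurora.JobConfiguration pointer and the resources map), while the only plain value field, portCount, travels with the returned copy. A minimal, self-contained sketch of that semantics, using hypothetical names that are not part of the commit:

package main

import "fmt"

type config struct{ role string }

// builder mirrors AuroraJob's shape: one pointer field, one plain value field.
type builder struct {
    cfg   *config // shared by every copy
    ports int     // copied; only the returned copy carries updates
}

// Role uses a value receiver, as the AuroraJob methods do.
func (b builder) Role(r string) builder {
    b.cfg.role = r // visible through every copy, since cfg is a pointer
    return b
}

// AddPorts bumps the counter on the copy and returns that copy.
func (b builder) AddPorts(n int) builder {
    b.ports += n
    return b
}

func main() {
    b := builder{cfg: &config{}}
    b2 := b.Role("vagrant").AddPorts(2)
    fmt.Println(b.cfg.role, b.ports, b2.ports) // prints "vagrant 0 2"
}
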
@@ -52,135 +74,149 @@ func NewJob() *Job {
     ramMb := aurora.NewResource()
     diskMb := aurora.NewResource()
 
+    resources := make(map[string]*aurora.Resource)
+    resources["cpu"] = numCpus
+    resources["ram"] = ramMb
+    resources["disk"] = diskMb
+
     taskConfig.Resources = make(map[*aurora.Resource]bool)
     taskConfig.Resources[numCpus] = true
     taskConfig.Resources[ramMb] = true
     taskConfig.Resources[diskMb] = true
 
-    return &Job{jobConfig, numCpus, ramMb, diskMb, 0}
+    return AuroraJob{jobConfig, resources, 0}
 }
 
 // Set Job Key environment.
-func (a *Job) Environment(env string) *Job {
-    a.jobConfig.Key.Environment = env
-    return a
+func (j AuroraJob) Environment(env string) Job {
+    j.jobConfig.Key.Environment = env
+    return j
 }
 
 // Set Job Key Role.
-func (a *Job) Role(role string) *Job {
-    a.jobConfig.Key.Role = role
+func (j AuroraJob) Role(role string) Job {
+    j.jobConfig.Key.Role = role
 
     //Will be deprecated
     identity := &aurora.Identity{role}
-    a.jobConfig.Owner = identity
-    a.jobConfig.TaskConfig.Owner = identity
-    return a
+    j.jobConfig.Owner = identity
+    j.jobConfig.TaskConfig.Owner = identity
+    return j
 }
 
 // Set Job Key Name.
-func (a *Job) Name(name string) *Job {
-    a.jobConfig.Key.Name = name
-    return a
+func (j AuroraJob) Name(name string) Job {
+    j.jobConfig.Key.Name = name
+    return j
 }
 
 // Set name of the executor that will the task will be configured to.
-func (a *Job) ExecutorName(name string) *Job {
-    a.jobConfig.TaskConfig.ExecutorConfig.Name = name
-    return a
+func (j AuroraJob) ExecutorName(name string) Job {
+    j.jobConfig.TaskConfig.ExecutorConfig.Name = name
+    return j
 }
 
 // Will be included as part of entire task inside the scheduler that will be serialized.
-func (a *Job) ExecutorData(data string) *Job {
-    a.jobConfig.TaskConfig.ExecutorConfig.Data = data
-    return a
+func (j AuroraJob) ExecutorData(data string) Job {
+    j.jobConfig.TaskConfig.ExecutorConfig.Data = data
+    return j
 }
 
-func (a *Job) CPU(cpus float64) *Job {
-    a.numCpus.NumCpus = &cpus
-    a.jobConfig.TaskConfig.NumCpus = cpus //Will be deprecated soon
+func (j AuroraJob) CPU(cpus float64) Job {
+    j.resources["cpu"].NumCpus = &cpus
+    j.jobConfig.TaskConfig.NumCpus = cpus //Will be deprecated soon
 
-    return a
+    return j
 }
 
-func (a *Job) RAM(ram int64) *Job {
-    a.ramMb.RamMb = &ram
-    a.jobConfig.TaskConfig.RamMb = ram //Will be deprecated soon
+func (j AuroraJob) RAM(ram int64) Job {
+    j.resources["ram"].RamMb = &ram
+    j.jobConfig.TaskConfig.RamMb = ram //Will be deprecated soon
 
-    return a
+    return j
 }
 
-func (a *Job) Disk(disk int64) *Job {
-    a.diskMb.DiskMb = &disk
-    a.jobConfig.TaskConfig.DiskMb = disk //Will be deprecated
+func (j AuroraJob) Disk(disk int64) Job {
+    j.resources["disk"].DiskMb = &disk
+    j.jobConfig.TaskConfig.DiskMb = disk //Will be deprecated
 
-    return a
+    return j
 }
 
 // How many failures to tolerate before giving up.
-func (a *Job) MaxFailure(maxFail int32) *Job {
-    a.jobConfig.TaskConfig.MaxTaskFailures = maxFail
-    return a
+func (j AuroraJob) MaxFailure(maxFail int32) Job {
+    j.jobConfig.TaskConfig.MaxTaskFailures = maxFail
+    return j
 }
 
 // How many instances of the job to run
-func (a *Job) InstanceCount(instCount int32) *Job {
-    a.jobConfig.InstanceCount = instCount
-    return a
+func (j AuroraJob) InstanceCount(instCount int32) Job {
+    j.jobConfig.InstanceCount = instCount
+    return j
 }
 
 // Restart the job's tasks if they fail
-func (a *Job) IsService(isService bool) *Job {
-    a.jobConfig.TaskConfig.IsService = isService
-    return a
+func (j AuroraJob) IsService(isService bool) Job {
+    j.jobConfig.TaskConfig.IsService = isService
+    return j
 }
 
 // Get the current job configurations key to use for some realis calls.
-func (a *Job) JobKey() *aurora.JobKey {
-    return a.jobConfig.Key
+func (j AuroraJob) JobKey() *aurora.JobKey {
+    return j.jobConfig.Key
 }
 
+// Get the current job configurations key to use for some realis calls.
+func (j AuroraJob) JobConfig() *aurora.JobConfiguration {
+    return j.jobConfig
+}
+
+func (j AuroraJob) TaskConfig() *aurora.TaskConfig {
+    return j.jobConfig.TaskConfig
+}
+
 // Add a list of URIs with the same extract and cache configuration. Scheduler must have
 // --enable_mesos_fetcher flag enabled. Currently there is no duplicate detection.
-func (a *Job) AddURIs(extract bool, cache bool, values ...string) *Job {
+func (j AuroraJob) AddURIs(extract bool, cache bool, values ...string) Job {
     for _, value := range values {
-        a.jobConfig.
+        j.jobConfig.
             TaskConfig.
             MesosFetcherUris[&aurora.MesosFetcherURI{value, &extract, &cache}] = true
     }
-    return a
+    return j
 }
 
 // Adds a Mesos label to the job. Note that Aurora will add the
 // prefix "org.apache.aurora.metadata." to the beginning of each key.
-func (a *Job) AddLabel(key string, value string) *Job {
-    a.jobConfig.TaskConfig.Metadata[&aurora.Metadata{key, value}] = true
-    return a
+func (j AuroraJob) AddLabel(key string, value string) Job {
+    j.jobConfig.TaskConfig.Metadata[&aurora.Metadata{key, value}] = true
+    return j
 }
 
 // Add a named port to the job configuration These are random ports as it's
 // not currently possible to request specific ports using Aurora.
-func (a *Job) AddNamedPorts(names ...string) *Job {
-    a.portCount += len(names)
+func (j AuroraJob) AddNamedPorts(names ...string) Job {
+    j.portCount += len(names)
     for _, name := range names {
-        a.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &name}] = true
+        j.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &name}] = true
     }
 
-    return a
+    return j
 }
 
 // Adds a request for a number of ports to the job configuration. The names chosen for these ports
 // will be org.apache.aurora.port.X, where X is the current port count for the job configuration
 // starting at 0. These are random ports as it's not currently possible to request
 // specific ports using Aurora.
-func (a *Job) AddPorts(num int) *Job {
-    start := a.portCount
-    a.portCount += num
-    for i := start; i < a.portCount; i++ {
+func (j AuroraJob) AddPorts(num int) Job {
+    start := j.portCount
+    j.portCount += num
+    for i := start; i < j.portCount; i++ {
         portName := "org.apache.aurora.port." + strconv.Itoa(i)
-        a.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &portName}] = true
+        j.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &portName}] = true
     }
 
-    return a
+    return j
 }
 
 // From Aurora Docs:
@@ -188,27 +224,23 @@ func (a *Job) AddPorts(num int) *Job {
 // name - Mesos slave attribute that the constraint is matched against.
 // If negated = true , treat this as a 'not' - to avoid specific values.
 // Values - list of values we look for in attribute name
-func (a *Job) AddValueConstraint(name string,
-    negated bool,
-    values ...string) *Job {
-
+func (j AuroraJob) AddValueConstraint(name string, negated bool, values ...string) Job {
     constraintValues := make(map[string]bool)
     for _, value := range values {
         constraintValues[value] = true
     }
-    a.jobConfig.TaskConfig.Constraints[&aurora.Constraint{name,
+    j.jobConfig.TaskConfig.Constraints[&aurora.Constraint{name,
         &aurora.TaskConstraint{&aurora.ValueConstraint{negated, constraintValues}, nil}}] = true
 
-    return a
+    return j
 }
 
 // From Aurora Docs:
 // A constraint that specifies the maximum number of active tasks on a host with
 // a matching attribute that may be scheduled simultaneously.
-func (a *Job) AddLimitConstraint(name string, limit int32) *Job {
-
-    a.jobConfig.TaskConfig.Constraints[&aurora.Constraint{name,
+func (j AuroraJob) AddLimitConstraint(name string, limit int32) Job {
+    j.jobConfig.TaskConfig.Constraints[&aurora.Constraint{name,
         &aurora.TaskConstraint{nil, &aurora.LimitConstraint{limit}}}] = true
 
-    return a
+    return j
 }
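
Taken together, job.go now exposes Job as a fluent builder: every setter returns the Job interface, so calls chain, and JobKey(), JobConfig(), and TaskConfig() expose the underlying Thrift structures. A usage sketch follows; it assumes the package is imported as realis, and every literal value (executor name, resource sizes, constraint values) is illustrative rather than taken from this commit:

// buildSampleJob shows how a caller might drive the new interface.
// All concrete values here are placeholders.
func buildSampleJob() realis.Job {
    return realis.NewJob().
        Environment("prod").
        Role("vagrant").
        Name("hello_world").
        ExecutorName("docker-compose-executor").
        ExecutorData("{}").
        CPU(0.25).
        RAM(64).
        Disk(100).
        IsService(true).
        InstanceCount(1).
        AddPorts(1).
        AddLabel("fileName", "sample-app/docker-compose.yml").
        AddValueConstraint("host", false, "aurora.local").
        AddLimitConstraint("host", 1)
}

// The accessors then feed other realis calls:
//   key  := buildSampleJob().JobKey()      // *aurora.JobKey
//   task := buildSampleJob().TaskConfig()  // *aurora.TaskConfig
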
updatejob.go: 31 lines changed
@@ -16,19 +16,42 @@ package realis
 
 import "gen-go/apache/aurora"
 
-// Structure to collect all information requrired to create job update
+// Structure to collect all information required to create job update
 type UpdateJob struct {
-    *Job // SetInstanceCount for job is hidden, access via full qualifier
+    Job // SetInstanceCount for job is hidden, access via full qualifier
     req *aurora.JobUpdateRequest
 }
 
 // Create a default UpdateJob object.
-func NewUpdateJob(job *Job) *UpdateJob {
+func NewUpdateJob(config *aurora.TaskConfig) *UpdateJob {
 
     req := aurora.NewJobUpdateRequest()
-    req.TaskConfig = job.jobConfig.TaskConfig
+    req.TaskConfig = config
     req.Settings = aurora.NewJobUpdateSettings()
 
+    job := NewJob().(AuroraJob)
+    job.jobConfig.TaskConfig = config
+
+    // Rebuild resource map from TaskConfig
+    job.resources = make(map[string]*aurora.Resource)
+    for ptr := range config.Resources {
+        if(ptr.NumCpus != nil){
+            job.resources["cpu"].NumCpus = ptr.NumCpus
+            continue // Guard against Union violations that Go won't enforce
+        }
+
+        if(ptr.RamMb != nil){
+            job.resources["ram"].RamMb = ptr.RamMb
+            continue
+        }
+
+        if(ptr.DiskMb != nil){
+            job.resources["disk"].DiskMb = ptr.DiskMb
+            continue
+        }
+    }
+
     // Mirrors defaults set by Pystachio
     req.Settings.UpdateOnlyTheseInstances = make(map[*aurora.Range]bool)
     req.Settings.UpdateGroupSize = 1
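
With this signature change, callers hand NewUpdateJob a *aurora.TaskConfig instead of the job itself; the TaskConfig() accessor added in job.go is the natural bridge. A short sketch, reusing the hypothetical buildSampleJob helper from the earlier example and again assuming the package is imported as realis:

// Derive an update request from an existing job's task configuration.
// buildSampleJob is the illustrative helper sketched above, not part of this commit.
updateJob := realis.NewUpdateJob(buildSampleJob().TaskConfig())
_ = updateJob // subsequently handed to the scheduler client; that step is outside this diff
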