diff --git a/auroraAPI.thrift b/auroraAPI.thrift
index 063774d..3e43f6f 100644
--- a/auroraAPI.thrift
+++ b/auroraAPI.thrift
@@ -716,9 +716,40 @@ struct JobUpdateKey {
   2: string id
 }
 
+/** Limits the number of active changes being made to instances to groupSize. */
+struct QueueJobUpdateStrategy {
+  1: i32 groupSize
+}
+
+/** Similar to Queue strategy but will not start a new group until all instances in an active
+ * group have finished updating.
+ */
+struct BatchJobUpdateStrategy {
+  1: i32 groupSize
+  /* Update will pause automatically after each batch completes */
+  2: bool autopauseAfterBatch
+}
+
+/** Same as Batch strategy but each time an active group completes, the size of the next active
+ * group may change.
+ */
+struct VariableBatchJobUpdateStrategy {
+  1: list<i32> groupSizes
+  /* Update will pause automatically after each batch completes */
+  2: bool autopauseAfterBatch
+}
+
+union JobUpdateStrategy {
+  1: QueueJobUpdateStrategy queueStrategy
+  2: BatchJobUpdateStrategy batchStrategy
+  3: VariableBatchJobUpdateStrategy varBatchStrategy
+}
+
 /** Job update thresholds and limits. */
 struct JobUpdateSettings {
-  /** Max number of instances being updated at any given moment. */
+  /** Deprecated, please set value inside of desired update strategy instead.
+   * Max number of instances being updated at any given moment.
+   */
   1: i32 updateGroupSize
 
   /** Max number of instance failures to tolerate before marking instance as FAILED. */
@@ -736,7 +767,7 @@ struct JobUpdateSettings {
   /** Instance IDs to act on. All instances will be affected if this is not set. */
   7: set<Range> updateOnlyTheseInstances
 
-  /**
+  /** Deprecated, please set updateStrategy to the Batch strategy instead.
    * If true, use updateGroupSize as strict batching boundaries, and avoid proceeding to another
    * batch until the preceding batch finishes updating.
    */
@@ -755,6 +786,9 @@ struct JobUpdateSettings {
    * differs between the old and new task configurations, updates will use the newest configuration.
    */
   10: optional bool slaAware
+
+  /** Update strategy to be used for the update. See JobUpdateStrategy for choices. */
+  11: optional JobUpdateStrategy updateStrategy
 }
 
 /** Event marking a state transition in job update lifecycle. */
diff --git a/docker-compose.yml b/docker-compose.yml
index 3632491..f103d35 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -14,7 +14,7 @@ services:
         ipv4_address: 192.168.33.2
 
   master:
-    image: rdelvalle/mesos-master:1.5.1
+    image: rdelvalle/mesos-master:1.6.2
     restart: on-failure
     ports:
       - "5050:5050"
@@ -32,7 +32,7 @@ services:
       - zk
 
   agent-one:
-    image: rdelvalle/mesos-agent:1.5.1
+    image: rdelvalle/mesos-agent:1.6.2
     pid: host
     restart: on-failure
     ports:
@@ -56,7 +56,7 @@ services:
      - zk
 
   aurora-one:
-    image: rdelvalle/aurora:0.21.0
+    image: rdelvalle/aurora:0.22.0
     pid: host
     ports:
       - "8081:8081"
@@ -69,6 +69,7 @@ services:
       -http_authentication_mechanism=BASIC
       -shiro_realm_modules=INI_AUTHNZ
       -shiro_ini_path=/etc/aurora/security.ini
+      -min_required_instances_for_sla_check=1
     volumes:
       - ./.aurora-config:/etc/aurora
     networks:
diff --git a/gen-go/apache/aurora/GoUnusedProtection__.go b/gen-go/apache/aurora/GoUnusedProtection__.go
index 6ba25e0..7ac5b29 100644
--- a/gen-go/apache/aurora/GoUnusedProtection__.go
+++ b/gen-go/apache/aurora/GoUnusedProtection__.go
@@ -1,4 +1,4 @@
-// Autogenerated by Thrift Compiler (0.12.0)
+// Autogenerated by Thrift Compiler (0.13.0)
 // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 
 package aurora
diff --git a/gen-go/apache/aurora/auroraAPI-consts.go b/gen-go/apache/aurora/auroraAPI-consts.go
index 6e79077..3db5138 100644
--- a/gen-go/apache/aurora/auroraAPI-consts.go
+++ b/gen-go/apache/aurora/auroraAPI-consts.go
@@ -1,9 +1,9 @@
-// Autogenerated by Thrift Compiler (0.12.0)
+// Autogenerated by Thrift Compiler (0.13.0)
 // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 
 package aurora
 
-import (
+import(
 	"bytes"
 	"context"
 	"reflect"
diff --git a/gen-go/apache/aurora/auroraAPI.go b/gen-go/apache/aurora/auroraAPI.go
index 36cde22..cf0b088 100644
--- a/gen-go/apache/aurora/auroraAPI.go
+++ b/gen-go/apache/aurora/auroraAPI.go
@@ -1,9 +1,9 @@
-// Autogenerated by Thrift Compiler (0.12.0)
+// Autogenerated by Thrift Compiler (0.13.0)
 // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
 
 package aurora
 
-import (
+import(
 	"bytes"
 	"context"
 	"reflect"
@@ -10039,16 +10039,600 @@ func (p *JobUpdateKey) String() string {
   return fmt.Sprintf("JobUpdateKey(%+v)", *p)
 }
 
+// Limits the number of active changes being made to instances to groupSize.
+// +// Attributes: +// - GroupSize +type QueueJobUpdateStrategy struct { + GroupSize int32 `thrift:"groupSize,1" db:"groupSize" json:"groupSize"` +} + +func NewQueueJobUpdateStrategy() *QueueJobUpdateStrategy { + return &QueueJobUpdateStrategy{} +} + + +func (p *QueueJobUpdateStrategy) GetGroupSize() int32 { + return p.GroupSize +} +func (p *QueueJobUpdateStrategy) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *QueueJobUpdateStrategy) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.GroupSize = v +} + return nil +} + +func (p *QueueJobUpdateStrategy) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("QueueJobUpdateStrategy"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *QueueJobUpdateStrategy) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("groupSize", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:groupSize: ", p), err) } + if err := oprot.WriteI32(int32(p.GroupSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.groupSize (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:groupSize: ", p), err) } + return err +} + +func (p *QueueJobUpdateStrategy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("QueueJobUpdateStrategy(%+v)", *p) +} + +// Similar to Queue strategy but will not start a new group until all instances in an active +// group have finished updating. 
+// +// Attributes: +// - GroupSize +// - AutopauseAfterBatch +type BatchJobUpdateStrategy struct { + GroupSize int32 `thrift:"groupSize,1" db:"groupSize" json:"groupSize"` + AutopauseAfterBatch bool `thrift:"autopauseAfterBatch,2" db:"autopauseAfterBatch" json:"autopauseAfterBatch"` +} + +func NewBatchJobUpdateStrategy() *BatchJobUpdateStrategy { + return &BatchJobUpdateStrategy{} +} + + +func (p *BatchJobUpdateStrategy) GetGroupSize() int32 { + return p.GroupSize +} + +func (p *BatchJobUpdateStrategy) GetAutopauseAfterBatch() bool { + return p.AutopauseAfterBatch +} +func (p *BatchJobUpdateStrategy) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BatchJobUpdateStrategy) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.GroupSize = v +} + return nil +} + +func (p *BatchJobUpdateStrategy) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.AutopauseAfterBatch = v +} + return nil +} + +func (p *BatchJobUpdateStrategy) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("BatchJobUpdateStrategy"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + if err := p.writeField2(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *BatchJobUpdateStrategy) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("groupSize", thrift.I32, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:groupSize: ", p), err) } + if err := oprot.WriteI32(int32(p.GroupSize)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.groupSize (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:groupSize: ", p), err) } + return err +} + +func (p *BatchJobUpdateStrategy) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("autopauseAfterBatch", thrift.BOOL, 2); err != nil { + return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 2:autopauseAfterBatch: ", p), err) } + if err := oprot.WriteBool(bool(p.AutopauseAfterBatch)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.autopauseAfterBatch (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:autopauseAfterBatch: ", p), err) } + return err +} + +func (p *BatchJobUpdateStrategy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("BatchJobUpdateStrategy(%+v)", *p) +} + +// Same as Batch strategy but each time an active group completes, the size of the next active +// group may change. +// +// Attributes: +// - GroupSizes +// - AutopauseAfterBatch +type VariableBatchJobUpdateStrategy struct { + GroupSizes []int32 `thrift:"groupSizes,1" db:"groupSizes" json:"groupSizes"` + AutopauseAfterBatch bool `thrift:"autopauseAfterBatch,2" db:"autopauseAfterBatch" json:"autopauseAfterBatch"` +} + +func NewVariableBatchJobUpdateStrategy() *VariableBatchJobUpdateStrategy { + return &VariableBatchJobUpdateStrategy{} +} + + +func (p *VariableBatchJobUpdateStrategy) GetGroupSizes() []int32 { + return p.GroupSizes +} + +func (p *VariableBatchJobUpdateStrategy) GetAutopauseAfterBatch() bool { + return p.AutopauseAfterBatch +} +func (p *VariableBatchJobUpdateStrategy) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.LIST { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *VariableBatchJobUpdateStrategy) ReadField1(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]int32, 0, size) + p.GroupSizes = tSlice + for i := 0; i < size; i ++ { +var _elem25 int32 + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 0: ", err) +} else { + _elem25 = v +} + p.GroupSizes = append(p.GroupSizes, _elem25) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *VariableBatchJobUpdateStrategy) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.AutopauseAfterBatch = v +} + return nil +} + +func (p *VariableBatchJobUpdateStrategy) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("VariableBatchJobUpdateStrategy"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T 
write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + if err := p.writeField2(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *VariableBatchJobUpdateStrategy) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("groupSizes", thrift.LIST, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:groupSizes: ", p), err) } + if err := oprot.WriteListBegin(thrift.I32, len(p.GroupSizes)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.GroupSizes { + if err := oprot.WriteI32(int32(v)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:groupSizes: ", p), err) } + return err +} + +func (p *VariableBatchJobUpdateStrategy) writeField2(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("autopauseAfterBatch", thrift.BOOL, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:autopauseAfterBatch: ", p), err) } + if err := oprot.WriteBool(bool(p.AutopauseAfterBatch)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.autopauseAfterBatch (2) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:autopauseAfterBatch: ", p), err) } + return err +} + +func (p *VariableBatchJobUpdateStrategy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("VariableBatchJobUpdateStrategy(%+v)", *p) +} + +// Attributes: +// - QueueStrategy +// - BatchStrategy +// - VarBatchStrategy +type JobUpdateStrategy struct { + QueueStrategy *QueueJobUpdateStrategy `thrift:"queueStrategy,1" db:"queueStrategy" json:"queueStrategy,omitempty"` + BatchStrategy *BatchJobUpdateStrategy `thrift:"batchStrategy,2" db:"batchStrategy" json:"batchStrategy,omitempty"` + VarBatchStrategy *VariableBatchJobUpdateStrategy `thrift:"varBatchStrategy,3" db:"varBatchStrategy" json:"varBatchStrategy,omitempty"` +} + +func NewJobUpdateStrategy() *JobUpdateStrategy { + return &JobUpdateStrategy{} +} + +var JobUpdateStrategy_QueueStrategy_DEFAULT *QueueJobUpdateStrategy +func (p *JobUpdateStrategy) GetQueueStrategy() *QueueJobUpdateStrategy { + if !p.IsSetQueueStrategy() { + return JobUpdateStrategy_QueueStrategy_DEFAULT + } +return p.QueueStrategy +} +var JobUpdateStrategy_BatchStrategy_DEFAULT *BatchJobUpdateStrategy +func (p *JobUpdateStrategy) GetBatchStrategy() *BatchJobUpdateStrategy { + if !p.IsSetBatchStrategy() { + return JobUpdateStrategy_BatchStrategy_DEFAULT + } +return p.BatchStrategy +} +var JobUpdateStrategy_VarBatchStrategy_DEFAULT *VariableBatchJobUpdateStrategy +func (p *JobUpdateStrategy) GetVarBatchStrategy() *VariableBatchJobUpdateStrategy { + if !p.IsSetVarBatchStrategy() { + return JobUpdateStrategy_VarBatchStrategy_DEFAULT + } +return p.VarBatchStrategy +} +func (p *JobUpdateStrategy) CountSetFieldsJobUpdateStrategy() int { + count := 0 + if (p.IsSetQueueStrategy()) { 
+ count++ + } + if (p.IsSetBatchStrategy()) { + count++ + } + if (p.IsSetVarBatchStrategy()) { + count++ + } + return count + +} + +func (p *JobUpdateStrategy) IsSetQueueStrategy() bool { + return p.QueueStrategy != nil +} + +func (p *JobUpdateStrategy) IsSetBatchStrategy() bool { + return p.BatchStrategy != nil +} + +func (p *JobUpdateStrategy) IsSetVarBatchStrategy() bool { + return p.VarBatchStrategy != nil +} + +func (p *JobUpdateStrategy) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *JobUpdateStrategy) ReadField1(iprot thrift.TProtocol) error { + p.QueueStrategy = &QueueJobUpdateStrategy{} + if err := p.QueueStrategy.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.QueueStrategy), err) + } + return nil +} + +func (p *JobUpdateStrategy) ReadField2(iprot thrift.TProtocol) error { + p.BatchStrategy = &BatchJobUpdateStrategy{} + if err := p.BatchStrategy.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.BatchStrategy), err) + } + return nil +} + +func (p *JobUpdateStrategy) ReadField3(iprot thrift.TProtocol) error { + p.VarBatchStrategy = &VariableBatchJobUpdateStrategy{} + if err := p.VarBatchStrategy.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.VarBatchStrategy), err) + } + return nil +} + +func (p *JobUpdateStrategy) Write(oprot thrift.TProtocol) error { + if c := p.CountSetFieldsJobUpdateStrategy(); c != 1 { + return fmt.Errorf("%T write union: exactly one field must be set (%d set).", p, c) + } + if err := oprot.WriteStructBegin("JobUpdateStrategy"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + if err := p.writeField2(oprot); err != nil { return err } + if err := p.writeField3(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *JobUpdateStrategy) writeField1(oprot thrift.TProtocol) (err error) { + if p.IsSetQueueStrategy() { + if err := 
oprot.WriteFieldBegin("queueStrategy", thrift.STRUCT, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:queueStrategy: ", p), err) } + if err := p.QueueStrategy.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.QueueStrategy), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:queueStrategy: ", p), err) } + } + return err +} + +func (p *JobUpdateStrategy) writeField2(oprot thrift.TProtocol) (err error) { + if p.IsSetBatchStrategy() { + if err := oprot.WriteFieldBegin("batchStrategy", thrift.STRUCT, 2); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:batchStrategy: ", p), err) } + if err := p.BatchStrategy.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.BatchStrategy), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 2:batchStrategy: ", p), err) } + } + return err +} + +func (p *JobUpdateStrategy) writeField3(oprot thrift.TProtocol) (err error) { + if p.IsSetVarBatchStrategy() { + if err := oprot.WriteFieldBegin("varBatchStrategy", thrift.STRUCT, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:varBatchStrategy: ", p), err) } + if err := p.VarBatchStrategy.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.VarBatchStrategy), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:varBatchStrategy: ", p), err) } + } + return err +} + +func (p *JobUpdateStrategy) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("JobUpdateStrategy(%+v)", *p) +} + // Job update thresholds and limits. // // Attributes: -// - UpdateGroupSize: Max number of instances being updated at any given moment. +// - UpdateGroupSize: Deprecated, please set value inside of desired update strategy instead. +// Max number of instances being updated at any given moment. // - MaxPerInstanceFailures: Max number of instance failures to tolerate before marking instance as FAILED. // - MaxFailedInstances: Max number of FAILED instances to tolerate before terminating the update. // - MinWaitInInstanceRunningMs: Min time to watch a RUNNING instance. // - RollbackOnFailure: If true, enables failed update rollback. // - UpdateOnlyTheseInstances: Instance IDs to act on. All instances will be affected if this is not set. -// - WaitForBatchCompletion: If true, use updateGroupSize as strict batching boundaries, and avoid proceeding to another +// - WaitForBatchCompletion: Deprecated, please set updateStrategy to the Batch strategy instead. +// If true, use updateGroupSize as strict batching boundaries, and avoid proceeding to another // batch until the preceding batch finishes updating. // - BlockIfNoPulsesAfterMs: If set, requires external calls to pulseJobUpdate RPC within the specified rate for the // update to make progress. If no pulses received within specified interval the update will @@ -10056,6 +10640,7 @@ func (p *JobUpdateKey) String() string { // unblocked by a fresh pulseJobUpdate call. // - SlaAware: If true, updates will obey the SLA requirements of the tasks being updated. If the SLA policy // differs between the old and new task configurations, updates will use the newest configuration. 
+// - UpdateStrategy: Update strategy to be used for the update. See JobUpdateStrategy for choices. type JobUpdateSettings struct { UpdateGroupSize int32 `thrift:"updateGroupSize,1" db:"updateGroupSize" json:"updateGroupSize"` MaxPerInstanceFailures int32 `thrift:"maxPerInstanceFailures,2" db:"maxPerInstanceFailures" json:"maxPerInstanceFailures"` @@ -10067,6 +10652,7 @@ type JobUpdateSettings struct { WaitForBatchCompletion bool `thrift:"waitForBatchCompletion,8" db:"waitForBatchCompletion" json:"waitForBatchCompletion"` BlockIfNoPulsesAfterMs *int32 `thrift:"blockIfNoPulsesAfterMs,9" db:"blockIfNoPulsesAfterMs" json:"blockIfNoPulsesAfterMs,omitempty"` SlaAware *bool `thrift:"slaAware,10" db:"slaAware" json:"slaAware,omitempty"` + UpdateStrategy *JobUpdateStrategy `thrift:"updateStrategy,11" db:"updateStrategy" json:"updateStrategy,omitempty"` } func NewJobUpdateSettings() *JobUpdateSettings { @@ -10115,6 +10701,13 @@ func (p *JobUpdateSettings) GetSlaAware() bool { } return *p.SlaAware } +var JobUpdateSettings_UpdateStrategy_DEFAULT *JobUpdateStrategy +func (p *JobUpdateSettings) GetUpdateStrategy() *JobUpdateStrategy { + if !p.IsSetUpdateStrategy() { + return JobUpdateSettings_UpdateStrategy_DEFAULT + } +return p.UpdateStrategy +} func (p *JobUpdateSettings) IsSetBlockIfNoPulsesAfterMs() bool { return p.BlockIfNoPulsesAfterMs != nil } @@ -10123,6 +10716,10 @@ func (p *JobUpdateSettings) IsSetSlaAware() bool { return p.SlaAware != nil } +func (p *JobUpdateSettings) IsSetUpdateStrategy() bool { + return p.UpdateStrategy != nil +} + func (p *JobUpdateSettings) Read(iprot thrift.TProtocol) error { if _, err := iprot.ReadStructBegin(); err != nil { return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) @@ -10226,6 +10823,16 @@ func (p *JobUpdateSettings) Read(iprot thrift.TProtocol) error { return err } } + case 11: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } default: if err := iprot.Skip(fieldTypeId); err != nil { return err @@ -10294,11 +10901,11 @@ func (p *JobUpdateSettings) ReadField7(iprot thrift.TProtocol) error { tSet := make([]*Range, 0, size) p.UpdateOnlyTheseInstances = tSet for i := 0; i < size; i ++ { - _elem25 := &Range{} - if err := _elem25.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem25), err) + _elem26 := &Range{} + if err := _elem26.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem26), err) } - p.UpdateOnlyTheseInstances = append(p.UpdateOnlyTheseInstances, _elem25) + p.UpdateOnlyTheseInstances = append(p.UpdateOnlyTheseInstances, _elem26) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -10333,6 +10940,14 @@ func (p *JobUpdateSettings) ReadField10(iprot thrift.TProtocol) error { return nil } +func (p *JobUpdateSettings) ReadField11(iprot thrift.TProtocol) error { + p.UpdateStrategy = &JobUpdateStrategy{} + if err := p.UpdateStrategy.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.UpdateStrategy), err) + } + return nil +} + func (p *JobUpdateSettings) Write(oprot thrift.TProtocol) error { if err := oprot.WriteStructBegin("JobUpdateSettings"); err != nil { return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } @@ -10346,6 +10961,7 @@ func (p *JobUpdateSettings) Write(oprot 
thrift.TProtocol) error { if err := p.writeField8(oprot); err != nil { return err } if err := p.writeField9(oprot); err != nil { return err } if err := p.writeField10(oprot); err != nil { return err } + if err := p.writeField11(oprot); err != nil { return err } } if err := oprot.WriteFieldStop(); err != nil { return thrift.PrependError("write field stop error: ", err) } @@ -10464,6 +11080,19 @@ func (p *JobUpdateSettings) writeField10(oprot thrift.TProtocol) (err error) { return err } +func (p *JobUpdateSettings) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetUpdateStrategy() { + if err := oprot.WriteFieldBegin("updateStrategy", thrift.STRUCT, 11); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:updateStrategy: ", p), err) } + if err := p.UpdateStrategy.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.UpdateStrategy), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:updateStrategy: ", p), err) } + } + return err +} + func (p *JobUpdateSettings) String() string { if p == nil { return "" @@ -10996,11 +11625,11 @@ func (p *InstanceTaskConfig) ReadField2(iprot thrift.TProtocol) error { tSet := make([]*Range, 0, size) p.Instances = tSet for i := 0; i < size; i ++ { - _elem26 := &Range{} - if err := _elem26.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem26), err) + _elem27 := &Range{} + if err := _elem27.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem27), err) } - p.Instances = append(p.Instances, _elem26) + p.Instances = append(p.Instances, _elem27) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -11387,11 +12016,11 @@ func (p *JobUpdateSummary) ReadField6(iprot thrift.TProtocol) error { tSet := make([]*Metadata, 0, size) p.Metadata = tSet for i := 0; i < size; i ++ { - _elem27 := &Metadata{} - if err := _elem27.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem27), err) + _elem28 := &Metadata{} + if err := _elem28.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem28), err) } - p.Metadata = append(p.Metadata, _elem27) + p.Metadata = append(p.Metadata, _elem28) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -11590,11 +12219,11 @@ func (p *JobUpdateInstructions) ReadField1(iprot thrift.TProtocol) error { tSet := make([]*InstanceTaskConfig, 0, size) p.InitialState = tSet for i := 0; i < size; i ++ { - _elem28 := &InstanceTaskConfig{} - if err := _elem28.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem28), err) + _elem29 := &InstanceTaskConfig{} + if err := _elem29.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem29), err) } - p.InitialState = append(p.InitialState, _elem28) + p.InitialState = append(p.InitialState, _elem29) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -11938,11 +12567,11 @@ func (p *JobUpdateDetails) ReadField2(iprot thrift.TProtocol) error { tSlice := make([]*JobUpdateEvent, 0, size) p.UpdateEvents = tSlice for i := 0; i < size; i ++ { - _elem29 := &JobUpdateEvent{} - if err := _elem29.Read(iprot); err != nil { - 
return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem29), err) + _elem30 := &JobUpdateEvent{} + if err := _elem30.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem30), err) } - p.UpdateEvents = append(p.UpdateEvents, _elem29) + p.UpdateEvents = append(p.UpdateEvents, _elem30) } if err := iprot.ReadListEnd(); err != nil { return thrift.PrependError("error reading list end: ", err) @@ -11958,11 +12587,11 @@ func (p *JobUpdateDetails) ReadField3(iprot thrift.TProtocol) error { tSlice := make([]*JobInstanceUpdateEvent, 0, size) p.InstanceEvents = tSlice for i := 0; i < size; i ++ { - _elem30 := &JobInstanceUpdateEvent{} - if err := _elem30.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem30), err) + _elem31 := &JobInstanceUpdateEvent{} + if err := _elem31.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem31), err) } - p.InstanceEvents = append(p.InstanceEvents, _elem30) + p.InstanceEvents = append(p.InstanceEvents, _elem31) } if err := iprot.ReadListEnd(); err != nil { return thrift.PrependError("error reading list end: ", err) @@ -12195,11 +12824,11 @@ func (p *JobUpdateRequest) ReadField4(iprot thrift.TProtocol) error { tSet := make([]*Metadata, 0, size) p.Metadata = tSet for i := 0; i < size; i ++ { - _elem31 := &Metadata{} - if err := _elem31.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem31), err) + _elem32 := &Metadata{} + if err := _elem32.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem32), err) } - p.Metadata = append(p.Metadata, _elem31) + p.Metadata = append(p.Metadata, _elem32) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -12517,14 +13146,14 @@ func (p *JobUpdateQuery) ReadField5(iprot thrift.TProtocol) error { tSet := make([]JobUpdateStatus, 0, size) p.UpdateStatuses = tSet for i := 0; i < size; i ++ { -var _elem32 JobUpdateStatus +var _elem33 JobUpdateStatus if v, err := iprot.ReadI32(); err != nil { return thrift.PrependError("error reading field 0: ", err) } else { temp := JobUpdateStatus(v) - _elem32 = temp + _elem33 = temp } - p.UpdateStatuses = append(p.UpdateStatuses, _elem32) + p.UpdateStatuses = append(p.UpdateStatuses, _elem33) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -12939,13 +13568,13 @@ func (p *ListBackupsResult_) ReadField1(iprot thrift.TProtocol) error { tSet := make([]string, 0, size) p.Backups = tSet for i := 0; i < size; i ++ { -var _elem33 string +var _elem34 string if v, err := iprot.ReadString(); err != nil { return thrift.PrependError("error reading field 0: ", err) } else { - _elem33 = v + _elem34 = v } - p.Backups = append(p.Backups, _elem33) + p.Backups = append(p.Backups, _elem34) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -13058,11 +13687,11 @@ func (p *StartMaintenanceResult_) ReadField1(iprot thrift.TProtocol) error { tSet := make([]*HostStatus, 0, size) p.Statuses = tSet for i := 0; i < size; i ++ { - _elem34 := &HostStatus{} - if err := _elem34.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem34), err) + _elem35 := &HostStatus{} + if err := _elem35.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading 
struct: ", _elem35), err) } - p.Statuses = append(p.Statuses, _elem34) + p.Statuses = append(p.Statuses, _elem35) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -13176,11 +13805,11 @@ func (p *DrainHostsResult_) ReadField1(iprot thrift.TProtocol) error { tSet := make([]*HostStatus, 0, size) p.Statuses = tSet for i := 0; i < size; i ++ { - _elem35 := &HostStatus{} - if err := _elem35.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem35), err) + _elem36 := &HostStatus{} + if err := _elem36.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem36), err) } - p.Statuses = append(p.Statuses, _elem35) + p.Statuses = append(p.Statuses, _elem36) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -13294,11 +13923,11 @@ func (p *QueryRecoveryResult_) ReadField1(iprot thrift.TProtocol) error { tSet := make([]*ScheduledTask, 0, size) p.Tasks = tSet for i := 0; i < size; i ++ { - _elem36 := &ScheduledTask{} - if err := _elem36.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem36), err) + _elem37 := &ScheduledTask{} + if err := _elem37.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem37), err) } - p.Tasks = append(p.Tasks, _elem36) + p.Tasks = append(p.Tasks, _elem37) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -13412,11 +14041,11 @@ func (p *MaintenanceStatusResult_) ReadField1(iprot thrift.TProtocol) error { tSet := make([]*HostStatus, 0, size) p.Statuses = tSet for i := 0; i < size; i ++ { - _elem37 := &HostStatus{} - if err := _elem37.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem37), err) + _elem38 := &HostStatus{} + if err := _elem38.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem38), err) } - p.Statuses = append(p.Statuses, _elem37) + p.Statuses = append(p.Statuses, _elem38) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -13530,11 +14159,11 @@ func (p *EndMaintenanceResult_) ReadField1(iprot thrift.TProtocol) error { tSet := make([]*HostStatus, 0, size) p.Statuses = tSet for i := 0; i < size; i ++ { - _elem38 := &HostStatus{} - if err := _elem38.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem38), err) + _elem39 := &HostStatus{} + if err := _elem39.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem39), err) } - p.Statuses = append(p.Statuses, _elem38) + p.Statuses = append(p.Statuses, _elem39) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -13648,11 +14277,11 @@ func (p *RoleSummaryResult_) ReadField1(iprot thrift.TProtocol) error { tSet := make([]*RoleSummary, 0, size) p.Summaries = tSet for i := 0; i < size; i ++ { - _elem39 := &RoleSummary{} - if err := _elem39.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem39), err) + _elem40 := &RoleSummary{} + if err := _elem40.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem40), err) } - p.Summaries = append(p.Summaries, _elem39) + p.Summaries = 
append(p.Summaries, _elem40) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -13766,11 +14395,11 @@ func (p *JobSummaryResult_) ReadField1(iprot thrift.TProtocol) error { tSet := make([]*JobSummary, 0, size) p.Summaries = tSet for i := 0; i < size; i ++ { - _elem40 := &JobSummary{} - if err := _elem40.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem40), err) + _elem41 := &JobSummary{} + if err := _elem41.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem41), err) } - p.Summaries = append(p.Summaries, _elem40) + p.Summaries = append(p.Summaries, _elem41) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -13982,11 +14611,11 @@ func (p *GetPendingReasonResult_) ReadField1(iprot thrift.TProtocol) error { tSet := make([]*PendingReason, 0, size) p.Reasons = tSet for i := 0; i < size; i ++ { - _elem41 := &PendingReason{} - if err := _elem41.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem41), err) + _elem42 := &PendingReason{} + if err := _elem42.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem42), err) } - p.Reasons = append(p.Reasons, _elem41) + p.Reasons = append(p.Reasons, _elem42) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -14247,11 +14876,11 @@ func (p *GetJobUpdateSummariesResult_) ReadField1(iprot thrift.TProtocol) error tSlice := make([]*JobUpdateSummary, 0, size) p.UpdateSummaries = tSlice for i := 0; i < size; i ++ { - _elem42 := &JobUpdateSummary{} - if err := _elem42.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem42), err) + _elem43 := &JobUpdateSummary{} + if err := _elem43.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem43), err) } - p.UpdateSummaries = append(p.UpdateSummaries, _elem42) + p.UpdateSummaries = append(p.UpdateSummaries, _elem43) } if err := iprot.ReadListEnd(); err != nil { return thrift.PrependError("error reading list end: ", err) @@ -14391,11 +15020,11 @@ func (p *GetJobUpdateDetailsResult_) ReadField2(iprot thrift.TProtocol) error { tSlice := make([]*JobUpdateDetails, 0, size) p.DetailsList = tSlice for i := 0; i < size; i ++ { - _elem43 := &JobUpdateDetails{} - if err := _elem43.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem43), err) + _elem44 := &JobUpdateDetails{} + if err := _elem44.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem44), err) } - p.DetailsList = append(p.DetailsList, _elem43) + p.DetailsList = append(p.DetailsList, _elem44) } if err := iprot.ReadListEnd(); err != nil { return thrift.PrependError("error reading list end: ", err) @@ -14656,11 +15285,11 @@ func (p *GetJobUpdateDiffResult_) ReadField1(iprot thrift.TProtocol) error { tSet := make([]*ConfigGroup, 0, size) p.Add = tSet for i := 0; i < size; i ++ { - _elem44 := &ConfigGroup{} - if err := _elem44.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem44), err) + _elem45 := &ConfigGroup{} + if err := _elem45.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem45), err) } - p.Add = append(p.Add, 
_elem44) + p.Add = append(p.Add, _elem45) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -14676,11 +15305,11 @@ func (p *GetJobUpdateDiffResult_) ReadField2(iprot thrift.TProtocol) error { tSet := make([]*ConfigGroup, 0, size) p.Remove = tSet for i := 0; i < size; i ++ { - _elem45 := &ConfigGroup{} - if err := _elem45.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem45), err) + _elem46 := &ConfigGroup{} + if err := _elem46.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem46), err) } - p.Remove = append(p.Remove, _elem45) + p.Remove = append(p.Remove, _elem46) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -14696,11 +15325,11 @@ func (p *GetJobUpdateDiffResult_) ReadField3(iprot thrift.TProtocol) error { tSet := make([]*ConfigGroup, 0, size) p.Update = tSet for i := 0; i < size; i ++ { - _elem46 := &ConfigGroup{} - if err := _elem46.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem46), err) + _elem47 := &ConfigGroup{} + if err := _elem47.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem47), err) } - p.Update = append(p.Update, _elem46) + p.Update = append(p.Update, _elem47) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -14716,11 +15345,11 @@ func (p *GetJobUpdateDiffResult_) ReadField4(iprot thrift.TProtocol) error { tSet := make([]*ConfigGroup, 0, size) p.Unchanged = tSet for i := 0; i < size; i ++ { - _elem47 := &ConfigGroup{} - if err := _elem47.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem47), err) + _elem48 := &ConfigGroup{} + if err := _elem48.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem48), err) } - p.Unchanged = append(p.Unchanged, _elem47) + p.Unchanged = append(p.Unchanged, _elem48) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -14942,19 +15571,19 @@ func (p *TierConfig) ReadField2(iprot thrift.TProtocol) error { tMap := make(map[string]string, size) p.Settings = tMap for i := 0; i < size; i ++ { -var _key48 string +var _key49 string if v, err := iprot.ReadString(); err != nil { return thrift.PrependError("error reading field 0: ", err) } else { - _key48 = v + _key49 = v } -var _val49 string +var _val50 string if v, err := iprot.ReadString(); err != nil { return thrift.PrependError("error reading field 0: ", err) } else { - _val49 = v + _val50 = v } - p.Settings[_key48] = _val49 + p.Settings[_key49] = _val50 } if err := iprot.ReadMapEnd(); err != nil { return thrift.PrependError("error reading map end: ", err) @@ -15100,11 +15729,11 @@ func (p *GetTierConfigResult_) ReadField2(iprot thrift.TProtocol) error { tSet := make([]*TierConfig, 0, size) p.Tiers = tSet for i := 0; i < size; i ++ { - _elem50 := &TierConfig{} - if err := _elem50.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem50), err) + _elem51 := &TierConfig{} + if err := _elem51.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem51), err) } - p.Tiers = append(p.Tiers, _elem50) + p.Tiers = append(p.Tiers, _elem51) } if err := iprot.ReadSetEnd(); err != nil { return 
thrift.PrependError("error reading set end: ", err) @@ -16568,11 +17197,11 @@ func (p *Response) ReadField6(iprot thrift.TProtocol) error { tSlice := make([]*ResponseDetail, 0, size) p.Details = tSlice for i := 0; i < size; i ++ { - _elem51 := &ResponseDetail{} - if err := _elem51.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem51), err) + _elem52 := &ResponseDetail{} + if err := _elem52.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem52), err) } - p.Details = append(p.Details, _elem51) + p.Details = append(p.Details, _elem52) } if err := iprot.ReadListEnd(); err != nil { return thrift.PrependError("error reading list end: ", err) @@ -16848,12 +17477,12 @@ func (p *ReadOnlySchedulerClient) Client_() thrift.TClient { } // Returns a summary of the jobs grouped by role. func (p *ReadOnlySchedulerClient) GetRoleSummary(ctx context.Context) (r *Response, err error) { - var _args52 ReadOnlySchedulerGetRoleSummaryArgs - var _result53 ReadOnlySchedulerGetRoleSummaryResult - if err = p.Client_().Call(ctx, "getRoleSummary", &_args52, &_result53); err != nil { + var _args53 ReadOnlySchedulerGetRoleSummaryArgs + var _result54 ReadOnlySchedulerGetRoleSummaryResult + if err = p.Client_().Call(ctx, "getRoleSummary", &_args53, &_result54); err != nil { return } - return _result53.GetSuccess(), nil + return _result54.GetSuccess(), nil } // Returns a summary of jobs, optionally only those owned by a specific role. @@ -16861,13 +17490,13 @@ func (p *ReadOnlySchedulerClient) GetRoleSummary(ctx context.Context) (r *Respon // Parameters: // - Role func (p *ReadOnlySchedulerClient) GetJobSummary(ctx context.Context, role string) (r *Response, err error) { - var _args54 ReadOnlySchedulerGetJobSummaryArgs - _args54.Role = role - var _result55 ReadOnlySchedulerGetJobSummaryResult - if err = p.Client_().Call(ctx, "getJobSummary", &_args54, &_result55); err != nil { + var _args55 ReadOnlySchedulerGetJobSummaryArgs + _args55.Role = role + var _result56 ReadOnlySchedulerGetJobSummaryResult + if err = p.Client_().Call(ctx, "getJobSummary", &_args55, &_result56); err != nil { return } - return _result55.GetSuccess(), nil + return _result56.GetSuccess(), nil } // Fetches the status of tasks. @@ -16875,13 +17504,13 @@ func (p *ReadOnlySchedulerClient) GetJobSummary(ctx context.Context, role string // Parameters: // - Query func (p *ReadOnlySchedulerClient) GetTasksStatus(ctx context.Context, query *TaskQuery) (r *Response, err error) { - var _args56 ReadOnlySchedulerGetTasksStatusArgs - _args56.Query = query - var _result57 ReadOnlySchedulerGetTasksStatusResult - if err = p.Client_().Call(ctx, "getTasksStatus", &_args56, &_result57); err != nil { + var _args57 ReadOnlySchedulerGetTasksStatusArgs + _args57.Query = query + var _result58 ReadOnlySchedulerGetTasksStatusResult + if err = p.Client_().Call(ctx, "getTasksStatus", &_args57, &_result58); err != nil { return } - return _result57.GetSuccess(), nil + return _result58.GetSuccess(), nil } // Same as getTaskStatus but without the TaskConfig.ExecutorConfig data set. 
@@ -16890,13 +17519,13 @@ func (p *ReadOnlySchedulerClient) GetTasksStatus(ctx context.Context, query *Tas // Parameters: // - Query func (p *ReadOnlySchedulerClient) GetTasksWithoutConfigs(ctx context.Context, query *TaskQuery) (r *Response, err error) { - var _args58 ReadOnlySchedulerGetTasksWithoutConfigsArgs - _args58.Query = query - var _result59 ReadOnlySchedulerGetTasksWithoutConfigsResult - if err = p.Client_().Call(ctx, "getTasksWithoutConfigs", &_args58, &_result59); err != nil { + var _args59 ReadOnlySchedulerGetTasksWithoutConfigsArgs + _args59.Query = query + var _result60 ReadOnlySchedulerGetTasksWithoutConfigsResult + if err = p.Client_().Call(ctx, "getTasksWithoutConfigs", &_args59, &_result60); err != nil { return } - return _result59.GetSuccess(), nil + return _result60.GetSuccess(), nil } // Returns user-friendly reasons (if available) for tasks retained in PENDING state. @@ -16904,13 +17533,13 @@ func (p *ReadOnlySchedulerClient) GetTasksWithoutConfigs(ctx context.Context, qu // Parameters: // - Query func (p *ReadOnlySchedulerClient) GetPendingReason(ctx context.Context, query *TaskQuery) (r *Response, err error) { - var _args60 ReadOnlySchedulerGetPendingReasonArgs - _args60.Query = query - var _result61 ReadOnlySchedulerGetPendingReasonResult - if err = p.Client_().Call(ctx, "getPendingReason", &_args60, &_result61); err != nil { + var _args61 ReadOnlySchedulerGetPendingReasonArgs + _args61.Query = query + var _result62 ReadOnlySchedulerGetPendingReasonResult + if err = p.Client_().Call(ctx, "getPendingReason", &_args61, &_result62); err != nil { return } - return _result61.GetSuccess(), nil + return _result62.GetSuccess(), nil } // Fetches the configuration summary of active tasks for the specified job. @@ -16918,13 +17547,13 @@ func (p *ReadOnlySchedulerClient) GetPendingReason(ctx context.Context, query *T // Parameters: // - Job func (p *ReadOnlySchedulerClient) GetConfigSummary(ctx context.Context, job *JobKey) (r *Response, err error) { - var _args62 ReadOnlySchedulerGetConfigSummaryArgs - _args62.Job = job - var _result63 ReadOnlySchedulerGetConfigSummaryResult - if err = p.Client_().Call(ctx, "getConfigSummary", &_args62, &_result63); err != nil { + var _args63 ReadOnlySchedulerGetConfigSummaryArgs + _args63.Job = job + var _result64 ReadOnlySchedulerGetConfigSummaryResult + if err = p.Client_().Call(ctx, "getConfigSummary", &_args63, &_result64); err != nil { return } - return _result63.GetSuccess(), nil + return _result64.GetSuccess(), nil } // Fetches the status of jobs. @@ -16933,13 +17562,13 @@ func (p *ReadOnlySchedulerClient) GetConfigSummary(ctx context.Context, job *Job // Parameters: // - OwnerRole func (p *ReadOnlySchedulerClient) GetJobs(ctx context.Context, ownerRole string) (r *Response, err error) { - var _args64 ReadOnlySchedulerGetJobsArgs - _args64.OwnerRole = ownerRole - var _result65 ReadOnlySchedulerGetJobsResult - if err = p.Client_().Call(ctx, "getJobs", &_args64, &_result65); err != nil { + var _args65 ReadOnlySchedulerGetJobsArgs + _args65.OwnerRole = ownerRole + var _result66 ReadOnlySchedulerGetJobsResult + if err = p.Client_().Call(ctx, "getJobs", &_args65, &_result66); err != nil { return } - return _result65.GetSuccess(), nil + return _result66.GetSuccess(), nil } // Fetches the quota allocated for a user. 
@@ -16947,13 +17576,13 @@ func (p *ReadOnlySchedulerClient) GetJobs(ctx context.Context, ownerRole string) // Parameters: // - OwnerRole func (p *ReadOnlySchedulerClient) GetQuota(ctx context.Context, ownerRole string) (r *Response, err error) { - var _args66 ReadOnlySchedulerGetQuotaArgs - _args66.OwnerRole = ownerRole - var _result67 ReadOnlySchedulerGetQuotaResult - if err = p.Client_().Call(ctx, "getQuota", &_args66, &_result67); err != nil { + var _args67 ReadOnlySchedulerGetQuotaArgs + _args67.OwnerRole = ownerRole + var _result68 ReadOnlySchedulerGetQuotaResult + if err = p.Client_().Call(ctx, "getQuota", &_args67, &_result68); err != nil { return } - return _result67.GetSuccess(), nil + return _result68.GetSuccess(), nil } // Populates fields in a job configuration as though it were about to be run. @@ -16962,13 +17591,13 @@ func (p *ReadOnlySchedulerClient) GetQuota(ctx context.Context, ownerRole string // Parameters: // - Description func (p *ReadOnlySchedulerClient) PopulateJobConfig(ctx context.Context, description *JobConfiguration) (r *Response, err error) { - var _args68 ReadOnlySchedulerPopulateJobConfigArgs - _args68.Description = description - var _result69 ReadOnlySchedulerPopulateJobConfigResult - if err = p.Client_().Call(ctx, "populateJobConfig", &_args68, &_result69); err != nil { + var _args69 ReadOnlySchedulerPopulateJobConfigArgs + _args69.Description = description + var _result70 ReadOnlySchedulerPopulateJobConfigResult + if err = p.Client_().Call(ctx, "populateJobConfig", &_args69, &_result70); err != nil { return } - return _result69.GetSuccess(), nil + return _result70.GetSuccess(), nil } // Gets job update summaries. @@ -16976,13 +17605,13 @@ func (p *ReadOnlySchedulerClient) PopulateJobConfig(ctx context.Context, descrip // Parameters: // - JobUpdateQuery func (p *ReadOnlySchedulerClient) GetJobUpdateSummaries(ctx context.Context, jobUpdateQuery *JobUpdateQuery) (r *Response, err error) { - var _args70 ReadOnlySchedulerGetJobUpdateSummariesArgs - _args70.JobUpdateQuery = jobUpdateQuery - var _result71 ReadOnlySchedulerGetJobUpdateSummariesResult - if err = p.Client_().Call(ctx, "getJobUpdateSummaries", &_args70, &_result71); err != nil { + var _args71 ReadOnlySchedulerGetJobUpdateSummariesArgs + _args71.JobUpdateQuery = jobUpdateQuery + var _result72 ReadOnlySchedulerGetJobUpdateSummariesResult + if err = p.Client_().Call(ctx, "getJobUpdateSummaries", &_args71, &_result72); err != nil { return } - return _result71.GetSuccess(), nil + return _result72.GetSuccess(), nil } // Gets job update details. @@ -16990,13 +17619,13 @@ func (p *ReadOnlySchedulerClient) GetJobUpdateSummaries(ctx context.Context, job // Parameters: // - Query func (p *ReadOnlySchedulerClient) GetJobUpdateDetails(ctx context.Context, query *JobUpdateQuery) (r *Response, err error) { - var _args72 ReadOnlySchedulerGetJobUpdateDetailsArgs - _args72.Query = query - var _result73 ReadOnlySchedulerGetJobUpdateDetailsResult - if err = p.Client_().Call(ctx, "getJobUpdateDetails", &_args72, &_result73); err != nil { + var _args73 ReadOnlySchedulerGetJobUpdateDetailsArgs + _args73.Query = query + var _result74 ReadOnlySchedulerGetJobUpdateDetailsResult + if err = p.Client_().Call(ctx, "getJobUpdateDetails", &_args73, &_result74); err != nil { return } - return _result73.GetSuccess(), nil + return _result74.GetSuccess(), nil } // Gets the diff between client (desired) and server (current) job states. 
@@ -17004,23 +17633,23 @@ func (p *ReadOnlySchedulerClient) GetJobUpdateDetails(ctx context.Context, query // Parameters: // - Request func (p *ReadOnlySchedulerClient) GetJobUpdateDiff(ctx context.Context, request *JobUpdateRequest) (r *Response, err error) { - var _args74 ReadOnlySchedulerGetJobUpdateDiffArgs - _args74.Request = request - var _result75 ReadOnlySchedulerGetJobUpdateDiffResult - if err = p.Client_().Call(ctx, "getJobUpdateDiff", &_args74, &_result75); err != nil { + var _args75 ReadOnlySchedulerGetJobUpdateDiffArgs + _args75.Request = request + var _result76 ReadOnlySchedulerGetJobUpdateDiffResult + if err = p.Client_().Call(ctx, "getJobUpdateDiff", &_args75, &_result76); err != nil { return } - return _result75.GetSuccess(), nil + return _result76.GetSuccess(), nil } // Gets tier configurations. func (p *ReadOnlySchedulerClient) GetTierConfigs(ctx context.Context) (r *Response, err error) { - var _args76 ReadOnlySchedulerGetTierConfigsArgs - var _result77 ReadOnlySchedulerGetTierConfigsResult - if err = p.Client_().Call(ctx, "getTierConfigs", &_args76, &_result77); err != nil { + var _args77 ReadOnlySchedulerGetTierConfigsArgs + var _result78 ReadOnlySchedulerGetTierConfigsResult + if err = p.Client_().Call(ctx, "getTierConfigs", &_args77, &_result78); err != nil { return } - return _result77.GetSuccess(), nil + return _result78.GetSuccess(), nil } type ReadOnlySchedulerProcessor struct { @@ -17043,21 +17672,21 @@ func (p *ReadOnlySchedulerProcessor) ProcessorMap() map[string]thrift.TProcessor func NewReadOnlySchedulerProcessor(handler ReadOnlyScheduler) *ReadOnlySchedulerProcessor { - self78 := &ReadOnlySchedulerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} - self78.processorMap["getRoleSummary"] = &readOnlySchedulerProcessorGetRoleSummary{handler:handler} - self78.processorMap["getJobSummary"] = &readOnlySchedulerProcessorGetJobSummary{handler:handler} - self78.processorMap["getTasksStatus"] = &readOnlySchedulerProcessorGetTasksStatus{handler:handler} - self78.processorMap["getTasksWithoutConfigs"] = &readOnlySchedulerProcessorGetTasksWithoutConfigs{handler:handler} - self78.processorMap["getPendingReason"] = &readOnlySchedulerProcessorGetPendingReason{handler:handler} - self78.processorMap["getConfigSummary"] = &readOnlySchedulerProcessorGetConfigSummary{handler:handler} - self78.processorMap["getJobs"] = &readOnlySchedulerProcessorGetJobs{handler:handler} - self78.processorMap["getQuota"] = &readOnlySchedulerProcessorGetQuota{handler:handler} - self78.processorMap["populateJobConfig"] = &readOnlySchedulerProcessorPopulateJobConfig{handler:handler} - self78.processorMap["getJobUpdateSummaries"] = &readOnlySchedulerProcessorGetJobUpdateSummaries{handler:handler} - self78.processorMap["getJobUpdateDetails"] = &readOnlySchedulerProcessorGetJobUpdateDetails{handler:handler} - self78.processorMap["getJobUpdateDiff"] = &readOnlySchedulerProcessorGetJobUpdateDiff{handler:handler} - self78.processorMap["getTierConfigs"] = &readOnlySchedulerProcessorGetTierConfigs{handler:handler} -return self78 + self79 := &ReadOnlySchedulerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} + self79.processorMap["getRoleSummary"] = &readOnlySchedulerProcessorGetRoleSummary{handler:handler} + self79.processorMap["getJobSummary"] = &readOnlySchedulerProcessorGetJobSummary{handler:handler} + self79.processorMap["getTasksStatus"] = &readOnlySchedulerProcessorGetTasksStatus{handler:handler} + 
self79.processorMap["getTasksWithoutConfigs"] = &readOnlySchedulerProcessorGetTasksWithoutConfigs{handler:handler} + self79.processorMap["getPendingReason"] = &readOnlySchedulerProcessorGetPendingReason{handler:handler} + self79.processorMap["getConfigSummary"] = &readOnlySchedulerProcessorGetConfigSummary{handler:handler} + self79.processorMap["getJobs"] = &readOnlySchedulerProcessorGetJobs{handler:handler} + self79.processorMap["getQuota"] = &readOnlySchedulerProcessorGetQuota{handler:handler} + self79.processorMap["populateJobConfig"] = &readOnlySchedulerProcessorPopulateJobConfig{handler:handler} + self79.processorMap["getJobUpdateSummaries"] = &readOnlySchedulerProcessorGetJobUpdateSummaries{handler:handler} + self79.processorMap["getJobUpdateDetails"] = &readOnlySchedulerProcessorGetJobUpdateDetails{handler:handler} + self79.processorMap["getJobUpdateDiff"] = &readOnlySchedulerProcessorGetJobUpdateDiff{handler:handler} + self79.processorMap["getTierConfigs"] = &readOnlySchedulerProcessorGetTierConfigs{handler:handler} +return self79 } func (p *ReadOnlySchedulerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { @@ -17068,12 +17697,12 @@ func (p *ReadOnlySchedulerProcessor) Process(ctx context.Context, iprot, oprot t } iprot.Skip(thrift.STRUCT) iprot.ReadMessageEnd() - x79 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) + x80 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x79.Write(oprot) + x80.Write(oprot) oprot.WriteMessageEnd() oprot.Flush(ctx) - return false, x79 + return false, x80 } @@ -20280,13 +20909,13 @@ func NewAuroraSchedulerManagerClient(c thrift.TClient) *AuroraSchedulerManagerCl // Parameters: // - Description func (p *AuroraSchedulerManagerClient) CreateJob(ctx context.Context, description *JobConfiguration) (r *Response, err error) { - var _args131 AuroraSchedulerManagerCreateJobArgs - _args131.Description = description - var _result132 AuroraSchedulerManagerCreateJobResult - if err = p.Client_().Call(ctx, "createJob", &_args131, &_result132); err != nil { + var _args132 AuroraSchedulerManagerCreateJobArgs + _args132.Description = description + var _result133 AuroraSchedulerManagerCreateJobResult + if err = p.Client_().Call(ctx, "createJob", &_args132, &_result133); err != nil { return } - return _result132.GetSuccess(), nil + return _result133.GetSuccess(), nil } // Enters a job into the cron schedule, without actually starting the job. @@ -20296,13 +20925,13 @@ func (p *AuroraSchedulerManagerClient) CreateJob(ctx context.Context, descriptio // Parameters: // - Description func (p *AuroraSchedulerManagerClient) ScheduleCronJob(ctx context.Context, description *JobConfiguration) (r *Response, err error) { - var _args133 AuroraSchedulerManagerScheduleCronJobArgs - _args133.Description = description - var _result134 AuroraSchedulerManagerScheduleCronJobResult - if err = p.Client_().Call(ctx, "scheduleCronJob", &_args133, &_result134); err != nil { + var _args134 AuroraSchedulerManagerScheduleCronJobArgs + _args134.Description = description + var _result135 AuroraSchedulerManagerScheduleCronJobResult + if err = p.Client_().Call(ctx, "scheduleCronJob", &_args134, &_result135); err != nil { return } - return _result134.GetSuccess(), nil + return _result135.GetSuccess(), nil } // Removes a job from the cron schedule. 
The request will be denied if the job was not previously @@ -20311,13 +20940,13 @@ func (p *AuroraSchedulerManagerClient) ScheduleCronJob(ctx context.Context, desc // Parameters: // - Job func (p *AuroraSchedulerManagerClient) DescheduleCronJob(ctx context.Context, job *JobKey) (r *Response, err error) { - var _args135 AuroraSchedulerManagerDescheduleCronJobArgs - _args135.Job = job - var _result136 AuroraSchedulerManagerDescheduleCronJobResult - if err = p.Client_().Call(ctx, "descheduleCronJob", &_args135, &_result136); err != nil { + var _args136 AuroraSchedulerManagerDescheduleCronJobArgs + _args136.Job = job + var _result137 AuroraSchedulerManagerDescheduleCronJobResult + if err = p.Client_().Call(ctx, "descheduleCronJob", &_args136, &_result137); err != nil { return } - return _result136.GetSuccess(), nil + return _result137.GetSuccess(), nil } // Starts a cron job immediately. The request will be denied if the specified job does not @@ -20326,13 +20955,13 @@ func (p *AuroraSchedulerManagerClient) DescheduleCronJob(ctx context.Context, jo // Parameters: // - Job func (p *AuroraSchedulerManagerClient) StartCronJob(ctx context.Context, job *JobKey) (r *Response, err error) { - var _args137 AuroraSchedulerManagerStartCronJobArgs - _args137.Job = job - var _result138 AuroraSchedulerManagerStartCronJobResult - if err = p.Client_().Call(ctx, "startCronJob", &_args137, &_result138); err != nil { + var _args138 AuroraSchedulerManagerStartCronJobArgs + _args138.Job = job + var _result139 AuroraSchedulerManagerStartCronJobResult + if err = p.Client_().Call(ctx, "startCronJob", &_args138, &_result139); err != nil { return } - return _result138.GetSuccess(), nil + return _result139.GetSuccess(), nil } // Restarts a batch of shards. @@ -20341,14 +20970,14 @@ func (p *AuroraSchedulerManagerClient) StartCronJob(ctx context.Context, job *Jo // - Job // - ShardIds func (p *AuroraSchedulerManagerClient) RestartShards(ctx context.Context, job *JobKey, shardIds []int32) (r *Response, err error) { - var _args139 AuroraSchedulerManagerRestartShardsArgs - _args139.Job = job - _args139.ShardIds = shardIds - var _result140 AuroraSchedulerManagerRestartShardsResult - if err = p.Client_().Call(ctx, "restartShards", &_args139, &_result140); err != nil { + var _args140 AuroraSchedulerManagerRestartShardsArgs + _args140.Job = job + _args140.ShardIds = shardIds + var _result141 AuroraSchedulerManagerRestartShardsResult + if err = p.Client_().Call(ctx, "restartShards", &_args140, &_result141); err != nil { return } - return _result140.GetSuccess(), nil + return _result141.GetSuccess(), nil } // Initiates a kill on tasks. 
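Reviewer note: the cron and shard hunks above are the same mechanical +1 shift; callers recompile unchanged. A hedged sketch of a restart helper built on the RestartShards signature in this hunk — the job coordinates are assumptions, and imports/client setup follow the earlier sketch:

// restartCanaries restarts the first two shards of a (hypothetical) job.
func restartCanaries(ctx context.Context, client *aurora.AuroraSchedulerManagerClient) error {
	job := aurora.NewJobKey()
	job.Role, job.Environment, job.Name = "www-data", "prod", "hello" // assumed job key
	resp, err := client.RestartShards(ctx, job, []int32{0, 1})
	if err != nil {
		return err
	}
	if resp.GetResponseCode() != aurora.ResponseCode_OK {
		return fmt.Errorf("restartShards failed: %v", resp.GetResponseCode())
	}
	return nil
}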
@@ -20358,15 +20987,15 @@ func (p *AuroraSchedulerManagerClient) RestartShards(ctx context.Context, job *J // - Instances // - Message func (p *AuroraSchedulerManagerClient) KillTasks(ctx context.Context, job *JobKey, instances []int32, message string) (r *Response, err error) { - var _args141 AuroraSchedulerManagerKillTasksArgs - _args141.Job = job - _args141.Instances = instances - _args141.Message = message - var _result142 AuroraSchedulerManagerKillTasksResult - if err = p.Client_().Call(ctx, "killTasks", &_args141, &_result142); err != nil { + var _args142 AuroraSchedulerManagerKillTasksArgs + _args142.Job = job + _args142.Instances = instances + _args142.Message = message + var _result143 AuroraSchedulerManagerKillTasksResult + if err = p.Client_().Call(ctx, "killTasks", &_args142, &_result143); err != nil { return } - return _result142.GetSuccess(), nil + return _result143.GetSuccess(), nil } // Adds new instances with the TaskConfig of the existing instance pointed by the key. @@ -20375,14 +21004,14 @@ func (p *AuroraSchedulerManagerClient) KillTasks(ctx context.Context, job *JobKe // - Key // - Count func (p *AuroraSchedulerManagerClient) AddInstances(ctx context.Context, key *InstanceKey, count int32) (r *Response, err error) { - var _args143 AuroraSchedulerManagerAddInstancesArgs - _args143.Key = key - _args143.Count = count - var _result144 AuroraSchedulerManagerAddInstancesResult - if err = p.Client_().Call(ctx, "addInstances", &_args143, &_result144); err != nil { + var _args144 AuroraSchedulerManagerAddInstancesArgs + _args144.Key = key + _args144.Count = count + var _result145 AuroraSchedulerManagerAddInstancesResult + if err = p.Client_().Call(ctx, "addInstances", &_args144, &_result145); err != nil { return } - return _result144.GetSuccess(), nil + return _result145.GetSuccess(), nil } // Replaces the template (configuration) for the existing cron job. @@ -20391,13 +21020,13 @@ func (p *AuroraSchedulerManagerClient) AddInstances(ctx context.Context, key *In // Parameters: // - Config func (p *AuroraSchedulerManagerClient) ReplaceCronTemplate(ctx context.Context, config *JobConfiguration) (r *Response, err error) { - var _args145 AuroraSchedulerManagerReplaceCronTemplateArgs - _args145.Config = config - var _result146 AuroraSchedulerManagerReplaceCronTemplateResult - if err = p.Client_().Call(ctx, "replaceCronTemplate", &_args145, &_result146); err != nil { + var _args146 AuroraSchedulerManagerReplaceCronTemplateArgs + _args146.Config = config + var _result147 AuroraSchedulerManagerReplaceCronTemplateResult + if err = p.Client_().Call(ctx, "replaceCronTemplate", &_args146, &_result147); err != nil { return } - return _result146.GetSuccess(), nil + return _result147.GetSuccess(), nil } // Starts update of the existing service job. @@ -20406,14 +21035,14 @@ func (p *AuroraSchedulerManagerClient) ReplaceCronTemplate(ctx context.Context, // - Request: A description of how to change the job. // - Message: A user-specified message to include with the induced job update state change. 
func (p *AuroraSchedulerManagerClient) StartJobUpdate(ctx context.Context, request *JobUpdateRequest, message string) (r *Response, err error) { - var _args147 AuroraSchedulerManagerStartJobUpdateArgs - _args147.Request = request - _args147.Message = message - var _result148 AuroraSchedulerManagerStartJobUpdateResult - if err = p.Client_().Call(ctx, "startJobUpdate", &_args147, &_result148); err != nil { + var _args148 AuroraSchedulerManagerStartJobUpdateArgs + _args148.Request = request + _args148.Message = message + var _result149 AuroraSchedulerManagerStartJobUpdateResult + if err = p.Client_().Call(ctx, "startJobUpdate", &_args148, &_result149); err != nil { return } - return _result148.GetSuccess(), nil + return _result149.GetSuccess(), nil } // Pauses the specified job update. Can be resumed by resumeUpdate call. @@ -20422,14 +21051,14 @@ func (p *AuroraSchedulerManagerClient) StartJobUpdate(ctx context.Context, reque // - Key: The update to pause. // - Message: A user-specified message to include with the induced job update state change. func (p *AuroraSchedulerManagerClient) PauseJobUpdate(ctx context.Context, key *JobUpdateKey, message string) (r *Response, err error) { - var _args149 AuroraSchedulerManagerPauseJobUpdateArgs - _args149.Key = key - _args149.Message = message - var _result150 AuroraSchedulerManagerPauseJobUpdateResult - if err = p.Client_().Call(ctx, "pauseJobUpdate", &_args149, &_result150); err != nil { + var _args150 AuroraSchedulerManagerPauseJobUpdateArgs + _args150.Key = key + _args150.Message = message + var _result151 AuroraSchedulerManagerPauseJobUpdateResult + if err = p.Client_().Call(ctx, "pauseJobUpdate", &_args150, &_result151); err != nil { return } - return _result150.GetSuccess(), nil + return _result151.GetSuccess(), nil } // Resumes progress of a previously paused job update. @@ -20438,14 +21067,14 @@ func (p *AuroraSchedulerManagerClient) PauseJobUpdate(ctx context.Context, key * // - Key: The update to resume. // - Message: A user-specified message to include with the induced job update state change. func (p *AuroraSchedulerManagerClient) ResumeJobUpdate(ctx context.Context, key *JobUpdateKey, message string) (r *Response, err error) { - var _args151 AuroraSchedulerManagerResumeJobUpdateArgs - _args151.Key = key - _args151.Message = message - var _result152 AuroraSchedulerManagerResumeJobUpdateResult - if err = p.Client_().Call(ctx, "resumeJobUpdate", &_args151, &_result152); err != nil { + var _args152 AuroraSchedulerManagerResumeJobUpdateArgs + _args152.Key = key + _args152.Message = message + var _result153 AuroraSchedulerManagerResumeJobUpdateResult + if err = p.Client_().Call(ctx, "resumeJobUpdate", &_args152, &_result153); err != nil { return } - return _result152.GetSuccess(), nil + return _result153.GetSuccess(), nil } // Permanently aborts the job update. Does not remove the update history. @@ -20454,14 +21083,14 @@ func (p *AuroraSchedulerManagerClient) ResumeJobUpdate(ctx context.Context, key // - Key: The update to abort. // - Message: A user-specified message to include with the induced job update state change. 
func (p *AuroraSchedulerManagerClient) AbortJobUpdate(ctx context.Context, key *JobUpdateKey, message string) (r *Response, err error) { - var _args153 AuroraSchedulerManagerAbortJobUpdateArgs - _args153.Key = key - _args153.Message = message - var _result154 AuroraSchedulerManagerAbortJobUpdateResult - if err = p.Client_().Call(ctx, "abortJobUpdate", &_args153, &_result154); err != nil { + var _args154 AuroraSchedulerManagerAbortJobUpdateArgs + _args154.Key = key + _args154.Message = message + var _result155 AuroraSchedulerManagerAbortJobUpdateResult + if err = p.Client_().Call(ctx, "abortJobUpdate", &_args154, &_result155); err != nil { return } - return _result154.GetSuccess(), nil + return _result155.GetSuccess(), nil } // Rollbacks the specified active job update to the initial state. @@ -20470,14 +21099,14 @@ func (p *AuroraSchedulerManagerClient) AbortJobUpdate(ctx context.Context, key * // - Key: The update to rollback. // - Message: A user-specified message to include with the induced job update state change. func (p *AuroraSchedulerManagerClient) RollbackJobUpdate(ctx context.Context, key *JobUpdateKey, message string) (r *Response, err error) { - var _args155 AuroraSchedulerManagerRollbackJobUpdateArgs - _args155.Key = key - _args155.Message = message - var _result156 AuroraSchedulerManagerRollbackJobUpdateResult - if err = p.Client_().Call(ctx, "rollbackJobUpdate", &_args155, &_result156); err != nil { + var _args156 AuroraSchedulerManagerRollbackJobUpdateArgs + _args156.Key = key + _args156.Message = message + var _result157 AuroraSchedulerManagerRollbackJobUpdateResult + if err = p.Client_().Call(ctx, "rollbackJobUpdate", &_args156, &_result157); err != nil { return } - return _result156.GetSuccess(), nil + return _result157.GetSuccess(), nil } // Allows progress of the job update in case blockIfNoPulsesAfterMs is specified in @@ -20487,13 +21116,13 @@ func (p *AuroraSchedulerManagerClient) RollbackJobUpdate(ctx context.Context, ke // Parameters: // - Key func (p *AuroraSchedulerManagerClient) PulseJobUpdate(ctx context.Context, key *JobUpdateKey) (r *Response, err error) { - var _args157 AuroraSchedulerManagerPulseJobUpdateArgs - _args157.Key = key - var _result158 AuroraSchedulerManagerPulseJobUpdateResult - if err = p.Client_().Call(ctx, "pulseJobUpdate", &_args157, &_result158); err != nil { + var _args158 AuroraSchedulerManagerPulseJobUpdateArgs + _args158.Key = key + var _result159 AuroraSchedulerManagerPulseJobUpdateResult + if err = p.Client_().Call(ctx, "pulseJobUpdate", &_args158, &_result159); err != nil { return } - return _result158.GetSuccess(), nil + return _result159.GetSuccess(), nil } type AuroraSchedulerManagerProcessor struct { @@ -20501,22 +21130,22 @@ type AuroraSchedulerManagerProcessor struct { } func NewAuroraSchedulerManagerProcessor(handler AuroraSchedulerManager) *AuroraSchedulerManagerProcessor { - self159 := &AuroraSchedulerManagerProcessor{NewReadOnlySchedulerProcessor(handler)} - self159.AddToProcessorMap("createJob", &auroraSchedulerManagerProcessorCreateJob{handler:handler}) - self159.AddToProcessorMap("scheduleCronJob", &auroraSchedulerManagerProcessorScheduleCronJob{handler:handler}) - self159.AddToProcessorMap("descheduleCronJob", &auroraSchedulerManagerProcessorDescheduleCronJob{handler:handler}) - self159.AddToProcessorMap("startCronJob", &auroraSchedulerManagerProcessorStartCronJob{handler:handler}) - self159.AddToProcessorMap("restartShards", &auroraSchedulerManagerProcessorRestartShards{handler:handler}) - 
self159.AddToProcessorMap("killTasks", &auroraSchedulerManagerProcessorKillTasks{handler:handler}) - self159.AddToProcessorMap("addInstances", &auroraSchedulerManagerProcessorAddInstances{handler:handler}) - self159.AddToProcessorMap("replaceCronTemplate", &auroraSchedulerManagerProcessorReplaceCronTemplate{handler:handler}) - self159.AddToProcessorMap("startJobUpdate", &auroraSchedulerManagerProcessorStartJobUpdate{handler:handler}) - self159.AddToProcessorMap("pauseJobUpdate", &auroraSchedulerManagerProcessorPauseJobUpdate{handler:handler}) - self159.AddToProcessorMap("resumeJobUpdate", &auroraSchedulerManagerProcessorResumeJobUpdate{handler:handler}) - self159.AddToProcessorMap("abortJobUpdate", &auroraSchedulerManagerProcessorAbortJobUpdate{handler:handler}) - self159.AddToProcessorMap("rollbackJobUpdate", &auroraSchedulerManagerProcessorRollbackJobUpdate{handler:handler}) - self159.AddToProcessorMap("pulseJobUpdate", &auroraSchedulerManagerProcessorPulseJobUpdate{handler:handler}) - return self159 + self160 := &AuroraSchedulerManagerProcessor{NewReadOnlySchedulerProcessor(handler)} + self160.AddToProcessorMap("createJob", &auroraSchedulerManagerProcessorCreateJob{handler:handler}) + self160.AddToProcessorMap("scheduleCronJob", &auroraSchedulerManagerProcessorScheduleCronJob{handler:handler}) + self160.AddToProcessorMap("descheduleCronJob", &auroraSchedulerManagerProcessorDescheduleCronJob{handler:handler}) + self160.AddToProcessorMap("startCronJob", &auroraSchedulerManagerProcessorStartCronJob{handler:handler}) + self160.AddToProcessorMap("restartShards", &auroraSchedulerManagerProcessorRestartShards{handler:handler}) + self160.AddToProcessorMap("killTasks", &auroraSchedulerManagerProcessorKillTasks{handler:handler}) + self160.AddToProcessorMap("addInstances", &auroraSchedulerManagerProcessorAddInstances{handler:handler}) + self160.AddToProcessorMap("replaceCronTemplate", &auroraSchedulerManagerProcessorReplaceCronTemplate{handler:handler}) + self160.AddToProcessorMap("startJobUpdate", &auroraSchedulerManagerProcessorStartJobUpdate{handler:handler}) + self160.AddToProcessorMap("pauseJobUpdate", &auroraSchedulerManagerProcessorPauseJobUpdate{handler:handler}) + self160.AddToProcessorMap("resumeJobUpdate", &auroraSchedulerManagerProcessorResumeJobUpdate{handler:handler}) + self160.AddToProcessorMap("abortJobUpdate", &auroraSchedulerManagerProcessorAbortJobUpdate{handler:handler}) + self160.AddToProcessorMap("rollbackJobUpdate", &auroraSchedulerManagerProcessorRollbackJobUpdate{handler:handler}) + self160.AddToProcessorMap("pulseJobUpdate", &auroraSchedulerManagerProcessorPulseJobUpdate{handler:handler}) + return self160 } type auroraSchedulerManagerProcessorCreateJob struct { @@ -22081,13 +22710,13 @@ func (p *AuroraSchedulerManagerRestartShardsArgs) ReadField3(iprot thrift.TProt tSet := make([]int32, 0, size) p.ShardIds = tSet for i := 0; i < size; i ++ { -var _elem160 int32 +var _elem161 int32 if v, err := iprot.ReadI32(); err != nil { return thrift.PrependError("error reading field 0: ", err) } else { - _elem160 = v + _elem161 = v } - p.ShardIds = append(p.ShardIds, _elem160) + p.ShardIds = append(p.ShardIds, _elem161) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -22360,13 +22989,13 @@ func (p *AuroraSchedulerManagerKillTasksArgs) ReadField5(iprot thrift.TProtocol tSet := make([]int32, 0, size) p.Instances = tSet for i := 0; i < size; i ++ { -var _elem161 int32 +var _elem162 int32 if v, err := iprot.ReadI32(); err != nil { 
return thrift.PrependError("error reading field 0: ", err) } else { - _elem161 = v + _elem162 = v } - p.Instances = append(p.Instances, _elem161) + p.Instances = append(p.Instances, _elem162) } if err := iprot.ReadSetEnd(); err != nil { return thrift.PrependError("error reading set end: ", err) @@ -24467,14 +25096,14 @@ func NewAuroraAdminClient(c thrift.TClient) *AuroraAdminClient { // - OwnerRole // - Quota func (p *AuroraAdminClient) SetQuota(ctx context.Context, ownerRole string, quota *ResourceAggregate) (r *Response, err error) { - var _args316 AuroraAdminSetQuotaArgs - _args316.OwnerRole = ownerRole - _args316.Quota = quota - var _result317 AuroraAdminSetQuotaResult - if err = p.Client_().Call(ctx, "setQuota", &_args316, &_result317); err != nil { + var _args317 AuroraAdminSetQuotaArgs + _args317.OwnerRole = ownerRole + _args317.Quota = quota + var _result318 AuroraAdminSetQuotaResult + if err = p.Client_().Call(ctx, "setQuota", &_args317, &_result318); err != nil { return } - return _result317.GetSuccess(), nil + return _result318.GetSuccess(), nil } // Forces a task into a specific state. This does not guarantee the task will enter the given @@ -24485,34 +25114,34 @@ func (p *AuroraAdminClient) SetQuota(ctx context.Context, ownerRole string, quot // - TaskId // - Status func (p *AuroraAdminClient) ForceTaskState(ctx context.Context, taskId string, status ScheduleStatus) (r *Response, err error) { - var _args318 AuroraAdminForceTaskStateArgs - _args318.TaskId = taskId - _args318.Status = status - var _result319 AuroraAdminForceTaskStateResult - if err = p.Client_().Call(ctx, "forceTaskState", &_args318, &_result319); err != nil { + var _args319 AuroraAdminForceTaskStateArgs + _args319.TaskId = taskId + _args319.Status = status + var _result320 AuroraAdminForceTaskStateResult + if err = p.Client_().Call(ctx, "forceTaskState", &_args319, &_result320); err != nil { return } - return _result319.GetSuccess(), nil + return _result320.GetSuccess(), nil } // Immediately writes a storage snapshot to disk. func (p *AuroraAdminClient) PerformBackup(ctx context.Context) (r *Response, err error) { - var _args320 AuroraAdminPerformBackupArgs - var _result321 AuroraAdminPerformBackupResult - if err = p.Client_().Call(ctx, "performBackup", &_args320, &_result321); err != nil { + var _args321 AuroraAdminPerformBackupArgs + var _result322 AuroraAdminPerformBackupResult + if err = p.Client_().Call(ctx, "performBackup", &_args321, &_result322); err != nil { return } - return _result321.GetSuccess(), nil + return _result322.GetSuccess(), nil } // Lists backups that are available for recovery. func (p *AuroraAdminClient) ListBackups(ctx context.Context) (r *Response, err error) { - var _args322 AuroraAdminListBackupsArgs - var _result323 AuroraAdminListBackupsResult - if err = p.Client_().Call(ctx, "listBackups", &_args322, &_result323); err != nil { + var _args323 AuroraAdminListBackupsArgs + var _result324 AuroraAdminListBackupsResult + if err = p.Client_().Call(ctx, "listBackups", &_args323, &_result324); err != nil { return } - return _result323.GetSuccess(), nil + return _result324.GetSuccess(), nil } // Loads a backup to an in-memory storage. This must precede all other recovery operations. 
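Reviewer note: the admin backup calls pick up the same renumbering. Their intended flow — force a snapshot to disk, then enumerate what is available for recovery — looks roughly like this sketch (client construction as in the first example; where the backup IDs sit inside the generic Response payload is an assumption from the generated getters, not from these hunks):

// backupAndList forces a backup, then prints what can be staged for recovery.
func backupAndList(ctx context.Context, admin *aurora.AuroraAdminClient) error {
	if _, err := admin.PerformBackup(ctx); err != nil {
		return err
	}
	resp, err := admin.ListBackups(ctx)
	if err != nil {
		return err
	}
	// Backup IDs ride back inside the generic Response result payload.
	fmt.Printf("available backups: %+v\n", resp.GetResult_())
	return nil
}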
@@ -24520,13 +25149,13 @@ func (p *AuroraAdminClient) ListBackups(ctx context.Context) (r *Response, err e // Parameters: // - BackupId func (p *AuroraAdminClient) StageRecovery(ctx context.Context, backupId string) (r *Response, err error) { - var _args324 AuroraAdminStageRecoveryArgs - _args324.BackupId = backupId - var _result325 AuroraAdminStageRecoveryResult - if err = p.Client_().Call(ctx, "stageRecovery", &_args324, &_result325); err != nil { + var _args325 AuroraAdminStageRecoveryArgs + _args325.BackupId = backupId + var _result326 AuroraAdminStageRecoveryResult + if err = p.Client_().Call(ctx, "stageRecovery", &_args325, &_result326); err != nil { return } - return _result325.GetSuccess(), nil + return _result326.GetSuccess(), nil } // Queries for tasks in a staged recovery. @@ -24534,13 +25163,13 @@ func (p *AuroraAdminClient) StageRecovery(ctx context.Context, backupId string) // Parameters: // - Query func (p *AuroraAdminClient) QueryRecovery(ctx context.Context, query *TaskQuery) (r *Response, err error) { - var _args326 AuroraAdminQueryRecoveryArgs - _args326.Query = query - var _result327 AuroraAdminQueryRecoveryResult - if err = p.Client_().Call(ctx, "queryRecovery", &_args326, &_result327); err != nil { + var _args327 AuroraAdminQueryRecoveryArgs + _args327.Query = query + var _result328 AuroraAdminQueryRecoveryResult + if err = p.Client_().Call(ctx, "queryRecovery", &_args327, &_result328); err != nil { return } - return _result327.GetSuccess(), nil + return _result328.GetSuccess(), nil } // Deletes tasks from a staged recovery. @@ -24548,33 +25177,33 @@ func (p *AuroraAdminClient) QueryRecovery(ctx context.Context, query *TaskQuery) // Parameters: // - Query func (p *AuroraAdminClient) DeleteRecoveryTasks(ctx context.Context, query *TaskQuery) (r *Response, err error) { - var _args328 AuroraAdminDeleteRecoveryTasksArgs - _args328.Query = query - var _result329 AuroraAdminDeleteRecoveryTasksResult - if err = p.Client_().Call(ctx, "deleteRecoveryTasks", &_args328, &_result329); err != nil { + var _args329 AuroraAdminDeleteRecoveryTasksArgs + _args329.Query = query + var _result330 AuroraAdminDeleteRecoveryTasksResult + if err = p.Client_().Call(ctx, "deleteRecoveryTasks", &_args329, &_result330); err != nil { return } - return _result329.GetSuccess(), nil + return _result330.GetSuccess(), nil } // Commits a staged recovery, completely replacing the previous storage state. func (p *AuroraAdminClient) CommitRecovery(ctx context.Context) (r *Response, err error) { - var _args330 AuroraAdminCommitRecoveryArgs - var _result331 AuroraAdminCommitRecoveryResult - if err = p.Client_().Call(ctx, "commitRecovery", &_args330, &_result331); err != nil { + var _args331 AuroraAdminCommitRecoveryArgs + var _result332 AuroraAdminCommitRecoveryResult + if err = p.Client_().Call(ctx, "commitRecovery", &_args331, &_result332); err != nil { return } - return _result331.GetSuccess(), nil + return _result332.GetSuccess(), nil } // Unloads (aborts) a staged recovery. 
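Reviewer note: read together, the recovery RPCs form a small staged workflow — load a backup, optionally inspect or prune it, then commit (or unload to abort). A hedged sketch using only the signatures in these hunks; backupID would come from ListBackups above:

// recoverFromBackup stages a backup, sanity-checks it, and commits it.
func recoverFromBackup(ctx context.Context, admin *aurora.AuroraAdminClient, backupID string) error {
	if _, err := admin.StageRecovery(ctx, backupID); err != nil {
		return err
	}
	// An empty TaskQuery matches every task in the staged snapshot.
	if _, err := admin.QueryRecovery(ctx, aurora.NewTaskQuery()); err != nil {
		// Abort rather than commit a recovery we could not inspect.
		_, _ = admin.UnloadRecovery(ctx)
		return err
	}
	_, err := admin.CommitRecovery(ctx)
	return err
}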
func (p *AuroraAdminClient) UnloadRecovery(ctx context.Context) (r *Response, err error) { - var _args332 AuroraAdminUnloadRecoveryArgs - var _result333 AuroraAdminUnloadRecoveryResult - if err = p.Client_().Call(ctx, "unloadRecovery", &_args332, &_result333); err != nil { + var _args333 AuroraAdminUnloadRecoveryArgs + var _result334 AuroraAdminUnloadRecoveryResult + if err = p.Client_().Call(ctx, "unloadRecovery", &_args333, &_result334); err != nil { return } - return _result333.GetSuccess(), nil + return _result334.GetSuccess(), nil } // Put the given hosts into maintenance mode. @@ -24582,13 +25211,13 @@ func (p *AuroraAdminClient) UnloadRecovery(ctx context.Context) (r *Response, er // Parameters: // - Hosts func (p *AuroraAdminClient) StartMaintenance(ctx context.Context, hosts *Hosts) (r *Response, err error) { - var _args334 AuroraAdminStartMaintenanceArgs - _args334.Hosts = hosts - var _result335 AuroraAdminStartMaintenanceResult - if err = p.Client_().Call(ctx, "startMaintenance", &_args334, &_result335); err != nil { + var _args335 AuroraAdminStartMaintenanceArgs + _args335.Hosts = hosts + var _result336 AuroraAdminStartMaintenanceResult + if err = p.Client_().Call(ctx, "startMaintenance", &_args335, &_result336); err != nil { return } - return _result335.GetSuccess(), nil + return _result336.GetSuccess(), nil } // Ask scheduler to begin moving tasks scheduled on given hosts. @@ -24596,13 +25225,13 @@ func (p *AuroraAdminClient) StartMaintenance(ctx context.Context, hosts *Hosts) // Parameters: // - Hosts func (p *AuroraAdminClient) DrainHosts(ctx context.Context, hosts *Hosts) (r *Response, err error) { - var _args336 AuroraAdminDrainHostsArgs - _args336.Hosts = hosts - var _result337 AuroraAdminDrainHostsResult - if err = p.Client_().Call(ctx, "drainHosts", &_args336, &_result337); err != nil { + var _args337 AuroraAdminDrainHostsArgs + _args337.Hosts = hosts + var _result338 AuroraAdminDrainHostsResult + if err = p.Client_().Call(ctx, "drainHosts", &_args337, &_result338); err != nil { return } - return _result337.GetSuccess(), nil + return _result338.GetSuccess(), nil } // Retrieve the current maintenance states for a group of hosts. @@ -24610,13 +25239,13 @@ func (p *AuroraAdminClient) DrainHosts(ctx context.Context, hosts *Hosts) (r *Re // Parameters: // - Hosts func (p *AuroraAdminClient) MaintenanceStatus(ctx context.Context, hosts *Hosts) (r *Response, err error) { - var _args338 AuroraAdminMaintenanceStatusArgs - _args338.Hosts = hosts - var _result339 AuroraAdminMaintenanceStatusResult - if err = p.Client_().Call(ctx, "maintenanceStatus", &_args338, &_result339); err != nil { + var _args339 AuroraAdminMaintenanceStatusArgs + _args339.Hosts = hosts + var _result340 AuroraAdminMaintenanceStatusResult + if err = p.Client_().Call(ctx, "maintenanceStatus", &_args339, &_result340); err != nil { return } - return _result339.GetSuccess(), nil + return _result340.GetSuccess(), nil } // Set the given hosts back into serving mode. 
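Reviewer note: the maintenance trio is likewise untouched at the API surface. A drain-and-restore sketch; the host name is an assumption, and real code would poll MaintenanceStatus rather than call it once:

// drainHost drains a single agent, checks its state once, and restores it.
func drainHost(ctx context.Context, admin *aurora.AuroraAdminClient, host string) error {
	hosts := aurora.NewHosts()
	hosts.HostNames = []string{host} // thrift set<string> is a Go slice in these bindings
	if _, err := admin.DrainHosts(ctx, hosts); err != nil {
		return err
	}
	if _, err := admin.MaintenanceStatus(ctx, hosts); err != nil {
		return err
	}
	_, err := admin.EndMaintenance(ctx, hosts)
	return err
}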
@@ -24624,13 +25253,13 @@ func (p *AuroraAdminClient) MaintenanceStatus(ctx context.Context, hosts *Hosts) // Parameters: // - Hosts func (p *AuroraAdminClient) EndMaintenance(ctx context.Context, hosts *Hosts) (r *Response, err error) { - var _args340 AuroraAdminEndMaintenanceArgs - _args340.Hosts = hosts - var _result341 AuroraAdminEndMaintenanceResult - if err = p.Client_().Call(ctx, "endMaintenance", &_args340, &_result341); err != nil { + var _args341 AuroraAdminEndMaintenanceArgs + _args341.Hosts = hosts + var _result342 AuroraAdminEndMaintenanceResult + if err = p.Client_().Call(ctx, "endMaintenance", &_args341, &_result342); err != nil { return } - return _result341.GetSuccess(), nil + return _result342.GetSuccess(), nil } // Ask scheduler to put hosts into DRAINING mode and move scheduled tasks off of the hosts @@ -24642,25 +25271,25 @@ func (p *AuroraAdminClient) EndMaintenance(ctx context.Context, hosts *Hosts) (r // - DefaultSlaPolicy // - TimeoutSecs func (p *AuroraAdminClient) SlaDrainHosts(ctx context.Context, hosts *Hosts, defaultSlaPolicy *SlaPolicy, timeoutSecs int64) (r *Response, err error) { - var _args342 AuroraAdminSlaDrainHostsArgs - _args342.Hosts = hosts - _args342.DefaultSlaPolicy = defaultSlaPolicy - _args342.TimeoutSecs = timeoutSecs - var _result343 AuroraAdminSlaDrainHostsResult - if err = p.Client_().Call(ctx, "slaDrainHosts", &_args342, &_result343); err != nil { + var _args343 AuroraAdminSlaDrainHostsArgs + _args343.Hosts = hosts + _args343.DefaultSlaPolicy = defaultSlaPolicy + _args343.TimeoutSecs = timeoutSecs + var _result344 AuroraAdminSlaDrainHostsResult + if err = p.Client_().Call(ctx, "slaDrainHosts", &_args343, &_result344); err != nil { return } - return _result343.GetSuccess(), nil + return _result344.GetSuccess(), nil } // Start a storage snapshot and block until it completes. func (p *AuroraAdminClient) Snapshot(ctx context.Context) (r *Response, err error) { - var _args344 AuroraAdminSnapshotArgs - var _result345 AuroraAdminSnapshotResult - if err = p.Client_().Call(ctx, "snapshot", &_args344, &_result345); err != nil { + var _args345 AuroraAdminSnapshotArgs + var _result346 AuroraAdminSnapshotResult + if err = p.Client_().Call(ctx, "snapshot", &_args345, &_result346); err != nil { return } - return _result345.GetSuccess(), nil + return _result346.GetSuccess(), nil } // Tell scheduler to trigger an explicit task reconciliation with the given settings. @@ -24668,23 +25297,23 @@ func (p *AuroraAdminClient) Snapshot(ctx context.Context) (r *Response, err erro // Parameters: // - Settings func (p *AuroraAdminClient) TriggerExplicitTaskReconciliation(ctx context.Context, settings *ExplicitReconciliationSettings) (r *Response, err error) { - var _args346 AuroraAdminTriggerExplicitTaskReconciliationArgs - _args346.Settings = settings - var _result347 AuroraAdminTriggerExplicitTaskReconciliationResult - if err = p.Client_().Call(ctx, "triggerExplicitTaskReconciliation", &_args346, &_result347); err != nil { + var _args347 AuroraAdminTriggerExplicitTaskReconciliationArgs + _args347.Settings = settings + var _result348 AuroraAdminTriggerExplicitTaskReconciliationResult + if err = p.Client_().Call(ctx, "triggerExplicitTaskReconciliation", &_args347, &_result348); err != nil { return } - return _result347.GetSuccess(), nil + return _result348.GetSuccess(), nil } // Tell scheduler to trigger an implicit task reconciliation. 
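Reviewer note: SlaDrainHosts is the SLA-aware variant of the drain above — defaultSlaPolicy appears to apply to tasks that carry no policy of their own, with timeoutSecs as the escape hatch. Sketch only; the concrete SlaPolicy union members are defined elsewhere in this file, so an empty policy and a 15-minute timeout stand in as assumptions:

// slaDrain drains hosts while respecting task SLAs; policy contents assumed.
func slaDrain(ctx context.Context, admin *aurora.AuroraAdminClient, hosts *aurora.Hosts) error {
	policy := aurora.NewSlaPolicy() // fill in a concrete policy member in real code
	const timeoutSecs = 900         // assumed 15-minute forced-drain timeout
	_, err := admin.SlaDrainHosts(ctx, hosts, policy, timeoutSecs)
	return err
}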
func (p *AuroraAdminClient) TriggerImplicitTaskReconciliation(ctx context.Context) (r *Response, err error) { - var _args348 AuroraAdminTriggerImplicitTaskReconciliationArgs - var _result349 AuroraAdminTriggerImplicitTaskReconciliationResult - if err = p.Client_().Call(ctx, "triggerImplicitTaskReconciliation", &_args348, &_result349); err != nil { + var _args349 AuroraAdminTriggerImplicitTaskReconciliationArgs + var _result350 AuroraAdminTriggerImplicitTaskReconciliationResult + if err = p.Client_().Call(ctx, "triggerImplicitTaskReconciliation", &_args349, &_result350); err != nil { return } - return _result349.GetSuccess(), nil + return _result350.GetSuccess(), nil } // Force prune any (terminal) tasks that match the query. If no statuses are supplied with the @@ -24694,13 +25323,13 @@ func (p *AuroraAdminClient) TriggerImplicitTaskReconciliation(ctx context.Contex // Parameters: // - Query func (p *AuroraAdminClient) PruneTasks(ctx context.Context, query *TaskQuery) (r *Response, err error) { - var _args350 AuroraAdminPruneTasksArgs - _args350.Query = query - var _result351 AuroraAdminPruneTasksResult - if err = p.Client_().Call(ctx, "pruneTasks", &_args350, &_result351); err != nil { + var _args351 AuroraAdminPruneTasksArgs + _args351.Query = query + var _result352 AuroraAdminPruneTasksResult + if err = p.Client_().Call(ctx, "pruneTasks", &_args351, &_result352); err != nil { return } - return _result351.GetSuccess(), nil + return _result352.GetSuccess(), nil } type AuroraAdminProcessor struct { @@ -24708,26 +25337,26 @@ type AuroraAdminProcessor struct { } func NewAuroraAdminProcessor(handler AuroraAdmin) *AuroraAdminProcessor { - self352 := &AuroraAdminProcessor{NewAuroraSchedulerManagerProcessor(handler)} - self352.AddToProcessorMap("setQuota", &auroraAdminProcessorSetQuota{handler:handler}) - self352.AddToProcessorMap("forceTaskState", &auroraAdminProcessorForceTaskState{handler:handler}) - self352.AddToProcessorMap("performBackup", &auroraAdminProcessorPerformBackup{handler:handler}) - self352.AddToProcessorMap("listBackups", &auroraAdminProcessorListBackups{handler:handler}) - self352.AddToProcessorMap("stageRecovery", &auroraAdminProcessorStageRecovery{handler:handler}) - self352.AddToProcessorMap("queryRecovery", &auroraAdminProcessorQueryRecovery{handler:handler}) - self352.AddToProcessorMap("deleteRecoveryTasks", &auroraAdminProcessorDeleteRecoveryTasks{handler:handler}) - self352.AddToProcessorMap("commitRecovery", &auroraAdminProcessorCommitRecovery{handler:handler}) - self352.AddToProcessorMap("unloadRecovery", &auroraAdminProcessorUnloadRecovery{handler:handler}) - self352.AddToProcessorMap("startMaintenance", &auroraAdminProcessorStartMaintenance{handler:handler}) - self352.AddToProcessorMap("drainHosts", &auroraAdminProcessorDrainHosts{handler:handler}) - self352.AddToProcessorMap("maintenanceStatus", &auroraAdminProcessorMaintenanceStatus{handler:handler}) - self352.AddToProcessorMap("endMaintenance", &auroraAdminProcessorEndMaintenance{handler:handler}) - self352.AddToProcessorMap("slaDrainHosts", &auroraAdminProcessorSlaDrainHosts{handler:handler}) - self352.AddToProcessorMap("snapshot", &auroraAdminProcessorSnapshot{handler:handler}) - self352.AddToProcessorMap("triggerExplicitTaskReconciliation", &auroraAdminProcessorTriggerExplicitTaskReconciliation{handler:handler}) - self352.AddToProcessorMap("triggerImplicitTaskReconciliation", &auroraAdminProcessorTriggerImplicitTaskReconciliation{handler:handler}) - self352.AddToProcessorMap("pruneTasks", 
&auroraAdminProcessorPruneTasks{handler:handler}) - return self352 + self353 := &AuroraAdminProcessor{NewAuroraSchedulerManagerProcessor(handler)} + self353.AddToProcessorMap("setQuota", &auroraAdminProcessorSetQuota{handler:handler}) + self353.AddToProcessorMap("forceTaskState", &auroraAdminProcessorForceTaskState{handler:handler}) + self353.AddToProcessorMap("performBackup", &auroraAdminProcessorPerformBackup{handler:handler}) + self353.AddToProcessorMap("listBackups", &auroraAdminProcessorListBackups{handler:handler}) + self353.AddToProcessorMap("stageRecovery", &auroraAdminProcessorStageRecovery{handler:handler}) + self353.AddToProcessorMap("queryRecovery", &auroraAdminProcessorQueryRecovery{handler:handler}) + self353.AddToProcessorMap("deleteRecoveryTasks", &auroraAdminProcessorDeleteRecoveryTasks{handler:handler}) + self353.AddToProcessorMap("commitRecovery", &auroraAdminProcessorCommitRecovery{handler:handler}) + self353.AddToProcessorMap("unloadRecovery", &auroraAdminProcessorUnloadRecovery{handler:handler}) + self353.AddToProcessorMap("startMaintenance", &auroraAdminProcessorStartMaintenance{handler:handler}) + self353.AddToProcessorMap("drainHosts", &auroraAdminProcessorDrainHosts{handler:handler}) + self353.AddToProcessorMap("maintenanceStatus", &auroraAdminProcessorMaintenanceStatus{handler:handler}) + self353.AddToProcessorMap("endMaintenance", &auroraAdminProcessorEndMaintenance{handler:handler}) + self353.AddToProcessorMap("slaDrainHosts", &auroraAdminProcessorSlaDrainHosts{handler:handler}) + self353.AddToProcessorMap("snapshot", &auroraAdminProcessorSnapshot{handler:handler}) + self353.AddToProcessorMap("triggerExplicitTaskReconciliation", &auroraAdminProcessorTriggerExplicitTaskReconciliation{handler:handler}) + self353.AddToProcessorMap("triggerImplicitTaskReconciliation", &auroraAdminProcessorTriggerImplicitTaskReconciliation{handler:handler}) + self353.AddToProcessorMap("pruneTasks", &auroraAdminProcessorPruneTasks{handler:handler}) + return self353 } type auroraAdminProcessorSetQuota struct { diff --git a/gen-go/apache/aurora/aurora_admin-remote/aurora_admin-remote.go b/gen-go/apache/aurora/aurora_admin-remote/aurora_admin-remote.go index b600061..9bd8b7e 100755 --- a/gen-go/apache/aurora/aurora_admin-remote/aurora_admin-remote.go +++ b/gen-go/apache/aurora/aurora_admin-remote/aurora_admin-remote.go @@ -1,22 +1,23 @@ -// Autogenerated by Thrift Compiler (0.12.0) +// Autogenerated by Thrift Compiler (0.13.0) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING package main import ( - "context" - "flag" - "fmt" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" - "github.com/apache/thrift/lib/go/thrift" - "apache/aurora" + "context" + "flag" + "fmt" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" + "github.com/apache/thrift/lib/go/thrift" + "apache/aurora" ) +var _ = aurora.GoUnusedProtection__ func Usage() { fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") @@ -195,19 +196,19 @@ func main() { } argvalue0 := flag.Arg(1) value0 := argvalue0 - arg354 := flag.Arg(2) - mbTrans355 := thrift.NewTMemoryBufferLen(len(arg354)) - defer mbTrans355.Close() - _, err356 := mbTrans355.WriteString(arg354) - if err356 != nil { + arg355 := flag.Arg(2) + mbTrans356 := thrift.NewTMemoryBufferLen(len(arg355)) + defer mbTrans356.Close() + _, err357 := mbTrans356.WriteString(arg355) + if err357 != nil { Usage() return } - factory357 := thrift.NewTJSONProtocolFactory() - jsProt358 := 
factory357.GetProtocol(mbTrans355) + factory358 := thrift.NewTJSONProtocolFactory() + jsProt359 := factory358.GetProtocol(mbTrans356) argvalue1 := aurora.NewResourceAggregate() - err359 := argvalue1.Read(jsProt358) - if err359 != nil { + err360 := argvalue1.Read(jsProt359) + if err360 != nil { Usage() return } @@ -263,19 +264,19 @@ func main() { fmt.Fprintln(os.Stderr, "QueryRecovery requires 1 args") flag.Usage() } - arg362 := flag.Arg(1) - mbTrans363 := thrift.NewTMemoryBufferLen(len(arg362)) - defer mbTrans363.Close() - _, err364 := mbTrans363.WriteString(arg362) - if err364 != nil { + arg363 := flag.Arg(1) + mbTrans364 := thrift.NewTMemoryBufferLen(len(arg363)) + defer mbTrans364.Close() + _, err365 := mbTrans364.WriteString(arg363) + if err365 != nil { Usage() return } - factory365 := thrift.NewTJSONProtocolFactory() - jsProt366 := factory365.GetProtocol(mbTrans363) + factory366 := thrift.NewTJSONProtocolFactory() + jsProt367 := factory366.GetProtocol(mbTrans364) argvalue0 := aurora.NewTaskQuery() - err367 := argvalue0.Read(jsProt366) - if err367 != nil { + err368 := argvalue0.Read(jsProt367) + if err368 != nil { Usage() return } @@ -288,19 +289,19 @@ func main() { fmt.Fprintln(os.Stderr, "DeleteRecoveryTasks requires 1 args") flag.Usage() } - arg368 := flag.Arg(1) - mbTrans369 := thrift.NewTMemoryBufferLen(len(arg368)) - defer mbTrans369.Close() - _, err370 := mbTrans369.WriteString(arg368) - if err370 != nil { + arg369 := flag.Arg(1) + mbTrans370 := thrift.NewTMemoryBufferLen(len(arg369)) + defer mbTrans370.Close() + _, err371 := mbTrans370.WriteString(arg369) + if err371 != nil { Usage() return } - factory371 := thrift.NewTJSONProtocolFactory() - jsProt372 := factory371.GetProtocol(mbTrans369) + factory372 := thrift.NewTJSONProtocolFactory() + jsProt373 := factory372.GetProtocol(mbTrans370) argvalue0 := aurora.NewTaskQuery() - err373 := argvalue0.Read(jsProt372) - if err373 != nil { + err374 := argvalue0.Read(jsProt373) + if err374 != nil { Usage() return } @@ -329,19 +330,19 @@ func main() { fmt.Fprintln(os.Stderr, "StartMaintenance requires 1 args") flag.Usage() } - arg374 := flag.Arg(1) - mbTrans375 := thrift.NewTMemoryBufferLen(len(arg374)) - defer mbTrans375.Close() - _, err376 := mbTrans375.WriteString(arg374) - if err376 != nil { + arg375 := flag.Arg(1) + mbTrans376 := thrift.NewTMemoryBufferLen(len(arg375)) + defer mbTrans376.Close() + _, err377 := mbTrans376.WriteString(arg375) + if err377 != nil { Usage() return } - factory377 := thrift.NewTJSONProtocolFactory() - jsProt378 := factory377.GetProtocol(mbTrans375) + factory378 := thrift.NewTJSONProtocolFactory() + jsProt379 := factory378.GetProtocol(mbTrans376) argvalue0 := aurora.NewHosts() - err379 := argvalue0.Read(jsProt378) - if err379 != nil { + err380 := argvalue0.Read(jsProt379) + if err380 != nil { Usage() return } @@ -354,19 +355,19 @@ func main() { fmt.Fprintln(os.Stderr, "DrainHosts requires 1 args") flag.Usage() } - arg380 := flag.Arg(1) - mbTrans381 := thrift.NewTMemoryBufferLen(len(arg380)) - defer mbTrans381.Close() - _, err382 := mbTrans381.WriteString(arg380) - if err382 != nil { + arg381 := flag.Arg(1) + mbTrans382 := thrift.NewTMemoryBufferLen(len(arg381)) + defer mbTrans382.Close() + _, err383 := mbTrans382.WriteString(arg381) + if err383 != nil { Usage() return } - factory383 := thrift.NewTJSONProtocolFactory() - jsProt384 := factory383.GetProtocol(mbTrans381) + factory384 := thrift.NewTJSONProtocolFactory() + jsProt385 := factory384.GetProtocol(mbTrans382) argvalue0 := aurora.NewHosts() - err385 := 
argvalue0.Read(jsProt384) - if err385 != nil { + err386 := argvalue0.Read(jsProt385) + if err386 != nil { Usage() return } @@ -379,19 +380,19 @@ func main() { fmt.Fprintln(os.Stderr, "MaintenanceStatus requires 1 args") flag.Usage() } - arg386 := flag.Arg(1) - mbTrans387 := thrift.NewTMemoryBufferLen(len(arg386)) - defer mbTrans387.Close() - _, err388 := mbTrans387.WriteString(arg386) - if err388 != nil { + arg387 := flag.Arg(1) + mbTrans388 := thrift.NewTMemoryBufferLen(len(arg387)) + defer mbTrans388.Close() + _, err389 := mbTrans388.WriteString(arg387) + if err389 != nil { Usage() return } - factory389 := thrift.NewTJSONProtocolFactory() - jsProt390 := factory389.GetProtocol(mbTrans387) + factory390 := thrift.NewTJSONProtocolFactory() + jsProt391 := factory390.GetProtocol(mbTrans388) argvalue0 := aurora.NewHosts() - err391 := argvalue0.Read(jsProt390) - if err391 != nil { + err392 := argvalue0.Read(jsProt391) + if err392 != nil { Usage() return } @@ -404,19 +405,19 @@ func main() { fmt.Fprintln(os.Stderr, "EndMaintenance requires 1 args") flag.Usage() } - arg392 := flag.Arg(1) - mbTrans393 := thrift.NewTMemoryBufferLen(len(arg392)) - defer mbTrans393.Close() - _, err394 := mbTrans393.WriteString(arg392) - if err394 != nil { + arg393 := flag.Arg(1) + mbTrans394 := thrift.NewTMemoryBufferLen(len(arg393)) + defer mbTrans394.Close() + _, err395 := mbTrans394.WriteString(arg393) + if err395 != nil { Usage() return } - factory395 := thrift.NewTJSONProtocolFactory() - jsProt396 := factory395.GetProtocol(mbTrans393) + factory396 := thrift.NewTJSONProtocolFactory() + jsProt397 := factory396.GetProtocol(mbTrans394) argvalue0 := aurora.NewHosts() - err397 := argvalue0.Read(jsProt396) - if err397 != nil { + err398 := argvalue0.Read(jsProt397) + if err398 != nil { Usage() return } @@ -429,42 +430,42 @@ func main() { fmt.Fprintln(os.Stderr, "SlaDrainHosts requires 3 args") flag.Usage() } - arg398 := flag.Arg(1) - mbTrans399 := thrift.NewTMemoryBufferLen(len(arg398)) - defer mbTrans399.Close() - _, err400 := mbTrans399.WriteString(arg398) - if err400 != nil { + arg399 := flag.Arg(1) + mbTrans400 := thrift.NewTMemoryBufferLen(len(arg399)) + defer mbTrans400.Close() + _, err401 := mbTrans400.WriteString(arg399) + if err401 != nil { Usage() return } - factory401 := thrift.NewTJSONProtocolFactory() - jsProt402 := factory401.GetProtocol(mbTrans399) + factory402 := thrift.NewTJSONProtocolFactory() + jsProt403 := factory402.GetProtocol(mbTrans400) argvalue0 := aurora.NewHosts() - err403 := argvalue0.Read(jsProt402) - if err403 != nil { + err404 := argvalue0.Read(jsProt403) + if err404 != nil { Usage() return } value0 := argvalue0 - arg404 := flag.Arg(2) - mbTrans405 := thrift.NewTMemoryBufferLen(len(arg404)) - defer mbTrans405.Close() - _, err406 := mbTrans405.WriteString(arg404) - if err406 != nil { + arg405 := flag.Arg(2) + mbTrans406 := thrift.NewTMemoryBufferLen(len(arg405)) + defer mbTrans406.Close() + _, err407 := mbTrans406.WriteString(arg405) + if err407 != nil { Usage() return } - factory407 := thrift.NewTJSONProtocolFactory() - jsProt408 := factory407.GetProtocol(mbTrans405) + factory408 := thrift.NewTJSONProtocolFactory() + jsProt409 := factory408.GetProtocol(mbTrans406) argvalue1 := aurora.NewSlaPolicy() - err409 := argvalue1.Read(jsProt408) - if err409 != nil { + err410 := argvalue1.Read(jsProt409) + if err410 != nil { Usage() return } value1 := argvalue1 - argvalue2, err410 := (strconv.ParseInt(flag.Arg(3), 10, 64)) - if err410 != nil { + argvalue2, err411 := (strconv.ParseInt(flag.Arg(3), 10, 
64)) + if err411 != nil { Usage() return } @@ -485,19 +486,19 @@ func main() { fmt.Fprintln(os.Stderr, "TriggerExplicitTaskReconciliation requires 1 args") flag.Usage() } - arg411 := flag.Arg(1) - mbTrans412 := thrift.NewTMemoryBufferLen(len(arg411)) - defer mbTrans412.Close() - _, err413 := mbTrans412.WriteString(arg411) - if err413 != nil { + arg412 := flag.Arg(1) + mbTrans413 := thrift.NewTMemoryBufferLen(len(arg412)) + defer mbTrans413.Close() + _, err414 := mbTrans413.WriteString(arg412) + if err414 != nil { Usage() return } - factory414 := thrift.NewTJSONProtocolFactory() - jsProt415 := factory414.GetProtocol(mbTrans412) + factory415 := thrift.NewTJSONProtocolFactory() + jsProt416 := factory415.GetProtocol(mbTrans413) argvalue0 := aurora.NewExplicitReconciliationSettings() - err416 := argvalue0.Read(jsProt415) - if err416 != nil { + err417 := argvalue0.Read(jsProt416) + if err417 != nil { Usage() return } @@ -518,19 +519,19 @@ func main() { fmt.Fprintln(os.Stderr, "PruneTasks requires 1 args") flag.Usage() } - arg417 := flag.Arg(1) - mbTrans418 := thrift.NewTMemoryBufferLen(len(arg417)) - defer mbTrans418.Close() - _, err419 := mbTrans418.WriteString(arg417) - if err419 != nil { + arg418 := flag.Arg(1) + mbTrans419 := thrift.NewTMemoryBufferLen(len(arg418)) + defer mbTrans419.Close() + _, err420 := mbTrans419.WriteString(arg418) + if err420 != nil { Usage() return } - factory420 := thrift.NewTJSONProtocolFactory() - jsProt421 := factory420.GetProtocol(mbTrans418) + factory421 := thrift.NewTJSONProtocolFactory() + jsProt422 := factory421.GetProtocol(mbTrans419) argvalue0 := aurora.NewTaskQuery() - err422 := argvalue0.Read(jsProt421) - if err422 != nil { + err423 := argvalue0.Read(jsProt422) + if err423 != nil { Usage() return } @@ -543,19 +544,19 @@ func main() { fmt.Fprintln(os.Stderr, "CreateJob requires 1 args") flag.Usage() } - arg423 := flag.Arg(1) - mbTrans424 := thrift.NewTMemoryBufferLen(len(arg423)) - defer mbTrans424.Close() - _, err425 := mbTrans424.WriteString(arg423) - if err425 != nil { + arg424 := flag.Arg(1) + mbTrans425 := thrift.NewTMemoryBufferLen(len(arg424)) + defer mbTrans425.Close() + _, err426 := mbTrans425.WriteString(arg424) + if err426 != nil { Usage() return } - factory426 := thrift.NewTJSONProtocolFactory() - jsProt427 := factory426.GetProtocol(mbTrans424) + factory427 := thrift.NewTJSONProtocolFactory() + jsProt428 := factory427.GetProtocol(mbTrans425) argvalue0 := aurora.NewJobConfiguration() - err428 := argvalue0.Read(jsProt427) - if err428 != nil { + err429 := argvalue0.Read(jsProt428) + if err429 != nil { Usage() return } @@ -568,19 +569,19 @@ func main() { fmt.Fprintln(os.Stderr, "ScheduleCronJob requires 1 args") flag.Usage() } - arg429 := flag.Arg(1) - mbTrans430 := thrift.NewTMemoryBufferLen(len(arg429)) - defer mbTrans430.Close() - _, err431 := mbTrans430.WriteString(arg429) - if err431 != nil { + arg430 := flag.Arg(1) + mbTrans431 := thrift.NewTMemoryBufferLen(len(arg430)) + defer mbTrans431.Close() + _, err432 := mbTrans431.WriteString(arg430) + if err432 != nil { Usage() return } - factory432 := thrift.NewTJSONProtocolFactory() - jsProt433 := factory432.GetProtocol(mbTrans430) + factory433 := thrift.NewTJSONProtocolFactory() + jsProt434 := factory433.GetProtocol(mbTrans431) argvalue0 := aurora.NewJobConfiguration() - err434 := argvalue0.Read(jsProt433) - if err434 != nil { + err435 := argvalue0.Read(jsProt434) + if err435 != nil { Usage() return } @@ -593,19 +594,19 @@ func main() { fmt.Fprintln(os.Stderr, "DescheduleCronJob requires 1 args") 
flag.Usage() } - arg435 := flag.Arg(1) - mbTrans436 := thrift.NewTMemoryBufferLen(len(arg435)) - defer mbTrans436.Close() - _, err437 := mbTrans436.WriteString(arg435) - if err437 != nil { + arg436 := flag.Arg(1) + mbTrans437 := thrift.NewTMemoryBufferLen(len(arg436)) + defer mbTrans437.Close() + _, err438 := mbTrans437.WriteString(arg436) + if err438 != nil { Usage() return } - factory438 := thrift.NewTJSONProtocolFactory() - jsProt439 := factory438.GetProtocol(mbTrans436) + factory439 := thrift.NewTJSONProtocolFactory() + jsProt440 := factory439.GetProtocol(mbTrans437) argvalue0 := aurora.NewJobKey() - err440 := argvalue0.Read(jsProt439) - if err440 != nil { + err441 := argvalue0.Read(jsProt440) + if err441 != nil { Usage() return } @@ -618,19 +619,19 @@ func main() { fmt.Fprintln(os.Stderr, "StartCronJob requires 1 args") flag.Usage() } - arg441 := flag.Arg(1) - mbTrans442 := thrift.NewTMemoryBufferLen(len(arg441)) - defer mbTrans442.Close() - _, err443 := mbTrans442.WriteString(arg441) - if err443 != nil { + arg442 := flag.Arg(1) + mbTrans443 := thrift.NewTMemoryBufferLen(len(arg442)) + defer mbTrans443.Close() + _, err444 := mbTrans443.WriteString(arg442) + if err444 != nil { Usage() return } - factory444 := thrift.NewTJSONProtocolFactory() - jsProt445 := factory444.GetProtocol(mbTrans442) + factory445 := thrift.NewTJSONProtocolFactory() + jsProt446 := factory445.GetProtocol(mbTrans443) argvalue0 := aurora.NewJobKey() - err446 := argvalue0.Read(jsProt445) - if err446 != nil { + err447 := argvalue0.Read(jsProt446) + if err447 != nil { Usage() return } @@ -643,36 +644,36 @@ func main() { fmt.Fprintln(os.Stderr, "RestartShards requires 2 args") flag.Usage() } - arg447 := flag.Arg(1) - mbTrans448 := thrift.NewTMemoryBufferLen(len(arg447)) - defer mbTrans448.Close() - _, err449 := mbTrans448.WriteString(arg447) - if err449 != nil { + arg448 := flag.Arg(1) + mbTrans449 := thrift.NewTMemoryBufferLen(len(arg448)) + defer mbTrans449.Close() + _, err450 := mbTrans449.WriteString(arg448) + if err450 != nil { Usage() return } - factory450 := thrift.NewTJSONProtocolFactory() - jsProt451 := factory450.GetProtocol(mbTrans448) + factory451 := thrift.NewTJSONProtocolFactory() + jsProt452 := factory451.GetProtocol(mbTrans449) argvalue0 := aurora.NewJobKey() - err452 := argvalue0.Read(jsProt451) - if err452 != nil { + err453 := argvalue0.Read(jsProt452) + if err453 != nil { Usage() return } value0 := argvalue0 - arg453 := flag.Arg(2) - mbTrans454 := thrift.NewTMemoryBufferLen(len(arg453)) - defer mbTrans454.Close() - _, err455 := mbTrans454.WriteString(arg453) - if err455 != nil { + arg454 := flag.Arg(2) + mbTrans455 := thrift.NewTMemoryBufferLen(len(arg454)) + defer mbTrans455.Close() + _, err456 := mbTrans455.WriteString(arg454) + if err456 != nil { Usage() return } - factory456 := thrift.NewTJSONProtocolFactory() - jsProt457 := factory456.GetProtocol(mbTrans454) + factory457 := thrift.NewTJSONProtocolFactory() + jsProt458 := factory457.GetProtocol(mbTrans455) containerStruct1 := aurora.NewAuroraAdminRestartShardsArgs() - err458 := containerStruct1.ReadField2(jsProt457) - if err458 != nil { + err459 := containerStruct1.ReadField2(jsProt458) + if err459 != nil { Usage() return } @@ -686,36 +687,36 @@ func main() { fmt.Fprintln(os.Stderr, "KillTasks requires 3 args") flag.Usage() } - arg459 := flag.Arg(1) - mbTrans460 := thrift.NewTMemoryBufferLen(len(arg459)) - defer mbTrans460.Close() - _, err461 := mbTrans460.WriteString(arg459) - if err461 != nil { + arg460 := flag.Arg(1) + mbTrans461 := 
thrift.NewTMemoryBufferLen(len(arg460)) + defer mbTrans461.Close() + _, err462 := mbTrans461.WriteString(arg460) + if err462 != nil { Usage() return } - factory462 := thrift.NewTJSONProtocolFactory() - jsProt463 := factory462.GetProtocol(mbTrans460) + factory463 := thrift.NewTJSONProtocolFactory() + jsProt464 := factory463.GetProtocol(mbTrans461) argvalue0 := aurora.NewJobKey() - err464 := argvalue0.Read(jsProt463) - if err464 != nil { + err465 := argvalue0.Read(jsProt464) + if err465 != nil { Usage() return } value0 := argvalue0 - arg465 := flag.Arg(2) - mbTrans466 := thrift.NewTMemoryBufferLen(len(arg465)) - defer mbTrans466.Close() - _, err467 := mbTrans466.WriteString(arg465) - if err467 != nil { + arg466 := flag.Arg(2) + mbTrans467 := thrift.NewTMemoryBufferLen(len(arg466)) + defer mbTrans467.Close() + _, err468 := mbTrans467.WriteString(arg466) + if err468 != nil { Usage() return } - factory468 := thrift.NewTJSONProtocolFactory() - jsProt469 := factory468.GetProtocol(mbTrans466) + factory469 := thrift.NewTJSONProtocolFactory() + jsProt470 := factory469.GetProtocol(mbTrans467) containerStruct1 := aurora.NewAuroraAdminKillTasksArgs() - err470 := containerStruct1.ReadField2(jsProt469) - if err470 != nil { + err471 := containerStruct1.ReadField2(jsProt470) + if err471 != nil { Usage() return } @@ -731,25 +732,25 @@ func main() { fmt.Fprintln(os.Stderr, "AddInstances requires 2 args") flag.Usage() } - arg472 := flag.Arg(1) - mbTrans473 := thrift.NewTMemoryBufferLen(len(arg472)) - defer mbTrans473.Close() - _, err474 := mbTrans473.WriteString(arg472) - if err474 != nil { + arg473 := flag.Arg(1) + mbTrans474 := thrift.NewTMemoryBufferLen(len(arg473)) + defer mbTrans474.Close() + _, err475 := mbTrans474.WriteString(arg473) + if err475 != nil { Usage() return } - factory475 := thrift.NewTJSONProtocolFactory() - jsProt476 := factory475.GetProtocol(mbTrans473) + factory476 := thrift.NewTJSONProtocolFactory() + jsProt477 := factory476.GetProtocol(mbTrans474) argvalue0 := aurora.NewInstanceKey() - err477 := argvalue0.Read(jsProt476) - if err477 != nil { + err478 := argvalue0.Read(jsProt477) + if err478 != nil { Usage() return } value0 := argvalue0 - tmp1, err478 := (strconv.Atoi(flag.Arg(2))) - if err478 != nil { + tmp1, err479 := (strconv.Atoi(flag.Arg(2))) + if err479 != nil { Usage() return } @@ -763,19 +764,19 @@ func main() { fmt.Fprintln(os.Stderr, "ReplaceCronTemplate requires 1 args") flag.Usage() } - arg479 := flag.Arg(1) - mbTrans480 := thrift.NewTMemoryBufferLen(len(arg479)) - defer mbTrans480.Close() - _, err481 := mbTrans480.WriteString(arg479) - if err481 != nil { + arg480 := flag.Arg(1) + mbTrans481 := thrift.NewTMemoryBufferLen(len(arg480)) + defer mbTrans481.Close() + _, err482 := mbTrans481.WriteString(arg480) + if err482 != nil { Usage() return } - factory482 := thrift.NewTJSONProtocolFactory() - jsProt483 := factory482.GetProtocol(mbTrans480) + factory483 := thrift.NewTJSONProtocolFactory() + jsProt484 := factory483.GetProtocol(mbTrans481) argvalue0 := aurora.NewJobConfiguration() - err484 := argvalue0.Read(jsProt483) - if err484 != nil { + err485 := argvalue0.Read(jsProt484) + if err485 != nil { Usage() return } @@ -788,19 +789,19 @@ func main() { fmt.Fprintln(os.Stderr, "StartJobUpdate requires 2 args") flag.Usage() } - arg485 := flag.Arg(1) - mbTrans486 := thrift.NewTMemoryBufferLen(len(arg485)) - defer mbTrans486.Close() - _, err487 := mbTrans486.WriteString(arg485) - if err487 != nil { + arg486 := flag.Arg(1) + mbTrans487 := thrift.NewTMemoryBufferLen(len(arg486)) + defer 
mbTrans487.Close() + _, err488 := mbTrans487.WriteString(arg486) + if err488 != nil { Usage() return } - factory488 := thrift.NewTJSONProtocolFactory() - jsProt489 := factory488.GetProtocol(mbTrans486) + factory489 := thrift.NewTJSONProtocolFactory() + jsProt490 := factory489.GetProtocol(mbTrans487) argvalue0 := aurora.NewJobUpdateRequest() - err490 := argvalue0.Read(jsProt489) - if err490 != nil { + err491 := argvalue0.Read(jsProt490) + if err491 != nil { Usage() return } @@ -815,19 +816,19 @@ func main() { fmt.Fprintln(os.Stderr, "PauseJobUpdate requires 2 args") flag.Usage() } - arg492 := flag.Arg(1) - mbTrans493 := thrift.NewTMemoryBufferLen(len(arg492)) - defer mbTrans493.Close() - _, err494 := mbTrans493.WriteString(arg492) - if err494 != nil { + arg493 := flag.Arg(1) + mbTrans494 := thrift.NewTMemoryBufferLen(len(arg493)) + defer mbTrans494.Close() + _, err495 := mbTrans494.WriteString(arg493) + if err495 != nil { Usage() return } - factory495 := thrift.NewTJSONProtocolFactory() - jsProt496 := factory495.GetProtocol(mbTrans493) + factory496 := thrift.NewTJSONProtocolFactory() + jsProt497 := factory496.GetProtocol(mbTrans494) argvalue0 := aurora.NewJobUpdateKey() - err497 := argvalue0.Read(jsProt496) - if err497 != nil { + err498 := argvalue0.Read(jsProt497) + if err498 != nil { Usage() return } @@ -842,19 +843,19 @@ func main() { fmt.Fprintln(os.Stderr, "ResumeJobUpdate requires 2 args") flag.Usage() } - arg499 := flag.Arg(1) - mbTrans500 := thrift.NewTMemoryBufferLen(len(arg499)) - defer mbTrans500.Close() - _, err501 := mbTrans500.WriteString(arg499) - if err501 != nil { + arg500 := flag.Arg(1) + mbTrans501 := thrift.NewTMemoryBufferLen(len(arg500)) + defer mbTrans501.Close() + _, err502 := mbTrans501.WriteString(arg500) + if err502 != nil { Usage() return } - factory502 := thrift.NewTJSONProtocolFactory() - jsProt503 := factory502.GetProtocol(mbTrans500) + factory503 := thrift.NewTJSONProtocolFactory() + jsProt504 := factory503.GetProtocol(mbTrans501) argvalue0 := aurora.NewJobUpdateKey() - err504 := argvalue0.Read(jsProt503) - if err504 != nil { + err505 := argvalue0.Read(jsProt504) + if err505 != nil { Usage() return } @@ -869,19 +870,19 @@ func main() { fmt.Fprintln(os.Stderr, "AbortJobUpdate requires 2 args") flag.Usage() } - arg506 := flag.Arg(1) - mbTrans507 := thrift.NewTMemoryBufferLen(len(arg506)) - defer mbTrans507.Close() - _, err508 := mbTrans507.WriteString(arg506) - if err508 != nil { + arg507 := flag.Arg(1) + mbTrans508 := thrift.NewTMemoryBufferLen(len(arg507)) + defer mbTrans508.Close() + _, err509 := mbTrans508.WriteString(arg507) + if err509 != nil { Usage() return } - factory509 := thrift.NewTJSONProtocolFactory() - jsProt510 := factory509.GetProtocol(mbTrans507) + factory510 := thrift.NewTJSONProtocolFactory() + jsProt511 := factory510.GetProtocol(mbTrans508) argvalue0 := aurora.NewJobUpdateKey() - err511 := argvalue0.Read(jsProt510) - if err511 != nil { + err512 := argvalue0.Read(jsProt511) + if err512 != nil { Usage() return } @@ -896,19 +897,19 @@ func main() { fmt.Fprintln(os.Stderr, "RollbackJobUpdate requires 2 args") flag.Usage() } - arg513 := flag.Arg(1) - mbTrans514 := thrift.NewTMemoryBufferLen(len(arg513)) - defer mbTrans514.Close() - _, err515 := mbTrans514.WriteString(arg513) - if err515 != nil { + arg514 := flag.Arg(1) + mbTrans515 := thrift.NewTMemoryBufferLen(len(arg514)) + defer mbTrans515.Close() + _, err516 := mbTrans515.WriteString(arg514) + if err516 != nil { Usage() return } - factory516 := thrift.NewTJSONProtocolFactory() - jsProt517 := 
factory516.GetProtocol(mbTrans514) + factory517 := thrift.NewTJSONProtocolFactory() + jsProt518 := factory517.GetProtocol(mbTrans515) argvalue0 := aurora.NewJobUpdateKey() - err518 := argvalue0.Read(jsProt517) - if err518 != nil { + err519 := argvalue0.Read(jsProt518) + if err519 != nil { Usage() return } @@ -923,19 +924,19 @@ func main() { fmt.Fprintln(os.Stderr, "PulseJobUpdate requires 1 args") flag.Usage() } - arg520 := flag.Arg(1) - mbTrans521 := thrift.NewTMemoryBufferLen(len(arg520)) - defer mbTrans521.Close() - _, err522 := mbTrans521.WriteString(arg520) - if err522 != nil { + arg521 := flag.Arg(1) + mbTrans522 := thrift.NewTMemoryBufferLen(len(arg521)) + defer mbTrans522.Close() + _, err523 := mbTrans522.WriteString(arg521) + if err523 != nil { Usage() return } - factory523 := thrift.NewTJSONProtocolFactory() - jsProt524 := factory523.GetProtocol(mbTrans521) + factory524 := thrift.NewTJSONProtocolFactory() + jsProt525 := factory524.GetProtocol(mbTrans522) argvalue0 := aurora.NewJobUpdateKey() - err525 := argvalue0.Read(jsProt524) - if err525 != nil { + err526 := argvalue0.Read(jsProt525) + if err526 != nil { Usage() return } @@ -966,19 +967,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args") flag.Usage() } - arg527 := flag.Arg(1) - mbTrans528 := thrift.NewTMemoryBufferLen(len(arg527)) - defer mbTrans528.Close() - _, err529 := mbTrans528.WriteString(arg527) - if err529 != nil { + arg528 := flag.Arg(1) + mbTrans529 := thrift.NewTMemoryBufferLen(len(arg528)) + defer mbTrans529.Close() + _, err530 := mbTrans529.WriteString(arg528) + if err530 != nil { Usage() return } - factory530 := thrift.NewTJSONProtocolFactory() - jsProt531 := factory530.GetProtocol(mbTrans528) + factory531 := thrift.NewTJSONProtocolFactory() + jsProt532 := factory531.GetProtocol(mbTrans529) argvalue0 := aurora.NewTaskQuery() - err532 := argvalue0.Read(jsProt531) - if err532 != nil { + err533 := argvalue0.Read(jsProt532) + if err533 != nil { Usage() return } @@ -991,19 +992,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args") flag.Usage() } - arg533 := flag.Arg(1) - mbTrans534 := thrift.NewTMemoryBufferLen(len(arg533)) - defer mbTrans534.Close() - _, err535 := mbTrans534.WriteString(arg533) - if err535 != nil { + arg534 := flag.Arg(1) + mbTrans535 := thrift.NewTMemoryBufferLen(len(arg534)) + defer mbTrans535.Close() + _, err536 := mbTrans535.WriteString(arg534) + if err536 != nil { Usage() return } - factory536 := thrift.NewTJSONProtocolFactory() - jsProt537 := factory536.GetProtocol(mbTrans534) + factory537 := thrift.NewTJSONProtocolFactory() + jsProt538 := factory537.GetProtocol(mbTrans535) argvalue0 := aurora.NewTaskQuery() - err538 := argvalue0.Read(jsProt537) - if err538 != nil { + err539 := argvalue0.Read(jsProt538) + if err539 != nil { Usage() return } @@ -1016,19 +1017,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args") flag.Usage() } - arg539 := flag.Arg(1) - mbTrans540 := thrift.NewTMemoryBufferLen(len(arg539)) - defer mbTrans540.Close() - _, err541 := mbTrans540.WriteString(arg539) - if err541 != nil { + arg540 := flag.Arg(1) + mbTrans541 := thrift.NewTMemoryBufferLen(len(arg540)) + defer mbTrans541.Close() + _, err542 := mbTrans541.WriteString(arg540) + if err542 != nil { Usage() return } - factory542 := thrift.NewTJSONProtocolFactory() - jsProt543 := factory542.GetProtocol(mbTrans540) + factory543 := thrift.NewTJSONProtocolFactory() + jsProt544 := factory543.GetProtocol(mbTrans541) argvalue0 := aurora.NewTaskQuery() 
- err544 := argvalue0.Read(jsProt543) - if err544 != nil { + err545 := argvalue0.Read(jsProt544) + if err545 != nil { Usage() return } @@ -1041,19 +1042,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args") flag.Usage() } - arg545 := flag.Arg(1) - mbTrans546 := thrift.NewTMemoryBufferLen(len(arg545)) - defer mbTrans546.Close() - _, err547 := mbTrans546.WriteString(arg545) - if err547 != nil { + arg546 := flag.Arg(1) + mbTrans547 := thrift.NewTMemoryBufferLen(len(arg546)) + defer mbTrans547.Close() + _, err548 := mbTrans547.WriteString(arg546) + if err548 != nil { Usage() return } - factory548 := thrift.NewTJSONProtocolFactory() - jsProt549 := factory548.GetProtocol(mbTrans546) + factory549 := thrift.NewTJSONProtocolFactory() + jsProt550 := factory549.GetProtocol(mbTrans547) argvalue0 := aurora.NewJobKey() - err550 := argvalue0.Read(jsProt549) - if err550 != nil { + err551 := argvalue0.Read(jsProt550) + if err551 != nil { Usage() return } @@ -1086,19 +1087,19 @@ func main() { fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args") flag.Usage() } - arg553 := flag.Arg(1) - mbTrans554 := thrift.NewTMemoryBufferLen(len(arg553)) - defer mbTrans554.Close() - _, err555 := mbTrans554.WriteString(arg553) - if err555 != nil { + arg554 := flag.Arg(1) + mbTrans555 := thrift.NewTMemoryBufferLen(len(arg554)) + defer mbTrans555.Close() + _, err556 := mbTrans555.WriteString(arg554) + if err556 != nil { Usage() return } - factory556 := thrift.NewTJSONProtocolFactory() - jsProt557 := factory556.GetProtocol(mbTrans554) + factory557 := thrift.NewTJSONProtocolFactory() + jsProt558 := factory557.GetProtocol(mbTrans555) argvalue0 := aurora.NewJobConfiguration() - err558 := argvalue0.Read(jsProt557) - if err558 != nil { + err559 := argvalue0.Read(jsProt558) + if err559 != nil { Usage() return } @@ -1111,19 +1112,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args") flag.Usage() } - arg559 := flag.Arg(1) - mbTrans560 := thrift.NewTMemoryBufferLen(len(arg559)) - defer mbTrans560.Close() - _, err561 := mbTrans560.WriteString(arg559) - if err561 != nil { + arg560 := flag.Arg(1) + mbTrans561 := thrift.NewTMemoryBufferLen(len(arg560)) + defer mbTrans561.Close() + _, err562 := mbTrans561.WriteString(arg560) + if err562 != nil { Usage() return } - factory562 := thrift.NewTJSONProtocolFactory() - jsProt563 := factory562.GetProtocol(mbTrans560) + factory563 := thrift.NewTJSONProtocolFactory() + jsProt564 := factory563.GetProtocol(mbTrans561) argvalue0 := aurora.NewJobUpdateQuery() - err564 := argvalue0.Read(jsProt563) - if err564 != nil { + err565 := argvalue0.Read(jsProt564) + if err565 != nil { Usage() return } @@ -1136,19 +1137,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args") flag.Usage() } - arg565 := flag.Arg(1) - mbTrans566 := thrift.NewTMemoryBufferLen(len(arg565)) - defer mbTrans566.Close() - _, err567 := mbTrans566.WriteString(arg565) - if err567 != nil { + arg566 := flag.Arg(1) + mbTrans567 := thrift.NewTMemoryBufferLen(len(arg566)) + defer mbTrans567.Close() + _, err568 := mbTrans567.WriteString(arg566) + if err568 != nil { Usage() return } - factory568 := thrift.NewTJSONProtocolFactory() - jsProt569 := factory568.GetProtocol(mbTrans566) + factory569 := thrift.NewTJSONProtocolFactory() + jsProt570 := factory569.GetProtocol(mbTrans567) argvalue0 := aurora.NewJobUpdateQuery() - err570 := argvalue0.Read(jsProt569) - if err570 != nil { + err571 := argvalue0.Read(jsProt570) + if err571 != nil { Usage() return } @@ -1161,19 
+1162,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args") flag.Usage() } - arg571 := flag.Arg(1) - mbTrans572 := thrift.NewTMemoryBufferLen(len(arg571)) - defer mbTrans572.Close() - _, err573 := mbTrans572.WriteString(arg571) - if err573 != nil { + arg572 := flag.Arg(1) + mbTrans573 := thrift.NewTMemoryBufferLen(len(arg572)) + defer mbTrans573.Close() + _, err574 := mbTrans573.WriteString(arg572) + if err574 != nil { Usage() return } - factory574 := thrift.NewTJSONProtocolFactory() - jsProt575 := factory574.GetProtocol(mbTrans572) + factory575 := thrift.NewTJSONProtocolFactory() + jsProt576 := factory575.GetProtocol(mbTrans573) argvalue0 := aurora.NewJobUpdateRequest() - err576 := argvalue0.Read(jsProt575) - if err576 != nil { + err577 := argvalue0.Read(jsProt576) + if err577 != nil { Usage() return } diff --git a/gen-go/apache/aurora/aurora_scheduler_manager-remote/aurora_scheduler_manager-remote.go b/gen-go/apache/aurora/aurora_scheduler_manager-remote/aurora_scheduler_manager-remote.go index bc3abe9..9bc3848 100755 --- a/gen-go/apache/aurora/aurora_scheduler_manager-remote/aurora_scheduler_manager-remote.go +++ b/gen-go/apache/aurora/aurora_scheduler_manager-remote/aurora_scheduler_manager-remote.go @@ -1,22 +1,23 @@ -// Autogenerated by Thrift Compiler (0.12.0) +// Autogenerated by Thrift Compiler (0.13.0) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING package main import ( - "context" - "flag" - "fmt" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" - "github.com/apache/thrift/lib/go/thrift" - "apache/aurora" + "context" + "flag" + "fmt" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" + "github.com/apache/thrift/lib/go/thrift" + "apache/aurora" ) +var _ = aurora.GoUnusedProtection__ func Usage() { fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") @@ -175,19 +176,19 @@ func main() { fmt.Fprintln(os.Stderr, "CreateJob requires 1 args") flag.Usage() } - arg162 := flag.Arg(1) - mbTrans163 := thrift.NewTMemoryBufferLen(len(arg162)) - defer mbTrans163.Close() - _, err164 := mbTrans163.WriteString(arg162) - if err164 != nil { + arg163 := flag.Arg(1) + mbTrans164 := thrift.NewTMemoryBufferLen(len(arg163)) + defer mbTrans164.Close() + _, err165 := mbTrans164.WriteString(arg163) + if err165 != nil { Usage() return } - factory165 := thrift.NewTJSONProtocolFactory() - jsProt166 := factory165.GetProtocol(mbTrans163) + factory166 := thrift.NewTJSONProtocolFactory() + jsProt167 := factory166.GetProtocol(mbTrans164) argvalue0 := aurora.NewJobConfiguration() - err167 := argvalue0.Read(jsProt166) - if err167 != nil { + err168 := argvalue0.Read(jsProt167) + if err168 != nil { Usage() return } @@ -200,19 +201,19 @@ func main() { fmt.Fprintln(os.Stderr, "ScheduleCronJob requires 1 args") flag.Usage() } - arg168 := flag.Arg(1) - mbTrans169 := thrift.NewTMemoryBufferLen(len(arg168)) - defer mbTrans169.Close() - _, err170 := mbTrans169.WriteString(arg168) - if err170 != nil { + arg169 := flag.Arg(1) + mbTrans170 := thrift.NewTMemoryBufferLen(len(arg169)) + defer mbTrans170.Close() + _, err171 := mbTrans170.WriteString(arg169) + if err171 != nil { Usage() return } - factory171 := thrift.NewTJSONProtocolFactory() - jsProt172 := factory171.GetProtocol(mbTrans169) + factory172 := thrift.NewTJSONProtocolFactory() + jsProt173 := factory172.GetProtocol(mbTrans170) argvalue0 := aurora.NewJobConfiguration() - err173 := argvalue0.Read(jsProt172) - if err173 != nil { + err174 := 
argvalue0.Read(jsProt173) + if err174 != nil { Usage() return } @@ -225,19 +226,19 @@ func main() { fmt.Fprintln(os.Stderr, "DescheduleCronJob requires 1 args") flag.Usage() } - arg174 := flag.Arg(1) - mbTrans175 := thrift.NewTMemoryBufferLen(len(arg174)) - defer mbTrans175.Close() - _, err176 := mbTrans175.WriteString(arg174) - if err176 != nil { + arg175 := flag.Arg(1) + mbTrans176 := thrift.NewTMemoryBufferLen(len(arg175)) + defer mbTrans176.Close() + _, err177 := mbTrans176.WriteString(arg175) + if err177 != nil { Usage() return } - factory177 := thrift.NewTJSONProtocolFactory() - jsProt178 := factory177.GetProtocol(mbTrans175) + factory178 := thrift.NewTJSONProtocolFactory() + jsProt179 := factory178.GetProtocol(mbTrans176) argvalue0 := aurora.NewJobKey() - err179 := argvalue0.Read(jsProt178) - if err179 != nil { + err180 := argvalue0.Read(jsProt179) + if err180 != nil { Usage() return } @@ -250,19 +251,19 @@ func main() { fmt.Fprintln(os.Stderr, "StartCronJob requires 1 args") flag.Usage() } - arg180 := flag.Arg(1) - mbTrans181 := thrift.NewTMemoryBufferLen(len(arg180)) - defer mbTrans181.Close() - _, err182 := mbTrans181.WriteString(arg180) - if err182 != nil { + arg181 := flag.Arg(1) + mbTrans182 := thrift.NewTMemoryBufferLen(len(arg181)) + defer mbTrans182.Close() + _, err183 := mbTrans182.WriteString(arg181) + if err183 != nil { Usage() return } - factory183 := thrift.NewTJSONProtocolFactory() - jsProt184 := factory183.GetProtocol(mbTrans181) + factory184 := thrift.NewTJSONProtocolFactory() + jsProt185 := factory184.GetProtocol(mbTrans182) argvalue0 := aurora.NewJobKey() - err185 := argvalue0.Read(jsProt184) - if err185 != nil { + err186 := argvalue0.Read(jsProt185) + if err186 != nil { Usage() return } @@ -275,36 +276,36 @@ func main() { fmt.Fprintln(os.Stderr, "RestartShards requires 2 args") flag.Usage() } - arg186 := flag.Arg(1) - mbTrans187 := thrift.NewTMemoryBufferLen(len(arg186)) - defer mbTrans187.Close() - _, err188 := mbTrans187.WriteString(arg186) - if err188 != nil { + arg187 := flag.Arg(1) + mbTrans188 := thrift.NewTMemoryBufferLen(len(arg187)) + defer mbTrans188.Close() + _, err189 := mbTrans188.WriteString(arg187) + if err189 != nil { Usage() return } - factory189 := thrift.NewTJSONProtocolFactory() - jsProt190 := factory189.GetProtocol(mbTrans187) + factory190 := thrift.NewTJSONProtocolFactory() + jsProt191 := factory190.GetProtocol(mbTrans188) argvalue0 := aurora.NewJobKey() - err191 := argvalue0.Read(jsProt190) - if err191 != nil { + err192 := argvalue0.Read(jsProt191) + if err192 != nil { Usage() return } value0 := argvalue0 - arg192 := flag.Arg(2) - mbTrans193 := thrift.NewTMemoryBufferLen(len(arg192)) - defer mbTrans193.Close() - _, err194 := mbTrans193.WriteString(arg192) - if err194 != nil { + arg193 := flag.Arg(2) + mbTrans194 := thrift.NewTMemoryBufferLen(len(arg193)) + defer mbTrans194.Close() + _, err195 := mbTrans194.WriteString(arg193) + if err195 != nil { Usage() return } - factory195 := thrift.NewTJSONProtocolFactory() - jsProt196 := factory195.GetProtocol(mbTrans193) + factory196 := thrift.NewTJSONProtocolFactory() + jsProt197 := factory196.GetProtocol(mbTrans194) containerStruct1 := aurora.NewAuroraSchedulerManagerRestartShardsArgs() - err197 := containerStruct1.ReadField2(jsProt196) - if err197 != nil { + err198 := containerStruct1.ReadField2(jsProt197) + if err198 != nil { Usage() return } @@ -318,36 +319,36 @@ func main() { fmt.Fprintln(os.Stderr, "KillTasks requires 3 args") flag.Usage() } - arg198 := flag.Arg(1) - mbTrans199 := 
thrift.NewTMemoryBufferLen(len(arg198)) - defer mbTrans199.Close() - _, err200 := mbTrans199.WriteString(arg198) - if err200 != nil { + arg199 := flag.Arg(1) + mbTrans200 := thrift.NewTMemoryBufferLen(len(arg199)) + defer mbTrans200.Close() + _, err201 := mbTrans200.WriteString(arg199) + if err201 != nil { Usage() return } - factory201 := thrift.NewTJSONProtocolFactory() - jsProt202 := factory201.GetProtocol(mbTrans199) + factory202 := thrift.NewTJSONProtocolFactory() + jsProt203 := factory202.GetProtocol(mbTrans200) argvalue0 := aurora.NewJobKey() - err203 := argvalue0.Read(jsProt202) - if err203 != nil { + err204 := argvalue0.Read(jsProt203) + if err204 != nil { Usage() return } value0 := argvalue0 - arg204 := flag.Arg(2) - mbTrans205 := thrift.NewTMemoryBufferLen(len(arg204)) - defer mbTrans205.Close() - _, err206 := mbTrans205.WriteString(arg204) - if err206 != nil { + arg205 := flag.Arg(2) + mbTrans206 := thrift.NewTMemoryBufferLen(len(arg205)) + defer mbTrans206.Close() + _, err207 := mbTrans206.WriteString(arg205) + if err207 != nil { Usage() return } - factory207 := thrift.NewTJSONProtocolFactory() - jsProt208 := factory207.GetProtocol(mbTrans205) + factory208 := thrift.NewTJSONProtocolFactory() + jsProt209 := factory208.GetProtocol(mbTrans206) containerStruct1 := aurora.NewAuroraSchedulerManagerKillTasksArgs() - err209 := containerStruct1.ReadField2(jsProt208) - if err209 != nil { + err210 := containerStruct1.ReadField2(jsProt209) + if err210 != nil { Usage() return } @@ -363,25 +364,25 @@ func main() { fmt.Fprintln(os.Stderr, "AddInstances requires 2 args") flag.Usage() } - arg211 := flag.Arg(1) - mbTrans212 := thrift.NewTMemoryBufferLen(len(arg211)) - defer mbTrans212.Close() - _, err213 := mbTrans212.WriteString(arg211) - if err213 != nil { + arg212 := flag.Arg(1) + mbTrans213 := thrift.NewTMemoryBufferLen(len(arg212)) + defer mbTrans213.Close() + _, err214 := mbTrans213.WriteString(arg212) + if err214 != nil { Usage() return } - factory214 := thrift.NewTJSONProtocolFactory() - jsProt215 := factory214.GetProtocol(mbTrans212) + factory215 := thrift.NewTJSONProtocolFactory() + jsProt216 := factory215.GetProtocol(mbTrans213) argvalue0 := aurora.NewInstanceKey() - err216 := argvalue0.Read(jsProt215) - if err216 != nil { + err217 := argvalue0.Read(jsProt216) + if err217 != nil { Usage() return } value0 := argvalue0 - tmp1, err217 := (strconv.Atoi(flag.Arg(2))) - if err217 != nil { + tmp1, err218 := (strconv.Atoi(flag.Arg(2))) + if err218 != nil { Usage() return } @@ -395,19 +396,19 @@ func main() { fmt.Fprintln(os.Stderr, "ReplaceCronTemplate requires 1 args") flag.Usage() } - arg218 := flag.Arg(1) - mbTrans219 := thrift.NewTMemoryBufferLen(len(arg218)) - defer mbTrans219.Close() - _, err220 := mbTrans219.WriteString(arg218) - if err220 != nil { + arg219 := flag.Arg(1) + mbTrans220 := thrift.NewTMemoryBufferLen(len(arg219)) + defer mbTrans220.Close() + _, err221 := mbTrans220.WriteString(arg219) + if err221 != nil { Usage() return } - factory221 := thrift.NewTJSONProtocolFactory() - jsProt222 := factory221.GetProtocol(mbTrans219) + factory222 := thrift.NewTJSONProtocolFactory() + jsProt223 := factory222.GetProtocol(mbTrans220) argvalue0 := aurora.NewJobConfiguration() - err223 := argvalue0.Read(jsProt222) - if err223 != nil { + err224 := argvalue0.Read(jsProt223) + if err224 != nil { Usage() return } @@ -420,19 +421,19 @@ func main() { fmt.Fprintln(os.Stderr, "StartJobUpdate requires 2 args") flag.Usage() } - arg224 := flag.Arg(1) - mbTrans225 := 
thrift.NewTMemoryBufferLen(len(arg224)) - defer mbTrans225.Close() - _, err226 := mbTrans225.WriteString(arg224) - if err226 != nil { + arg225 := flag.Arg(1) + mbTrans226 := thrift.NewTMemoryBufferLen(len(arg225)) + defer mbTrans226.Close() + _, err227 := mbTrans226.WriteString(arg225) + if err227 != nil { Usage() return } - factory227 := thrift.NewTJSONProtocolFactory() - jsProt228 := factory227.GetProtocol(mbTrans225) + factory228 := thrift.NewTJSONProtocolFactory() + jsProt229 := factory228.GetProtocol(mbTrans226) argvalue0 := aurora.NewJobUpdateRequest() - err229 := argvalue0.Read(jsProt228) - if err229 != nil { + err230 := argvalue0.Read(jsProt229) + if err230 != nil { Usage() return } @@ -447,19 +448,19 @@ func main() { fmt.Fprintln(os.Stderr, "PauseJobUpdate requires 2 args") flag.Usage() } - arg231 := flag.Arg(1) - mbTrans232 := thrift.NewTMemoryBufferLen(len(arg231)) - defer mbTrans232.Close() - _, err233 := mbTrans232.WriteString(arg231) - if err233 != nil { + arg232 := flag.Arg(1) + mbTrans233 := thrift.NewTMemoryBufferLen(len(arg232)) + defer mbTrans233.Close() + _, err234 := mbTrans233.WriteString(arg232) + if err234 != nil { Usage() return } - factory234 := thrift.NewTJSONProtocolFactory() - jsProt235 := factory234.GetProtocol(mbTrans232) + factory235 := thrift.NewTJSONProtocolFactory() + jsProt236 := factory235.GetProtocol(mbTrans233) argvalue0 := aurora.NewJobUpdateKey() - err236 := argvalue0.Read(jsProt235) - if err236 != nil { + err237 := argvalue0.Read(jsProt236) + if err237 != nil { Usage() return } @@ -474,19 +475,19 @@ func main() { fmt.Fprintln(os.Stderr, "ResumeJobUpdate requires 2 args") flag.Usage() } - arg238 := flag.Arg(1) - mbTrans239 := thrift.NewTMemoryBufferLen(len(arg238)) - defer mbTrans239.Close() - _, err240 := mbTrans239.WriteString(arg238) - if err240 != nil { + arg239 := flag.Arg(1) + mbTrans240 := thrift.NewTMemoryBufferLen(len(arg239)) + defer mbTrans240.Close() + _, err241 := mbTrans240.WriteString(arg239) + if err241 != nil { Usage() return } - factory241 := thrift.NewTJSONProtocolFactory() - jsProt242 := factory241.GetProtocol(mbTrans239) + factory242 := thrift.NewTJSONProtocolFactory() + jsProt243 := factory242.GetProtocol(mbTrans240) argvalue0 := aurora.NewJobUpdateKey() - err243 := argvalue0.Read(jsProt242) - if err243 != nil { + err244 := argvalue0.Read(jsProt243) + if err244 != nil { Usage() return } @@ -501,19 +502,19 @@ func main() { fmt.Fprintln(os.Stderr, "AbortJobUpdate requires 2 args") flag.Usage() } - arg245 := flag.Arg(1) - mbTrans246 := thrift.NewTMemoryBufferLen(len(arg245)) - defer mbTrans246.Close() - _, err247 := mbTrans246.WriteString(arg245) - if err247 != nil { + arg246 := flag.Arg(1) + mbTrans247 := thrift.NewTMemoryBufferLen(len(arg246)) + defer mbTrans247.Close() + _, err248 := mbTrans247.WriteString(arg246) + if err248 != nil { Usage() return } - factory248 := thrift.NewTJSONProtocolFactory() - jsProt249 := factory248.GetProtocol(mbTrans246) + factory249 := thrift.NewTJSONProtocolFactory() + jsProt250 := factory249.GetProtocol(mbTrans247) argvalue0 := aurora.NewJobUpdateKey() - err250 := argvalue0.Read(jsProt249) - if err250 != nil { + err251 := argvalue0.Read(jsProt250) + if err251 != nil { Usage() return } @@ -528,19 +529,19 @@ func main() { fmt.Fprintln(os.Stderr, "RollbackJobUpdate requires 2 args") flag.Usage() } - arg252 := flag.Arg(1) - mbTrans253 := thrift.NewTMemoryBufferLen(len(arg252)) - defer mbTrans253.Close() - _, err254 := mbTrans253.WriteString(arg252) - if err254 != nil { + arg253 := flag.Arg(1) + 
mbTrans254 := thrift.NewTMemoryBufferLen(len(arg253)) + defer mbTrans254.Close() + _, err255 := mbTrans254.WriteString(arg253) + if err255 != nil { Usage() return } - factory255 := thrift.NewTJSONProtocolFactory() - jsProt256 := factory255.GetProtocol(mbTrans253) + factory256 := thrift.NewTJSONProtocolFactory() + jsProt257 := factory256.GetProtocol(mbTrans254) argvalue0 := aurora.NewJobUpdateKey() - err257 := argvalue0.Read(jsProt256) - if err257 != nil { + err258 := argvalue0.Read(jsProt257) + if err258 != nil { Usage() return } @@ -555,19 +556,19 @@ func main() { fmt.Fprintln(os.Stderr, "PulseJobUpdate requires 1 args") flag.Usage() } - arg259 := flag.Arg(1) - mbTrans260 := thrift.NewTMemoryBufferLen(len(arg259)) - defer mbTrans260.Close() - _, err261 := mbTrans260.WriteString(arg259) - if err261 != nil { + arg260 := flag.Arg(1) + mbTrans261 := thrift.NewTMemoryBufferLen(len(arg260)) + defer mbTrans261.Close() + _, err262 := mbTrans261.WriteString(arg260) + if err262 != nil { Usage() return } - factory262 := thrift.NewTJSONProtocolFactory() - jsProt263 := factory262.GetProtocol(mbTrans260) + factory263 := thrift.NewTJSONProtocolFactory() + jsProt264 := factory263.GetProtocol(mbTrans261) argvalue0 := aurora.NewJobUpdateKey() - err264 := argvalue0.Read(jsProt263) - if err264 != nil { + err265 := argvalue0.Read(jsProt264) + if err265 != nil { Usage() return } @@ -598,19 +599,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args") flag.Usage() } - arg266 := flag.Arg(1) - mbTrans267 := thrift.NewTMemoryBufferLen(len(arg266)) - defer mbTrans267.Close() - _, err268 := mbTrans267.WriteString(arg266) - if err268 != nil { + arg267 := flag.Arg(1) + mbTrans268 := thrift.NewTMemoryBufferLen(len(arg267)) + defer mbTrans268.Close() + _, err269 := mbTrans268.WriteString(arg267) + if err269 != nil { Usage() return } - factory269 := thrift.NewTJSONProtocolFactory() - jsProt270 := factory269.GetProtocol(mbTrans267) + factory270 := thrift.NewTJSONProtocolFactory() + jsProt271 := factory270.GetProtocol(mbTrans268) argvalue0 := aurora.NewTaskQuery() - err271 := argvalue0.Read(jsProt270) - if err271 != nil { + err272 := argvalue0.Read(jsProt271) + if err272 != nil { Usage() return } @@ -623,19 +624,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args") flag.Usage() } - arg272 := flag.Arg(1) - mbTrans273 := thrift.NewTMemoryBufferLen(len(arg272)) - defer mbTrans273.Close() - _, err274 := mbTrans273.WriteString(arg272) - if err274 != nil { + arg273 := flag.Arg(1) + mbTrans274 := thrift.NewTMemoryBufferLen(len(arg273)) + defer mbTrans274.Close() + _, err275 := mbTrans274.WriteString(arg273) + if err275 != nil { Usage() return } - factory275 := thrift.NewTJSONProtocolFactory() - jsProt276 := factory275.GetProtocol(mbTrans273) + factory276 := thrift.NewTJSONProtocolFactory() + jsProt277 := factory276.GetProtocol(mbTrans274) argvalue0 := aurora.NewTaskQuery() - err277 := argvalue0.Read(jsProt276) - if err277 != nil { + err278 := argvalue0.Read(jsProt277) + if err278 != nil { Usage() return } @@ -648,19 +649,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args") flag.Usage() } - arg278 := flag.Arg(1) - mbTrans279 := thrift.NewTMemoryBufferLen(len(arg278)) - defer mbTrans279.Close() - _, err280 := mbTrans279.WriteString(arg278) - if err280 != nil { + arg279 := flag.Arg(1) + mbTrans280 := thrift.NewTMemoryBufferLen(len(arg279)) + defer mbTrans280.Close() + _, err281 := mbTrans280.WriteString(arg279) + if err281 != nil { Usage() return } - 
factory281 := thrift.NewTJSONProtocolFactory() - jsProt282 := factory281.GetProtocol(mbTrans279) + factory282 := thrift.NewTJSONProtocolFactory() + jsProt283 := factory282.GetProtocol(mbTrans280) argvalue0 := aurora.NewTaskQuery() - err283 := argvalue0.Read(jsProt282) - if err283 != nil { + err284 := argvalue0.Read(jsProt283) + if err284 != nil { Usage() return } @@ -673,19 +674,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args") flag.Usage() } - arg284 := flag.Arg(1) - mbTrans285 := thrift.NewTMemoryBufferLen(len(arg284)) - defer mbTrans285.Close() - _, err286 := mbTrans285.WriteString(arg284) - if err286 != nil { + arg285 := flag.Arg(1) + mbTrans286 := thrift.NewTMemoryBufferLen(len(arg285)) + defer mbTrans286.Close() + _, err287 := mbTrans286.WriteString(arg285) + if err287 != nil { Usage() return } - factory287 := thrift.NewTJSONProtocolFactory() - jsProt288 := factory287.GetProtocol(mbTrans285) + factory288 := thrift.NewTJSONProtocolFactory() + jsProt289 := factory288.GetProtocol(mbTrans286) argvalue0 := aurora.NewJobKey() - err289 := argvalue0.Read(jsProt288) - if err289 != nil { + err290 := argvalue0.Read(jsProt289) + if err290 != nil { Usage() return } @@ -718,19 +719,19 @@ func main() { fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args") flag.Usage() } - arg292 := flag.Arg(1) - mbTrans293 := thrift.NewTMemoryBufferLen(len(arg292)) - defer mbTrans293.Close() - _, err294 := mbTrans293.WriteString(arg292) - if err294 != nil { + arg293 := flag.Arg(1) + mbTrans294 := thrift.NewTMemoryBufferLen(len(arg293)) + defer mbTrans294.Close() + _, err295 := mbTrans294.WriteString(arg293) + if err295 != nil { Usage() return } - factory295 := thrift.NewTJSONProtocolFactory() - jsProt296 := factory295.GetProtocol(mbTrans293) + factory296 := thrift.NewTJSONProtocolFactory() + jsProt297 := factory296.GetProtocol(mbTrans294) argvalue0 := aurora.NewJobConfiguration() - err297 := argvalue0.Read(jsProt296) - if err297 != nil { + err298 := argvalue0.Read(jsProt297) + if err298 != nil { Usage() return } @@ -743,19 +744,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args") flag.Usage() } - arg298 := flag.Arg(1) - mbTrans299 := thrift.NewTMemoryBufferLen(len(arg298)) - defer mbTrans299.Close() - _, err300 := mbTrans299.WriteString(arg298) - if err300 != nil { + arg299 := flag.Arg(1) + mbTrans300 := thrift.NewTMemoryBufferLen(len(arg299)) + defer mbTrans300.Close() + _, err301 := mbTrans300.WriteString(arg299) + if err301 != nil { Usage() return } - factory301 := thrift.NewTJSONProtocolFactory() - jsProt302 := factory301.GetProtocol(mbTrans299) + factory302 := thrift.NewTJSONProtocolFactory() + jsProt303 := factory302.GetProtocol(mbTrans300) argvalue0 := aurora.NewJobUpdateQuery() - err303 := argvalue0.Read(jsProt302) - if err303 != nil { + err304 := argvalue0.Read(jsProt303) + if err304 != nil { Usage() return } @@ -768,19 +769,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args") flag.Usage() } - arg304 := flag.Arg(1) - mbTrans305 := thrift.NewTMemoryBufferLen(len(arg304)) - defer mbTrans305.Close() - _, err306 := mbTrans305.WriteString(arg304) - if err306 != nil { + arg305 := flag.Arg(1) + mbTrans306 := thrift.NewTMemoryBufferLen(len(arg305)) + defer mbTrans306.Close() + _, err307 := mbTrans306.WriteString(arg305) + if err307 != nil { Usage() return } - factory307 := thrift.NewTJSONProtocolFactory() - jsProt308 := factory307.GetProtocol(mbTrans305) + factory308 := thrift.NewTJSONProtocolFactory() + jsProt309 := 
factory308.GetProtocol(mbTrans306) argvalue0 := aurora.NewJobUpdateQuery() - err309 := argvalue0.Read(jsProt308) - if err309 != nil { + err310 := argvalue0.Read(jsProt309) + if err310 != nil { Usage() return } @@ -793,19 +794,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args") flag.Usage() } - arg310 := flag.Arg(1) - mbTrans311 := thrift.NewTMemoryBufferLen(len(arg310)) - defer mbTrans311.Close() - _, err312 := mbTrans311.WriteString(arg310) - if err312 != nil { + arg311 := flag.Arg(1) + mbTrans312 := thrift.NewTMemoryBufferLen(len(arg311)) + defer mbTrans312.Close() + _, err313 := mbTrans312.WriteString(arg311) + if err313 != nil { Usage() return } - factory313 := thrift.NewTJSONProtocolFactory() - jsProt314 := factory313.GetProtocol(mbTrans311) + factory314 := thrift.NewTJSONProtocolFactory() + jsProt315 := factory314.GetProtocol(mbTrans312) argvalue0 := aurora.NewJobUpdateRequest() - err315 := argvalue0.Read(jsProt314) - if err315 != nil { + err316 := argvalue0.Read(jsProt315) + if err316 != nil { Usage() return } diff --git a/gen-go/apache/aurora/read_only_scheduler-remote/read_only_scheduler-remote.go b/gen-go/apache/aurora/read_only_scheduler-remote/read_only_scheduler-remote.go index 282c92a..31aac65 100755 --- a/gen-go/apache/aurora/read_only_scheduler-remote/read_only_scheduler-remote.go +++ b/gen-go/apache/aurora/read_only_scheduler-remote/read_only_scheduler-remote.go @@ -1,22 +1,23 @@ -// Autogenerated by Thrift Compiler (0.12.0) +// Autogenerated by Thrift Compiler (0.13.0) // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING package main import ( - "context" - "flag" - "fmt" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" - "github.com/apache/thrift/lib/go/thrift" - "apache/aurora" + "context" + "flag" + "fmt" + "math" + "net" + "net/url" + "os" + "strconv" + "strings" + "github.com/apache/thrift/lib/go/thrift" + "apache/aurora" ) +var _ = aurora.GoUnusedProtection__ func Usage() { fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") @@ -179,19 +180,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args") flag.Usage() } - arg81 := flag.Arg(1) - mbTrans82 := thrift.NewTMemoryBufferLen(len(arg81)) - defer mbTrans82.Close() - _, err83 := mbTrans82.WriteString(arg81) - if err83 != nil { + arg82 := flag.Arg(1) + mbTrans83 := thrift.NewTMemoryBufferLen(len(arg82)) + defer mbTrans83.Close() + _, err84 := mbTrans83.WriteString(arg82) + if err84 != nil { Usage() return } - factory84 := thrift.NewTJSONProtocolFactory() - jsProt85 := factory84.GetProtocol(mbTrans82) + factory85 := thrift.NewTJSONProtocolFactory() + jsProt86 := factory85.GetProtocol(mbTrans83) argvalue0 := aurora.NewTaskQuery() - err86 := argvalue0.Read(jsProt85) - if err86 != nil { + err87 := argvalue0.Read(jsProt86) + if err87 != nil { Usage() return } @@ -204,19 +205,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args") flag.Usage() } - arg87 := flag.Arg(1) - mbTrans88 := thrift.NewTMemoryBufferLen(len(arg87)) - defer mbTrans88.Close() - _, err89 := mbTrans88.WriteString(arg87) - if err89 != nil { + arg88 := flag.Arg(1) + mbTrans89 := thrift.NewTMemoryBufferLen(len(arg88)) + defer mbTrans89.Close() + _, err90 := mbTrans89.WriteString(arg88) + if err90 != nil { Usage() return } - factory90 := thrift.NewTJSONProtocolFactory() - jsProt91 := factory90.GetProtocol(mbTrans88) + factory91 := thrift.NewTJSONProtocolFactory() + jsProt92 := 
factory91.GetProtocol(mbTrans89) argvalue0 := aurora.NewTaskQuery() - err92 := argvalue0.Read(jsProt91) - if err92 != nil { + err93 := argvalue0.Read(jsProt92) + if err93 != nil { Usage() return } @@ -229,19 +230,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args") flag.Usage() } - arg93 := flag.Arg(1) - mbTrans94 := thrift.NewTMemoryBufferLen(len(arg93)) - defer mbTrans94.Close() - _, err95 := mbTrans94.WriteString(arg93) - if err95 != nil { + arg94 := flag.Arg(1) + mbTrans95 := thrift.NewTMemoryBufferLen(len(arg94)) + defer mbTrans95.Close() + _, err96 := mbTrans95.WriteString(arg94) + if err96 != nil { Usage() return } - factory96 := thrift.NewTJSONProtocolFactory() - jsProt97 := factory96.GetProtocol(mbTrans94) + factory97 := thrift.NewTJSONProtocolFactory() + jsProt98 := factory97.GetProtocol(mbTrans95) argvalue0 := aurora.NewTaskQuery() - err98 := argvalue0.Read(jsProt97) - if err98 != nil { + err99 := argvalue0.Read(jsProt98) + if err99 != nil { Usage() return } @@ -254,19 +255,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args") flag.Usage() } - arg99 := flag.Arg(1) - mbTrans100 := thrift.NewTMemoryBufferLen(len(arg99)) - defer mbTrans100.Close() - _, err101 := mbTrans100.WriteString(arg99) - if err101 != nil { + arg100 := flag.Arg(1) + mbTrans101 := thrift.NewTMemoryBufferLen(len(arg100)) + defer mbTrans101.Close() + _, err102 := mbTrans101.WriteString(arg100) + if err102 != nil { Usage() return } - factory102 := thrift.NewTJSONProtocolFactory() - jsProt103 := factory102.GetProtocol(mbTrans100) + factory103 := thrift.NewTJSONProtocolFactory() + jsProt104 := factory103.GetProtocol(mbTrans101) argvalue0 := aurora.NewJobKey() - err104 := argvalue0.Read(jsProt103) - if err104 != nil { + err105 := argvalue0.Read(jsProt104) + if err105 != nil { Usage() return } @@ -299,19 +300,19 @@ func main() { fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args") flag.Usage() } - arg107 := flag.Arg(1) - mbTrans108 := thrift.NewTMemoryBufferLen(len(arg107)) - defer mbTrans108.Close() - _, err109 := mbTrans108.WriteString(arg107) - if err109 != nil { + arg108 := flag.Arg(1) + mbTrans109 := thrift.NewTMemoryBufferLen(len(arg108)) + defer mbTrans109.Close() + _, err110 := mbTrans109.WriteString(arg108) + if err110 != nil { Usage() return } - factory110 := thrift.NewTJSONProtocolFactory() - jsProt111 := factory110.GetProtocol(mbTrans108) + factory111 := thrift.NewTJSONProtocolFactory() + jsProt112 := factory111.GetProtocol(mbTrans109) argvalue0 := aurora.NewJobConfiguration() - err112 := argvalue0.Read(jsProt111) - if err112 != nil { + err113 := argvalue0.Read(jsProt112) + if err113 != nil { Usage() return } @@ -324,19 +325,19 @@ func main() { fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args") flag.Usage() } - arg113 := flag.Arg(1) - mbTrans114 := thrift.NewTMemoryBufferLen(len(arg113)) - defer mbTrans114.Close() - _, err115 := mbTrans114.WriteString(arg113) - if err115 != nil { + arg114 := flag.Arg(1) + mbTrans115 := thrift.NewTMemoryBufferLen(len(arg114)) + defer mbTrans115.Close() + _, err116 := mbTrans115.WriteString(arg114) + if err116 != nil { Usage() return } - factory116 := thrift.NewTJSONProtocolFactory() - jsProt117 := factory116.GetProtocol(mbTrans114) + factory117 := thrift.NewTJSONProtocolFactory() + jsProt118 := factory117.GetProtocol(mbTrans115) argvalue0 := aurora.NewJobUpdateQuery() - err118 := argvalue0.Read(jsProt117) - if err118 != nil { + err119 := argvalue0.Read(jsProt118) + if err119 != nil { Usage() return } 
@@ -349,19 +350,19 @@ func main() {
       fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
       flag.Usage()
     }
-    arg119 := flag.Arg(1)
-    mbTrans120 := thrift.NewTMemoryBufferLen(len(arg119))
-    defer mbTrans120.Close()
-    _, err121 := mbTrans120.WriteString(arg119)
-    if err121 != nil {
+    arg120 := flag.Arg(1)
+    mbTrans121 := thrift.NewTMemoryBufferLen(len(arg120))
+    defer mbTrans121.Close()
+    _, err122 := mbTrans121.WriteString(arg120)
+    if err122 != nil {
       Usage()
       return
     }
-    factory122 := thrift.NewTJSONProtocolFactory()
-    jsProt123 := factory122.GetProtocol(mbTrans120)
+    factory123 := thrift.NewTJSONProtocolFactory()
+    jsProt124 := factory123.GetProtocol(mbTrans121)
     argvalue0 := aurora.NewJobUpdateQuery()
-    err124 := argvalue0.Read(jsProt123)
-    if err124 != nil {
+    err125 := argvalue0.Read(jsProt124)
+    if err125 != nil {
       Usage()
       return
     }
@@ -374,19 +375,19 @@ func main() {
       fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
       flag.Usage()
     }
-    arg125 := flag.Arg(1)
-    mbTrans126 := thrift.NewTMemoryBufferLen(len(arg125))
-    defer mbTrans126.Close()
-    _, err127 := mbTrans126.WriteString(arg125)
-    if err127 != nil {
+    arg126 := flag.Arg(1)
+    mbTrans127 := thrift.NewTMemoryBufferLen(len(arg126))
+    defer mbTrans127.Close()
+    _, err128 := mbTrans127.WriteString(arg126)
+    if err128 != nil {
       Usage()
       return
     }
-    factory128 := thrift.NewTJSONProtocolFactory()
-    jsProt129 := factory128.GetProtocol(mbTrans126)
+    factory129 := thrift.NewTJSONProtocolFactory()
+    jsProt130 := factory129.GetProtocol(mbTrans127)
     argvalue0 := aurora.NewJobUpdateRequest()
-    err130 := argvalue0.Read(jsProt129)
-    if err130 != nil {
+    err131 := argvalue0.Read(jsProt130)
+    if err131 != nil {
       Usage()
       return
     }
diff --git a/generateBindings.sh b/generateBindings.sh
index 9ff0205..03acd6d 100755
--- a/generateBindings.sh
+++ b/generateBindings.sh
@@ -1,6 +1,6 @@
 #! /bin/bash
 
-THRIFT_VER=0.12.0
+THRIFT_VER=0.13.0
 
 if [[ $(thrift -version | grep -e $THRIFT_VER -c) -ne 1 ]]; then
     echo "Warning: This wrapper has only been tested with version" $THRIFT_VER;
diff --git a/go.mod b/go.mod
index 7a69f0d..09f4e2a 100644
--- a/go.mod
+++ b/go.mod
@@ -7,6 +7,6 @@ require (
 	github.com/stretchr/testify v1.5.0
 )
 
-replace github.com/apache/thrift v0.12.0 => github.com/ridv/thrift v0.12.2
+replace github.com/apache/thrift v0.13.0 => github.com/ridv/thrift v0.13.1
 
 go 1.13
diff --git a/jobUpdate.go b/jobUpdate.go
index b4fbfea..6b285df 100644
--- a/jobUpdate.go
+++ b/jobUpdate.go
@@ -31,32 +31,36 @@ type JobUpdate struct {
 
 func NewJobUpdate() *JobUpdate {
 	newTask := NewTask()
-	req := aurora.JobUpdateRequest{}
-	req.TaskConfig = newTask.TaskConfig()
-	req.Settings = newUpdateSettings()
-
-	return &JobUpdate{task: newTask, request: &req}
+	return &JobUpdate{
+		task:    newTask,
+		request: &aurora.JobUpdateRequest{TaskConfig: newTask.TaskConfig(), Settings: newUpdateSettings()},
+	}
 }
 
+// JobUpdateFromAuroraTask creates an update with default values using an AuroraTask as the underlying
+// task configuration. This function has a high-level understanding of Aurora tasks and thus supports
+// copying a task that is configured to use Thermos.
 func JobUpdateFromAuroraTask(task *AuroraTask) *JobUpdate {
 	newTask := task.Clone()
-	req := aurora.JobUpdateRequest{}
-	req.TaskConfig = newTask.TaskConfig()
-	req.Settings = newUpdateSettings()
-
-	return &JobUpdate{task: newTask, request: &req}
+	return &JobUpdate{
+		task:    newTask,
+		request: &aurora.JobUpdateRequest{TaskConfig: newTask.TaskConfig(), Settings: newUpdateSettings()},
+	}
 }
 
+// JobUpdateFromConfig creates an update with default values using an aurora.TaskConfig
+// primitive as the underlying task configuration.
+// This function should not be used unless the implications of using a primitive value are understood.
+// For example, the primitive has no concept of Thermos.
 func JobUpdateFromConfig(task *aurora.TaskConfig) *JobUpdate {
 	// Perform a deep copy to avoid unexpected behavior
 	newTask := TaskFromThrift(task)
-	req := aurora.JobUpdateRequest{}
-	req.TaskConfig = newTask.TaskConfig()
-	req.Settings = newUpdateSettings()
-
-	return &JobUpdate{task: newTask, request: &req}
+	return &JobUpdate{
+		task:    newTask,
+		request: &aurora.JobUpdateRequest{TaskConfig: newTask.TaskConfig(), Settings: newUpdateSettings()},
+	}
 }
 
 // Set instance count the job will have after the update.
@@ -106,6 +110,26 @@ func (j *JobUpdate) PulseIntervalTimeout(timeout time.Duration) *JobUpdate {
 	j.request.Settings.BlockIfNoPulsesAfterMs = thrift.Int32Ptr(int32(timeout.Seconds() * 1000))
 	return j
 }
+func (j *JobUpdate) BatchUpdateStrategy(autoPause bool, batchSize int32) *JobUpdate {
+	j.request.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{
+		BatchStrategy: &aurora.BatchJobUpdateStrategy{GroupSize: batchSize, AutopauseAfterBatch: autoPause},
+	}
+	return j
+}
+
+func (j *JobUpdate) QueueUpdateStrategy(groupSize int32) *JobUpdate {
+	j.request.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{
+		QueueStrategy: &aurora.QueueJobUpdateStrategy{GroupSize: groupSize},
+	}
+	return j
+}
+
+func (j *JobUpdate) VariableBatchStrategy(autoPause bool, batchSizes ...int32) *JobUpdate {
+	j.request.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{
+		VarBatchStrategy: &aurora.VariableBatchJobUpdateStrategy{GroupSizes: batchSizes, AutopauseAfterBatch: autoPause},
+	}
+	return j
+}
 
 func newUpdateSettings() *aurora.JobUpdateSettings {
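To make the intended call pattern concrete, a minimal usage sketch of the new strategy setters follows (illustrative, not part of the patch; it assumes a connected *realis.Client named c and a job built with realis.NewJob() whose Thermos payload has already been populated, as in the e2e tests further down):

	// Roll out in growing batches of 1, 2, and then 3 instances,
	// pausing automatically after each batch completes.
	update := realis.JobUpdateFromAuroraTask(job.AuroraTask()).
		VariableBatchStrategy(true, 1, 2, 3).
		InstanceCount(6).
		WatchTime(1000)

	result, err := c.StartJobUpdate(update, "")
	if err != nil {
		log.Fatal("unable to start update: ", err)
	}
	fmt.Println("update started with key:", result.GetKey())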
diff --git a/monitors.go b/monitors.go
index 52bcf5b..963c017 100644
--- a/monitors.go
+++ b/monitors.go
@@ -244,3 +244,68 @@ func (c *Client) MonitorHostMaintenance(hosts []string,
 		}
 	}
 }
+
+// MonitorAutoPausedUpdate is a special monitor for batch updates that have auto-pause enabled. It first
+// verifies that the update being monitored is capable of auto pausing and has auto pausing enabled, then
+// watches for the update to enter the ROLL_FORWARD_PAUSED state and calculates which batch the update
+// is currently in using information from the update configuration.
+func (c *Client) MonitorAutoPausedUpdate(key aurora.JobUpdateKey, interval, timeout time.Duration) (int, error) {
+	key.Job = &aurora.JobKey{
+		Role:        key.Job.Role,
+		Environment: key.Job.Environment,
+		Name:        key.Job.Name,
+	}
+	query := aurora.JobUpdateQuery{
+		UpdateStatuses: aurora.ACTIVE_JOB_UPDATE_STATES,
+		Limit:          1,
+		Key:            &key,
+	}
+
+	updateDetails, err := c.JobUpdateDetails(query)
+	if err != nil {
+		return -1, errors.Wrap(err, "unable to get information about update")
+	}
+
+	if len(updateDetails) == 0 {
+		return -1, errors.Errorf("details for update could not be found")
+	}
+
+	updateStrategy := updateDetails[0].Update.Instructions.Settings.UpdateStrategy
+
+	var batchSizes []int32
+	switch {
+	case updateStrategy.IsSetVarBatchStrategy():
+		batchSizes = updateStrategy.VarBatchStrategy.GroupSizes
+		if !updateStrategy.VarBatchStrategy.AutopauseAfterBatch {
+			return -1, errors.Errorf("update does not have auto pause enabled")
+		}
+	case updateStrategy.IsSetBatchStrategy():
+		batchSizes = []int32{updateStrategy.BatchStrategy.GroupSize}
+		if !updateStrategy.BatchStrategy.AutopauseAfterBatch {
+			return -1, errors.Errorf("update does not have auto pause enabled")
+		}
+	default:
+		return -1, errors.Errorf("update is not using a batch update strategy")
+	}
+
+	query.UpdateStatuses = append(TerminalUpdateStates(), aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED)
+	summary, err := c.MonitorJobUpdateQuery(query, interval, timeout)
+	if err != nil {
+		return -1, err
+	}
+
+	// Index 0 is assumed to exist because MonitorJobUpdateQuery returns an error if there are no summaries
+	if summary[0].State.Status != aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED {
+		return -1, errors.Errorf("update is in a terminal state %v", summary[0].State.Status)
+	}
+
+	updatingInstances := make(map[int32]struct{})
+	for _, e := range updateDetails[0].InstanceEvents {
+		// Only INSTANCE_UPDATING actions matter here; we just need to know an update was attempted
+		if e != nil && e.GetAction() == aurora.JobUpdateAction_INSTANCE_UPDATING {
+			updatingInstances[e.GetInstanceId()] = struct{}{}
+		}
+	}
+
+	return calculateCurrentBatch(int32(len(updatingInstances)), batchSizes), nil
+}
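The monitor above is meant to be driven in a loop: wait for the pause, verify the batch, resume. A sketch under the same assumptions as before (client c, update key key returned by StartJobUpdate, and an auto-pausing batch strategy configured):

	for {
		// Blocks until the update pauses at a batch boundary or reaches a terminal state.
		step, err := c.MonitorAutoPausedUpdate(key, 5*time.Second, 10*time.Minute)
		if err != nil {
			// Terminal state, timeout, or the update is not an auto-pausing batch update.
			break
		}

		// step is the zero-based batch the update paused on; run health checks here
		// before allowing the next batch to proceed.
		fmt.Println("update paused after batch", step)

		if err := c.ResumeJobUpdate(key, "batch verified"); err != nil {
			break
		}
	}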
diff --git a/realis_e2e_test.go b/realis_e2e_test.go
index a74617a..38e39e7 100644
--- a/realis_e2e_test.go
+++ b/realis_e2e_test.go
@@ -26,6 +26,7 @@ import (
 	"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 var r *realis.Client
@@ -698,7 +699,6 @@ func TestRealisClient_PartitionPolicy(t *testing.T) {
 		Environment("prod").
 		Role(role).
 		Name("create_thermos_job_partition_policy_test").
-		ExecutorName(aurora.AURORA_EXECUTOR_NAME).
 		ThermosExecutor(thermosExec).
 		CPU(.5).
 		RAM(64).
@@ -723,3 +723,103 @@ func TestRealisClient_PartitionPolicy(t *testing.T) {
 	}
 }
+
+func TestRealisClient_UpdateStrategies(t *testing.T) {
+	// Create a single job
+	job := realis.NewJob().
+		Environment("prod").
+		Role("vagrant").
+		ThermosExecutor(thermosExec).
+		CPU(.01).
+		RAM(4).
+		Disk(10).
+		InstanceCount(6).
+		IsService(true)
+
+	// Needed to populate the task config correctly
+	assert.NoError(t, job.BuildThermosPayload())
+
+	strategies := []struct {
+		jobUpdate *realis.JobUpdate
+		Name      string
+	}{
+		{
+			jobUpdate: realis.JobUpdateFromAuroraTask(job.AuroraTask()).
+				QueueUpdateStrategy(2).
+				InstanceCount(6).
+				WatchTime(1000),
+			Name: "Queue",
+		},
+		{
+			jobUpdate: realis.JobUpdateFromAuroraTask(job.AuroraTask()).
+				BatchUpdateStrategy(false, 2).
+				InstanceCount(6).
+				WatchTime(1000),
+			Name: "Batch",
+		},
+		{
+			jobUpdate: realis.JobUpdateFromAuroraTask(job.AuroraTask()).
+				VariableBatchStrategy(false, 1, 2, 3).
+				InstanceCount(6).
+				WatchTime(1000),
+			Name: "VarBatch",
+		},
+	}
+
+	for _, strategy := range strategies {
+		t.Run("TestRealisClient_UpdateStrategies_"+strategy.Name, func(t *testing.T) {
+			strategy.jobUpdate.Name("update_strategies_" + strategy.Name)
+			result, err := r.StartJobUpdate(strategy.jobUpdate, "")
+
+			require.NoError(t, err)
+			assert.NotNil(t, result)
+
+			var ok bool
+			var mErr error
+			key := *result.GetKey()
+
+			if ok, mErr = r.MonitorJobUpdate(key, 5, 240); !ok || mErr != nil {
+				// Update may already be in a terminal state so don't check for error
+				assert.NoError(t, r.AbortJobUpdate(key, "Monitor timed out."))
+			}
+			assert.NoError(t, r.KillJob(strategy.jobUpdate.JobKey()))
+		})
+	}
+}
+
+func TestRealisClient_BatchAwareAutoPause(t *testing.T) {
+	// Create a single job
+	job := realis.NewJob().
+		Environment("prod").
+		Role("vagrant").
+		Name("BatchAwareAutoPauseTest").
+		ThermosExecutor(thermosExec).
+		CPU(.01).
+		RAM(4).
+		Disk(10).
+		InstanceCount(6).
+		IsService(true)
+	updateGroups := []int32{1, 2, 3}
+	strategy := realis.JobUpdateFromAuroraTask(job.AuroraTask()).
+		VariableBatchStrategy(true, updateGroups...).
+		InstanceCount(6).
+		WatchTime(1000)
+
+	result, err := r.StartJobUpdate(strategy, "")
+	require.NoError(t, err)
+	require.NotNil(t, result)
+
+	key := *result.GetKey()
+
+	for i := range updateGroups {
+		curStep, mErr := r.MonitorAutoPausedUpdate(key, time.Second*5, time.Second*240)
+		if mErr != nil {
+			// Update may already be in a terminal state so don't check for error
+			assert.NoError(t, r.AbortJobUpdate(key, "Monitor timed out."))
+		}
+
+		assert.Equal(t, i, curStep)
+		require.NoError(t, r.ResumeJobUpdate(key, "auto resuming test"))
+	}
+	assert.NoError(t, r.KillJob(strategy.JobKey()))
+}
diff --git a/util.go b/util.go
index 74b0b30..90e3dcf 100644
--- a/util.go
+++ b/util.go
@@ -40,6 +40,18 @@ func init() {
 	}
 }
 
+// TerminalUpdateStates returns a slice containing all the terminal states an update may end up in.
+// This is a function in order to avoid having a slice that can be accidentally mutated.
+func TerminalUpdateStates() []aurora.JobUpdateStatus {
+	return []aurora.JobUpdateStatus{
+		aurora.JobUpdateStatus_ROLLED_FORWARD,
+		aurora.JobUpdateStatus_ROLLED_BACK,
+		aurora.JobUpdateStatus_ABORTED,
+		aurora.JobUpdateStatus_ERROR,
+		aurora.JobUpdateStatus_FAILED,
+	}
+}
+
 func validateAuroraAddress(address string) (string, error) {
 
 	// If no protocol defined, assume http
@@ -73,3 +85,22 @@ func validateAuroraAddress(address string) (string, error) {
 
 	return u.String(), nil
 }
+
+func calculateCurrentBatch(updatingInstances int32, batchSizes []int32) int {
+	for i, size := range batchSizes {
+		updatingInstances -= size
+		if updatingInstances <= 0 {
+			return i
+		}
+	}
+
+	// Overflow: the last defined batch size repeats for any remaining instances
+	batchCount := len(batchSizes) - 1
+	lastBatchIndex := len(batchSizes) - 1
+	batchCount += int(updatingInstances / batchSizes[lastBatchIndex])
+
+	if updatingInstances%batchSizes[lastBatchIndex] != 0 {
+		batchCount++
+	}
+	return batchCount
+}
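For intuition, a trace of the overflow arithmetic in calculateCurrentBatch, matching the moreInstancesThanBatches case in util_test.go below:

	// calculateCurrentBatch(5, []int32{1, 2})
	//   5 - 1 = 4  -> batch 0 exhausted
	//   4 - 2 = 2  -> batch 1 exhausted; the loop ends with 2 instances left over
	//   overflow: batchCount = 1 (last defined batch) + 2/2 = 1 repeat of the last batch size
	//   no remainder, so the function returns 2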
diff --git a/util_test.go b/util_test.go
new file mode 100644
index 0000000..45aaf75
--- /dev/null
+++ b/util_test.go
@@ -0,0 +1,58 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package realis
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestCurrentBatchCalculator(t *testing.T) {
+	t.Run("singleBatchOverflow", func(t *testing.T) {
+		curBatch := calculateCurrentBatch(10, []int32{2})
+		assert.Equal(t, 4, curBatch)
+	})
+
+	t.Run("noInstancesUpdating", func(t *testing.T) {
+		curBatch := calculateCurrentBatch(0, []int32{2})
+		assert.Equal(t, 0, curBatch)
+	})
+
+	t.Run("evenMatchSingleBatch", func(t *testing.T) {
+		curBatch := calculateCurrentBatch(2, []int32{2})
+		assert.Equal(t, 0, curBatch)
+	})
+
+	t.Run("moreInstancesThanBatches", func(t *testing.T) {
+		curBatch := calculateCurrentBatch(5, []int32{1, 2})
+		assert.Equal(t, 2, curBatch)
+	})
+
+	t.Run("moreInstancesThanBatchesDecreasing", func(t *testing.T) {
+		curBatch := calculateCurrentBatch(5, []int32{2, 1})
+		assert.Equal(t, 3, curBatch)
+	})
+
+	t.Run("unevenFit", func(t *testing.T) {
+		curBatch := calculateCurrentBatch(2, []int32{1, 2})
+		assert.Equal(t, 1, curBatch)
+	})
+
+	t.Run("halfWay", func(t *testing.T) {
+		curBatch := calculateCurrentBatch(1, []int32{1, 2})
+		assert.Equal(t, 0, curBatch)
+	})
+}