Compare commits

...

24 commits

Author SHA1 Message Date
lawwong1
d2fd7b9ba9
merge retry mechanism change from gorealis v1 to gorealis v2 (#21) 2023-01-26 13:36:40 -08:00
lawwong1
8db625730f
Support Australis API to get aurora master nodes and mesos master nodes (#20) 2022-08-24 08:51:12 -07:00
Tan N. Le
e33a2d99d8
release 2.28.0 (#19) 2022-08-02 09:55:36 -07:00
Tan N. Le
4258634ccf
Capacity report (#18)
- pull the capacity report via the /offers endpoint.
- calculate how many tasks (with resources and constraints) can fit in the cluster.
Examples of using the above two features are in aurora-scheduler/australis#33
2022-07-28 19:27:53 -07:00
Tan N. Le
5d0998647a
default policy for slaDrainHosts (#17) 2021-11-01 18:15:51 -07:00
Renán I. Del Valle
907430768c
Misc. fixes for tests (#16)
* Bumping up CI to go1.17 and enabling CI for PRs.

* Adding go.sum now that issues seem to have gone away.

* Bump up aurora to 0.25.0 and mesos to 1.9.0

* Fixing Mac tests. Adding extra time for killing thermos jobs.

* Reduce the thermos overhead for unit tests

Co-authored-by: lenhattan86 <lenhattan86@users.noreply.github.com>
2021-10-25 12:39:13 -07:00
lenhattan86
fe664178ce
Add tier & production in task config (#14) 2021-10-15 12:18:26 -07:00
lenhattan86
a75b691d72
Merge pull request #15 from lenhattan86/fix_unit_test
Fix unit test for GetJobSummary
2021-10-07 13:14:25 -07:00
lenhattan86
306603795b fix unit test error for GetJobSummary 2021-10-06 22:37:54 -07:00
lenhattan86
045a4869a5
Merge branch 'aurora-scheduler:master' into master 2021-10-06 14:01:03 -07:00
lenhattan86
425faf28b8
Adds priority for aurora-scheduler (#13)
Adds priority for task config
2021-09-16 16:29:25 -07:00
lenhattan86
2d81147aaa Merge branch 'aurora-scheduler:master' into master 2021-06-01 21:35:42 -07:00
Renán I. Del Valle
983bf44b9f
Update thrift to 0.14.0 (#9)
Generated thrift stubs using 0.14.0 compiler version.
Script now tells user to use version 0.14.0 of thrift compiler.
2021-03-01 16:52:25 -08:00
Renán I. Del Valle
d0be43b8ac
Dropping support for dep (#10)
Dep files are no longer necessary.
2021-03-01 15:36:28 -08:00
lenhattan86
b1661698c2
GetJobSummary API (#8)
* Adds GetJobSummary API
2021-01-12 16:18:09 -08:00
lenhattan86
364ee93202
Merge pull request #1 from aurora-scheduler/master
pull from upstream
2021-01-12 15:09:36 -08:00
Renan DelValle
755f99fb76
Fixes style issue with jobupdate file. 2020-11-16 21:51:02 -08:00
Renán I. Del Valle
caf1444250
Removes variables from github actions
GitHub Actions deprecated support for using env files without previously setting them. Adjusting CI scripts accordingly.
2020-11-16 21:45:00 -08:00
lenhattan86
c3dbeba2bd
Adds ability to fetch Mesos Master leader (#7)
* Adds ability to fetch Mesos Master leader from Zookeeper
2020-11-15 16:44:21 -08:00
Renan DelValle
6c639362c8
Bumping up go tests time to 30m. 2020-07-27 21:44:35 -07:00
Renán I. Del Valle
4cf60775f5
Bumping up thrift go library version to v0.13.2 (#6)
Thrift v0.13.2 is a forked version of v0.13.0 with a patch to avoid panicking when writing to a closed buffer. Instead, we get an error back and can handle it appropriately.
2020-05-26 20:32:51 -07:00
Renán I. Del Valle
30f804bc53
Update using-the-sample-client.md
Fixing typo on doc.
2020-05-20 18:26:33 -07:00
Renán I. Del Valle
e5d63579e8
Update using-the-sample-client.md
Updating instructions for using the sample client.
2020-05-20 18:26:05 -07:00
Renán I. Del Valle
34a950306d
Update developing.md
Updating documentation for developing gorealis
2020-05-20 18:21:47 -07:00
40 changed files with 12455 additions and 7677 deletions

View file

@ -1 +1 @@
0.21.0
0.26.0

View file

@ -1,26 +1,32 @@
name: CI
on: [push]
on:
push:
branches:
- master
pull_request:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- name: Setup Go for use with actions
uses: actions/setup-go@v1.0.0
uses: actions/setup-go@v2
with:
version: 1.13
go-version: 1.17
- name: Install goimports
run: go get golang.org/x/tools/cmd/goimports
- name: Set env with list of directories in repo containing go code
run: echo GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/") >> $GITHUB_ENV
- name: Run goimports check
run: test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
env:
GO_USR_DIRS: $(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/")
- name: Create aurora/mesos docker cluster
run: docker-compose up -d
- name: Run tests
run: go test -timeout 20m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/aurora-scheduler/gorealis/v2
run: go test -timeout 35m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/aurora-scheduler/gorealis/v2

.gitignore (vendored): 3 changes
View file

@ -41,6 +41,3 @@ _testmain.go
# Example client build
examples/client
examples/jsonClient
# Use checksum database
go.sum

Gopkg.lock (generated): 60 changes
View file

@ -1,60 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "0.12.0"
digest = "1:89696c38cec777120b8b1bb5e2d363d655cf2e1e7d8c851919aaa0fd576d9b86"
name = "github.com/apache/thrift"
packages = ["lib/go/thrift"]
pruneopts = ""
revision = "384647d290e2e4a55a14b1b7ef1b7e66293a2c33"
[[projects]]
digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = ""
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:df48fb76fb2a40edea0c9b3d960bc95e326660d82ff1114e1f88001f7a236b40"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = ""
revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
[[projects]]
digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = ""
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:78bea5e26e82826dacc5fd64a1013a6711b7075ec8072819b89e6ad76cb8196d"
name = "github.com/samuel/go-zookeeper"
packages = ["zk"]
pruneopts = ""
revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
[[projects]]
digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75"
name = "github.com/stretchr/testify"
packages = ["assert"]
pruneopts = ""
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
version = "v1.2.2"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/apache/thrift/lib/go/thrift",
"github.com/pkg/errors",
"github.com/samuel/go-zookeeper/zk",
"github.com/stretchr/testify/assert",
]
solver-name = "gps-cdcl"
solver-version = 1

View file

@ -1,16 +0,0 @@
[[constraint]]
name = "github.com/apache/thrift"
branch = "0.12.0"
[[constraint]]
name = "github.com/pkg/errors"
revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
[[constraint]]
name = "github.com/samuel/go-zookeeper"
revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.0"

View file

@ -28,6 +28,7 @@ type Cluster struct {
ZK string `json:"zk"`
ZKPort int `json:"zk_port"`
SchedZKPath string `json:"scheduler_zk_path"`
MesosZKPath string `json:"mesos_zk_path"`
SchedURI string `json:"scheduler_uri"`
ProxyURL string `json:"proxy_url"`
AuthMechanism string `json:"auth_mechanism"`
@ -61,6 +62,7 @@ func GetDefaultClusterFromZKUrl(zkURL string) *Cluster {
AuthMechanism: "UNAUTHENTICATED",
ZK: zkURL,
SchedZKPath: "/aurora/scheduler",
MesosZKPath: "/mesos",
AgentRunDir: "latest",
AgentRoot: "/var/lib/mesos",
}
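
The MesosZKPath addition pairs the existing scheduler path with a ZooKeeper path for the Mesos masters, and GetDefaultClusterFromZKUrl now defaults it to /mesos. A minimal sketch of relying on those defaults (the import path and field names come from this diff; the ZK address is illustrative):

```
package main

import (
	"fmt"

	realis "github.com/aurora-scheduler/gorealis/v2"
)

func main() {
	// SchedZKPath and MesosZKPath default to /aurora/scheduler and /mesos.
	cluster := realis.GetDefaultClusterFromZKUrl("192.168.33.2:2181")
	fmt.Println(cluster.SchedZKPath, cluster.MesosZKPath)
}
```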

View file

@ -32,6 +32,7 @@ func TestLoadClusters(t *testing.T) {
assert.Equal(t, clusters["devcluster"].Name, "devcluster")
assert.Equal(t, clusters["devcluster"].ZK, "192.168.33.7")
assert.Equal(t, clusters["devcluster"].SchedZKPath, "/aurora/scheduler")
assert.Equal(t, clusters["devcluster"].MesosZKPath, "/mesos")
assert.Equal(t, clusters["devcluster"].AuthMechanism, "UNAUTHENTICATED")
assert.Equal(t, clusters["devcluster"].AgentRunDir, "latest")
assert.Equal(t, clusters["devcluster"].AgentRoot, "/var/lib/mesos")

View file

@ -14,7 +14,7 @@ services:
ipv4_address: 192.168.33.2
master:
image: rdelvalle/mesos-master:1.6.2
image: quay.io/aurorascheduler/mesos-master:1.9.0
restart: on-failure
ports:
- "5050:5050"
@ -32,7 +32,7 @@ services:
- zk
agent-one:
image: rdelvalle/mesos-agent:1.6.2
image: quay.io/aurorascheduler/mesos-agent:1.9.0
pid: host
restart: on-failure
ports:
@ -41,10 +41,11 @@ services:
MESOS_MASTER: zk://192.168.33.2:2181/mesos
MESOS_CONTAINERIZERS: docker,mesos
MESOS_PORT: 5051
MESOS_HOSTNAME: localhost
MESOS_HOSTNAME: agent-one
MESOS_RESOURCES: ports(*):[11000-11999]
MESOS_SYSTEMD_ENABLE_SUPPORT: 'false'
MESOS_WORK_DIR: /tmp/mesos
MESOS_ATTRIBUTES: 'host:agent-one;rack:1;zone:west'
networks:
aurora_cluster:
ipv4_address: 192.168.33.4
@ -55,8 +56,58 @@ services:
depends_on:
- zk
agent-two:
image: quay.io/aurorascheduler/mesos-agent:1.9.0
pid: host
restart: on-failure
ports:
- "5052:5051"
environment:
MESOS_MASTER: zk://192.168.33.2:2181/mesos
MESOS_CONTAINERIZERS: docker,mesos
MESOS_PORT: 5051
MESOS_HOSTNAME: agent-two
MESOS_RESOURCES: ports(*):[11000-11999]
MESOS_SYSTEMD_ENABLE_SUPPORT: 'false'
MESOS_WORK_DIR: /tmp/mesos
MESOS_ATTRIBUTES: 'host:agent-two;rack:2;zone:west'
networks:
aurora_cluster:
ipv4_address: 192.168.33.5
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- zk
agent-three:
image: quay.io/aurorascheduler/mesos-agent:1.9.0
pid: host
restart: on-failure
ports:
- "5053:5051"
environment:
MESOS_MASTER: zk://192.168.33.2:2181/mesos
MESOS_CONTAINERIZERS: docker,mesos
MESOS_PORT: 5051
MESOS_HOSTNAME: agent-three
MESOS_RESOURCES: ports(*):[11000-11999]
MESOS_SYSTEMD_ENABLE_SUPPORT: 'false'
MESOS_WORK_DIR: /tmp/mesos
MESOS_ATTRIBUTES: 'host:agent-three;rack:2;zone:west;dedicated:vagrant/bar'
networks:
aurora_cluster:
ipv4_address: 192.168.33.6
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- zk
aurora-one:
image: rdelvalle/aurora:0.22.0
image: quay.io/aurorascheduler/scheduler:0.25.0
pid: host
ports:
- "8081:8081"
@ -70,6 +121,7 @@ services:
-shiro_realm_modules=INI_AUTHNZ
-shiro_ini_path=/etc/aurora/security.ini
-min_required_instances_for_sla_check=1
-thermos_executor_cpu=0.09
volumes:
- ./.aurora-config:/etc/aurora
networks:
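
The agent attributes above (host, rack, zone, and dedicated:vagrant/bar on agent-three) exist so tests can exercise constraint-based scheduling across the three agents. A hedged sketch, assuming gorealis's constraint builder methods keep their v1 signatures (AddValueConstraint, AddLimitConstraint, AddDedicatedConstraint):

```
// Target the attributes advertised by the compose agents above.
job := realis.NewJob().
	Environment("prod").
	Role("vagrant").
	Name("constrained_job").
	CPU(0.25).
	RAM(64).
	Disk(100).
	InstanceCount(2).
	AddValueConstraint("zone", false, "west"). // only agents with zone:west
	AddLimitConstraint("rack", 1)              // at most one instance per rack
// For agent-three's dedicated:vagrant/bar slot, AddDedicatedConstraint("vagrant", "bar")
// would replace the value constraints.
```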

View file

@ -19,25 +19,18 @@ This also allows us to delete and recreate our development cluster very quickly.
To install docker-compose please follow the instructions for your platform
[here](https://docs.docker.com/compose/install/).
### Getting the source code
As of go 1.10.x, GOPATH is still relevant. This may change in the future but
for the sake of making development less error prone, it is suggested that the following
directories be created:
`$ git clone https://github.com/aurora-scheduler/gorealis`
`$ mkdir -p $GOPATH/src/github.com/paypal`
Inside of the newly cloned repo you may download dependencies to the local cache using go mod
And then clone the master branch into the newly created folder:
`$ cd $GOPATH/src/github.com/paypal; git clone git@github.com:paypal/gorealis.git`
Since we check in our vendor folder, no further gorealis setup is needed.
`$ go mod download`
### Bringing up the cluster
To develop gorealis, you will need a fully functioning Mesos cluster along with
Apache Aurora.
To develop gorealis, you will need a fully functioning Mesos cluster along with
the Aurora Scheduler.
In order to bring up our docker-compose set up execute the following command from the root
of the git repository:
@ -62,14 +55,14 @@ environment but not when running under MacOS. To run code involving the ZK leade
For example, running the tests in a container can be done through the following command from
the root of the git repository:
`$ docker run -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10.3-alpine go test github.com/paypal/gorealis`
`$ docker run -t -v $(pwd):/go/src/github.com/aurora-scheduler/gorealis --network gorealis_aurora_cluster golang:1.14.3-alpine go test github.com/paypal/gorealis`
Or
`$ ./runTestsMac.sh`
Alternatively, if an interactive shell is necessary, the following command may be used:
`$ docker run -it -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10.3-alpine /bin/sh`
`$ docker run -it -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.14.3-alpine /bin/sh`
### Cleaning up the cluster
@ -85,6 +78,3 @@ Once development is done, the environment may be torn down by executing (from th
git directory):
`$ docker-compose down`

View file

@ -247,6 +247,9 @@ job = realis.NewJob().
RAM(64).
Disk(100).
IsService(false).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(1).
AddPorts(1).
AddLabel("fileName", "sample-app/docker-compose.yml").
@ -291,6 +294,9 @@ job = realis.NewJob().
RAM(64).
Disk(100).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(1).
AddPorts(1)
```
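
The three new builder calls travel together throughout these docs: Production marks whether the task counts as production, Tier selects the Aurora SLA tier, and Priority orders tasks for preemption. Assembled from the chains above into one complete sketch (the Environment/Role/Name values are placeholders):

```
job := realis.NewJob().
	Environment("prod").
	Role("vagrant").
	Name("hello_world_from_gorealis").
	CPU(0.25).
	RAM(64).
	Disk(100).
	IsService(false).
	Production(false).   // not a production task
	Tier("preemptible"). // may be preempted by production work
	Priority(0).
	InstanceCount(1).
	AddPorts(1)
```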

View file

@ -25,6 +25,9 @@ job = realis.NewJob().
RAM(64).
Disk(100).
IsService(false).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(1).
AddPorts(1).
AddLabel("fileName", "sample-app/docker-compose.yml").

View file

@ -22,28 +22,25 @@ Usage of ./client:
```
## Sample commands:
These commands are set to run on a Vagrant box. To run the Docker Compose executor examples, the Vagrant box must be configured properly to use the Docker Compose executor.
### Thermos
#### Creating a Thermos job
```
$ cd $GOPATH/src/github.com/paypal/gorealis/examples
$ go run client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=create
$ go run examples/client.go -url=http://localhost:8081 -executor=thermos -cmd=create
```
#### Kill a Thermos job
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
$ go run examples/client.go -url=http://localhost:8081 -executor=thermos -cmd=kill
```
### Docker Compose executor (custom executor)
#### Creating Docker Compose executor job
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
$ go run examples/client.go -url=http://192.168.33.7:8081 -executor=compose -cmd=create
```
#### Kill a Docker Compose executor job
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
$ go run examples/client.go -url=http://192.168.33.7:8081 -executor=compose -cmd=kill
```

View file

@ -124,6 +124,9 @@ func main() {
RAM(64).
Disk(100).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(1).
AddPorts(1).
ThermosExecutor(thermosExec)
@ -138,6 +141,9 @@ func main() {
RAM(512).
Disk(100).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(1).
AddPorts(4).
AddLabel("fileName", "sample-app/docker-compose.yml").
@ -151,6 +157,9 @@ func main() {
RAM(64).
Disk(100).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(1).
AddPorts(1)
default:

View file

@ -2,6 +2,7 @@
"name": "devcluster",
"zk": "192.168.33.7",
"scheduler_zk_path": "/aurora/scheduler",
"mesos_zk_path": "/mesos",
"auth_mechanism": "UNAUTHENTICATED",
"slave_run_directory": "latest",
"slave_root": "/var/lib/mesos"

View file

@ -177,6 +177,8 @@ func main() {
RAM(job.RAM).
Disk(job.Disk).
IsService(job.Service).
Tier("preemptible").
Priority(0).
InstanceCount(job.Instances).
AddPorts(job.Ports)

View file

@ -1,5 +1,4 @@
// Autogenerated by Thrift Compiler (0.13.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package aurora

View file

@ -1,13 +1,12 @@
// Autogenerated by Thrift Compiler (0.13.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package aurora
import(
"bytes"
"context"
"reflect"
"fmt"
"time"
"github.com/apache/thrift/lib/go/thrift"
)
@ -15,7 +14,7 @@ import(
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = reflect.DeepEqual
var _ = time.Now
var _ = bytes.Equal
const AURORA_EXECUTOR_NAME = "AuroraExecutor"
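
Everything from here down is regenerated thrift output, and nearly every hunk repeats two mechanical changes: the generated argN/errN/factoryN counters are renumbered, and thrift 0.14 threads a context.Context through every generated Read/ReadField/Write call. A before/after sketch of the pattern (argvalue0 and jsProt as in the hunks below):

```
// thrift 0.13 generated call:
//   err := argvalue0.Read(jsProt)

// thrift 0.14 generated call takes a context as its first argument:
err := argvalue0.Read(context.Background(), jsProt)
if err != nil {
	Usage()
	return
}
```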

File diff suppressed because it is too large.

View file

@ -1,5 +1,4 @@
// Autogenerated by Thrift Compiler (0.13.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package main
@ -196,19 +195,19 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
arg355 := flag.Arg(2)
mbTrans356 := thrift.NewTMemoryBufferLen(len(arg355))
defer mbTrans356.Close()
_, err357 := mbTrans356.WriteString(arg355)
if err357 != nil {
arg405 := flag.Arg(2)
mbTrans406 := thrift.NewTMemoryBufferLen(len(arg405))
defer mbTrans406.Close()
_, err407 := mbTrans406.WriteString(arg405)
if err407 != nil {
Usage()
return
}
factory358 := thrift.NewTJSONProtocolFactory()
jsProt359 := factory358.GetProtocol(mbTrans356)
factory408 := thrift.NewTJSONProtocolFactory()
jsProt409 := factory408.GetProtocol(mbTrans406)
argvalue1 := aurora.NewResourceAggregate()
err360 := argvalue1.Read(jsProt359)
if err360 != nil {
err410 := argvalue1.Read(context.Background(), jsProt409)
if err410 != nil {
Usage()
return
}
@ -264,19 +263,19 @@ func main() {
fmt.Fprintln(os.Stderr, "QueryRecovery requires 1 args")
flag.Usage()
}
arg363 := flag.Arg(1)
mbTrans364 := thrift.NewTMemoryBufferLen(len(arg363))
defer mbTrans364.Close()
_, err365 := mbTrans364.WriteString(arg363)
if err365 != nil {
arg413 := flag.Arg(1)
mbTrans414 := thrift.NewTMemoryBufferLen(len(arg413))
defer mbTrans414.Close()
_, err415 := mbTrans414.WriteString(arg413)
if err415 != nil {
Usage()
return
}
factory366 := thrift.NewTJSONProtocolFactory()
jsProt367 := factory366.GetProtocol(mbTrans364)
factory416 := thrift.NewTJSONProtocolFactory()
jsProt417 := factory416.GetProtocol(mbTrans414)
argvalue0 := aurora.NewTaskQuery()
err368 := argvalue0.Read(jsProt367)
if err368 != nil {
err418 := argvalue0.Read(context.Background(), jsProt417)
if err418 != nil {
Usage()
return
}
@ -289,19 +288,19 @@ func main() {
fmt.Fprintln(os.Stderr, "DeleteRecoveryTasks requires 1 args")
flag.Usage()
}
arg369 := flag.Arg(1)
mbTrans370 := thrift.NewTMemoryBufferLen(len(arg369))
defer mbTrans370.Close()
_, err371 := mbTrans370.WriteString(arg369)
if err371 != nil {
arg419 := flag.Arg(1)
mbTrans420 := thrift.NewTMemoryBufferLen(len(arg419))
defer mbTrans420.Close()
_, err421 := mbTrans420.WriteString(arg419)
if err421 != nil {
Usage()
return
}
factory372 := thrift.NewTJSONProtocolFactory()
jsProt373 := factory372.GetProtocol(mbTrans370)
factory422 := thrift.NewTJSONProtocolFactory()
jsProt423 := factory422.GetProtocol(mbTrans420)
argvalue0 := aurora.NewTaskQuery()
err374 := argvalue0.Read(jsProt373)
if err374 != nil {
err424 := argvalue0.Read(context.Background(), jsProt423)
if err424 != nil {
Usage()
return
}
@ -330,19 +329,19 @@ func main() {
fmt.Fprintln(os.Stderr, "StartMaintenance requires 1 args")
flag.Usage()
}
arg375 := flag.Arg(1)
mbTrans376 := thrift.NewTMemoryBufferLen(len(arg375))
defer mbTrans376.Close()
_, err377 := mbTrans376.WriteString(arg375)
if err377 != nil {
arg425 := flag.Arg(1)
mbTrans426 := thrift.NewTMemoryBufferLen(len(arg425))
defer mbTrans426.Close()
_, err427 := mbTrans426.WriteString(arg425)
if err427 != nil {
Usage()
return
}
factory378 := thrift.NewTJSONProtocolFactory()
jsProt379 := factory378.GetProtocol(mbTrans376)
factory428 := thrift.NewTJSONProtocolFactory()
jsProt429 := factory428.GetProtocol(mbTrans426)
argvalue0 := aurora.NewHosts()
err380 := argvalue0.Read(jsProt379)
if err380 != nil {
err430 := argvalue0.Read(context.Background(), jsProt429)
if err430 != nil {
Usage()
return
}
@ -355,19 +354,19 @@ func main() {
fmt.Fprintln(os.Stderr, "DrainHosts requires 1 args")
flag.Usage()
}
arg381 := flag.Arg(1)
mbTrans382 := thrift.NewTMemoryBufferLen(len(arg381))
defer mbTrans382.Close()
_, err383 := mbTrans382.WriteString(arg381)
if err383 != nil {
arg431 := flag.Arg(1)
mbTrans432 := thrift.NewTMemoryBufferLen(len(arg431))
defer mbTrans432.Close()
_, err433 := mbTrans432.WriteString(arg431)
if err433 != nil {
Usage()
return
}
factory384 := thrift.NewTJSONProtocolFactory()
jsProt385 := factory384.GetProtocol(mbTrans382)
factory434 := thrift.NewTJSONProtocolFactory()
jsProt435 := factory434.GetProtocol(mbTrans432)
argvalue0 := aurora.NewHosts()
err386 := argvalue0.Read(jsProt385)
if err386 != nil {
err436 := argvalue0.Read(context.Background(), jsProt435)
if err436 != nil {
Usage()
return
}
@ -380,19 +379,19 @@ func main() {
fmt.Fprintln(os.Stderr, "MaintenanceStatus requires 1 args")
flag.Usage()
}
arg387 := flag.Arg(1)
mbTrans388 := thrift.NewTMemoryBufferLen(len(arg387))
defer mbTrans388.Close()
_, err389 := mbTrans388.WriteString(arg387)
if err389 != nil {
arg437 := flag.Arg(1)
mbTrans438 := thrift.NewTMemoryBufferLen(len(arg437))
defer mbTrans438.Close()
_, err439 := mbTrans438.WriteString(arg437)
if err439 != nil {
Usage()
return
}
factory390 := thrift.NewTJSONProtocolFactory()
jsProt391 := factory390.GetProtocol(mbTrans388)
factory440 := thrift.NewTJSONProtocolFactory()
jsProt441 := factory440.GetProtocol(mbTrans438)
argvalue0 := aurora.NewHosts()
err392 := argvalue0.Read(jsProt391)
if err392 != nil {
err442 := argvalue0.Read(context.Background(), jsProt441)
if err442 != nil {
Usage()
return
}
@ -405,19 +404,19 @@ func main() {
fmt.Fprintln(os.Stderr, "EndMaintenance requires 1 args")
flag.Usage()
}
arg393 := flag.Arg(1)
mbTrans394 := thrift.NewTMemoryBufferLen(len(arg393))
defer mbTrans394.Close()
_, err395 := mbTrans394.WriteString(arg393)
if err395 != nil {
arg443 := flag.Arg(1)
mbTrans444 := thrift.NewTMemoryBufferLen(len(arg443))
defer mbTrans444.Close()
_, err445 := mbTrans444.WriteString(arg443)
if err445 != nil {
Usage()
return
}
factory396 := thrift.NewTJSONProtocolFactory()
jsProt397 := factory396.GetProtocol(mbTrans394)
factory446 := thrift.NewTJSONProtocolFactory()
jsProt447 := factory446.GetProtocol(mbTrans444)
argvalue0 := aurora.NewHosts()
err398 := argvalue0.Read(jsProt397)
if err398 != nil {
err448 := argvalue0.Read(context.Background(), jsProt447)
if err448 != nil {
Usage()
return
}
@ -430,42 +429,42 @@ func main() {
fmt.Fprintln(os.Stderr, "SlaDrainHosts requires 3 args")
flag.Usage()
}
arg399 := flag.Arg(1)
mbTrans400 := thrift.NewTMemoryBufferLen(len(arg399))
defer mbTrans400.Close()
_, err401 := mbTrans400.WriteString(arg399)
if err401 != nil {
arg449 := flag.Arg(1)
mbTrans450 := thrift.NewTMemoryBufferLen(len(arg449))
defer mbTrans450.Close()
_, err451 := mbTrans450.WriteString(arg449)
if err451 != nil {
Usage()
return
}
factory402 := thrift.NewTJSONProtocolFactory()
jsProt403 := factory402.GetProtocol(mbTrans400)
factory452 := thrift.NewTJSONProtocolFactory()
jsProt453 := factory452.GetProtocol(mbTrans450)
argvalue0 := aurora.NewHosts()
err404 := argvalue0.Read(jsProt403)
if err404 != nil {
err454 := argvalue0.Read(context.Background(), jsProt453)
if err454 != nil {
Usage()
return
}
value0 := argvalue0
arg405 := flag.Arg(2)
mbTrans406 := thrift.NewTMemoryBufferLen(len(arg405))
defer mbTrans406.Close()
_, err407 := mbTrans406.WriteString(arg405)
if err407 != nil {
arg455 := flag.Arg(2)
mbTrans456 := thrift.NewTMemoryBufferLen(len(arg455))
defer mbTrans456.Close()
_, err457 := mbTrans456.WriteString(arg455)
if err457 != nil {
Usage()
return
}
factory408 := thrift.NewTJSONProtocolFactory()
jsProt409 := factory408.GetProtocol(mbTrans406)
factory458 := thrift.NewTJSONProtocolFactory()
jsProt459 := factory458.GetProtocol(mbTrans456)
argvalue1 := aurora.NewSlaPolicy()
err410 := argvalue1.Read(jsProt409)
if err410 != nil {
err460 := argvalue1.Read(context.Background(), jsProt459)
if err460 != nil {
Usage()
return
}
value1 := argvalue1
argvalue2, err411 := (strconv.ParseInt(flag.Arg(3), 10, 64))
if err411 != nil {
argvalue2, err461 := (strconv.ParseInt(flag.Arg(3), 10, 64))
if err461 != nil {
Usage()
return
}
@ -486,19 +485,19 @@ func main() {
fmt.Fprintln(os.Stderr, "TriggerExplicitTaskReconciliation requires 1 args")
flag.Usage()
}
arg412 := flag.Arg(1)
mbTrans413 := thrift.NewTMemoryBufferLen(len(arg412))
defer mbTrans413.Close()
_, err414 := mbTrans413.WriteString(arg412)
if err414 != nil {
arg462 := flag.Arg(1)
mbTrans463 := thrift.NewTMemoryBufferLen(len(arg462))
defer mbTrans463.Close()
_, err464 := mbTrans463.WriteString(arg462)
if err464 != nil {
Usage()
return
}
factory415 := thrift.NewTJSONProtocolFactory()
jsProt416 := factory415.GetProtocol(mbTrans413)
factory465 := thrift.NewTJSONProtocolFactory()
jsProt466 := factory465.GetProtocol(mbTrans463)
argvalue0 := aurora.NewExplicitReconciliationSettings()
err417 := argvalue0.Read(jsProt416)
if err417 != nil {
err467 := argvalue0.Read(context.Background(), jsProt466)
if err467 != nil {
Usage()
return
}
@ -519,19 +518,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PruneTasks requires 1 args")
flag.Usage()
}
arg418 := flag.Arg(1)
mbTrans419 := thrift.NewTMemoryBufferLen(len(arg418))
defer mbTrans419.Close()
_, err420 := mbTrans419.WriteString(arg418)
if err420 != nil {
arg468 := flag.Arg(1)
mbTrans469 := thrift.NewTMemoryBufferLen(len(arg468))
defer mbTrans469.Close()
_, err470 := mbTrans469.WriteString(arg468)
if err470 != nil {
Usage()
return
}
factory421 := thrift.NewTJSONProtocolFactory()
jsProt422 := factory421.GetProtocol(mbTrans419)
factory471 := thrift.NewTJSONProtocolFactory()
jsProt472 := factory471.GetProtocol(mbTrans469)
argvalue0 := aurora.NewTaskQuery()
err423 := argvalue0.Read(jsProt422)
if err423 != nil {
err473 := argvalue0.Read(context.Background(), jsProt472)
if err473 != nil {
Usage()
return
}
@ -544,19 +543,19 @@ func main() {
fmt.Fprintln(os.Stderr, "CreateJob requires 1 args")
flag.Usage()
}
arg424 := flag.Arg(1)
mbTrans425 := thrift.NewTMemoryBufferLen(len(arg424))
defer mbTrans425.Close()
_, err426 := mbTrans425.WriteString(arg424)
if err426 != nil {
arg474 := flag.Arg(1)
mbTrans475 := thrift.NewTMemoryBufferLen(len(arg474))
defer mbTrans475.Close()
_, err476 := mbTrans475.WriteString(arg474)
if err476 != nil {
Usage()
return
}
factory427 := thrift.NewTJSONProtocolFactory()
jsProt428 := factory427.GetProtocol(mbTrans425)
factory477 := thrift.NewTJSONProtocolFactory()
jsProt478 := factory477.GetProtocol(mbTrans475)
argvalue0 := aurora.NewJobConfiguration()
err429 := argvalue0.Read(jsProt428)
if err429 != nil {
err479 := argvalue0.Read(context.Background(), jsProt478)
if err479 != nil {
Usage()
return
}
@ -569,19 +568,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ScheduleCronJob requires 1 args")
flag.Usage()
}
arg430 := flag.Arg(1)
mbTrans431 := thrift.NewTMemoryBufferLen(len(arg430))
defer mbTrans431.Close()
_, err432 := mbTrans431.WriteString(arg430)
if err432 != nil {
arg480 := flag.Arg(1)
mbTrans481 := thrift.NewTMemoryBufferLen(len(arg480))
defer mbTrans481.Close()
_, err482 := mbTrans481.WriteString(arg480)
if err482 != nil {
Usage()
return
}
factory433 := thrift.NewTJSONProtocolFactory()
jsProt434 := factory433.GetProtocol(mbTrans431)
factory483 := thrift.NewTJSONProtocolFactory()
jsProt484 := factory483.GetProtocol(mbTrans481)
argvalue0 := aurora.NewJobConfiguration()
err435 := argvalue0.Read(jsProt434)
if err435 != nil {
err485 := argvalue0.Read(context.Background(), jsProt484)
if err485 != nil {
Usage()
return
}
@ -594,19 +593,19 @@ func main() {
fmt.Fprintln(os.Stderr, "DescheduleCronJob requires 1 args")
flag.Usage()
}
arg436 := flag.Arg(1)
mbTrans437 := thrift.NewTMemoryBufferLen(len(arg436))
defer mbTrans437.Close()
_, err438 := mbTrans437.WriteString(arg436)
if err438 != nil {
arg486 := flag.Arg(1)
mbTrans487 := thrift.NewTMemoryBufferLen(len(arg486))
defer mbTrans487.Close()
_, err488 := mbTrans487.WriteString(arg486)
if err488 != nil {
Usage()
return
}
factory439 := thrift.NewTJSONProtocolFactory()
jsProt440 := factory439.GetProtocol(mbTrans437)
factory489 := thrift.NewTJSONProtocolFactory()
jsProt490 := factory489.GetProtocol(mbTrans487)
argvalue0 := aurora.NewJobKey()
err441 := argvalue0.Read(jsProt440)
if err441 != nil {
err491 := argvalue0.Read(context.Background(), jsProt490)
if err491 != nil {
Usage()
return
}
@ -619,19 +618,19 @@ func main() {
fmt.Fprintln(os.Stderr, "StartCronJob requires 1 args")
flag.Usage()
}
arg442 := flag.Arg(1)
mbTrans443 := thrift.NewTMemoryBufferLen(len(arg442))
defer mbTrans443.Close()
_, err444 := mbTrans443.WriteString(arg442)
if err444 != nil {
arg492 := flag.Arg(1)
mbTrans493 := thrift.NewTMemoryBufferLen(len(arg492))
defer mbTrans493.Close()
_, err494 := mbTrans493.WriteString(arg492)
if err494 != nil {
Usage()
return
}
factory445 := thrift.NewTJSONProtocolFactory()
jsProt446 := factory445.GetProtocol(mbTrans443)
factory495 := thrift.NewTJSONProtocolFactory()
jsProt496 := factory495.GetProtocol(mbTrans493)
argvalue0 := aurora.NewJobKey()
err447 := argvalue0.Read(jsProt446)
if err447 != nil {
err497 := argvalue0.Read(context.Background(), jsProt496)
if err497 != nil {
Usage()
return
}
@ -644,36 +643,36 @@ func main() {
fmt.Fprintln(os.Stderr, "RestartShards requires 2 args")
flag.Usage()
}
arg448 := flag.Arg(1)
mbTrans449 := thrift.NewTMemoryBufferLen(len(arg448))
defer mbTrans449.Close()
_, err450 := mbTrans449.WriteString(arg448)
if err450 != nil {
arg498 := flag.Arg(1)
mbTrans499 := thrift.NewTMemoryBufferLen(len(arg498))
defer mbTrans499.Close()
_, err500 := mbTrans499.WriteString(arg498)
if err500 != nil {
Usage()
return
}
factory451 := thrift.NewTJSONProtocolFactory()
jsProt452 := factory451.GetProtocol(mbTrans449)
factory501 := thrift.NewTJSONProtocolFactory()
jsProt502 := factory501.GetProtocol(mbTrans499)
argvalue0 := aurora.NewJobKey()
err453 := argvalue0.Read(jsProt452)
if err453 != nil {
err503 := argvalue0.Read(context.Background(), jsProt502)
if err503 != nil {
Usage()
return
}
value0 := argvalue0
arg454 := flag.Arg(2)
mbTrans455 := thrift.NewTMemoryBufferLen(len(arg454))
defer mbTrans455.Close()
_, err456 := mbTrans455.WriteString(arg454)
if err456 != nil {
arg504 := flag.Arg(2)
mbTrans505 := thrift.NewTMemoryBufferLen(len(arg504))
defer mbTrans505.Close()
_, err506 := mbTrans505.WriteString(arg504)
if err506 != nil {
Usage()
return
}
factory457 := thrift.NewTJSONProtocolFactory()
jsProt458 := factory457.GetProtocol(mbTrans455)
containerStruct1 := aurora.NewAuroraAdminRestartShardsArgs()
err459 := containerStruct1.ReadField2(jsProt458)
if err459 != nil {
factory507 := thrift.NewTJSONProtocolFactory()
jsProt508 := factory507.GetProtocol(mbTrans505)
containerStruct1 := aurora.NewAuroraSchedulerManagerRestartShardsArgs()
err509 := containerStruct1.ReadField2(context.Background(), jsProt508)
if err509 != nil {
Usage()
return
}
@ -687,36 +686,36 @@ func main() {
fmt.Fprintln(os.Stderr, "KillTasks requires 3 args")
flag.Usage()
}
arg460 := flag.Arg(1)
mbTrans461 := thrift.NewTMemoryBufferLen(len(arg460))
defer mbTrans461.Close()
_, err462 := mbTrans461.WriteString(arg460)
if err462 != nil {
arg510 := flag.Arg(1)
mbTrans511 := thrift.NewTMemoryBufferLen(len(arg510))
defer mbTrans511.Close()
_, err512 := mbTrans511.WriteString(arg510)
if err512 != nil {
Usage()
return
}
factory463 := thrift.NewTJSONProtocolFactory()
jsProt464 := factory463.GetProtocol(mbTrans461)
factory513 := thrift.NewTJSONProtocolFactory()
jsProt514 := factory513.GetProtocol(mbTrans511)
argvalue0 := aurora.NewJobKey()
err465 := argvalue0.Read(jsProt464)
if err465 != nil {
err515 := argvalue0.Read(context.Background(), jsProt514)
if err515 != nil {
Usage()
return
}
value0 := argvalue0
arg466 := flag.Arg(2)
mbTrans467 := thrift.NewTMemoryBufferLen(len(arg466))
defer mbTrans467.Close()
_, err468 := mbTrans467.WriteString(arg466)
if err468 != nil {
arg516 := flag.Arg(2)
mbTrans517 := thrift.NewTMemoryBufferLen(len(arg516))
defer mbTrans517.Close()
_, err518 := mbTrans517.WriteString(arg516)
if err518 != nil {
Usage()
return
}
factory469 := thrift.NewTJSONProtocolFactory()
jsProt470 := factory469.GetProtocol(mbTrans467)
containerStruct1 := aurora.NewAuroraAdminKillTasksArgs()
err471 := containerStruct1.ReadField2(jsProt470)
if err471 != nil {
factory519 := thrift.NewTJSONProtocolFactory()
jsProt520 := factory519.GetProtocol(mbTrans517)
containerStruct1 := aurora.NewAuroraSchedulerManagerKillTasksArgs()
err521 := containerStruct1.ReadField2(context.Background(), jsProt520)
if err521 != nil {
Usage()
return
}
@ -732,25 +731,25 @@ func main() {
fmt.Fprintln(os.Stderr, "AddInstances requires 2 args")
flag.Usage()
}
arg473 := flag.Arg(1)
mbTrans474 := thrift.NewTMemoryBufferLen(len(arg473))
defer mbTrans474.Close()
_, err475 := mbTrans474.WriteString(arg473)
if err475 != nil {
arg523 := flag.Arg(1)
mbTrans524 := thrift.NewTMemoryBufferLen(len(arg523))
defer mbTrans524.Close()
_, err525 := mbTrans524.WriteString(arg523)
if err525 != nil {
Usage()
return
}
factory476 := thrift.NewTJSONProtocolFactory()
jsProt477 := factory476.GetProtocol(mbTrans474)
factory526 := thrift.NewTJSONProtocolFactory()
jsProt527 := factory526.GetProtocol(mbTrans524)
argvalue0 := aurora.NewInstanceKey()
err478 := argvalue0.Read(jsProt477)
if err478 != nil {
err528 := argvalue0.Read(context.Background(), jsProt527)
if err528 != nil {
Usage()
return
}
value0 := argvalue0
tmp1, err479 := (strconv.Atoi(flag.Arg(2)))
if err479 != nil {
tmp1, err529 := (strconv.Atoi(flag.Arg(2)))
if err529 != nil {
Usage()
return
}
@ -764,19 +763,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ReplaceCronTemplate requires 1 args")
flag.Usage()
}
arg480 := flag.Arg(1)
mbTrans481 := thrift.NewTMemoryBufferLen(len(arg480))
defer mbTrans481.Close()
_, err482 := mbTrans481.WriteString(arg480)
if err482 != nil {
arg530 := flag.Arg(1)
mbTrans531 := thrift.NewTMemoryBufferLen(len(arg530))
defer mbTrans531.Close()
_, err532 := mbTrans531.WriteString(arg530)
if err532 != nil {
Usage()
return
}
factory483 := thrift.NewTJSONProtocolFactory()
jsProt484 := factory483.GetProtocol(mbTrans481)
factory533 := thrift.NewTJSONProtocolFactory()
jsProt534 := factory533.GetProtocol(mbTrans531)
argvalue0 := aurora.NewJobConfiguration()
err485 := argvalue0.Read(jsProt484)
if err485 != nil {
err535 := argvalue0.Read(context.Background(), jsProt534)
if err535 != nil {
Usage()
return
}
@ -789,19 +788,19 @@ func main() {
fmt.Fprintln(os.Stderr, "StartJobUpdate requires 2 args")
flag.Usage()
}
arg486 := flag.Arg(1)
mbTrans487 := thrift.NewTMemoryBufferLen(len(arg486))
defer mbTrans487.Close()
_, err488 := mbTrans487.WriteString(arg486)
if err488 != nil {
arg536 := flag.Arg(1)
mbTrans537 := thrift.NewTMemoryBufferLen(len(arg536))
defer mbTrans537.Close()
_, err538 := mbTrans537.WriteString(arg536)
if err538 != nil {
Usage()
return
}
factory489 := thrift.NewTJSONProtocolFactory()
jsProt490 := factory489.GetProtocol(mbTrans487)
factory539 := thrift.NewTJSONProtocolFactory()
jsProt540 := factory539.GetProtocol(mbTrans537)
argvalue0 := aurora.NewJobUpdateRequest()
err491 := argvalue0.Read(jsProt490)
if err491 != nil {
err541 := argvalue0.Read(context.Background(), jsProt540)
if err541 != nil {
Usage()
return
}
@ -816,19 +815,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PauseJobUpdate requires 2 args")
flag.Usage()
}
arg493 := flag.Arg(1)
mbTrans494 := thrift.NewTMemoryBufferLen(len(arg493))
defer mbTrans494.Close()
_, err495 := mbTrans494.WriteString(arg493)
if err495 != nil {
arg543 := flag.Arg(1)
mbTrans544 := thrift.NewTMemoryBufferLen(len(arg543))
defer mbTrans544.Close()
_, err545 := mbTrans544.WriteString(arg543)
if err545 != nil {
Usage()
return
}
factory496 := thrift.NewTJSONProtocolFactory()
jsProt497 := factory496.GetProtocol(mbTrans494)
factory546 := thrift.NewTJSONProtocolFactory()
jsProt547 := factory546.GetProtocol(mbTrans544)
argvalue0 := aurora.NewJobUpdateKey()
err498 := argvalue0.Read(jsProt497)
if err498 != nil {
err548 := argvalue0.Read(context.Background(), jsProt547)
if err548 != nil {
Usage()
return
}
@ -843,19 +842,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ResumeJobUpdate requires 2 args")
flag.Usage()
}
arg500 := flag.Arg(1)
mbTrans501 := thrift.NewTMemoryBufferLen(len(arg500))
defer mbTrans501.Close()
_, err502 := mbTrans501.WriteString(arg500)
if err502 != nil {
arg550 := flag.Arg(1)
mbTrans551 := thrift.NewTMemoryBufferLen(len(arg550))
defer mbTrans551.Close()
_, err552 := mbTrans551.WriteString(arg550)
if err552 != nil {
Usage()
return
}
factory503 := thrift.NewTJSONProtocolFactory()
jsProt504 := factory503.GetProtocol(mbTrans501)
factory553 := thrift.NewTJSONProtocolFactory()
jsProt554 := factory553.GetProtocol(mbTrans551)
argvalue0 := aurora.NewJobUpdateKey()
err505 := argvalue0.Read(jsProt504)
if err505 != nil {
err555 := argvalue0.Read(context.Background(), jsProt554)
if err555 != nil {
Usage()
return
}
@ -870,19 +869,19 @@ func main() {
fmt.Fprintln(os.Stderr, "AbortJobUpdate requires 2 args")
flag.Usage()
}
arg507 := flag.Arg(1)
mbTrans508 := thrift.NewTMemoryBufferLen(len(arg507))
defer mbTrans508.Close()
_, err509 := mbTrans508.WriteString(arg507)
if err509 != nil {
arg557 := flag.Arg(1)
mbTrans558 := thrift.NewTMemoryBufferLen(len(arg557))
defer mbTrans558.Close()
_, err559 := mbTrans558.WriteString(arg557)
if err559 != nil {
Usage()
return
}
factory510 := thrift.NewTJSONProtocolFactory()
jsProt511 := factory510.GetProtocol(mbTrans508)
factory560 := thrift.NewTJSONProtocolFactory()
jsProt561 := factory560.GetProtocol(mbTrans558)
argvalue0 := aurora.NewJobUpdateKey()
err512 := argvalue0.Read(jsProt511)
if err512 != nil {
err562 := argvalue0.Read(context.Background(), jsProt561)
if err562 != nil {
Usage()
return
}
@ -897,19 +896,19 @@ func main() {
fmt.Fprintln(os.Stderr, "RollbackJobUpdate requires 2 args")
flag.Usage()
}
arg514 := flag.Arg(1)
mbTrans515 := thrift.NewTMemoryBufferLen(len(arg514))
defer mbTrans515.Close()
_, err516 := mbTrans515.WriteString(arg514)
if err516 != nil {
arg564 := flag.Arg(1)
mbTrans565 := thrift.NewTMemoryBufferLen(len(arg564))
defer mbTrans565.Close()
_, err566 := mbTrans565.WriteString(arg564)
if err566 != nil {
Usage()
return
}
factory517 := thrift.NewTJSONProtocolFactory()
jsProt518 := factory517.GetProtocol(mbTrans515)
factory567 := thrift.NewTJSONProtocolFactory()
jsProt568 := factory567.GetProtocol(mbTrans565)
argvalue0 := aurora.NewJobUpdateKey()
err519 := argvalue0.Read(jsProt518)
if err519 != nil {
err569 := argvalue0.Read(context.Background(), jsProt568)
if err569 != nil {
Usage()
return
}
@ -924,19 +923,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PulseJobUpdate requires 1 args")
flag.Usage()
}
arg521 := flag.Arg(1)
mbTrans522 := thrift.NewTMemoryBufferLen(len(arg521))
defer mbTrans522.Close()
_, err523 := mbTrans522.WriteString(arg521)
if err523 != nil {
arg571 := flag.Arg(1)
mbTrans572 := thrift.NewTMemoryBufferLen(len(arg571))
defer mbTrans572.Close()
_, err573 := mbTrans572.WriteString(arg571)
if err573 != nil {
Usage()
return
}
factory524 := thrift.NewTJSONProtocolFactory()
jsProt525 := factory524.GetProtocol(mbTrans522)
factory574 := thrift.NewTJSONProtocolFactory()
jsProt575 := factory574.GetProtocol(mbTrans572)
argvalue0 := aurora.NewJobUpdateKey()
err526 := argvalue0.Read(jsProt525)
if err526 != nil {
err576 := argvalue0.Read(context.Background(), jsProt575)
if err576 != nil {
Usage()
return
}
@ -967,19 +966,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
flag.Usage()
}
arg528 := flag.Arg(1)
mbTrans529 := thrift.NewTMemoryBufferLen(len(arg528))
defer mbTrans529.Close()
_, err530 := mbTrans529.WriteString(arg528)
if err530 != nil {
arg578 := flag.Arg(1)
mbTrans579 := thrift.NewTMemoryBufferLen(len(arg578))
defer mbTrans579.Close()
_, err580 := mbTrans579.WriteString(arg578)
if err580 != nil {
Usage()
return
}
factory531 := thrift.NewTJSONProtocolFactory()
jsProt532 := factory531.GetProtocol(mbTrans529)
factory581 := thrift.NewTJSONProtocolFactory()
jsProt582 := factory581.GetProtocol(mbTrans579)
argvalue0 := aurora.NewTaskQuery()
err533 := argvalue0.Read(jsProt532)
if err533 != nil {
err583 := argvalue0.Read(context.Background(), jsProt582)
if err583 != nil {
Usage()
return
}
@ -992,19 +991,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
flag.Usage()
}
arg534 := flag.Arg(1)
mbTrans535 := thrift.NewTMemoryBufferLen(len(arg534))
defer mbTrans535.Close()
_, err536 := mbTrans535.WriteString(arg534)
if err536 != nil {
arg584 := flag.Arg(1)
mbTrans585 := thrift.NewTMemoryBufferLen(len(arg584))
defer mbTrans585.Close()
_, err586 := mbTrans585.WriteString(arg584)
if err586 != nil {
Usage()
return
}
factory537 := thrift.NewTJSONProtocolFactory()
jsProt538 := factory537.GetProtocol(mbTrans535)
factory587 := thrift.NewTJSONProtocolFactory()
jsProt588 := factory587.GetProtocol(mbTrans585)
argvalue0 := aurora.NewTaskQuery()
err539 := argvalue0.Read(jsProt538)
if err539 != nil {
err589 := argvalue0.Read(context.Background(), jsProt588)
if err589 != nil {
Usage()
return
}
@ -1017,19 +1016,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
flag.Usage()
}
arg540 := flag.Arg(1)
mbTrans541 := thrift.NewTMemoryBufferLen(len(arg540))
defer mbTrans541.Close()
_, err542 := mbTrans541.WriteString(arg540)
if err542 != nil {
arg590 := flag.Arg(1)
mbTrans591 := thrift.NewTMemoryBufferLen(len(arg590))
defer mbTrans591.Close()
_, err592 := mbTrans591.WriteString(arg590)
if err592 != nil {
Usage()
return
}
factory543 := thrift.NewTJSONProtocolFactory()
jsProt544 := factory543.GetProtocol(mbTrans541)
factory593 := thrift.NewTJSONProtocolFactory()
jsProt594 := factory593.GetProtocol(mbTrans591)
argvalue0 := aurora.NewTaskQuery()
err545 := argvalue0.Read(jsProt544)
if err545 != nil {
err595 := argvalue0.Read(context.Background(), jsProt594)
if err595 != nil {
Usage()
return
}
@ -1042,19 +1041,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
flag.Usage()
}
arg546 := flag.Arg(1)
mbTrans547 := thrift.NewTMemoryBufferLen(len(arg546))
defer mbTrans547.Close()
_, err548 := mbTrans547.WriteString(arg546)
if err548 != nil {
arg596 := flag.Arg(1)
mbTrans597 := thrift.NewTMemoryBufferLen(len(arg596))
defer mbTrans597.Close()
_, err598 := mbTrans597.WriteString(arg596)
if err598 != nil {
Usage()
return
}
factory549 := thrift.NewTJSONProtocolFactory()
jsProt550 := factory549.GetProtocol(mbTrans547)
factory599 := thrift.NewTJSONProtocolFactory()
jsProt600 := factory599.GetProtocol(mbTrans597)
argvalue0 := aurora.NewJobKey()
err551 := argvalue0.Read(jsProt550)
if err551 != nil {
err601 := argvalue0.Read(context.Background(), jsProt600)
if err601 != nil {
Usage()
return
}
@ -1087,19 +1086,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
flag.Usage()
}
arg554 := flag.Arg(1)
mbTrans555 := thrift.NewTMemoryBufferLen(len(arg554))
defer mbTrans555.Close()
_, err556 := mbTrans555.WriteString(arg554)
if err556 != nil {
arg604 := flag.Arg(1)
mbTrans605 := thrift.NewTMemoryBufferLen(len(arg604))
defer mbTrans605.Close()
_, err606 := mbTrans605.WriteString(arg604)
if err606 != nil {
Usage()
return
}
factory557 := thrift.NewTJSONProtocolFactory()
jsProt558 := factory557.GetProtocol(mbTrans555)
factory607 := thrift.NewTJSONProtocolFactory()
jsProt608 := factory607.GetProtocol(mbTrans605)
argvalue0 := aurora.NewJobConfiguration()
err559 := argvalue0.Read(jsProt558)
if err559 != nil {
err609 := argvalue0.Read(context.Background(), jsProt608)
if err609 != nil {
Usage()
return
}
@ -1112,19 +1111,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
flag.Usage()
}
arg560 := flag.Arg(1)
mbTrans561 := thrift.NewTMemoryBufferLen(len(arg560))
defer mbTrans561.Close()
_, err562 := mbTrans561.WriteString(arg560)
if err562 != nil {
arg610 := flag.Arg(1)
mbTrans611 := thrift.NewTMemoryBufferLen(len(arg610))
defer mbTrans611.Close()
_, err612 := mbTrans611.WriteString(arg610)
if err612 != nil {
Usage()
return
}
factory563 := thrift.NewTJSONProtocolFactory()
jsProt564 := factory563.GetProtocol(mbTrans561)
factory613 := thrift.NewTJSONProtocolFactory()
jsProt614 := factory613.GetProtocol(mbTrans611)
argvalue0 := aurora.NewJobUpdateQuery()
err565 := argvalue0.Read(jsProt564)
if err565 != nil {
err615 := argvalue0.Read(context.Background(), jsProt614)
if err615 != nil {
Usage()
return
}
@ -1137,19 +1136,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
flag.Usage()
}
arg566 := flag.Arg(1)
mbTrans567 := thrift.NewTMemoryBufferLen(len(arg566))
defer mbTrans567.Close()
_, err568 := mbTrans567.WriteString(arg566)
if err568 != nil {
arg616 := flag.Arg(1)
mbTrans617 := thrift.NewTMemoryBufferLen(len(arg616))
defer mbTrans617.Close()
_, err618 := mbTrans617.WriteString(arg616)
if err618 != nil {
Usage()
return
}
factory569 := thrift.NewTJSONProtocolFactory()
jsProt570 := factory569.GetProtocol(mbTrans567)
factory619 := thrift.NewTJSONProtocolFactory()
jsProt620 := factory619.GetProtocol(mbTrans617)
argvalue0 := aurora.NewJobUpdateQuery()
err571 := argvalue0.Read(jsProt570)
if err571 != nil {
err621 := argvalue0.Read(context.Background(), jsProt620)
if err621 != nil {
Usage()
return
}
@ -1162,19 +1161,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
flag.Usage()
}
arg572 := flag.Arg(1)
mbTrans573 := thrift.NewTMemoryBufferLen(len(arg572))
defer mbTrans573.Close()
_, err574 := mbTrans573.WriteString(arg572)
if err574 != nil {
arg622 := flag.Arg(1)
mbTrans623 := thrift.NewTMemoryBufferLen(len(arg622))
defer mbTrans623.Close()
_, err624 := mbTrans623.WriteString(arg622)
if err624 != nil {
Usage()
return
}
factory575 := thrift.NewTJSONProtocolFactory()
jsProt576 := factory575.GetProtocol(mbTrans573)
factory625 := thrift.NewTJSONProtocolFactory()
jsProt626 := factory625.GetProtocol(mbTrans623)
argvalue0 := aurora.NewJobUpdateRequest()
err577 := argvalue0.Read(jsProt576)
if err577 != nil {
err627 := argvalue0.Read(context.Background(), jsProt626)
if err627 != nil {
Usage()
return
}

View file

@ -1,5 +1,4 @@
// Autogenerated by Thrift Compiler (0.13.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package main
@ -176,19 +175,19 @@ func main() {
fmt.Fprintln(os.Stderr, "CreateJob requires 1 args")
flag.Usage()
}
arg163 := flag.Arg(1)
mbTrans164 := thrift.NewTMemoryBufferLen(len(arg163))
defer mbTrans164.Close()
_, err165 := mbTrans164.WriteString(arg163)
if err165 != nil {
arg213 := flag.Arg(1)
mbTrans214 := thrift.NewTMemoryBufferLen(len(arg213))
defer mbTrans214.Close()
_, err215 := mbTrans214.WriteString(arg213)
if err215 != nil {
Usage()
return
}
factory166 := thrift.NewTJSONProtocolFactory()
jsProt167 := factory166.GetProtocol(mbTrans164)
factory216 := thrift.NewTJSONProtocolFactory()
jsProt217 := factory216.GetProtocol(mbTrans214)
argvalue0 := aurora.NewJobConfiguration()
err168 := argvalue0.Read(jsProt167)
if err168 != nil {
err218 := argvalue0.Read(context.Background(), jsProt217)
if err218 != nil {
Usage()
return
}
@ -201,19 +200,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ScheduleCronJob requires 1 args")
flag.Usage()
}
arg169 := flag.Arg(1)
mbTrans170 := thrift.NewTMemoryBufferLen(len(arg169))
defer mbTrans170.Close()
_, err171 := mbTrans170.WriteString(arg169)
if err171 != nil {
arg219 := flag.Arg(1)
mbTrans220 := thrift.NewTMemoryBufferLen(len(arg219))
defer mbTrans220.Close()
_, err221 := mbTrans220.WriteString(arg219)
if err221 != nil {
Usage()
return
}
factory172 := thrift.NewTJSONProtocolFactory()
jsProt173 := factory172.GetProtocol(mbTrans170)
factory222 := thrift.NewTJSONProtocolFactory()
jsProt223 := factory222.GetProtocol(mbTrans220)
argvalue0 := aurora.NewJobConfiguration()
err174 := argvalue0.Read(jsProt173)
if err174 != nil {
err224 := argvalue0.Read(context.Background(), jsProt223)
if err224 != nil {
Usage()
return
}
@ -226,19 +225,19 @@ func main() {
fmt.Fprintln(os.Stderr, "DescheduleCronJob requires 1 args")
flag.Usage()
}
arg175 := flag.Arg(1)
mbTrans176 := thrift.NewTMemoryBufferLen(len(arg175))
defer mbTrans176.Close()
_, err177 := mbTrans176.WriteString(arg175)
if err177 != nil {
arg225 := flag.Arg(1)
mbTrans226 := thrift.NewTMemoryBufferLen(len(arg225))
defer mbTrans226.Close()
_, err227 := mbTrans226.WriteString(arg225)
if err227 != nil {
Usage()
return
}
factory178 := thrift.NewTJSONProtocolFactory()
jsProt179 := factory178.GetProtocol(mbTrans176)
factory228 := thrift.NewTJSONProtocolFactory()
jsProt229 := factory228.GetProtocol(mbTrans226)
argvalue0 := aurora.NewJobKey()
err180 := argvalue0.Read(jsProt179)
if err180 != nil {
err230 := argvalue0.Read(context.Background(), jsProt229)
if err230 != nil {
Usage()
return
}
@ -251,19 +250,19 @@ func main() {
fmt.Fprintln(os.Stderr, "StartCronJob requires 1 args")
flag.Usage()
}
arg181 := flag.Arg(1)
mbTrans182 := thrift.NewTMemoryBufferLen(len(arg181))
defer mbTrans182.Close()
_, err183 := mbTrans182.WriteString(arg181)
if err183 != nil {
arg231 := flag.Arg(1)
mbTrans232 := thrift.NewTMemoryBufferLen(len(arg231))
defer mbTrans232.Close()
_, err233 := mbTrans232.WriteString(arg231)
if err233 != nil {
Usage()
return
}
factory184 := thrift.NewTJSONProtocolFactory()
jsProt185 := factory184.GetProtocol(mbTrans182)
factory234 := thrift.NewTJSONProtocolFactory()
jsProt235 := factory234.GetProtocol(mbTrans232)
argvalue0 := aurora.NewJobKey()
err186 := argvalue0.Read(jsProt185)
if err186 != nil {
err236 := argvalue0.Read(context.Background(), jsProt235)
if err236 != nil {
Usage()
return
}
@ -276,36 +275,36 @@ func main() {
fmt.Fprintln(os.Stderr, "RestartShards requires 2 args")
flag.Usage()
}
arg187 := flag.Arg(1)
mbTrans188 := thrift.NewTMemoryBufferLen(len(arg187))
defer mbTrans188.Close()
_, err189 := mbTrans188.WriteString(arg187)
if err189 != nil {
arg237 := flag.Arg(1)
mbTrans238 := thrift.NewTMemoryBufferLen(len(arg237))
defer mbTrans238.Close()
_, err239 := mbTrans238.WriteString(arg237)
if err239 != nil {
Usage()
return
}
factory190 := thrift.NewTJSONProtocolFactory()
jsProt191 := factory190.GetProtocol(mbTrans188)
factory240 := thrift.NewTJSONProtocolFactory()
jsProt241 := factory240.GetProtocol(mbTrans238)
argvalue0 := aurora.NewJobKey()
err192 := argvalue0.Read(jsProt191)
if err192 != nil {
err242 := argvalue0.Read(context.Background(), jsProt241)
if err242 != nil {
Usage()
return
}
value0 := argvalue0
arg193 := flag.Arg(2)
mbTrans194 := thrift.NewTMemoryBufferLen(len(arg193))
defer mbTrans194.Close()
_, err195 := mbTrans194.WriteString(arg193)
if err195 != nil {
arg243 := flag.Arg(2)
mbTrans244 := thrift.NewTMemoryBufferLen(len(arg243))
defer mbTrans244.Close()
_, err245 := mbTrans244.WriteString(arg243)
if err245 != nil {
Usage()
return
}
factory196 := thrift.NewTJSONProtocolFactory()
jsProt197 := factory196.GetProtocol(mbTrans194)
factory246 := thrift.NewTJSONProtocolFactory()
jsProt247 := factory246.GetProtocol(mbTrans244)
containerStruct1 := aurora.NewAuroraSchedulerManagerRestartShardsArgs()
err198 := containerStruct1.ReadField2(jsProt197)
if err198 != nil {
err248 := containerStruct1.ReadField2(context.Background(), jsProt247)
if err248 != nil {
Usage()
return
}
@ -319,36 +318,36 @@ func main() {
fmt.Fprintln(os.Stderr, "KillTasks requires 3 args")
flag.Usage()
}
arg199 := flag.Arg(1)
mbTrans200 := thrift.NewTMemoryBufferLen(len(arg199))
defer mbTrans200.Close()
_, err201 := mbTrans200.WriteString(arg199)
if err201 != nil {
arg249 := flag.Arg(1)
mbTrans250 := thrift.NewTMemoryBufferLen(len(arg249))
defer mbTrans250.Close()
_, err251 := mbTrans250.WriteString(arg249)
if err251 != nil {
Usage()
return
}
factory202 := thrift.NewTJSONProtocolFactory()
jsProt203 := factory202.GetProtocol(mbTrans200)
factory252 := thrift.NewTJSONProtocolFactory()
jsProt253 := factory252.GetProtocol(mbTrans250)
argvalue0 := aurora.NewJobKey()
err204 := argvalue0.Read(jsProt203)
if err204 != nil {
err254 := argvalue0.Read(context.Background(), jsProt253)
if err254 != nil {
Usage()
return
}
value0 := argvalue0
arg205 := flag.Arg(2)
mbTrans206 := thrift.NewTMemoryBufferLen(len(arg205))
defer mbTrans206.Close()
_, err207 := mbTrans206.WriteString(arg205)
if err207 != nil {
arg255 := flag.Arg(2)
mbTrans256 := thrift.NewTMemoryBufferLen(len(arg255))
defer mbTrans256.Close()
_, err257 := mbTrans256.WriteString(arg255)
if err257 != nil {
Usage()
return
}
factory208 := thrift.NewTJSONProtocolFactory()
jsProt209 := factory208.GetProtocol(mbTrans206)
factory258 := thrift.NewTJSONProtocolFactory()
jsProt259 := factory258.GetProtocol(mbTrans256)
containerStruct1 := aurora.NewAuroraSchedulerManagerKillTasksArgs()
err210 := containerStruct1.ReadField2(jsProt209)
if err210 != nil {
err260 := containerStruct1.ReadField2(context.Background(), jsProt259)
if err260 != nil {
Usage()
return
}
@ -364,25 +363,25 @@ func main() {
fmt.Fprintln(os.Stderr, "AddInstances requires 2 args")
flag.Usage()
}
arg212 := flag.Arg(1)
mbTrans213 := thrift.NewTMemoryBufferLen(len(arg212))
defer mbTrans213.Close()
_, err214 := mbTrans213.WriteString(arg212)
if err214 != nil {
arg262 := flag.Arg(1)
mbTrans263 := thrift.NewTMemoryBufferLen(len(arg262))
defer mbTrans263.Close()
_, err264 := mbTrans263.WriteString(arg262)
if err264 != nil {
Usage()
return
}
factory215 := thrift.NewTJSONProtocolFactory()
jsProt216 := factory215.GetProtocol(mbTrans213)
factory265 := thrift.NewTJSONProtocolFactory()
jsProt266 := factory265.GetProtocol(mbTrans263)
argvalue0 := aurora.NewInstanceKey()
err217 := argvalue0.Read(jsProt216)
if err217 != nil {
err267 := argvalue0.Read(context.Background(), jsProt266)
if err267 != nil {
Usage()
return
}
value0 := argvalue0
tmp1, err218 := (strconv.Atoi(flag.Arg(2)))
if err218 != nil {
tmp1, err268 := (strconv.Atoi(flag.Arg(2)))
if err268 != nil {
Usage()
return
}
@ -396,19 +395,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ReplaceCronTemplate requires 1 args")
flag.Usage()
}
arg219 := flag.Arg(1)
mbTrans220 := thrift.NewTMemoryBufferLen(len(arg219))
defer mbTrans220.Close()
_, err221 := mbTrans220.WriteString(arg219)
if err221 != nil {
arg269 := flag.Arg(1)
mbTrans270 := thrift.NewTMemoryBufferLen(len(arg269))
defer mbTrans270.Close()
_, err271 := mbTrans270.WriteString(arg269)
if err271 != nil {
Usage()
return
}
factory222 := thrift.NewTJSONProtocolFactory()
jsProt223 := factory222.GetProtocol(mbTrans220)
factory272 := thrift.NewTJSONProtocolFactory()
jsProt273 := factory272.GetProtocol(mbTrans270)
argvalue0 := aurora.NewJobConfiguration()
err224 := argvalue0.Read(jsProt223)
if err224 != nil {
err274 := argvalue0.Read(context.Background(), jsProt273)
if err274 != nil {
Usage()
return
}
@ -421,19 +420,19 @@ func main() {
fmt.Fprintln(os.Stderr, "StartJobUpdate requires 2 args")
flag.Usage()
}
arg225 := flag.Arg(1)
mbTrans226 := thrift.NewTMemoryBufferLen(len(arg225))
defer mbTrans226.Close()
_, err227 := mbTrans226.WriteString(arg225)
if err227 != nil {
arg275 := flag.Arg(1)
mbTrans276 := thrift.NewTMemoryBufferLen(len(arg275))
defer mbTrans276.Close()
_, err277 := mbTrans276.WriteString(arg275)
if err277 != nil {
Usage()
return
}
factory228 := thrift.NewTJSONProtocolFactory()
jsProt229 := factory228.GetProtocol(mbTrans226)
factory278 := thrift.NewTJSONProtocolFactory()
jsProt279 := factory278.GetProtocol(mbTrans276)
argvalue0 := aurora.NewJobUpdateRequest()
err230 := argvalue0.Read(jsProt229)
if err230 != nil {
err280 := argvalue0.Read(context.Background(), jsProt279)
if err280 != nil {
Usage()
return
}
@ -448,19 +447,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PauseJobUpdate requires 2 args")
flag.Usage()
}
arg232 := flag.Arg(1)
mbTrans233 := thrift.NewTMemoryBufferLen(len(arg232))
defer mbTrans233.Close()
_, err234 := mbTrans233.WriteString(arg232)
if err234 != nil {
arg282 := flag.Arg(1)
mbTrans283 := thrift.NewTMemoryBufferLen(len(arg282))
defer mbTrans283.Close()
_, err284 := mbTrans283.WriteString(arg282)
if err284 != nil {
Usage()
return
}
factory235 := thrift.NewTJSONProtocolFactory()
jsProt236 := factory235.GetProtocol(mbTrans233)
factory285 := thrift.NewTJSONProtocolFactory()
jsProt286 := factory285.GetProtocol(mbTrans283)
argvalue0 := aurora.NewJobUpdateKey()
err237 := argvalue0.Read(jsProt236)
if err237 != nil {
err287 := argvalue0.Read(context.Background(), jsProt286)
if err287 != nil {
Usage()
return
}
@ -475,19 +474,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ResumeJobUpdate requires 2 args")
flag.Usage()
}
arg239 := flag.Arg(1)
mbTrans240 := thrift.NewTMemoryBufferLen(len(arg239))
defer mbTrans240.Close()
_, err241 := mbTrans240.WriteString(arg239)
if err241 != nil {
arg289 := flag.Arg(1)
mbTrans290 := thrift.NewTMemoryBufferLen(len(arg289))
defer mbTrans290.Close()
_, err291 := mbTrans290.WriteString(arg289)
if err291 != nil {
Usage()
return
}
factory242 := thrift.NewTJSONProtocolFactory()
jsProt243 := factory242.GetProtocol(mbTrans240)
factory292 := thrift.NewTJSONProtocolFactory()
jsProt293 := factory292.GetProtocol(mbTrans290)
argvalue0 := aurora.NewJobUpdateKey()
err244 := argvalue0.Read(jsProt243)
if err244 != nil {
err294 := argvalue0.Read(context.Background(), jsProt293)
if err294 != nil {
Usage()
return
}
@ -502,19 +501,19 @@ func main() {
fmt.Fprintln(os.Stderr, "AbortJobUpdate requires 2 args")
flag.Usage()
}
arg246 := flag.Arg(1)
mbTrans247 := thrift.NewTMemoryBufferLen(len(arg246))
defer mbTrans247.Close()
_, err248 := mbTrans247.WriteString(arg246)
if err248 != nil {
arg296 := flag.Arg(1)
mbTrans297 := thrift.NewTMemoryBufferLen(len(arg296))
defer mbTrans297.Close()
_, err298 := mbTrans297.WriteString(arg296)
if err298 != nil {
Usage()
return
}
factory249 := thrift.NewTJSONProtocolFactory()
jsProt250 := factory249.GetProtocol(mbTrans247)
factory299 := thrift.NewTJSONProtocolFactory()
jsProt300 := factory299.GetProtocol(mbTrans297)
argvalue0 := aurora.NewJobUpdateKey()
err251 := argvalue0.Read(jsProt250)
if err251 != nil {
err301 := argvalue0.Read(context.Background(), jsProt300)
if err301 != nil {
Usage()
return
}
@ -529,19 +528,19 @@ func main() {
fmt.Fprintln(os.Stderr, "RollbackJobUpdate requires 2 args")
flag.Usage()
}
arg253 := flag.Arg(1)
mbTrans254 := thrift.NewTMemoryBufferLen(len(arg253))
defer mbTrans254.Close()
_, err255 := mbTrans254.WriteString(arg253)
if err255 != nil {
arg303 := flag.Arg(1)
mbTrans304 := thrift.NewTMemoryBufferLen(len(arg303))
defer mbTrans304.Close()
_, err305 := mbTrans304.WriteString(arg303)
if err305 != nil {
Usage()
return
}
factory256 := thrift.NewTJSONProtocolFactory()
jsProt257 := factory256.GetProtocol(mbTrans254)
factory306 := thrift.NewTJSONProtocolFactory()
jsProt307 := factory306.GetProtocol(mbTrans304)
argvalue0 := aurora.NewJobUpdateKey()
err258 := argvalue0.Read(jsProt257)
if err258 != nil {
err308 := argvalue0.Read(context.Background(), jsProt307)
if err308 != nil {
Usage()
return
}
@ -556,19 +555,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PulseJobUpdate requires 1 args")
flag.Usage()
}
arg260 := flag.Arg(1)
mbTrans261 := thrift.NewTMemoryBufferLen(len(arg260))
defer mbTrans261.Close()
_, err262 := mbTrans261.WriteString(arg260)
if err262 != nil {
arg310 := flag.Arg(1)
mbTrans311 := thrift.NewTMemoryBufferLen(len(arg310))
defer mbTrans311.Close()
_, err312 := mbTrans311.WriteString(arg310)
if err312 != nil {
Usage()
return
}
factory263 := thrift.NewTJSONProtocolFactory()
jsProt264 := factory263.GetProtocol(mbTrans261)
factory313 := thrift.NewTJSONProtocolFactory()
jsProt314 := factory313.GetProtocol(mbTrans311)
argvalue0 := aurora.NewJobUpdateKey()
err265 := argvalue0.Read(jsProt264)
if err265 != nil {
err315 := argvalue0.Read(context.Background(), jsProt314)
if err315 != nil {
Usage()
return
}
@ -599,19 +598,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
flag.Usage()
}
arg267 := flag.Arg(1)
mbTrans268 := thrift.NewTMemoryBufferLen(len(arg267))
defer mbTrans268.Close()
_, err269 := mbTrans268.WriteString(arg267)
if err269 != nil {
arg317 := flag.Arg(1)
mbTrans318 := thrift.NewTMemoryBufferLen(len(arg317))
defer mbTrans318.Close()
_, err319 := mbTrans318.WriteString(arg317)
if err319 != nil {
Usage()
return
}
factory270 := thrift.NewTJSONProtocolFactory()
jsProt271 := factory270.GetProtocol(mbTrans268)
factory320 := thrift.NewTJSONProtocolFactory()
jsProt321 := factory320.GetProtocol(mbTrans318)
argvalue0 := aurora.NewTaskQuery()
err272 := argvalue0.Read(jsProt271)
if err272 != nil {
err322 := argvalue0.Read(context.Background(), jsProt321)
if err322 != nil {
Usage()
return
}
@ -624,19 +623,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
flag.Usage()
}
arg273 := flag.Arg(1)
mbTrans274 := thrift.NewTMemoryBufferLen(len(arg273))
defer mbTrans274.Close()
_, err275 := mbTrans274.WriteString(arg273)
if err275 != nil {
arg323 := flag.Arg(1)
mbTrans324 := thrift.NewTMemoryBufferLen(len(arg323))
defer mbTrans324.Close()
_, err325 := mbTrans324.WriteString(arg323)
if err325 != nil {
Usage()
return
}
factory276 := thrift.NewTJSONProtocolFactory()
jsProt277 := factory276.GetProtocol(mbTrans274)
factory326 := thrift.NewTJSONProtocolFactory()
jsProt327 := factory326.GetProtocol(mbTrans324)
argvalue0 := aurora.NewTaskQuery()
err278 := argvalue0.Read(jsProt277)
if err278 != nil {
err328 := argvalue0.Read(context.Background(), jsProt327)
if err328 != nil {
Usage()
return
}
@ -649,19 +648,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
flag.Usage()
}
arg279 := flag.Arg(1)
mbTrans280 := thrift.NewTMemoryBufferLen(len(arg279))
defer mbTrans280.Close()
_, err281 := mbTrans280.WriteString(arg279)
if err281 != nil {
arg329 := flag.Arg(1)
mbTrans330 := thrift.NewTMemoryBufferLen(len(arg329))
defer mbTrans330.Close()
_, err331 := mbTrans330.WriteString(arg329)
if err331 != nil {
Usage()
return
}
factory282 := thrift.NewTJSONProtocolFactory()
jsProt283 := factory282.GetProtocol(mbTrans280)
factory332 := thrift.NewTJSONProtocolFactory()
jsProt333 := factory332.GetProtocol(mbTrans330)
argvalue0 := aurora.NewTaskQuery()
err284 := argvalue0.Read(jsProt283)
if err284 != nil {
err334 := argvalue0.Read(context.Background(), jsProt333)
if err334 != nil {
Usage()
return
}
@ -674,19 +673,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
flag.Usage()
}
arg285 := flag.Arg(1)
mbTrans286 := thrift.NewTMemoryBufferLen(len(arg285))
defer mbTrans286.Close()
_, err287 := mbTrans286.WriteString(arg285)
if err287 != nil {
arg335 := flag.Arg(1)
mbTrans336 := thrift.NewTMemoryBufferLen(len(arg335))
defer mbTrans336.Close()
_, err337 := mbTrans336.WriteString(arg335)
if err337 != nil {
Usage()
return
}
factory288 := thrift.NewTJSONProtocolFactory()
jsProt289 := factory288.GetProtocol(mbTrans286)
factory338 := thrift.NewTJSONProtocolFactory()
jsProt339 := factory338.GetProtocol(mbTrans336)
argvalue0 := aurora.NewJobKey()
err290 := argvalue0.Read(jsProt289)
if err290 != nil {
err340 := argvalue0.Read(context.Background(), jsProt339)
if err340 != nil {
Usage()
return
}
@ -719,19 +718,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
flag.Usage()
}
arg293 := flag.Arg(1)
mbTrans294 := thrift.NewTMemoryBufferLen(len(arg293))
defer mbTrans294.Close()
_, err295 := mbTrans294.WriteString(arg293)
if err295 != nil {
arg343 := flag.Arg(1)
mbTrans344 := thrift.NewTMemoryBufferLen(len(arg343))
defer mbTrans344.Close()
_, err345 := mbTrans344.WriteString(arg343)
if err345 != nil {
Usage()
return
}
factory296 := thrift.NewTJSONProtocolFactory()
jsProt297 := factory296.GetProtocol(mbTrans294)
factory346 := thrift.NewTJSONProtocolFactory()
jsProt347 := factory346.GetProtocol(mbTrans344)
argvalue0 := aurora.NewJobConfiguration()
err298 := argvalue0.Read(jsProt297)
if err298 != nil {
err348 := argvalue0.Read(context.Background(), jsProt347)
if err348 != nil {
Usage()
return
}
@ -744,19 +743,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
flag.Usage()
}
arg299 := flag.Arg(1)
mbTrans300 := thrift.NewTMemoryBufferLen(len(arg299))
defer mbTrans300.Close()
_, err301 := mbTrans300.WriteString(arg299)
if err301 != nil {
arg349 := flag.Arg(1)
mbTrans350 := thrift.NewTMemoryBufferLen(len(arg349))
defer mbTrans350.Close()
_, err351 := mbTrans350.WriteString(arg349)
if err351 != nil {
Usage()
return
}
factory302 := thrift.NewTJSONProtocolFactory()
jsProt303 := factory302.GetProtocol(mbTrans300)
factory352 := thrift.NewTJSONProtocolFactory()
jsProt353 := factory352.GetProtocol(mbTrans350)
argvalue0 := aurora.NewJobUpdateQuery()
err304 := argvalue0.Read(jsProt303)
if err304 != nil {
err354 := argvalue0.Read(context.Background(), jsProt353)
if err354 != nil {
Usage()
return
}
@ -769,19 +768,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
flag.Usage()
}
arg305 := flag.Arg(1)
mbTrans306 := thrift.NewTMemoryBufferLen(len(arg305))
defer mbTrans306.Close()
_, err307 := mbTrans306.WriteString(arg305)
if err307 != nil {
arg355 := flag.Arg(1)
mbTrans356 := thrift.NewTMemoryBufferLen(len(arg355))
defer mbTrans356.Close()
_, err357 := mbTrans356.WriteString(arg355)
if err357 != nil {
Usage()
return
}
factory308 := thrift.NewTJSONProtocolFactory()
jsProt309 := factory308.GetProtocol(mbTrans306)
factory358 := thrift.NewTJSONProtocolFactory()
jsProt359 := factory358.GetProtocol(mbTrans356)
argvalue0 := aurora.NewJobUpdateQuery()
err310 := argvalue0.Read(jsProt309)
if err310 != nil {
err360 := argvalue0.Read(context.Background(), jsProt359)
if err360 != nil {
Usage()
return
}
@ -794,19 +793,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
flag.Usage()
}
arg311 := flag.Arg(1)
mbTrans312 := thrift.NewTMemoryBufferLen(len(arg311))
defer mbTrans312.Close()
_, err313 := mbTrans312.WriteString(arg311)
if err313 != nil {
arg361 := flag.Arg(1)
mbTrans362 := thrift.NewTMemoryBufferLen(len(arg361))
defer mbTrans362.Close()
_, err363 := mbTrans362.WriteString(arg361)
if err363 != nil {
Usage()
return
}
factory314 := thrift.NewTJSONProtocolFactory()
jsProt315 := factory314.GetProtocol(mbTrans312)
factory364 := thrift.NewTJSONProtocolFactory()
jsProt365 := factory364.GetProtocol(mbTrans362)
argvalue0 := aurora.NewJobUpdateRequest()
err316 := argvalue0.Read(jsProt315)
if err316 != nil {
err366 := argvalue0.Read(context.Background(), jsProt365)
if err366 != nil {
Usage()
return
}


@ -1,5 +1,4 @@
// Autogenerated by Thrift Compiler (0.13.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package main
@ -180,19 +179,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
flag.Usage()
}
arg82 := flag.Arg(1)
mbTrans83 := thrift.NewTMemoryBufferLen(len(arg82))
defer mbTrans83.Close()
_, err84 := mbTrans83.WriteString(arg82)
if err84 != nil {
arg132 := flag.Arg(1)
mbTrans133 := thrift.NewTMemoryBufferLen(len(arg132))
defer mbTrans133.Close()
_, err134 := mbTrans133.WriteString(arg132)
if err134 != nil {
Usage()
return
}
factory85 := thrift.NewTJSONProtocolFactory()
jsProt86 := factory85.GetProtocol(mbTrans83)
factory135 := thrift.NewTJSONProtocolFactory()
jsProt136 := factory135.GetProtocol(mbTrans133)
argvalue0 := aurora.NewTaskQuery()
err87 := argvalue0.Read(jsProt86)
if err87 != nil {
err137 := argvalue0.Read(context.Background(), jsProt136)
if err137 != nil {
Usage()
return
}
@ -205,19 +204,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
flag.Usage()
}
arg88 := flag.Arg(1)
mbTrans89 := thrift.NewTMemoryBufferLen(len(arg88))
defer mbTrans89.Close()
_, err90 := mbTrans89.WriteString(arg88)
if err90 != nil {
arg138 := flag.Arg(1)
mbTrans139 := thrift.NewTMemoryBufferLen(len(arg138))
defer mbTrans139.Close()
_, err140 := mbTrans139.WriteString(arg138)
if err140 != nil {
Usage()
return
}
factory91 := thrift.NewTJSONProtocolFactory()
jsProt92 := factory91.GetProtocol(mbTrans89)
factory141 := thrift.NewTJSONProtocolFactory()
jsProt142 := factory141.GetProtocol(mbTrans139)
argvalue0 := aurora.NewTaskQuery()
err93 := argvalue0.Read(jsProt92)
if err93 != nil {
err143 := argvalue0.Read(context.Background(), jsProt142)
if err143 != nil {
Usage()
return
}
@ -230,19 +229,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
flag.Usage()
}
arg94 := flag.Arg(1)
mbTrans95 := thrift.NewTMemoryBufferLen(len(arg94))
defer mbTrans95.Close()
_, err96 := mbTrans95.WriteString(arg94)
if err96 != nil {
arg144 := flag.Arg(1)
mbTrans145 := thrift.NewTMemoryBufferLen(len(arg144))
defer mbTrans145.Close()
_, err146 := mbTrans145.WriteString(arg144)
if err146 != nil {
Usage()
return
}
factory97 := thrift.NewTJSONProtocolFactory()
jsProt98 := factory97.GetProtocol(mbTrans95)
factory147 := thrift.NewTJSONProtocolFactory()
jsProt148 := factory147.GetProtocol(mbTrans145)
argvalue0 := aurora.NewTaskQuery()
err99 := argvalue0.Read(jsProt98)
if err99 != nil {
err149 := argvalue0.Read(context.Background(), jsProt148)
if err149 != nil {
Usage()
return
}
@ -255,19 +254,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
flag.Usage()
}
arg100 := flag.Arg(1)
mbTrans101 := thrift.NewTMemoryBufferLen(len(arg100))
defer mbTrans101.Close()
_, err102 := mbTrans101.WriteString(arg100)
if err102 != nil {
arg150 := flag.Arg(1)
mbTrans151 := thrift.NewTMemoryBufferLen(len(arg150))
defer mbTrans151.Close()
_, err152 := mbTrans151.WriteString(arg150)
if err152 != nil {
Usage()
return
}
factory103 := thrift.NewTJSONProtocolFactory()
jsProt104 := factory103.GetProtocol(mbTrans101)
factory153 := thrift.NewTJSONProtocolFactory()
jsProt154 := factory153.GetProtocol(mbTrans151)
argvalue0 := aurora.NewJobKey()
err105 := argvalue0.Read(jsProt104)
if err105 != nil {
err155 := argvalue0.Read(context.Background(), jsProt154)
if err155 != nil {
Usage()
return
}
@ -300,19 +299,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
flag.Usage()
}
arg108 := flag.Arg(1)
mbTrans109 := thrift.NewTMemoryBufferLen(len(arg108))
defer mbTrans109.Close()
_, err110 := mbTrans109.WriteString(arg108)
if err110 != nil {
arg158 := flag.Arg(1)
mbTrans159 := thrift.NewTMemoryBufferLen(len(arg158))
defer mbTrans159.Close()
_, err160 := mbTrans159.WriteString(arg158)
if err160 != nil {
Usage()
return
}
factory111 := thrift.NewTJSONProtocolFactory()
jsProt112 := factory111.GetProtocol(mbTrans109)
factory161 := thrift.NewTJSONProtocolFactory()
jsProt162 := factory161.GetProtocol(mbTrans159)
argvalue0 := aurora.NewJobConfiguration()
err113 := argvalue0.Read(jsProt112)
if err113 != nil {
err163 := argvalue0.Read(context.Background(), jsProt162)
if err163 != nil {
Usage()
return
}
@ -325,19 +324,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
flag.Usage()
}
arg114 := flag.Arg(1)
mbTrans115 := thrift.NewTMemoryBufferLen(len(arg114))
defer mbTrans115.Close()
_, err116 := mbTrans115.WriteString(arg114)
if err116 != nil {
arg164 := flag.Arg(1)
mbTrans165 := thrift.NewTMemoryBufferLen(len(arg164))
defer mbTrans165.Close()
_, err166 := mbTrans165.WriteString(arg164)
if err166 != nil {
Usage()
return
}
factory117 := thrift.NewTJSONProtocolFactory()
jsProt118 := factory117.GetProtocol(mbTrans115)
factory167 := thrift.NewTJSONProtocolFactory()
jsProt168 := factory167.GetProtocol(mbTrans165)
argvalue0 := aurora.NewJobUpdateQuery()
err119 := argvalue0.Read(jsProt118)
if err119 != nil {
err169 := argvalue0.Read(context.Background(), jsProt168)
if err169 != nil {
Usage()
return
}
@ -350,19 +349,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
flag.Usage()
}
arg120 := flag.Arg(1)
mbTrans121 := thrift.NewTMemoryBufferLen(len(arg120))
defer mbTrans121.Close()
_, err122 := mbTrans121.WriteString(arg120)
if err122 != nil {
arg170 := flag.Arg(1)
mbTrans171 := thrift.NewTMemoryBufferLen(len(arg170))
defer mbTrans171.Close()
_, err172 := mbTrans171.WriteString(arg170)
if err172 != nil {
Usage()
return
}
factory123 := thrift.NewTJSONProtocolFactory()
jsProt124 := factory123.GetProtocol(mbTrans121)
factory173 := thrift.NewTJSONProtocolFactory()
jsProt174 := factory173.GetProtocol(mbTrans171)
argvalue0 := aurora.NewJobUpdateQuery()
err125 := argvalue0.Read(jsProt124)
if err125 != nil {
err175 := argvalue0.Read(context.Background(), jsProt174)
if err175 != nil {
Usage()
return
}
@ -375,19 +374,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
flag.Usage()
}
arg126 := flag.Arg(1)
mbTrans127 := thrift.NewTMemoryBufferLen(len(arg126))
defer mbTrans127.Close()
_, err128 := mbTrans127.WriteString(arg126)
if err128 != nil {
arg176 := flag.Arg(1)
mbTrans177 := thrift.NewTMemoryBufferLen(len(arg176))
defer mbTrans177.Close()
_, err178 := mbTrans177.WriteString(arg176)
if err178 != nil {
Usage()
return
}
factory129 := thrift.NewTJSONProtocolFactory()
jsProt130 := factory129.GetProtocol(mbTrans127)
factory179 := thrift.NewTJSONProtocolFactory()
jsProt180 := factory179.GetProtocol(mbTrans177)
argvalue0 := aurora.NewJobUpdateRequest()
err131 := argvalue0.Read(jsProt130)
if err131 != nil {
err181 := argvalue0.Read(context.Background(), jsProt180)
if err181 != nil {
Usage()
return
}


@ -1,6 +1,6 @@
#! /bin/bash
THRIFT_VER=0.13.0
THRIFT_VER=0.14.0
if [[ $(thrift -version | grep -e $THRIFT_VER -c) -ne 1 ]]; then
echo "Warning: This wrapper has only been tested with version" $THRIFT_VER;

8
go.mod

@ -1,12 +1,10 @@
module github.com/aurora-scheduler/gorealis/v2
require (
github.com/apache/thrift v0.12.0
github.com/apache/thrift v0.14.0
github.com/pkg/errors v0.9.1
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a
github.com/stretchr/testify v1.5.0
github.com/stretchr/testify v1.7.0
)
replace github.com/apache/thrift v0.13.0 => github.com/ridv/thrift v0.13.1
go 1.13
go 1.16

22
go.sum Normal file

@ -0,0 +1,22 @@
github.com/apache/thrift v0.14.0 h1:vqZ2DP42i8th2OsgCcYZkirtbzvpZEFx53LiWDJXIAs=
github.com/apache/thrift v0.14.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a h1:EYL2xz/Zdo0hyqdZMXR4lmT2O11jDLTPCEqIe/FR6W4=
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.0 h1:DMOzIV76tmoDNE9pX6RSN0aDtCYeCg5VueieJaAo1uw=
github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

23
helpers.go Normal file

@ -0,0 +1,23 @@
package realis
import (
"context"
"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
)
func (r *Client) JobExists(key aurora.JobKey) (bool, error) {
resp, err := r.client.GetConfigSummary(context.TODO(), &key)
if err != nil {
return false, err
}
return resp != nil &&
resp.GetResult_() != nil &&
resp.GetResult_().GetConfigSummaryResult_() != nil &&
resp.GetResult_().GetConfigSummaryResult_().GetSummary() != nil &&
resp.GetResult_().GetConfigSummaryResult_().GetSummary().GetGroups() != nil &&
len(resp.GetResult_().GetConfigSummaryResult_().GetSummary().GetGroups()) > 0 &&
resp.GetResponseCode() == aurora.ResponseCode_OK,
nil
}
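A minimal usage sketch for the new helper, assuming an already-connected *realis.Client; createIfAbsent is a hypothetical wrapper and the job is built elsewhere:

package example

import (
	"fmt"

	realis "github.com/aurora-scheduler/gorealis/v2"
)

// createIfAbsent is a hypothetical caller of the new JobExists helper: it
// checks the scheduler's config summary before issuing a create.
func createIfAbsent(c *realis.Client, job *realis.AuroraJob) error {
	exists, err := c.JobExists(job.JobKey())
	if err != nil {
		return err
	}
	if exists {
		fmt.Println("job already exists, skipping create")
		return nil
	}
	return c.CreateJob(job)
}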

10
job.go

@ -156,6 +156,16 @@ func (j *AuroraJob) IsService(isService bool) *AuroraJob {
return j
}
func (j *AuroraJob) Priority(priority int32) *AuroraJob {
j.task.Priority(priority)
return j
}
func (j *AuroraJob) Production(production bool) *AuroraJob {
j.task.Production(production)
return j
}
func (j *AuroraJob) TaskConfig() *aurora.TaskConfig {
return j.task.TaskConfig()
}
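The new setters slot into the existing fluent builder. A short sketch with all field values illustrative (Tier is shown alongside because the three settings usually travel together, as in the tests further down):

package example

import realis "github.com/aurora-scheduler/gorealis/v2"

// newPreemptibleJob sketches the new Priority and Production setters.
func newPreemptibleJob() *realis.AuroraJob {
	return realis.NewJob().
		Environment("prod").
		Role("vagrant").
		Name("priority_example").
		CPU(0.25).
		RAM(64).
		Disk(100).
		Tier("preemptible"). // run in the preemptible tier
		Production(false).   // mark the task as non-production
		Priority(0).         // preemption priority within the tier
		InstanceCount(1)
}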


@ -77,7 +77,7 @@ func (j *JobUpdate) BatchSize(size int32) *JobUpdate {
// Minimum number of seconds a shard must remain in RUNNING state before considered a success.
func (j *JobUpdate) WatchTime(timeout time.Duration) *JobUpdate {
j.request.Settings.MinWaitInInstanceRunningMs = int32(timeout.Seconds() * 1000)
j.request.Settings.MinWaitInInstanceRunningMs = int32(timeout.Milliseconds())
return j
}
@ -137,10 +137,11 @@ func (j *JobUpdate) SlaAware(slaAware bool) *JobUpdate {
j.request.Settings.SlaAware = &slaAware
return j
}
// AddInstanceRange allows updates to touch only a specific range of instances
func (j *JobUpdate) AddInstanceRange(first, last int32) *JobUpdate {
j.request.Settings.UpdateOnlyTheseInstances = append(j.request.Settings.UpdateOnlyTheseInstances,
&aurora.Range{First: first, Last: last})
&aurora.Range{First: first, Last: last})
return j
}
@ -220,6 +221,16 @@ func (j *JobUpdate) IsService(isService bool) *JobUpdate {
return j
}
func (j *JobUpdate) Priority(priority int32) *JobUpdate {
j.task.Priority(priority)
return j
}
func (j *JobUpdate) Production(production bool) *JobUpdate {
j.task.Production(production)
return j
}
func (j *JobUpdate) TaskConfig() *aurora.TaskConfig {
return j.task.TaskConfig()
}


@ -245,7 +245,7 @@ func (c *Client) MonitorHostMaintenance(hosts []string,
}
}
// AutoPaused monitor is a special monitor for auto pause enabled batch updates. This monitor ensures that the update
// MonitorAutoPausedUpdate is a special monitor for auto pause enabled batch updates. This monitor ensures that the update
// being monitored is capable of auto pausing and has auto pausing enabled. After verifying this information,
// the monitor watches for the job to enter the ROLL_FORWARD_PAUSED state and calculates the current batch
// the update is in using information from the update configuration.
@ -294,8 +294,9 @@ func (c *Client) MonitorAutoPausedUpdate(key aurora.JobUpdateKey, interval, time
return -1, err
}
// Summary 0 is assumed to exist because MonitorJobUpdateQuery will return an error if there is Summaries
if summary[0].State.Status != aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED {
// Summary 0 is assumed to exist because MonitorJobUpdateQuery will return an error if there are no summaries
if !(summary[0].State.Status == aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED ||
summary[0].State.Status == aurora.JobUpdateStatus_ROLLED_FORWARD) {
return -1, errors.Errorf("update is in a terminal state %v", summary[0].State.Status)
}
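With the relaxed check, the monitor now treats ROLLED_FORWARD as a valid end state rather than an error. A hedged usage sketch, assuming the (int, error) return suggested by the surrounding hunk; the interval and timeout values are illustrative:

package example

import (
	"fmt"
	"time"

	realis "github.com/aurora-scheduler/gorealis/v2"
	"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
)

// waitForPause blocks until an auto-paused batch update pauses (or finishes
// rolling forward) and reports which batch the update stopped in.
func waitForPause(c *realis.Client, key aurora.JobUpdateKey) error {
	batch, err := c.MonitorAutoPausedUpdate(key, 5*time.Second, 2*time.Minute)
	if err != nil {
		return err
	}
	fmt.Println("update paused at batch", batch)
	return nil
}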

434
offer.go Normal file

@ -0,0 +1,434 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"net/http"
"strings"
"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
)
// Offer models an entry returned by the [aurora-scheduler]/offers endpoint
type Offer struct {
ID struct {
Value string `json:"value"`
} `json:"id"`
FrameworkID struct {
Value string `json:"value"`
} `json:"framework_id"`
AgentID struct {
Value string `json:"value"`
} `json:"agent_id"`
Hostname string `json:"hostname"`
URL struct {
Scheme string `json:"scheme"`
Address struct {
Hostname string `json:"hostname"`
IP string `json:"ip"`
Port int `json:"port"`
} `json:"address"`
Path string `json:"path"`
Query []interface{} `json:"query"`
} `json:"url"`
Resources []struct {
Name string `json:"name"`
Type string `json:"type"`
Ranges struct {
Range []struct {
Begin int `json:"begin"`
End int `json:"end"`
} `json:"range"`
} `json:"ranges,omitempty"`
Role string `json:"role"`
Reservations []interface{} `json:"reservations"`
Scalar struct {
Value float64 `json:"value"`
} `json:"scalar,omitempty"`
} `json:"resources"`
Attributes []struct {
Name string `json:"name"`
Type string `json:"type"`
Text struct {
Value string `json:"value"`
} `json:"text"`
} `json:"attributes"`
ExecutorIds []struct {
Value string `json:"value"`
} `json:"executor_ids"`
}
// MaintenanceList models the hosts returned by the [aurora-scheduler]/maintenance endpoint
type MaintenanceList struct {
Drained []string `json:"DRAINED"`
Scheduled []string `json:"SCHEDULED"`
Draining map[string][]string `json:"DRAINING"`
}
type OfferCount map[float64]int64
type OfferGroupReport map[string]OfferCount
type OfferReport map[string]OfferGroupReport
// MaintenanceHosts lists all the hosts under maintenance
func (c *Client) MaintenanceHosts() ([]string, error) {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: c.config.insecureSkipVerify},
}
request := &http.Client{Transport: tr}
resp, err := request.Get(fmt.Sprintf("%s/maintenance", c.GetSchedulerURL()))
if err != nil {
return nil, err
}
defer resp.Body.Close()
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(resp.Body); err != nil {
return nil, err
}
var list MaintenanceList
if err := json.Unmarshal(buf.Bytes(), &list); err != nil {
return nil, err
}
hosts := append(list.Drained, list.Scheduled...)
for drainingHost := range list.Draining {
hosts = append(hosts, drainingHost)
}
return hosts, nil
}
// Offers pulls data from the /offers endpoint
func (c *Client) Offers() ([]Offer, error) {
tr := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: c.config.insecureSkipVerify},
}
request := &http.Client{Transport: tr}
resp, err := request.Get(fmt.Sprintf("%s/offers", c.GetSchedulerURL()))
if err != nil {
return []Offer{}, err
}
defer resp.Body.Close()
buf := new(bytes.Buffer)
if _, err := buf.ReadFrom(resp.Body); err != nil {
return nil, err
}
var offers []Offer
if err := json.Unmarshal(buf.Bytes(), &offers); err != nil {
return []Offer{}, err
}
return offers, nil
}
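A hedged sketch combining the two endpoint helpers to list offers from hosts that are not under maintenance; listUsableOffers is a hypothetical wrapper:

package example

import (
	"fmt"

	realis "github.com/aurora-scheduler/gorealis/v2"
)

// listUsableOffers filters the /offers payload against the /maintenance list.
func listUsableOffers(c *realis.Client) error {
	maint, err := c.MaintenanceHosts()
	if err != nil {
		return err
	}
	down := make(map[string]bool, len(maint))
	for _, h := range maint {
		down[h] = true
	}
	offers, err := c.Offers()
	if err != nil {
		return err
	}
	for _, o := range offers {
		if !down[o.Hostname] {
			fmt.Println("usable offer on", o.Hostname)
		}
	}
	return nil
}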
// AvailOfferReport returns a detailed summary of offers available for use.
// For example, 2 nodes offer 32 cpus and 10 nodes offer 1 cpu.
func (c *Client) AvailOfferReport() (OfferReport, error) {
maintHosts, err := c.MaintenanceHosts()
if err != nil {
return nil, err
}
maintHostSet := map[string]bool{}
for _, h := range maintHosts {
maintHostSet[h] = true
}
// Get a list of offers
offers, err := c.Offers()
if err != nil {
return nil, err
}
report := OfferReport{}
for _, o := range offers {
if maintHostSet[o.Hostname] {
continue
}
group := "non-dedicated"
for _, a := range o.Attributes {
if a.Name == "dedicated" {
group = a.Text.Value
break
}
}
if _, ok := report[group]; !ok {
report[group] = map[string]OfferCount{}
}
for _, r := range o.Resources {
if _, ok := report[group][r.Name]; !ok {
report[group][r.Name] = OfferCount{}
}
val := 0.0
switch r.Type {
case "SCALAR":
val = r.Scalar.Value
case "RANGES":
for _, pr := range r.Ranges.Range {
val += float64(pr.End - pr.Begin + 1)
}
default:
return nil, fmt.Errorf("%s is not supported", r.Type)
}
report[group][r.Name][val]++
}
}
return report, nil
}
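The report nests three maps deep: dedicated group, then resource name, then offered amount mapped to the number of offers at that amount. A hedged sketch of walking it; printCapacity is a hypothetical helper:

package example

import (
	"fmt"

	realis "github.com/aurora-scheduler/gorealis/v2"
)

// printCapacity walks the group -> resource -> amount -> count nesting.
func printCapacity(c *realis.Client) error {
	report, err := c.AvailOfferReport()
	if err != nil {
		return err
	}
	for group, byResource := range report {
		for resource, counts := range byResource {
			for amount, n := range counts {
				fmt.Printf("%s: %d offer(s) with %.1f %s\n", group, n, amount, resource)
			}
		}
	}
	return nil
}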
// FitTasks computes the number of tasks that can be fit into a list of offers
func (c *Client) FitTasks(taskConfig *aurora.TaskConfig, offers []Offer) (int64, error) {
// count the number of tasks per limit constraint: limit.name -> limit.value -> count
limitCounts := map[string]map[string]int64{}
for _, c := range taskConfig.Constraints {
if c.Constraint.Limit != nil {
limitCounts[c.Name] = map[string]int64{}
}
}
request := ResourcesToMap(taskConfig.Resources)
// validate resource request
if len(request) == 0 {
return -1, fmt.Errorf("Resource request %v must not be empty", request)
}
isValid := false
for _, resVal := range request {
if resVal > 0 {
isValid = true
break
}
}
if !isValid {
return -1, fmt.Errorf("Resource request %v is not valid", request)
}
// pull the list of hosts under maintenance
maintHosts, err := c.MaintenanceHosts()
if err != nil {
return -1, err
}
maintHostSet := map[string]bool{}
for _, h := range maintHosts {
maintHostSet[h] = true
}
numTasks := int64(0)
for _, o := range offers {
// skip the hosts under maintenance
if maintHostSet[o.Hostname] {
continue
}
numTasksPerOffer := int64(-1)
for resName, resVal := range request {
// skip as we can fit an infinite number of tasks with 0 demand.
if resVal == 0 {
continue
}
avail := 0.0
for _, r := range o.Resources {
if r.Name != resName {
continue
}
switch r.Type {
case "SCALAR":
avail = r.Scalar.Value
case "RANGES":
for _, pr := range r.Ranges.Range {
avail += float64(pr.End - pr.Begin + 1)
}
default:
return -1, fmt.Errorf("%s is not supported", r.Type)
}
}
numTasksPerResource := int64(avail / resVal)
if numTasksPerResource < numTasksPerOffer || numTasksPerOffer < 0 {
numTasksPerOffer = numTasksPerResource
}
}
numTasks += fitConstraints(taskConfig, &o, limitCounts, numTasksPerOffer)
}
return numTasks, nil
}
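A hedged sketch of probing cluster headroom with FitTasks, building a throwaway TaskConfig with the fluent builder; all names and resource values are illustrative:

package example

import (
	"fmt"

	realis "github.com/aurora-scheduler/gorealis/v2"
)

// probeCapacity asks how many copies of a task would fit in current offers.
func probeCapacity(c *realis.Client) error {
	task := realis.NewJob().
		Environment("prod").
		Role("vagrant").
		Name("capacity_probe").
		CPU(0.25).
		RAM(128).
		Disk(64).
		TaskConfig()

	offers, err := c.Offers()
	if err != nil {
		return err
	}
	n, err := c.FitTasks(task, offers)
	if err != nil {
		return err
	}
	fmt.Println("cluster can fit", n, "more instances")
	return nil
}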
func fitConstraints(taskConfig *aurora.TaskConfig,
offer *Offer,
limitCounts map[string]map[string]int64,
numTasksPerOffer int64) int64 {
// check dedicated attributes vs. constraints
if !isDedicated(offer, taskConfig.Job.Role, taskConfig.Constraints) {
return 0
}
limitConstraints := []*aurora.Constraint{}
for _, c := range taskConfig.Constraints {
// look for corresponding attribute
attFound := false
for _, a := range offer.Attributes {
if a.Name == c.Name {
attFound = true
}
}
// constraint not found in offer's attributes
if !attFound {
return 0
}
if c.Constraint.Value != nil && !valueConstraint(offer, c) {
// value constraint is not satisfied
return 0
} else if c.Constraint.Limit != nil {
limitConstraints = append(limitConstraints, c)
limit := limitConstraint(offer, c, limitCounts)
if numTasksPerOffer > limit && limit >= 0 {
numTasksPerOffer = limit
}
}
}
// update limitCounts
for _, c := range limitConstraints {
for _, a := range offer.Attributes {
if a.Name == c.Name {
limitCounts[a.Name][a.Text.Value] += numTasksPerOffer
}
}
}
return numTasksPerOffer
}
func isDedicated(offer *Offer, role string, constraints []*aurora.Constraint) bool {
// get all dedicated attributes of an offer
dedicatedAtts := map[string]bool{}
for _, a := range offer.Attributes {
if a.Name == "dedicated" {
dedicatedAtts[a.Text.Value] = true
}
}
if len(dedicatedAtts) == 0 {
return true
}
// check if constraints are matching dedicated attributes
matched := false
for _, c := range constraints {
if c.Name == "dedicated" && c.Constraint.Value != nil {
found := false
for _, v := range c.Constraint.Value.Values {
if dedicatedAtts[v] && strings.HasPrefix(v, fmt.Sprintf("%s/", role)) {
found = true
break
}
}
if found {
matched = true
} else {
return false
}
}
}
return matched
}
// valueConstraint checks whether the value constraints of a task are matched by the offer.
// more details can be found here https://aurora.apache.org/documentation/latest/features/constraints/
func valueConstraint(offer *Offer, constraint *aurora.Constraint) bool {
matched := false
for _, a := range offer.Attributes {
if a.Name == constraint.Name {
for _, v := range constraint.Constraint.Value.Values {
matched = (a.Text.Value == v && !constraint.Constraint.Value.Negated) ||
(a.Text.Value != v && constraint.Constraint.Value.Negated)
if matched {
break
}
}
if matched {
break
}
}
}
return matched
}
// limitConstraint limits the number of tasks in a group which shares the same attribute.
// more details can be found here https://aurora.apache.org/documentation/latest/features/constraints/
func limitConstraint(offer *Offer, constraint *aurora.Constraint, limitCounts map[string]map[string]int64) int64 {
limit := int64(-1)
for _, a := range offer.Attributes {
// limit constraint found
if a.Name == constraint.Name {
curr := limitCounts[a.Name][a.Text.Value]
currLimit := int64(constraint.Constraint.Limit.Limit)
if curr >= currLimit {
return 0
}
if currLimit-curr < limit || limit < 0 {
limit = currLimit - curr
}
}
}
return limit
}

253
realis.go

@ -36,7 +36,7 @@ import (
"github.com/pkg/errors"
)
const VERSION = "2.22.1"
const VERSION = "2.28.0"
type Client struct {
config *clientConfig
@ -147,6 +147,8 @@ func NewClient(options ...ClientOption) (*Client, error) {
return nil, errors.New("incomplete Options -- url, cluster.json, or Zookeeper address required")
}
config.url = url
url, err = validateAuroraAddress(url)
if err != nil {
return nil, errors.Wrap(err, "unable to create realis object, invalid url")
@ -313,11 +315,13 @@ func (c *Client) GetInstanceIds(key aurora.JobKey, states []aurora.ScheduleStatu
Statuses: states,
}
c.logger.DebugPrintf("GetTasksWithoutConfigs Thrift Payload: %+v\n", taskQ)
c.logger.DebugPrintf("GetInstanceIds Thrift Payload: %+v\n", taskQ)
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.GetTasksWithoutConfigs(context.TODO(), taskQ)
})
},
nil,
)
// If we encountered an error we couldn't recover from by retrying, return an error to the user
if retryErr != nil {
@ -339,8 +343,13 @@ func (c *Client) GetJobUpdateSummaries(jobUpdateQuery *aurora.JobUpdateQuery) (*
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.readonlyClient.GetJobUpdateSummaries(context.TODO(), jobUpdateQuery)
})
},
nil,
)
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetGetJobUpdateSummariesResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error getting job update summaries from Aurora Scheduler")
}
@ -348,23 +357,39 @@ func (c *Client) GetJobUpdateSummaries(jobUpdateQuery *aurora.JobUpdateQuery) (*
return resp.GetResult_().GetGetJobUpdateSummariesResult_(), nil
}
func (c *Client) GetJobs(role string) (*aurora.GetJobsResult_, error) {
func (c *Client) GetJobSummary(role string) (*aurora.JobSummaryResult_, error) {
var result *aurora.GetJobsResult_
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.readonlyClient.GetJobSummary(context.TODO(), role)
},
nil,
)
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetJobSummaryResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error getting job summaries from Aurora Scheduler")
}
return resp.GetResult_().GetJobSummaryResult_(), nil
}
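A hedged usage sketch for the new call; the role is a placeholder and the field accesses assume the generated thrift stubs:

package example

import (
	"fmt"

	realis "github.com/aurora-scheduler/gorealis/v2"
)

// listJobs prints the name of every job summary returned for a role.
func listJobs(c *realis.Client) error {
	result, err := c.GetJobSummary("vagrant")
	if err != nil {
		return err
	}
	for _, s := range result.Summaries {
		fmt.Println(s.Job.Key.Name)
	}
	return nil
}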
func (c *Client) GetJobs(role string) (*aurora.GetJobsResult_, error) {
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.readonlyClient.GetJobs(context.TODO(), role)
})
},
nil,
)
if retryErr != nil {
return result, errors.Wrap(retryErr, "error getting Jobs from Aurora Scheduler")
return nil, errors.Wrap(retryErr, "error getting Jobs from Aurora Scheduler")
}
if resp == nil || resp.GetResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
if resp.GetResult_() != nil {
result = resp.GetResult_().GetJobsResult_
}
return result, nil
return resp.GetResult_().GetJobsResult_, nil
}
// Kill specific instances of a job. Returns true, nil if a task was actually killed as a result of this API call.
@ -374,19 +399,19 @@ func (c *Client) KillInstances(key aurora.JobKey, instances ...int32) (bool, err
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.KillTasks(context.TODO(), &key, instances, "")
})
},
nil,
)
if retryErr != nil {
return false, errors.Wrap(retryErr, "error sending Kill command to Aurora Scheduler")
}
if len(resp.GetDetails()) > 0 {
if resp == nil || len(resp.GetDetails()) > 0 {
c.logger.Println("KillTasks was called but no tasks killed as a result.")
return false, nil
} else {
return true, nil
}
return true, nil
}
func (c *Client) RealisConfig() *clientConfig {
@ -401,7 +426,9 @@ func (c *Client) KillJob(key aurora.JobKey) error {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
// Giving the KillTasks thrift call an empty set tells the Aurora scheduler to kill all active shards
return c.client.KillTasks(context.TODO(), &key, nil, "")
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending Kill command to Aurora Scheduler")
@ -423,9 +450,27 @@ func (c *Client) CreateJob(auroraJob *AuroraJob) error {
return errors.Wrap(err, "unable to create Thermos payload")
}
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.CreateJob(context.TODO(), auroraJob.JobConfig())
})
// Response is checked by the thrift retry code
_, retryErr := c.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return c.client.CreateJob(context.TODO(), auroraJob.JobConfig())
},
// On a client timeout, attempt to verify that the payload made it to the Scheduler by
// trying to get the config summary for the job key
func() (*aurora.Response, bool) {
exists, err := c.JobExists(auroraJob.JobKey())
if err != nil {
c.logger.Print("verification failed ", err)
}
if exists {
return &aurora.Response{ResponseCode: aurora.ResponseCode_OK}, true
}
return nil, false
},
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending Create command to Aurora Scheduler")
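The second callback passed to the retry helper is the heart of this change: after a client-side timeout on a non-idempotent write, the client asks the scheduler whether the request actually landed before retrying or failing. A minimal sketch of the pattern with illustrative names (the real helper, thriftCallWithRetries, is unexported and also handles retries and backoff):

package example

// callWithVerify runs call and, on a timeout, consults verify to decide
// whether the request took effect even though the response was lost.
func callWithVerify(call func() error, isTimeout func(error) bool, verify func() bool) error {
	err := call()
	if err == nil {
		return nil
	}
	if isTimeout(err) && verify != nil && verify() {
		// The scheduler acted on the request; treat the call as a success
		// instead of re-sending a non-idempotent write.
		return nil
	}
	return err
}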
@ -456,7 +501,9 @@ func (c *Client) ScheduleCronJob(auroraJob *AuroraJob) error {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.ScheduleCronJob(context.TODO(), auroraJob.JobConfig())
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending Cron AuroraJob Schedule message to Aurora Scheduler")
@ -470,7 +517,9 @@ func (c *Client) DescheduleCronJob(key aurora.JobKey) error {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.DescheduleCronJob(context.TODO(), &key)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending Cron AuroraJob De-schedule message to Aurora Scheduler")
@ -486,7 +535,9 @@ func (c *Client) StartCronJob(key aurora.JobKey) error {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.StartCronJob(context.TODO(), &key)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending Start Cron AuroraJob message to Aurora Scheduler")
@ -501,7 +552,9 @@ func (c *Client) RestartInstances(key aurora.JobKey, instances ...int32) error {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.RestartShards(context.TODO(), &key, instances)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending Restart command to Aurora Scheduler")
@ -522,16 +575,17 @@ func (c *Client) RestartJob(key aurora.JobKey) error {
if len(instanceIds) > 0 {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.RestartShards(context.TODO(), &key, instanceIds)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending Restart command to Aurora Scheduler")
}
return nil
} else {
return errors.New("no tasks in the Active state")
}
return errors.New("no tasks in the Active state")
}
// Update all tasks under a job configuration. Currently gorealis doesn't support canary deployments.
@ -543,34 +597,80 @@ func (c *Client) StartJobUpdate(updateJob *JobUpdate, message string) (*aurora.S
c.logger.DebugPrintf("StartJobUpdate Thrift Payload: %+v %v\n", updateJob, message)
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.StartJobUpdate(nil, updateJob.request, message)
})
resp, retryErr := c.thriftCallWithRetries(false,
func() (*aurora.Response, error) {
return c.client.StartJobUpdate(context.TODO(), updateJob.request, message)
},
func() (*aurora.Response, bool) {
key := updateJob.JobKey()
summariesResp, err := c.readonlyClient.GetJobUpdateSummaries(
context.TODO(),
&aurora.JobUpdateQuery{
JobKey: &key,
UpdateStatuses: aurora.ACTIVE_JOB_UPDATE_STATES,
Limit: 1,
})
if err != nil {
c.logger.Print("verification failed ", err)
return nil, false
}
summaries := response.JobUpdateSummaries(summariesResp)
if len(summaries) == 0 {
return nil, false
}
return &aurora.Response{
ResponseCode: aurora.ResponseCode_OK,
Result_: &aurora.Result_{
StartJobUpdateResult_: &aurora.StartJobUpdateResult_{
UpdateSummary: summaries[0],
Key: summaries[0].Key,
},
},
}, true
},
)
if retryErr != nil {
// A timeout took place when attempting this call; attempt to recover
if IsTimeout(retryErr) {
return nil, retryErr
}
return nil, errors.Wrap(retryErr, "error sending StartJobUpdate command to Aurora Scheduler")
}
if resp.GetResult_() != nil && resp.GetResult_().GetStartJobUpdateResult_() != nil {
return resp.GetResult_().GetStartJobUpdateResult_(), nil
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetStartJobUpdateResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
return resp.GetResult_().GetStartJobUpdateResult_(), nil
}
// Abort AuroraJob Update on Aurora. Requires the updateId which can be obtained on the Aurora web UI.
// AbortJobUpdate terminates a job update in the scheduler.
// It requires the updateId which can be obtained on the Aurora web UI.
// This API is meant to be synchronous. It will attempt to wait until the update transitions to the aborted state.
// However, if the job update does not transition to the ABORT state an error will be returned.
func (c *Client) AbortJobUpdate(updateKey aurora.JobUpdateKey, message string) error {
c.logger.DebugPrintf("AbortJobUpdate Thrift Payload: %+v %v\n", updateKey, message)
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.AbortJobUpdate(context.TODO(), &updateKey, message)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending AbortJobUpdate command to Aurora Scheduler")
}
return nil
// Make this call synchronous by blocking until the job update has successfully transitioned to aborted
_, err := c.MonitorJobUpdateStatus(
updateKey,
[]aurora.JobUpdateStatus{aurora.JobUpdateStatus_ABORTED},
time.Second*5,
time.Minute)
return err
}
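Since the call is now synchronous, caller code is a plain invocation that returns only after the update reaches ABORTED or the built-in monitor (5-second polls, capped at one minute) gives up. A hedged sketch; the message is a placeholder:

package example

import (
	realis "github.com/aurora-scheduler/gorealis/v2"
	"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
)

// abortAndWait blocks until the update is actually aborted or errors out.
func abortAndWait(c *realis.Client, key aurora.JobUpdateKey) error {
	return c.AbortJobUpdate(key, "aborted by operator")
}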
// Pause AuroraJob Update. UpdateID is returned from StartJobUpdate or the Aurora web UI.
@ -590,7 +690,9 @@ func (c *Client) PauseJobUpdate(updateKey *aurora.JobUpdateKey, message string)
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.PauseJobUpdate(nil, updateKeyLocal, message)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending PauseJobUpdate command to Aurora Scheduler")
@ -617,7 +719,9 @@ func (c *Client) ResumeJobUpdate(updateKey aurora.JobUpdateKey, message string)
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.ResumeJobUpdate(context.TODO(), &updateKey, message)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending ResumeJobUpdate command to Aurora Scheduler")
@ -638,18 +742,19 @@ func (c *Client) PulseJobUpdate(updateKey aurora.JobUpdateKey) (aurora.JobUpdate
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.PulseJobUpdate(context.TODO(), &updateKey)
})
},
nil,
)
if retryErr != nil {
return aurora.JobUpdatePulseStatus(0), errors.Wrap(retryErr, "error sending PulseJobUpdate command to Aurora Scheduler")
}
if resp.GetResult_() != nil && resp.GetResult_().GetPulseJobUpdateResult_() != nil {
return resp.GetResult_().GetPulseJobUpdateResult_().GetStatus(), nil
} else {
return aurora.JobUpdatePulseStatus(0), errors.New("thrift error, field was nil unexpectedly")
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetPulseJobUpdateResult_() == nil {
return aurora.JobUpdatePulseStatus(0), errors.New("unexpected response from scheduler")
}
return resp.GetResult_().GetPulseJobUpdateResult_().GetStatus(), nil
}
// Scale up the number of instances under a job configuration using the configuration for specific
@ -666,7 +771,9 @@ func (c *Client) AddInstances(instKey aurora.InstanceKey, count int32) error {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.AddInstances(context.TODO(), &instKey, count)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "error sending AddInstances command to Aurora Scheduler")
@ -711,11 +818,16 @@ func (c *Client) GetTaskStatus(query *aurora.TaskQuery) ([]*aurora.ScheduledTask
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.GetTasksStatus(context.TODO(), query)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error querying Aurora Scheduler for task status")
}
if resp == nil {
return nil, errors.New("unexpected response from scheduler")
}
return response.ScheduleStatusResult(resp).GetTasks(), nil
}
@ -727,29 +839,32 @@ func (c *Client) GetPendingReason(query *aurora.TaskQuery) ([]*aurora.PendingRea
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.GetPendingReason(context.TODO(), query)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error querying Aurora Scheduler for pending Reasons")
}
var result []*aurora.PendingReason
if resp.GetResult_() != nil {
result = resp.GetResult_().GetGetPendingReasonResult_().GetReasons()
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetGetPendingReasonResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
return result, nil
return resp.GetResult_().GetGetPendingReasonResult_().GetReasons(), nil
}
// Get information about task including without a task configuration object
// GetTasksWithoutConfigs gets information about tasks without including a task configuration object.
// This is a more lightweight version of GetTaskStatus but contains less information as a result.
func (c *Client) GetTasksWithoutConfigs(query *aurora.TaskQuery) ([]*aurora.ScheduledTask, error) {
c.logger.DebugPrintf("GetTasksWithoutConfigs Thrift Payload: %+v\n", query)
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.GetTasksWithoutConfigs(context.TODO(), query)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error querying Aurora Scheduler for task status without configs")
@ -776,7 +891,9 @@ func (c *Client) FetchTaskConfig(instKey aurora.InstanceKey) (*aurora.TaskConfig
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.GetTasksStatus(context.TODO(), taskQ)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error querying Aurora Scheduler for task configuration")
@ -802,17 +919,19 @@ func (c *Client) JobUpdateDetails(updateQuery aurora.JobUpdateQuery) ([]*aurora.
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.GetJobUpdateDetails(context.TODO(), &updateQuery)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "unable to get job update details")
}
if resp.GetResult_() != nil && resp.GetResult_().GetGetJobUpdateDetailsResult_() != nil {
return resp.GetResult_().GetGetJobUpdateDetailsResult_().GetDetailsList(), nil
} else {
return nil, errors.New("unknown Thrift error, field is nil.")
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetGetJobUpdateDetailsResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
return resp.GetResult_().GetGetJobUpdateDetailsResult_().GetDetailsList(), nil
}
func (c *Client) RollbackJobUpdate(key aurora.JobUpdateKey, message string) error {
@ -821,10 +940,16 @@ func (c *Client) RollbackJobUpdate(key aurora.JobUpdateKey, message string) erro
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.client.RollbackJobUpdate(context.TODO(), &key, message)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "unable to roll back job update")
}
return nil
}
func (c *Client) GetSchedulerURL() string {
return c.config.url
}


@ -37,17 +37,19 @@ func (c *Client) DrainHosts(hosts ...string) ([]*aurora.HostStatus, error) {
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.DrainHosts(context.TODO(), drainList)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "unable to recover connection")
}
if resp.GetResult_() != nil && resp.GetResult_().GetDrainHostsResult_() != nil {
return resp.GetResult_().GetDrainHostsResult_().GetStatuses(), nil
} else {
return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetDrainHostsResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
return resp.GetResult_().GetDrainHostsResult_().GetStatuses(), nil
}
// Start SLA Aware Drain.
@ -59,6 +61,18 @@ func (c *Client) SLADrainHosts(policy *aurora.SlaPolicy, timeout int64, hosts ..
return nil, errors.New("no hosts provided to drain")
}
if policy == nil || policy.CountSetFieldsSlaPolicy() == 0 {
policy = &defaultSlaPolicy
c.logger.Printf("Warning: start draining with default sla policy %v", policy)
}
if timeout < 0 {
c.logger.Printf("Warning: timeout %d secs is invalid, draining with default timeout %d secs",
timeout,
defaultSlaDrainTimeoutSecs)
timeout = defaultSlaDrainTimeoutSecs
}
drainList := aurora.NewHosts()
drainList.HostNames = hosts
@ -66,17 +80,19 @@ func (c *Client) SLADrainHosts(policy *aurora.SlaPolicy, timeout int64, hosts ..
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.SlaDrainHosts(context.TODO(), drainList, policy, timeout)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "unable to recover connection")
}
if resp.GetResult_() != nil && resp.GetResult_().GetDrainHostsResult_() != nil {
return resp.GetResult_().GetDrainHostsResult_().GetStatuses(), nil
} else {
return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetDrainHostsResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
return resp.GetResult_().GetDrainHostsResult_().GetStatuses(), nil
}
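With the new fallbacks, callers can opt into the defaults explicitly by passing a nil policy and a negative timeout, which the client replaces with the default PercentageSlaPolicy (66% over 300 seconds) and the 900-second drain timeout defined in the client config below. A hedged sketch; the hostname is a placeholder:

package example

import realis "github.com/aurora-scheduler/gorealis/v2"

// drainWithDefaults relies on the client's default SLA policy and timeout.
func drainWithDefaults(c *realis.Client) error {
	_, err := c.SLADrainHosts(nil, -1, "agent1.example.org")
	return err
}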
func (c *Client) StartMaintenance(hosts ...string) ([]*aurora.HostStatus, error) {
@ -92,17 +108,19 @@ func (c *Client) StartMaintenance(hosts ...string) ([]*aurora.HostStatus, error)
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.StartMaintenance(context.TODO(), hostList)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "unable to recover connection")
}
if resp.GetResult_() != nil && resp.GetResult_().GetStartMaintenanceResult_() != nil {
return resp.GetResult_().GetStartMaintenanceResult_().GetStatuses(), nil
} else {
return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetStartMaintenanceResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
return resp.GetResult_().GetStartMaintenanceResult_().GetStatuses(), nil
}
func (c *Client) EndMaintenance(hosts ...string) ([]*aurora.HostStatus, error) {
@ -118,24 +136,20 @@ func (c *Client) EndMaintenance(hosts ...string) ([]*aurora.HostStatus, error) {
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.EndMaintenance(context.TODO(), hostList)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "unable to recover connection")
}
if resp.GetResult_() != nil && resp.GetResult_().GetEndMaintenanceResult_() != nil {
return resp.GetResult_().GetEndMaintenanceResult_().GetStatuses(), nil
} else {
return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetEndMaintenanceResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
return resp.GetResult_().GetEndMaintenanceResult_().GetStatuses(), nil
}
func (c *Client) MaintenanceStatus(hosts ...string) (*aurora.MaintenanceStatusResult_, error) {
var result *aurora.MaintenanceStatusResult_
if len(hosts) == 0 {
return nil, errors.New("no hosts provided to get maintenance status from")
}
@ -149,17 +163,18 @@ func (c *Client) MaintenanceStatus(hosts ...string) (*aurora.MaintenanceStatusRe
// and continue trying to resend the command until we run out of retries.
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.MaintenanceStatus(context.TODO(), hostList)
})
},
nil,
)
if retryErr != nil {
return result, errors.Wrap(retryErr, "unable to recover connection")
return nil, errors.Wrap(retryErr, "unable to recover connection")
}
if resp == nil || resp.GetResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
if resp.GetResult_() != nil {
result = resp.GetResult_().GetMaintenanceStatusResult_()
}
return result, nil
return resp.GetResult_().GetMaintenanceStatusResult_(), nil
}
// SetQuota sets a quota aggregate for the given role
@ -177,7 +192,9 @@ func (c *Client) SetQuota(role string, cpu *float64, ramMb *int64, diskMb *int64
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.SetQuota(context.TODO(), role, quota)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "unable to set role quota")
@ -191,17 +208,18 @@ func (c *Client) GetQuota(role string) (*aurora.GetQuotaResult_, error) {
resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.GetQuota(context.TODO(), role)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "unable to get role quota")
}
if resp.GetResult_() != nil {
return resp.GetResult_().GetGetQuotaResult_(), nil
} else {
return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
if resp == nil || resp.GetResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
return resp.GetResult_().GetGetQuotaResult_(), nil
}
// Force Aurora Scheduler to perform a snapshot and write to Mesos log
@ -209,7 +227,9 @@ func (c *Client) Snapshot() error {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.Snapshot(context.TODO())
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "unable to recover connection")
@ -223,7 +243,9 @@ func (c *Client) PerformBackup() error {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.PerformBackup(context.TODO())
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "unable to recover connection")
@ -237,7 +259,9 @@ func (c *Client) ForceImplicitTaskReconciliation() error {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.TriggerImplicitTaskReconciliation(context.TODO())
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "unable to recover connection")
@ -258,7 +282,9 @@ func (c *Client) ForceExplicitTaskReconciliation(batchSize *int32) error {
_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
return c.adminClient.TriggerExplicitTaskReconciliation(context.TODO(), settings)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "unable to recover connection")


@ -19,6 +19,7 @@ import (
"time"
"github.com/apache/thrift/lib/go/thrift"
"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
)
type clientConfig struct {
@ -48,6 +49,15 @@ var defaultBackoff = Backoff{
Jitter: 0.1,
}
var defaultSlaPolicy = aurora.SlaPolicy{
PercentageSlaPolicy: &aurora.PercentageSlaPolicy{
Percentage: 66,
DurationSecs: 300,
},
}
const defaultSlaDrainTimeoutSecs = 900
type TransportProtocol int
const (


@ -94,12 +94,15 @@ func TestBadCredentials(t *testing.T) {
job := realis.NewJob().
Environment("prod").
Role("vagrant").
Name("create_thermos_job_test").
Name("create_thermos_job_bad_creds_test").
ThermosExecutor(thermosExec).
CPU(.5).
RAM(64).
Disk(100).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(2).
AddPorts(1)
@ -177,6 +180,35 @@ func TestLeaderFromZK(t *testing.T) {
assert.Equal(t, "http://192.168.33.7:8081", url)
}
func TestMasterFromZK(t *testing.T) {
cluster := realis.GetDefaultClusterFromZKUrl("192.168.33.2:2181")
masterNodesMap, err := realis.MasterNodesFromZK(*cluster)
assert.NoError(t, err)
for _, hostnames := range masterNodesMap {
for _, hostname := range hostnames {
assert.NoError(t, err)
assert.Equal(t, "192.168.33.7", hostname)
}
}
}
func TestMesosMasterFromZK(t *testing.T) {
cluster := realis.GetDefaultClusterFromZKUrl("192.168.33.2:2181")
masterNodesMap, err := realis.MesosMasterNodesFromZK(*cluster)
assert.NoError(t, err)
for _, hostnames := range masterNodesMap {
for _, hostname := range hostnames {
assert.NoError(t, err)
assert.Equal(t, "localhost", hostname)
}
}
}
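The two tests above exercise the new Australis-facing lookups. Outside the test harness, the same calls might look like this sketch; the addresses are the vagrant test values and the "leader"/"masterNodes" keys come from the zk.go implementation below:

package main

import (
	"fmt"
	"log"

	realis "github.com/aurora-scheduler/gorealis/v2"
)

func main() {
	cluster := realis.GetDefaultClusterFromZKUrl("192.168.33.2:2181")

	// Aurora scheduler nodes; the result maps "leader" and "masterNodes"
	// to lists of hostnames.
	auroraNodes, err := realis.MasterNodesFromZK(*cluster)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("aurora leader:", auroraNodes["leader"])

	// Mesos master nodes, resolved from the cluster's MesosZKPath.
	mesosNodes, err := realis.MesosMasterNodesFromZK(*cluster)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mesos leader:", mesosNodes["leader"])
}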
func TestInvalidAuroraURL(t *testing.T) {
for _, url := range []string{
"http://doesntexist.com:8081/apitest",
@ -206,7 +238,6 @@ func TestValidAuroraURL(t *testing.T) {
}
func TestRealisClient_ReestablishConn(t *testing.T) {
// Test that we're able to tear down the old connection and create a new one.
err := r.ReestablishConn()
@ -217,11 +248,9 @@ func TestGetCACerts(t *testing.T) {
certs, err := realis.GetCerts("./examples/certs")
assert.NoError(t, err)
assert.Equal(t, len(certs.Subjects()), 2)
}
func TestRealisClient_CreateJob_Thermos(t *testing.T) {
role := "vagrant"
job := realis.NewJob().
Environment("prod").
@ -232,6 +261,9 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
RAM(64).
Disk(100).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(2).
AddPorts(1)
@ -245,7 +277,7 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
// Fetch all Jobs
result, err := r.GetJobs(role)
fmt.Printf("GetJobs length: %+v \n", len(result.Configs))
fmt.Println("GetJobs length: ", len(result.Configs))
assert.Len(t, result.Configs, 1)
assert.NoError(t, err)
@ -266,7 +298,7 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
err := r.KillJob(job.JobKey())
assert.NoError(t, err)
success, err := r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 60*time.Second)
success, err := r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 90*time.Second)
assert.True(t, success)
assert.NoError(t, err)
})
@ -274,7 +306,6 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
// Test configuring an executor that doesn't exist for CreateJob API
func TestRealisClient_CreateJob_ExecutorDoesNotExist(t *testing.T) {
// Create a single job
job := realis.NewJob().
Environment("prod").
@ -293,7 +324,6 @@ func TestRealisClient_CreateJob_ExecutorDoesNotExist(t *testing.T) {
// Test fetching the reason a task is stuck in PENDING via the GetPendingReason API
func TestRealisClient_GetPendingReason(t *testing.T) {
env := "prod"
role := "vagrant"
name := "pending_reason_test"
@ -324,10 +354,13 @@ func TestRealisClient_GetPendingReason(t *testing.T) {
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
success, err := r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 90*time.Second)
assert.True(t, success)
assert.NoError(t, err)
}
func TestRealisClient_CreateService_WithPulse_Thermos(t *testing.T) {
fmt.Println("Creating service")
role := "vagrant"
job := realis.NewJobUpdate().
@ -339,6 +372,9 @@ func TestRealisClient_CreateService_WithPulse_Thermos(t *testing.T) {
Disk(100).
ThermosExecutor(thermosExec).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(2).
AddPorts(1).
AddLabel("currentTime", time.Now().String()).
@ -407,6 +443,10 @@ pulseLoop:
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
success, err := r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 90*time.Second)
assert.True(t, success)
assert.NoError(t, err)
}
// Test creating a service via the CreateService API
@ -424,6 +464,9 @@ func TestRealisClient_CreateService(t *testing.T) {
InstanceCount(3).
WatchTime(20 * time.Second).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0).
BatchSize(2)
result, err := r.CreateService(job)
@ -434,13 +477,15 @@ func TestRealisClient_CreateService(t *testing.T) {
var ok bool
var mErr error
if ok, mErr = r.MonitorJobUpdate(*result.GetKey(), 5*time.Second, 4*time.Minute); !ok || mErr != nil {
// Update may already be in a terminal state so don't check for error
err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out.")
if result != nil {
if ok, mErr = r.MonitorJobUpdate(*result.GetKey(), 5*time.Second, 4*time.Minute); !ok || mErr != nil {
// Update may already be in a terminal state so don't check for error
err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out.")
err = r.KillJob(job.JobKey())
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
assert.NoError(t, err)
}
}
assert.True(t, ok)
@ -448,7 +493,10 @@ func TestRealisClient_CreateService(t *testing.T) {
// Kill task test task after confirming it came up fine
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
success, err := r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 90*time.Second)
assert.True(t, success)
assert.NoError(t, err)
}
@ -488,6 +536,9 @@ func TestRealisClient_ScheduleCronJob_Thermos(t *testing.T) {
RAM(64).
Disk(100).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(1).
AddPorts(1).
CronSchedule("* * * * *").
@ -504,10 +555,17 @@ func TestRealisClient_ScheduleCronJob_Thermos(t *testing.T) {
t.Run("TestRealisClient_DeschedulerCronJob_Thermos", func(t *testing.T) {
err := r.DescheduleCronJob(job.JobKey())
assert.NoError(t, err)
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
success, err := r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 90*time.Second)
assert.True(t, success)
assert.NoError(t, err)
})
}
func TestRealisClient_StartMaintenance(t *testing.T) {
hosts := []string{"localhost"}
hosts := []string{"agent-one"}
_, err := r.StartMaintenance(hosts...)
assert.NoError(t, err)
@ -517,7 +575,7 @@ func TestRealisClient_StartMaintenance(t *testing.T) {
[]aurora.MaintenanceMode{aurora.MaintenanceMode_SCHEDULED},
1*time.Second,
50*time.Second)
assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
assert.Equal(t, map[string]bool{"agent-one": true}, hostResults)
assert.NoError(t, err)
_, err = r.EndMaintenance(hosts...)
@ -533,7 +591,7 @@ func TestRealisClient_StartMaintenance(t *testing.T) {
}
func TestRealisClient_DrainHosts(t *testing.T) {
hosts := []string{"localhost"}
hosts := []string{"agent-one"}
_, err := r.DrainHosts(hosts...)
assert.NoError(t, err)
@ -543,7 +601,7 @@ func TestRealisClient_DrainHosts(t *testing.T) {
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1*time.Second,
50*time.Second)
assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
assert.Equal(t, map[string]bool{"agent-one": true}, hostResults)
assert.NoError(t, err)
t.Run("TestRealisClient_MonitorNontransitioned", func(t *testing.T) {
@ -556,7 +614,7 @@ func TestRealisClient_DrainHosts(t *testing.T) {
// Assert monitor returned an error that was not nil, and also a list of the non-transitioned hosts
assert.Error(t, err)
assert.Equal(t, map[string]bool{"localhost": true, "IMAGINARY_HOST": false}, hostResults)
assert.Equal(t, map[string]bool{"agent-one": true, "IMAGINARY_HOST": false}, hostResults)
})
t.Run("TestRealisClient_EndMaintenance", func(t *testing.T) {
@ -575,7 +633,7 @@ func TestRealisClient_DrainHosts(t *testing.T) {
}
func TestRealisClient_SLADrainHosts(t *testing.T) {
hosts := []string{"localhost"}
hosts := []string{"agent-one"}
policy := aurora.SlaPolicy{PercentageSlaPolicy: &aurora.PercentageSlaPolicy{Percentage: 50.0}}
_, err := r.SLADrainHosts(&policy, 30, hosts...)
@ -590,7 +648,7 @@ func TestRealisClient_SLADrainHosts(t *testing.T) {
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1*time.Second,
50*time.Second)
assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
assert.Equal(t, map[string]bool{"agent-one": true}, hostResults)
assert.NoError(t, err)
_, err = r.EndMaintenance(hosts...)
@ -603,6 +661,39 @@ func TestRealisClient_SLADrainHosts(t *testing.T) {
5*time.Second,
10*time.Second)
assert.NoError(t, err)
// SLADrainHosts falls back to the default policy when no policy is specified
_, err = r.SLADrainHosts(nil, 30, hosts...)
require.NoError(t, err)
hostResults, err = r.MonitorHostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1*time.Second,
50*time.Second)
assert.Equal(t, map[string]bool{"agent-one": true}, hostResults)
assert.NoError(t, err)
_, err = r.EndMaintenance(hosts...)
assert.NoError(t, err)
_, err = r.SLADrainHosts(&aurora.SlaPolicy{}, 30, hosts...)
require.NoError(t, err)
hostResults, err = r.MonitorHostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1*time.Second,
50*time.Second)
assert.Equal(t, map[string]bool{"agent-one": true}, hostResults)
assert.NoError(t, err)
_, err = r.EndMaintenance(hosts...)
assert.NoError(t, err)
}
// Test multiple go routines using a single connection
@ -639,6 +730,9 @@ func TestRealisClient_SessionThreadSafety(t *testing.T) {
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
success, err = r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 90*time.Second)
assert.True(t, success)
assert.NoError(t, err)
}()
}
@ -704,6 +798,9 @@ func TestRealisClient_PartitionPolicy(t *testing.T) {
RAM(64).
Disk(100).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0).
InstanceCount(2).
BatchSize(2).
PartitionPolicy(true, partitionDelay)
@ -722,6 +819,12 @@ func TestRealisClient_PartitionPolicy(t *testing.T) {
assert.NoError(t, err)
}
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
success, err := r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 90*time.Second)
assert.True(t, success)
assert.NoError(t, err)
}
func TestRealisClient_UpdateStrategies(t *testing.T) {
@ -734,7 +837,10 @@ func TestRealisClient_UpdateStrategies(t *testing.T) {
RAM(4).
Disk(10).
InstanceCount(6).
IsService(true)
IsService(true).
Production(false).
Tier("preemptible").
Priority(0)
// Needed to populate the task config correctly
assert.NoError(t, job.BuildThermosPayload())
@ -783,6 +889,10 @@ func TestRealisClient_UpdateStrategies(t *testing.T) {
assert.NoError(t, r.AbortJobUpdate(key, "Monitor timed out."))
}
assert.NoError(t, r.KillJob(strategy.jobUpdate.JobKey()))
success, err := r.MonitorInstances(strategy.jobUpdate.JobKey(), 0, 1*time.Second, 90*time.Second)
assert.True(t, success)
assert.NoError(t, err)
})
}
}
@ -792,14 +902,15 @@ func TestRealisClient_BatchAwareAutoPause(t *testing.T) {
job := realis.NewJob().
Environment("prod").
Role("vagrant").
Name("BatchAwareAutoPauseTest").
Name("batch_aware_auto_pause_test").
ThermosExecutor(thermosExec).
CPU(.01).
RAM(4).
Disk(10).
InstanceCount(6).
IsService(true)
updateGroups := []int32{1, 2, 3}
updateGroups := []int32{1, 3}
strategy := realis.JobUpdateFromAuroraTask(job.AuroraTask()).
VariableBatchStrategy(true, updateGroups...).
InstanceCount(6).
@ -812,14 +923,583 @@ func TestRealisClient_BatchAwareAutoPause(t *testing.T) {
key := *result.GetKey()
for i := range updateGroups {
curStep, mErr := r.MonitorAutoPausedUpdate(key, time.Second*5, time.Second*240)
curStep, mErr := r.MonitorAutoPausedUpdate(key, time.Second*5, time.Minute*5)
if mErr != nil {
fmt.Println(mErr)
// Update may already be in a terminal state so don't check for error
assert.NoError(t, r.AbortJobUpdate(key, "Monitor timed out."))
_ = r.AbortJobUpdate(key, "Monitor timed out.")
}
assert.Equal(t, i, curStep)
require.NoError(t, r.ResumeJobUpdate(key, "auto resuming test"))
if i != len(updateGroups)-1 {
require.NoError(t, err)
require.NoError(t, r.ResumeJobUpdate(key, "auto resuming test"))
}
}
assert.NoError(t, r.AbortJobUpdate(key, ""))
assert.NoError(t, r.KillJob(strategy.JobKey()))
success, err := r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 90*time.Second)
assert.True(t, success)
assert.NoError(t, err)
}
func TestRealisClient_GetJobSummary(t *testing.T) {
role := "vagrant"
env := "prod"
name := "test_get_job_summary"
// Create a single job
job := realis.NewJob().
Environment(env).
Role(role).
Name(name).
ThermosExecutor(thermosExec).
CPU(.25).
RAM(4).
Disk(10).
InstanceCount(3).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0)
err := r.CreateJob(job)
assert.NoError(t, err)
success, err := r.MonitorScheduleStatus(job.JobKey(),
job.GetInstanceCount(),
aurora.ACTIVE_STATES,
1*time.Second,
150*time.Second)
assert.True(t, success)
assert.NoError(t, err)
// get job summaries of the role
summary, err := r.GetJobSummary(role)
assert.NoError(t, err)
assert.NotNil(t, summary)
jobCount := 0
for _, s := range summary.Summaries {
jobKey := s.Job.TaskConfig.Job
if jobKey.Environment == env && jobKey.Name == name {
jobCount++
}
}
assert.Equal(t, 1, jobCount)
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
success, err = r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 90*time.Second)
assert.True(t, success)
assert.NoError(t, err)
}
func TestRealisClient_Offers(t *testing.T) {
var offers []realis.Offer
// Since offers are recycled, it can take a few tries to collect all of them.
i := 0
for ; len(offers) < 3 && i < 5; i++ {
offers, _ = r.Offers()
time.Sleep(5 * time.Second)
}
assert.NotEqual(t, i, 5)
}
func TestRealisClient_MaintenanceHosts(t *testing.T) {
offers, err := r.Offers()
assert.NoError(t, err)
for i := 0; i < len(offers); i++ {
_, err := r.DrainHosts(offers[i].Hostname)
assert.NoError(t, err)
hosts, err := r.MaintenanceHosts()
assert.NoError(t, err)
assert.Equal(t, i+1, len(hosts))
}
// clean up
for i := 0; i < len(offers); i++ {
_, err := r.EndMaintenance(offers[i].Hostname)
assert.NoError(t, err)
// Monitor the host returning to NONE mode after maintenance ends
_, err = r.MonitorHostMaintenance(
[]string{offers[i].Hostname},
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
5*time.Second,
10*time.Second)
assert.NoError(t, err)
}
}
func TestRealisClient_AvailOfferReport(t *testing.T) {
var offers []realis.Offer
i := 0
for ; len(offers) < 3 && i < 5; i++ {
offers, _ = r.Offers()
time.Sleep(5 * time.Second)
}
assert.NotEqual(t, i, 5)
capacity, err := r.AvailOfferReport()
assert.NoError(t, err)
// 2 groups for non-dedicated & dedicated
assert.Equal(t, 2, len(capacity))
// 4 resources: cpus, disk, mem, ports
assert.Equal(t, 4, len(capacity["non-dedicated"]))
}
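A sketch of the same capacity-report flow outside the tests, again assuming the v2 client constructor and the vagrant scheduler address:

package main

import (
	"fmt"
	"log"

	realis "github.com/aurora-scheduler/gorealis/v2"
)

func main() {
	client, err := realis.NewClient(realis.SchedulerUrl("http://192.168.33.7:8081"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// The report groups offers into "non-dedicated" and "dedicated", each
	// mapping resource names (cpus, mem, disk, ports) to available amounts.
	capacity, err := client.AvailOfferReport()
	if err != nil {
		log.Fatal(err)
	}
	for group, resources := range capacity {
		fmt.Println(group, resources)
	}
}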
func TestRealisClient_FitTasks(t *testing.T) {
var offers []realis.Offer
i := 0
for ; len(offers) < 3 && i < 5; i++ {
offers, _ = r.Offers()
time.Sleep(5 * time.Second)
}
assert.NotEqual(t, i, 5)
cpuPerOffer := 0.0
for _, r := range offers[0].Resources {
if r.Name == "cpus" {
cpuPerOffer = r.Scalar.Value
}
}
// make sure no offer has a running executor
for _, o := range offers {
assert.Empty(t, o.ExecutorIds)
}
validCpu := cpuPerOffer / 2
invalidCpu := cpuPerOffer + 1
gpu := int64(1)
tests := []struct {
message string
role string
request aurora.Resource
constraints []*aurora.Constraint
expected int64
isError bool
}{
{
message: "task with gpu request",
role: "vagrant",
request: aurora.Resource{
NumGpus: &gpu,
},
expected: 0,
isError: false,
},
{
message: "empty resource request",
role: "vagrant",
request: aurora.Resource{},
expected: -1,
isError: true,
},
{
message: "valid resource request",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
expected: 4,
isError: false,
},
{
message: "invalid cpu request",
role: "vagrant",
request: aurora.Resource{
NumCpus: &invalidCpu,
},
expected: 0,
isError: false,
},
{
message: "dedicated constraint",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "dedicated",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: false,
Values: []string{"vagrant/bar"},
},
},
},
},
expected: 2,
isError: false,
},
{
message: "dedicated constraint with unauthorized role",
role: "unauthorized",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "dedicated",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: false,
Values: []string{"vagrant/bar"},
},
},
},
},
expected: 0,
isError: false,
},
{
message: "value constraint on zone",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "zone",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: false,
Values: []string{"west"},
},
},
},
},
expected: 4,
isError: false,
},
{
message: "negative value constraint on zone",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "zone",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: true,
Values: []string{"west"},
},
},
},
},
expected: 0,
isError: false,
},
{
message: "negative value constraint on host",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "host",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: true,
Values: []string{"agent-one"},
},
},
},
},
expected: 2,
isError: false,
},
{
message: "value constraint on unavailable zone",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "zone",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: false,
Values: []string{"east"},
},
},
},
},
expected: 0,
isError: false,
},
{
message: "value constraint on unavailable attribute",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "os",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: false,
Values: []string{"windows"},
},
},
},
},
expected: 0,
isError: false,
},
{
message: "1 value constraint with 2 values",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "host",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: false,
Values: []string{"agent-one", "agent-two"},
},
},
},
},
expected: 4,
isError: false,
},
{
message: "2 value constraints",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "host",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: false,
Values: []string{"agent-one"},
},
},
},
{
Name: "rack",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: false,
Values: []string{"2"},
},
},
},
},
expected: 0,
isError: false,
},
{
message: "limit constraint on host",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "host",
Constraint: &aurora.TaskConstraint{
Limit: &aurora.LimitConstraint{
Limit: 1,
},
},
},
},
expected: 2,
isError: false,
},
{
message: "limit constraint on zone",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "zone",
Constraint: &aurora.TaskConstraint{
Limit: &aurora.LimitConstraint{
Limit: 1,
},
},
},
},
expected: 1,
isError: false,
},
{
message: "limit constraint on zone & host",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "host",
Constraint: &aurora.TaskConstraint{
Limit: &aurora.LimitConstraint{
Limit: 1,
},
},
},
{
Name: "zone",
Constraint: &aurora.TaskConstraint{
Limit: &aurora.LimitConstraint{
Limit: 1,
},
},
},
},
expected: 1,
isError: false,
},
{
message: "limit constraint on unavailable zone",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "gpu-host", // no host has gpu-host attribute
Constraint: &aurora.TaskConstraint{
Limit: &aurora.LimitConstraint{
Limit: 1,
},
},
},
},
expected: 0,
isError: false,
},
{
message: "limit & dedicated constraint",
role: "vagrant",
request: aurora.Resource{
NumCpus: &validCpu,
},
constraints: []*aurora.Constraint{
{
Name: "dedicated",
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: false,
Values: []string{"vagrant/bar"},
},
},
},
{
Name: "host",
Constraint: &aurora.TaskConstraint{
Limit: &aurora.LimitConstraint{
Limit: 1,
},
},
},
},
expected: 1,
isError: false,
},
}
for _, tc := range tests {
task := aurora.NewTaskConfig()
task.Resources = []*aurora.Resource{&tc.request}
task.Constraints = tc.constraints
task.Job = &aurora.JobKey{
Role: tc.role,
}
numTasks, err := r.FitTasks(task, offers)
if !tc.isError {
assert.NoError(t, err)
assert.Equal(t, tc.expected, numTasks, tc.message)
} else {
assert.Error(t, err)
}
}
}
func TestRealisClient_JobExists(t *testing.T) {
role := "vagrant"
env := "prod"
name := "test_job_exists"
// Create a good single job
job := realis.NewJob().
Environment(env).
Role(role).
Name(name).
ThermosExecutor(thermosExec).
CPU(.25).
RAM(4).
Disk(10).
InstanceCount(3).
IsService(true).
Production(false).
Tier("preemptible").
Priority(0)
// Check if job exists before creating
exists, err := r.JobExists(job.JobKey())
assert.NoError(t, err)
assert.False(t, exists)
err = r.CreateJob(job)
assert.NoError(t, err)
exists, err = r.JobExists(job.JobKey())
assert.NoError(t, err)
assert.True(t, exists)
// Create a single bad job
badJob := realis.NewJob().
Environment("prod").
Role("vagrant").
Name("executordoesntexist").
ExecutorName("idontexist").
ExecutorData("").
CPU(.25).
RAM(4).
Disk(10).
InstanceCount(1)
// Check if job exists before creating
exists, err = r.JobExists(badJob.JobKey())
assert.NoError(t, err)
assert.False(t, exists)
err = r.CreateJob(badJob)
assert.Error(t, err)
exists, err = r.JobExists(badJob.JobKey())
assert.NoError(t, err)
assert.False(t, exists)
}


@ -35,6 +35,10 @@ func ScheduleStatusResult(resp *aurora.Response) *aurora.ScheduleStatusResult_ {
}
func JobUpdateSummaries(resp *aurora.Response) []*aurora.JobUpdateSummary {
if resp == nil || resp.GetResult_() == nil || resp.GetResult_().GetGetJobUpdateSummariesResult_() == nil {
return nil
}
return resp.GetResult_().GetGetJobUpdateSummariesResult_().GetUpdateSummaries()
}

retry.go

@ -17,10 +17,7 @@ package realis
import (
"io"
"math/rand"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/apache/thrift/lib/go/thrift"
@ -29,9 +26,11 @@ import (
"github.com/pkg/errors"
)
// Backoff determines how the retry mechanism should react after each failure and how many failures it should
// tolerate.
type Backoff struct {
Duration time.Duration // the base duration
Factor float64 // Duration is multipled by factor each iteration
Factor float64 // Duration is multiplied by a factor each iteration
Jitter float64 // The amount of jitter applied each iteration
Steps int // Exit with error after this many steps
}
@ -53,18 +52,15 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration {
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
// Modified version of the Kubernetes exponential-backoff code.
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It checks the condition up to Steps times, increasing the wait by multiplying
// the previous duration by Factor.
// ExponentialBackoff is a modified version of the Kubernetes exponential-backoff code.
// It repeats a condition check with exponential backoff and checks the condition up to
// Steps times, increasing the wait by multiplying the previous duration by Factor.
//
// If Jitter is greater than zero, a random amount of each duration is added
// (between duration and duration*(1+jitter)).
//
// If the condition never returns true, ErrWaitTimeout is returned. Errors
// do not cause the function to return.
func ExponentialBackoff(backoff Backoff, logger Logger, condition ConditionFunc) error {
var err error
var ok bool
@ -98,10 +94,9 @@ func ExponentialBackoff(backoff Backoff, logger Logger, condition ConditionFunc)
// If the error is temporary, continue retrying.
if !IsTemporary(err) {
return err
} else {
// Print out the temporary error we experienced.
logger.Println(err)
}
// Print out the temporary error we experienced.
logger.Println(err)
}
}
@ -112,19 +107,28 @@ func ExponentialBackoff(backoff Backoff, logger Logger, condition ConditionFunc)
// Provide more information to the user wherever possible
if err != nil {
return newRetryError(errors.Wrap(err, "ran out of retries"), curStep)
} else {
return newRetryError(errors.New("ran out of retries"), curStep)
}
return newRetryError(errors.New("ran out of retries"), curStep)
}
type auroraThriftCall func() (resp *aurora.Response, err error)
// verifyOnTimeout defines the type of function that will be used to verify whether a Thrift call to the Scheduler
// made it to the scheduler or not. In general, these types of functions will have to interact with the scheduler
// through the very same Thrift API which previously encountered a time-out from the client.
// This means that the functions themselves should be kept to a minimum number of Thrift calls.
// It should also be noted that this is a best effort mechanism and
// is likely to fail for the same reasons that the original call failed.
type verifyOnTimeout func() (*aurora.Response, bool)
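To make the shape concrete, here is a hypothetical verifier; the method name and its JobExists-based check are illustrative only and not part of this change:

package realis

import (
	"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
)

// verifyJobCreated is a hypothetical example of a verifyOnTimeout closure:
// after a client-side timeout, it performs a single cheap read to decide
// whether the original write reached the scheduler, and mimics an OK
// response when it did.
func (c *Client) verifyJobCreated(key aurora.JobKey) verifyOnTimeout {
	return func() (*aurora.Response, bool) {
		exists, err := c.JobExists(key)
		if err != nil || !exists {
			return nil, false
		}
		// Mimicked response; the scheduler never produced this exact object.
		return &aurora.Response{ResponseCode: aurora.ResponseCode_OK}, true
	}
}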
// Duplicates the functionality of ExponentialBackoff but is specifically targeted towards Thrift calls.
func (c *Client) thriftCallWithRetries(returnOnTimeout bool, thriftCall auroraThriftCall) (*aurora.Response, error) {
func (c *Client) thriftCallWithRetries(returnOnTimeout bool, thriftCall auroraThriftCall,
verifyOnTimeout verifyOnTimeout) (*aurora.Response, error) {
var resp *aurora.Response
var clientErr error
var curStep int
var timeouts int
timeouts := 0
backoff := c.config.backoff
duration := backoff.Duration
@ -138,7 +142,10 @@ func (c *Client) thriftCallWithRetries(returnOnTimeout bool, thriftCall auroraTh
adjusted = Jitter(duration, backoff.Jitter)
}
c.logger.Printf("A retryable error occurred during thrift call, backing off for %v before retry %v\n", adjusted, curStep)
c.logger.Printf(
"A retryable error occurred during thrift call, backing off for %v before retry %v",
adjusted,
curStep)
time.Sleep(adjusted)
duration = time.Duration(float64(duration) * backoff.Factor)
@ -153,105 +160,132 @@ func (c *Client) thriftCallWithRetries(returnOnTimeout bool, thriftCall auroraTh
resp, clientErr = thriftCall()
c.logger.TracePrintf("Aurora Thrift Call ended resp: %v clientErr: %v\n", resp, clientErr)
c.logger.TracePrintf("Aurora Thrift Call ended resp: %v clientErr: %v", resp, clientErr)
}()
// Check if our thrift call is returning an error. This is a retryable event as we don't know
// if it was caused by network issues.
if clientErr != nil {
// Print out the error to the user
c.logger.Printf("Client Error: %v\n", clientErr)
c.logger.Printf("Client Error: %v", clientErr)
// Determine if error is a temporary URL error by going up the stack
e, ok := clientErr.(thrift.TTransportException)
if ok {
c.logger.DebugPrint("Encountered a transport exception")
temporary, timedout := isConnectionError(clientErr)
if !temporary && c.RealisConfig().failOnPermanentErrors {
return nil, errors.Wrap(clientErr, "permanent connection error")
}
// TODO(rdelvalle): Figure out a better way to obtain the error code as this is a very brittle solution
// 401 Unauthorized means the wrong username and password were provided
if strings.Contains(e.Error(), strconv.Itoa(http.StatusUnauthorized)) {
return nil, errors.Wrap(clientErr, "wrong username or password provided")
}
e, ok := e.Err().(*url.Error)
if ok {
// EOF error occurs when the server closes the read buffer of the client. This is common
// when the server is overloaded and should be retried. All other errors that are permanent
// will not be retried.
if e.Err != io.EOF && !e.Temporary() && c.RealisConfig().failOnPermanentErrors {
return nil, errors.Wrap(clientErr, "permanent connection error")
}
// Corner case where thrift payload was received by Aurora but connection timedout before Aurora was
// able to reply. In this case we will return whatever response was received and a TimedOut behaving
// error. Users can take special action on a timeout by using IsTimedout and reacting accordingly.
if e.Timeout() {
timeouts++
c.logger.DebugPrintf(
"Client closed connection (timedout) %d times before server responded,"+
" consider increasing connection timeout",
timeouts)
if returnOnTimeout {
return resp,
newTimedoutError(errors.New("client connection closed before server answer"))
}
}
}
// There exists a corner case where thrift payload was received by Aurora but
// connection timed out before Aurora was able to reply.
// Users can take special action on a timeout by using IsTimedout and reacting accordingly
// if they have configured the client to return on a timeout.
if timedout && returnOnTimeout {
return resp, newTimedoutError(errors.New("client connection closed before server answer"))
}
// In the future, reestablish connection should be able to check if it is actually possible
// to make a thrift call to Aurora. For now, a reconnect should always lead to a retry.
// Ignoring error due to the fact that an error should be retried regardless
_ = c.ReestablishConn()
} else {
// If there was no client error, but the response is nil, something went wrong.
// Ideally, we'll never encounter this but we're placing a safeguard here.
if resp == nil {
return nil, errors.New("response from aurora is nil")
reestablishErr := c.ReestablishConn()
if reestablishErr != nil {
c.logger.DebugPrintf("error re-establishing connection ", reestablishErr)
}
// Check Response Code from thrift and make a decision to continue retrying or not.
switch responseCode := resp.GetResponseCode(); responseCode {
// If users did not opt for a return on timeout in order to react to a timedout error,
// attempt to verify that the call made it to the scheduler after the connection was re-established.
if timedout {
timeouts++
c.logger.DebugPrintf(
"Client closed connection %d times before server responded, "+
"consider increasing connection timeout",
timeouts)
// If the thrift call succeeded, stop retrying
case aurora.ResponseCode_OK:
return resp, nil
// If the response code is transient, continue retrying
case aurora.ResponseCode_ERROR_TRANSIENT:
c.logger.Println("Aurora replied with Transient error code, retrying")
continue
// Failure scenarios, these indicate a bad payload or a bad clientConfig. Stop retrying.
case aurora.ResponseCode_INVALID_REQUEST,
aurora.ResponseCode_ERROR,
aurora.ResponseCode_AUTH_FAILED,
aurora.ResponseCode_JOB_UPDATING_ERROR:
c.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
return resp, errors.New(response.CombineMessage(resp))
// The only case that should fall down to here is a WARNING response code.
// It is currently not used as a response in the scheduler so it is unknown how to handle it.
default:
c.logger.DebugPrintf("unhandled response code %v received from Aurora\n", responseCode)
return nil, errors.Errorf("unhandled response code from Aurora %v", responseCode.String())
// Allow the caller to provide a function which checks whether the original call made it to the
// scheduler despite the client-side timeout.
if verifyOnTimeout != nil {
if verifyResp, ok := verifyOnTimeout(); ok {
c.logger.Print("verified that the call went through successfully after a client timeout")
// Response here might be different than the original as it is no longer constructed
// by the scheduler but mimicked.
// This is OK since the scheduler is very unlikely to change responses at this point in its
// development cycle but we must be careful to not return an incorrectly constructed response.
return verifyResp, nil
}
}
}
// Retry the thrift payload
continue
}
// If there was no client error, but the response is nil, something went wrong.
// Ideally, we'll never encounter this but we're placing a safeguard here.
if resp == nil {
return nil, errors.New("response from aurora is nil")
}
// Check Response Code from thrift and make a decision to continue retrying or not.
switch responseCode := resp.GetResponseCode(); responseCode {
// If the thrift call succeeded, stop retrying
case aurora.ResponseCode_OK:
return resp, nil
// If the response code is transient, continue retrying
case aurora.ResponseCode_ERROR_TRANSIENT:
c.logger.Println("Aurora replied with Transient error code, retrying")
continue
// Failure scenarios, these indicate a bad payload or a bad clientConfig. Stop retrying.
case aurora.ResponseCode_INVALID_REQUEST,
aurora.ResponseCode_ERROR,
aurora.ResponseCode_AUTH_FAILED,
aurora.ResponseCode_JOB_UPDATING_ERROR:
c.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
return resp, errors.New(response.CombineMessage(resp))
// The only case that should fall down to here is a WARNING response code.
// It is currently not used as a response in the scheduler so it is unknown how to handle it.
default:
c.logger.DebugPrintf("unhandled response code %v received from Aurora\n", responseCode)
return nil, errors.Errorf("unhandled response code from Aurora %v", responseCode.String())
}
}
c.logger.DebugPrintf("it took %v retries to complete this operation\n", curStep)
if curStep > 1 {
c.config.logger.Printf("retried this thrift call %d time(s)", curStep)
c.config.logger.Printf("this thrift call was retried %d time(s)", curStep)
}
// Provide more information to the user wherever possible.
if clientErr != nil {
return nil, newRetryError(errors.Wrap(clientErr, "ran out of retries, including latest error"), curStep)
} else {
return nil, newRetryError(errors.New("ran out of retries"), curStep)
}
return nil, newRetryError(errors.New("ran out of retries"), curStep)
}
// isConnectionError processes the error received by the client.
// The return values indicate whether this was determined to be a temporary error
// and whether it was determined to be a timeout error
func isConnectionError(err error) (bool, bool) {
// Determine if error is a temporary URL error by going up the stack
transportException, ok := err.(thrift.TTransportException)
if !ok {
return false, false
}
urlError, ok := transportException.Err().(*url.Error)
if !ok {
return false, false
}
// An EOF error occurs when the server closes the client's read buffer. This is common
// when the server is overloaded, so we consider it temporary.
// Any other error that the url.Error's Temporary() method does not report as temporary
// is considered permanent.
if urlError.Err != io.EOF && !urlError.Temporary() {
return false, false
}
return true, urlError.Timeout()
}


@ -1,4 +1,4 @@
#!/bin/bash
# Since we run our docker compose setup in bridge mode to be able to run on macOS, we have to launch a Docker container within the bridge network in order to avoid any routing issues.
docker run --rm -t -v $(pwd):/go/src/github.com/aurora-scheduler/gorealis --network gorealis_aurora_cluster golang:1.13-stretch go test -v github.com/aurora-scheduler/gorealis $@
docker run --rm -t -w /gorealis -v $GOPATH/pkg:/go/pkg -v $(pwd):/gorealis --network gorealis_aurora_cluster golang:1.17-buster go test -v github.com/aurora-scheduler/gorealis/v2 $@

task.go

@ -78,12 +78,17 @@ func TaskFromThrift(config *aurora.TaskConfig) *AuroraTask {
Role(config.Job.Role).
Name(config.Job.Name).
MaxFailure(config.MaxTaskFailures).
IsService(config.IsService)
IsService(config.IsService).
Priority(config.Priority)
if config.Tier != nil {
newTask.Tier(*config.Tier)
}
if config.Production != nil {
newTask.Production(*config.Production)
}
if config.ExecutorConfig != nil {
newTask.
ExecutorName(config.ExecutorConfig.Name).
@ -287,6 +292,17 @@ func (t *AuroraTask) IsService(isService bool) *AuroraTask {
return t
}
// Priority sets the task priority used for preemption or priority-based queueing
func (t *AuroraTask) Priority(priority int32) *AuroraTask {
t.task.Priority = priority
return t
}
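// Production sets whether this task counts as a production task, which draws from the role's production quota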
func (t *AuroraTask) Production(production bool) *AuroraTask {
t.task.Production = &production
return t
}
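A short builder sketch combining the two new setters with the existing chain; NewTask is assumed to be the exported task constructor in this package, and the values mirror the e2e tests:

package main

import (
	realis "github.com/aurora-scheduler/gorealis/v2"
)

func main() {
	// Non-production, preemptible task at the default priority.
	_ = realis.NewTask().
		Environment("prod").
		Role("vagrant").
		Name("priority_example").
		CPU(0.25).
		RAM(64).
		Disk(100).
		IsService(true).
		Production(false).
		Tier("preemptible").
		Priority(0)
}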
// Add a list of URIs with the same extract and cache configuration. Scheduler must have
// --enable_mesos_fetcher flag enabled. Currently there is no duplicate detection.
func (t *AuroraTask) AddURIs(extract bool, cache bool, values ...string) *AuroraTask {


@ -34,6 +34,8 @@ func TestAuroraTask_Clone(t *testing.T) {
RAM(643).
Disk(1000).
IsService(true).
Priority(1).
Production(false).
AddPorts(10).
Tier("preferred").
MaxFailure(23).

util.go

@ -40,7 +40,7 @@ func init() {
}
}
// TerminalJobUpdateStates returns a slice containing all the terminal states an update may end up in.
// TerminalUpdateStates returns a slice containing all the terminal states an update may be in.
// This is a function in order to avoid having a slice that can be accidentally mutated.
func TerminalUpdateStates() []aurora.JobUpdateStatus {
return []aurora.JobUpdateStatus{
@ -104,3 +104,23 @@ func calculateCurrentBatch(updatingInstances int32, batchSizes []int32) int {
}
return batchCount
}
func ResourcesToMap(resources []*aurora.Resource) map[string]float64 {
result := map[string]float64{}
for _, resource := range resources {
if resource.NumCpus != nil {
result["cpus"] += *resource.NumCpus
} else if resource.RamMb != nil {
result["mem"] += float64(*resource.RamMb)
} else if resource.DiskMb != nil {
result["disk"] += float64(*resource.DiskMb)
} else if resource.NamedPort != nil {
result["ports"]++
} else if resource.NumGpus != nil {
result["gpus"] += float64(*resource.NumGpus)
}
}
return result
}
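A small, self-contained sketch of ResourcesToMap with hand-built thrift resources:

package main

import (
	"fmt"

	realis "github.com/aurora-scheduler/gorealis/v2"
	"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
)

func main() {
	cpus := 1.5
	mem := int64(1024)

	// Scalar resources of the same name are summed into a single entry.
	resources := []*aurora.Resource{
		{NumCpus: &cpus},
		{RamMb: &mem},
	}
	fmt.Println(realis.ResourcesToMap(resources)) // map[cpus:1.5 mem:1024]
}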

zk.go

@ -16,6 +16,7 @@ package realis
import (
"encoding/json"
"math"
"strconv"
"strings"
"time"
@ -35,6 +36,18 @@ type ServiceInstance struct {
Status string `json:"status"`
}
type MesosAddress struct {
Hostname string `json:"hostname"`
IP string `json:"ip"`
Port uint16 `json:"port"`
}
// MesosInstance is defined for mesos json stored in ZK.
type MesosInstance struct {
Address MesosAddress `json:"address"`
Version string `json:"version"`
}
type zkConfig struct {
endpoints []string
path string
@ -176,3 +189,305 @@ func LeaderFromZKOpts(options ...ZKOpt) (string, error) {
return leaderURL, nil
}
// Retrieves current mesos leader from ZK with a custom configuration.
func MesosFromZKOpts(options ...ZKOpt) (string, error) {
var mesosURL string
// Load the default configuration for Zookeeper followed by overriding values with those provided by the caller.
config := &zkConfig{backoff: defaultBackoff, timeout: time.Second * 10, logger: NoopLogger{}}
for _, opt := range options {
opt(config)
}
if len(config.endpoints) == 0 {
return "", errors.New("no Zookeeper endpoints supplied")
}
if config.path == "" {
return "", errors.New("no Zookeeper path supplied")
}
// Create a closure that allows us to use the ExponentialBackoff function.
retryErr := ExponentialBackoff(config.backoff, config.logger, func() (bool, error) {
c, _, err := zk.Connect(config.endpoints, config.timeout, func(c *zk.Conn) { c.SetLogger(config.logger) })
if err != nil {
return false, NewTemporaryError(errors.Wrap(err, "Failed to connect to Zookeeper"))
}
defer c.Close()
// Open up descriptor for the ZK path given
children, _, _, err := c.ChildrenW(config.path)
if err != nil {
// Sentinel error check as there is no other way to check.
if err == zk.ErrInvalidPath {
return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", config.path)
}
return false,
NewTemporaryError(errors.Wrapf(err, "path %s doesn't exist on Zookeeper ", config.path))
}
// Search for the leader through all the children in the given path
minScore := math.MaxInt64
var mesosInstance MesosInstance
for _, child := range children {
// Master nodes register children prefixed with json.info_; the leader is the one with the smallest sequence number.
if strings.HasPrefix(child, "json.info_") {
strs := strings.Split(child, "_")
if len(strs) < 2 {
config.logger.Printf("Zk node %v/%v's name is malformed.", config.path, child)
continue
}
score, err := strconv.Atoi(strs[1])
if err != nil {
return false, NewTemporaryError(errors.Wrap(err, "unable to read the zk node for Mesos."))
}
// get the leader from the child with the smallest score.
if score < minScore {
minScore = score
childPath := config.path + "/" + child
data, _, err := c.Get(childPath)
if err != nil {
if err == zk.ErrInvalidPath {
return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", childPath)
}
return false, NewTemporaryError(errors.Wrap(err, "error fetching contents of leader"))
}
err = json.Unmarshal([]byte(data), &mesosInstance)
if err != nil {
config.logger.Printf("%s", err)
return false,
NewTemporaryError(errors.Wrap(err, "unable to unmarshal contents of leader"))
}
mesosURL = mesosInstance.Address.IP + ":" + strconv.Itoa(int(mesosInstance.Address.Port))
}
}
}
if len(mesosURL) > 0 {
return true, nil
}
// Leader data might not be available yet, try to fetch again.
return false, NewTemporaryError(errors.New("no leader found"))
})
if retryErr != nil {
config.logger.Printf("Failed to determine leader after %v attempts", config.backoff.Steps)
return "", retryErr
}
return mesosURL, nil
}
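A usage sketch for the new lookup; the /mesos path is the conventional Mesos master ZK path and an assumption here, as is the vagrant Zookeeper endpoint:

package main

import (
	"fmt"
	"log"

	realis "github.com/aurora-scheduler/gorealis/v2"
)

func main() {
	// "/mesos" is assumed; adjust both values for your cluster.
	leader, err := realis.MesosFromZKOpts(
		realis.ZKEndpoints("192.168.33.2:2181"),
		realis.ZKPath("/mesos"),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mesos leader:", leader) // ip:port, e.g. 192.168.33.7:5050
}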
// Retrieves current Aurora master nodes from ZK.
func MasterNodesFromZK(cluster Cluster) (map[string][]string, error) {
return MasterNodesFromZKOpts(ZKEndpoints(strings.Split(cluster.ZK, ",")...), ZKPath(cluster.SchedZKPath))
}
// Retrieves current Aurora master nodes/leader from ZK with a custom configuration.
func MasterNodesFromZKOpts(options ...ZKOpt) (map[string][]string, error) {
result := make(map[string][]string)
// Load the default configuration for Zookeeper followed by overriding values with those provided by the caller.
config := &zkConfig{backoff: defaultBackoff, timeout: time.Second * 10, logger: NoopLogger{}}
for _, opt := range options {
opt(config)
}
if len(config.endpoints) == 0 {
return nil, errors.New("no Zookeeper endpoints supplied")
}
if config.path == "" {
return nil, errors.New("no Zookeeper path supplied")
}
// Create a closure that allows us to use the ExponentialBackoff function.
retryErr := ExponentialBackoff(config.backoff, config.logger, func() (bool, error) {
c, _, err := zk.Connect(config.endpoints, config.timeout, func(c *zk.Conn) { c.SetLogger(config.logger) })
if err != nil {
return false, NewTemporaryError(errors.Wrap(err, "Failed to connect to Zookeeper"))
}
defer c.Close()
// Open up descriptor for the ZK path given
children, _, _, err := c.ChildrenW(config.path)
if err != nil {
// Sentinel error check as there is no other way to check.
if err == zk.ErrInvalidPath {
return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", config.path)
}
return false,
NewTemporaryError(errors.Wrapf(err, "path %s doesn't exist on Zookeeper ", config.path))
}
// Get all the master nodes through all the children in the given path
serviceInst := new(ServiceInstance)
var hosts []string
for _, child := range children {
childPath := config.path + "/" + child
data, _, err := c.Get(childPath)
if err != nil {
if err == zk.ErrInvalidPath {
return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", childPath)
}
return false, NewTemporaryError(errors.Wrap(err, "error fetching contents of leader"))
}
// Only the leader is stored in JSON format, so member_ children are parsed differently from the rest.
if strings.HasPrefix(child, "member_") {
err = json.Unmarshal([]byte(data), &serviceInst)
if err != nil {
return false,
NewTemporaryError(errors.Wrap(err, "unable to unmarshal contents of leader"))
}
// Should only be one endpoint.
// This should never be encountered as it would indicate Aurora
// writing bad info into Zookeeper but is kept here as a safety net.
if len(serviceInst.AdditionalEndpoints) > 1 {
return false,
NewTemporaryError(
errors.New("ambiguous endpoints in json blob, Aurora wrote bad info to ZK"))
}
for _, v := range serviceInst.AdditionalEndpoints {
result["leader"] = append(result["leader"], v.Host)
}
} else {
// data is not in a json format
hosts = append(hosts, string(data))
}
}
result["masterNodes"] = hosts
// Master nodes data might not be available yet, try to fetch again.
if len(result["masterNodes"]) == 0 {
return false, NewTemporaryError(errors.New("no master nodes found"))
}
return true, nil
})
if retryErr != nil {
config.logger.Printf("Failed to get master nodes after %v attempts", config.backoff.Steps)
return nil, retryErr
}
return result, nil
}
// Retrieves current Mesos master nodes from ZK.
func MesosMasterNodesFromZK(cluster Cluster) (map[string][]string, error) {
return MesosMasterNodesFromZKOpts(ZKEndpoints(strings.Split(cluster.ZK, ",")...), ZKPath(cluster.MesosZKPath))
}
// Retrieves current mesos master nodes/leader from ZK with a custom configuration.
func MesosMasterNodesFromZKOpts(options ...ZKOpt) (map[string][]string, error) {
result := make(map[string][]string)
// Load the default configuration for Zookeeper followed by overriding values with those provided by the caller.
config := &zkConfig{backoff: defaultBackoff, timeout: time.Second * 10, logger: NoopLogger{}}
for _, opt := range options {
opt(config)
}
if len(config.endpoints) == 0 {
return nil, errors.New("no Zookeeper endpoints supplied")
}
if config.path == "" {
return nil, errors.New("no Zookeeper path supplied")
}
// Create a closure that allows us to use the ExponentialBackoff function.
retryErr := ExponentialBackoff(config.backoff, config.logger, func() (bool, error) {
c, _, err := zk.Connect(config.endpoints, config.timeout, func(c *zk.Conn) { c.SetLogger(config.logger) })
if err != nil {
return false, NewTemporaryError(errors.Wrap(err, "Failed to connect to Zookeeper"))
}
defer c.Close()
// Open up descriptor for the ZK path given
children, _, _, err := c.ChildrenW(config.path)
if err != nil {
// Sentinel error check as there is no other way to check.
if err == zk.ErrInvalidPath {
return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", config.path)
}
return false,
NewTemporaryError(errors.Wrapf(err, "path %s doesn't exist on Zookeeper ", config.path))
}
// Get all the master nodes through all the children in the given path
minScore := math.MaxInt64
var mesosInstance MesosInstance
var hosts []string
for _, child := range children {
// Only the master nodes will start with json.info_
if strings.HasPrefix(child, "json.info_") {
strs := strings.Split(child, "_")
if len(strs) < 2 {
config.logger.Printf("Zk node %v/%v's name is malformed.", config.path, child)
continue
}
score, err := strconv.Atoi(strs[1])
if err != nil {
return false, NewTemporaryError(errors.Wrap(err, "unable to read the zk node for Mesos."))
}
childPath := config.path + "/" + child
data, _, err := c.Get(childPath)
if err != nil {
if err == zk.ErrInvalidPath {
return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", childPath)
}
return false, NewTemporaryError(errors.Wrap(err, "error fetching contents of leader"))
}
err = json.Unmarshal([]byte(data), &mesosInstance)
if err != nil {
config.logger.Printf("%s", err)
return false,
NewTemporaryError(errors.Wrap(err, "unable to unmarshal contents of leader"))
}
// Collect the hostname of every master node.
// Use the hostname instead of the IP to stay consistent with the Aurora master nodes.
hosts = append(hosts, mesosInstance.Address.Hostname)
// get the leader from the child with the smallest score.
if score < minScore {
minScore = score
result["leader"] = append(result["leader"], mesosInstance.Address.Hostname)
}
}
}
result["masterNodes"] = hosts
// Master nodes data might not be available yet, try to fetch again.
if len(result["masterNodes"]) == 0 {
return false, NewTemporaryError(errors.New("no mesos master nodes found"))
}
return true, nil
})
if retryErr != nil {
config.logger.Printf("Failed to get mesos master nodes after %v attempts", config.backoff.Steps)
return nil, retryErr
}
return result, nil
}