Compare commits

...

24 commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Tan N. Le | db10285368 | Update CHANGELOG.md | 2022-10-12 21:47:07 -07:00 |
| shivrsrivastava | 8454a6ebf3 | Adding priority to the task () | 2022-10-12 21:46:07 -07:00 |
| Tan N. Le | c318042e96 | release 1.24.0 () | 2021-11-09 09:00:35 -08:00 |
| Tan N. Le | db9bebb802 | enable default sla for slaDrain () | 2021-11-01 18:17:49 -07:00 |
| Renán I. Del Valle | fff2c16751 | Enabling code analysis | 2021-09-01 10:09:11 -07:00 |
| Renán I. Del Valle | c59d01ab51 | Changes Travis CI badge to Github Actions badge () | 2021-08-06 18:16:06 -07:00 |
| Renán I. Del Valle | 62df98a3c8 | Bug fix for auto paused update monitor (). Returns success if the update has finished updating successfully. | 2021-08-06 16:02:52 -07:00 |
| Renán I. Del Valle | 5c39a23eb2 | Enable Github Actions for PRs. Run CI on pull requests and when the branch is pushed. | 2021-08-06 15:52:46 -07:00 |
| Renán I. Del Valle | dbc396b0db | Disables Travis CI (). Travis CI is no longer needed as we have migrated to Github Actions. | 2021-08-06 10:58:34 -07:00 |
| Renán I. Del Valle | 86eb045808 | Adds go.sum and removes dep files () | 2021-08-06 10:57:45 -07:00 |
| Renán I. Del Valle | c7e309f421 | Actions fix (). Moving main.yml to the right place. | 2021-08-06 10:06:12 -07:00 |
| Renán I. Del Valle | 49877b7d41 | Adds support for running CI on github actions. () | 2021-08-06 10:00:57 -07:00 |
| Renán I. Del Valle | 82b40a53f0 | Add verification to retry mechanism (). CreateJob, CreateService, and StartJobUpdate now include a rudimentary verification function to check if the call made it to the Aurora Scheduler when the client experiences a timeout. | 2021-05-11 13:37:23 -07:00 |
| Renán I. Del Valle | a9d99067ee | Documentation fix (). Fixes documentation so that it is more compliant with godoc format. | 2021-04-29 10:48:43 -07:00 |
| Renán Del Valle | e7f9c0cba9 | Bumping up version to 1.23.1 | 2021-02-25 17:59:02 -08:00 |
| Renán Del Valle | fbaf218dfb | Preparing release notes for 1.23.0 | 2021-02-25 17:59:02 -08:00 |
| Renán I. Del Valle | 6a1cf25c87 | Upgrading Mesos to 1.7.2 and Aurora Scheduler to 0.23.0 () | 2021-02-25 17:55:42 -08:00 |
| Renán Del Valle | 4aaec87d32 | Bumping up version to 1.23.0 | 2021-02-25 17:34:48 -08:00 |
| Renán Del Valle | 895d810b6c | Releasing Version 1.22.5 | 2021-02-25 17:34:48 -08:00 |
| Renán I. Del Valle | 511e76a0ac | Upgrading to Thrift 0.14.0 (). Upgrading thrift to 0.14.0 in order to pick up bug fixes, including the fix for trying to write to closed connections. | 2021-02-25 16:37:46 -08:00 |
| Renan DelValle | 8be66da88a | Bumping up go version to 1.15 and removing v2 tests from Travis CI config file. | 2020-09-28 11:13:29 -07:00 |
| Renan DelValle | 6d20f347f7 | Update latest version gorealis has been tested against. | 2020-09-28 10:32:36 -07:00 |
| Renan DelValle | 74b12c36b1 | Adding version to README and changing badge to point to the right place. | 2020-09-28 10:32:13 -07:00 |
| Renan DelValle | 269e0208c1 | Change travis CI configuration to use main branch. | 2020-09-28 10:30:48 -07:00 |
29 changed files with 10974 additions and 7597 deletions
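Commit 82b40a53f0 ("Add verification to retry mechanism") in the list above describes a behavior change rather than showing its code: when CreateJob, CreateService, or StartJobUpdate times out, the client first checks whether the call actually reached the Aurora Scheduler before retrying. The snippet below is only a minimal sketch of that general pattern; the package, function, and error names are hypothetical and are not gorealis's actual implementation.

```go
package retrysketch

import (
	"errors"
	"time"
)

// errTimeout is a stand-in for the client-side timeout error.
var errTimeout = errors.New("timed out waiting for scheduler response")

// callWithVerification retries call() a fixed number of times. After a
// timeout it runs verify() first, so a request that already reached the
// scheduler is not blindly re-sent.
func callWithVerification(call func() error, verify func() (bool, error), attempts int, backoff time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = call(); err == nil {
			return nil
		}
		if errors.Is(err, errTimeout) {
			// The scheduler may have applied the change even though the
			// client never saw the response.
			if applied, verr := verify(); verr == nil && applied {
				return nil
			}
		}
		time.Sleep(backoff)
	}
	return err
}
```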

.auroraversion

@ -1 +1 @@
0.22.0
0.23.0

.github/main.yml (new file, 25 lines added)

@ -0,0 +1,25 @@
name: CI
on: [push]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup Go for use with actions
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Install goimports
run: go get golang.org/x/tools/cmd/goimports
- name: Set env with list of directories in repo containin go code
run: echo GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/") >> $GITHUB_ENV
- name: Run goimports check
run: test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
- name: Create aurora/mesos docker cluster
run: docker-compose up -d
- name: Run tests
run: go test -timeout 35m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis

.github/workflows/codeql-analysis.yml (new file, 57 lines added)

@ -0,0 +1,57 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ main ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
schedule:
- cron: '34 4 * * 3'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
- run: go build examples/client.go
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

.github/workflows/main.yml (new file, 30 lines added)

@ -0,0 +1,30 @@
name: CI
on:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup Go for use with actions
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Install goimports
run: go get golang.org/x/tools/cmd/goimports
- name: Set env with list of directories in repo containin go code
run: echo GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/") >> $GITHUB_ENV
- name: Run goimports check
run: test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
- name: Create aurora/mesos docker cluster
run: docker-compose up -d
- name: Run tests
run: go test -timeout 35m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis

.travis.yml (deleted)

@ -1,34 +0,0 @@
os: linux
dist: xenial
language: go
branches:
only:
- master
- master-v2.0
- future
go:
- "1.14.x"
env:
global:
- GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/")
services:
- docker
before_install:
- go get golang.org/x/tools/cmd/goimports
- test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
install:
- go mod download
- docker-compose up -d
script:
- go test -timeout 30m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis
after_success:
- bash <(curl -s https://codecov.io/bash)

CHANGELOG.md

@ -1,4 +1,37 @@
1.22.1 (unreleased)
1.25.1 (unreleased)
1.25.0
* Add priority api
1.24.0
* enable default sla for slaDrain
* Changes Travis CI badge to Github Actions badge
* Bug fix for auto paused update monitor
* Adds support for running CI on github actions
1.23.0
* First release tested against Aurora Scheduler 0.23.0
1.22.5
* Upgrading to thrift 0.14.0
1.22.4
* Updates which result in a no-op now return a response value so that the caller may analyze it to determine what happened
1.22.3
* Contains a monitor timeout fix. Previously an error was left unchecked, which meant a specific monitor timing out was not handled properly.
1.22.2
* Bug fix: Change in retry mechanism created a deadlock. This release reverts that particular change.
1.22.1
* Adding safeguards against setting multiple constraints with the same name for a single task.

Gopkg.lock (generated file, deleted; 64 lines removed)

@ -1,64 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:89696c38cec777120b8b1bb5e2d363d655cf2e1e7d8c851919aaa0fd576d9b86"
name = "github.com/apache/thrift"
packages = ["lib/go/thrift"]
pruneopts = ""
revision = "384647d290e2e4a55a14b1b7ef1b7e66293a2c33"
version = "v0.12.0"
[[projects]]
digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = ""
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
digest = "1:df48fb76fb2a40edea0c9b3d960bc95e326660d82ff1114e1f88001f7a236b40"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = ""
revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
[[projects]]
digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = ""
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:78bea5e26e82826dacc5fd64a1013a6711b7075ec8072819b89e6ad76cb8196d"
name = "github.com/samuel/go-zookeeper"
packages = ["zk"]
pruneopts = ""
revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
[[projects]]
digest = "1:381bcbeb112a51493d9d998bbba207a529c73dbb49b3fd789e48c63fac1f192c"
name = "github.com/stretchr/testify"
packages = [
"assert",
"require",
]
pruneopts = ""
revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
version = "v1.3.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/apache/thrift/lib/go/thrift",
"github.com/pkg/errors",
"github.com/samuel/go-zookeeper/zk",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/require",
]
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (deleted)

@ -1,16 +0,0 @@
[[constraint]]
name = "github.com/apache/thrift"
version = "0.12.0"
[[constraint]]
name = "github.com/pkg/errors"
revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
[[constraint]]
name = "github.com/samuel/go-zookeeper"
revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.3.0"

README.md

@ -1,6 +1,8 @@
# gorealis [![GoDoc](https://godoc.org/github.com/paypal/gorealis?status.svg)](https://godoc.org/github.com/paypal/gorealis) [![Build Status](https://travis-ci.org/paypal/gorealis.svg?branch=master)](https://travis-ci.org/paypal/gorealis) [![codecov](https://codecov.io/gh/paypal/gorealis/branch/master/graph/badge.svg)](https://codecov.io/gh/paypal/gorealis)
# gorealis [![GoDoc](https://godoc.org/github.com/paypal/gorealis?status.svg)](https://godoc.org/github.com/paypal/gorealis) ![CI Build Status](https://github.com/paypal/gorealis/actions/workflows/main.yml/badge.svg) [![codecov](https://codecov.io/gh/paypal/gorealis/branch/main/graph/badge.svg)](https://codecov.io/gh/paypal/gorealis)
Go library for interacting with [Aurora Scheduler](https://github.com/aurora-scheduler/aurora).
Version 1 of Go library for interacting with [Aurora Scheduler](https://github.com/aurora-scheduler/aurora).
Version 2 of this library can be found [here](https://github.com/aurora-scheduler/gorealis).
### Aurora version compatibility
Please see [.auroraversion](./.auroraversion) to see the latest Aurora version against which this

docker-compose.yml

@ -14,7 +14,7 @@ services:
ipv4_address: 192.168.33.2
master:
image: rdelvalle/mesos-master:1.6.2
image: aurorascheduler/mesos-master:1.7.2
restart: on-failure
ports:
- "5050:5050"
@ -32,7 +32,7 @@ services:
- zk
agent-one:
image: rdelvalle/mesos-agent:1.6.2
image: aurorascheduler/mesos-agent:1.7.2
pid: host
restart: on-failure
ports:
@ -57,7 +57,7 @@ services:
- zk
agent-two:
image: rdelvalle/mesos-agent:1.6.2
image: aurorascheduler/mesos-agent:1.7.2
pid: host
restart: on-failure
ports:
@ -82,7 +82,7 @@ services:
- zk
aurora-one:
image: rdelvalle/aurora:0.22.0
image: aurorascheduler/scheduler:0.23.0
pid: host
ports:
- "8081:8081"


@ -1,5 +1,4 @@
// Autogenerated by Thrift Compiler (0.13.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package aurora


@ -1,13 +1,12 @@
// Autogenerated by Thrift Compiler (0.13.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package aurora
import(
"bytes"
"context"
"reflect"
"fmt"
"time"
"github.com/apache/thrift/lib/go/thrift"
)
@ -15,7 +14,7 @@ import(
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = reflect.DeepEqual
var _ = time.Now
var _ = bytes.Equal
const AURORA_EXECUTOR_NAME = "AuroraExecutor"

(File diff suppressed because it is too large.)


@ -1,5 +1,4 @@
// Autogenerated by Thrift Compiler (0.13.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package main
@ -196,19 +195,19 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
arg355 := flag.Arg(2)
mbTrans356 := thrift.NewTMemoryBufferLen(len(arg355))
defer mbTrans356.Close()
_, err357 := mbTrans356.WriteString(arg355)
if err357 != nil {
arg405 := flag.Arg(2)
mbTrans406 := thrift.NewTMemoryBufferLen(len(arg405))
defer mbTrans406.Close()
_, err407 := mbTrans406.WriteString(arg405)
if err407 != nil {
Usage()
return
}
factory358 := thrift.NewTJSONProtocolFactory()
jsProt359 := factory358.GetProtocol(mbTrans356)
factory408 := thrift.NewTJSONProtocolFactory()
jsProt409 := factory408.GetProtocol(mbTrans406)
argvalue1 := aurora.NewResourceAggregate()
err360 := argvalue1.Read(jsProt359)
if err360 != nil {
err410 := argvalue1.Read(context.Background(), jsProt409)
if err410 != nil {
Usage()
return
}
@ -264,19 +263,19 @@ func main() {
fmt.Fprintln(os.Stderr, "QueryRecovery requires 1 args")
flag.Usage()
}
arg363 := flag.Arg(1)
mbTrans364 := thrift.NewTMemoryBufferLen(len(arg363))
defer mbTrans364.Close()
_, err365 := mbTrans364.WriteString(arg363)
if err365 != nil {
arg413 := flag.Arg(1)
mbTrans414 := thrift.NewTMemoryBufferLen(len(arg413))
defer mbTrans414.Close()
_, err415 := mbTrans414.WriteString(arg413)
if err415 != nil {
Usage()
return
}
factory366 := thrift.NewTJSONProtocolFactory()
jsProt367 := factory366.GetProtocol(mbTrans364)
factory416 := thrift.NewTJSONProtocolFactory()
jsProt417 := factory416.GetProtocol(mbTrans414)
argvalue0 := aurora.NewTaskQuery()
err368 := argvalue0.Read(jsProt367)
if err368 != nil {
err418 := argvalue0.Read(context.Background(), jsProt417)
if err418 != nil {
Usage()
return
}
@ -289,19 +288,19 @@ func main() {
fmt.Fprintln(os.Stderr, "DeleteRecoveryTasks requires 1 args")
flag.Usage()
}
arg369 := flag.Arg(1)
mbTrans370 := thrift.NewTMemoryBufferLen(len(arg369))
defer mbTrans370.Close()
_, err371 := mbTrans370.WriteString(arg369)
if err371 != nil {
arg419 := flag.Arg(1)
mbTrans420 := thrift.NewTMemoryBufferLen(len(arg419))
defer mbTrans420.Close()
_, err421 := mbTrans420.WriteString(arg419)
if err421 != nil {
Usage()
return
}
factory372 := thrift.NewTJSONProtocolFactory()
jsProt373 := factory372.GetProtocol(mbTrans370)
factory422 := thrift.NewTJSONProtocolFactory()
jsProt423 := factory422.GetProtocol(mbTrans420)
argvalue0 := aurora.NewTaskQuery()
err374 := argvalue0.Read(jsProt373)
if err374 != nil {
err424 := argvalue0.Read(context.Background(), jsProt423)
if err424 != nil {
Usage()
return
}
@ -330,19 +329,19 @@ func main() {
fmt.Fprintln(os.Stderr, "StartMaintenance requires 1 args")
flag.Usage()
}
arg375 := flag.Arg(1)
mbTrans376 := thrift.NewTMemoryBufferLen(len(arg375))
defer mbTrans376.Close()
_, err377 := mbTrans376.WriteString(arg375)
if err377 != nil {
arg425 := flag.Arg(1)
mbTrans426 := thrift.NewTMemoryBufferLen(len(arg425))
defer mbTrans426.Close()
_, err427 := mbTrans426.WriteString(arg425)
if err427 != nil {
Usage()
return
}
factory378 := thrift.NewTJSONProtocolFactory()
jsProt379 := factory378.GetProtocol(mbTrans376)
factory428 := thrift.NewTJSONProtocolFactory()
jsProt429 := factory428.GetProtocol(mbTrans426)
argvalue0 := aurora.NewHosts()
err380 := argvalue0.Read(jsProt379)
if err380 != nil {
err430 := argvalue0.Read(context.Background(), jsProt429)
if err430 != nil {
Usage()
return
}
@ -355,19 +354,19 @@ func main() {
fmt.Fprintln(os.Stderr, "DrainHosts requires 1 args")
flag.Usage()
}
arg381 := flag.Arg(1)
mbTrans382 := thrift.NewTMemoryBufferLen(len(arg381))
defer mbTrans382.Close()
_, err383 := mbTrans382.WriteString(arg381)
if err383 != nil {
arg431 := flag.Arg(1)
mbTrans432 := thrift.NewTMemoryBufferLen(len(arg431))
defer mbTrans432.Close()
_, err433 := mbTrans432.WriteString(arg431)
if err433 != nil {
Usage()
return
}
factory384 := thrift.NewTJSONProtocolFactory()
jsProt385 := factory384.GetProtocol(mbTrans382)
factory434 := thrift.NewTJSONProtocolFactory()
jsProt435 := factory434.GetProtocol(mbTrans432)
argvalue0 := aurora.NewHosts()
err386 := argvalue0.Read(jsProt385)
if err386 != nil {
err436 := argvalue0.Read(context.Background(), jsProt435)
if err436 != nil {
Usage()
return
}
@ -380,19 +379,19 @@ func main() {
fmt.Fprintln(os.Stderr, "MaintenanceStatus requires 1 args")
flag.Usage()
}
arg387 := flag.Arg(1)
mbTrans388 := thrift.NewTMemoryBufferLen(len(arg387))
defer mbTrans388.Close()
_, err389 := mbTrans388.WriteString(arg387)
if err389 != nil {
arg437 := flag.Arg(1)
mbTrans438 := thrift.NewTMemoryBufferLen(len(arg437))
defer mbTrans438.Close()
_, err439 := mbTrans438.WriteString(arg437)
if err439 != nil {
Usage()
return
}
factory390 := thrift.NewTJSONProtocolFactory()
jsProt391 := factory390.GetProtocol(mbTrans388)
factory440 := thrift.NewTJSONProtocolFactory()
jsProt441 := factory440.GetProtocol(mbTrans438)
argvalue0 := aurora.NewHosts()
err392 := argvalue0.Read(jsProt391)
if err392 != nil {
err442 := argvalue0.Read(context.Background(), jsProt441)
if err442 != nil {
Usage()
return
}
@ -405,19 +404,19 @@ func main() {
fmt.Fprintln(os.Stderr, "EndMaintenance requires 1 args")
flag.Usage()
}
arg393 := flag.Arg(1)
mbTrans394 := thrift.NewTMemoryBufferLen(len(arg393))
defer mbTrans394.Close()
_, err395 := mbTrans394.WriteString(arg393)
if err395 != nil {
arg443 := flag.Arg(1)
mbTrans444 := thrift.NewTMemoryBufferLen(len(arg443))
defer mbTrans444.Close()
_, err445 := mbTrans444.WriteString(arg443)
if err445 != nil {
Usage()
return
}
factory396 := thrift.NewTJSONProtocolFactory()
jsProt397 := factory396.GetProtocol(mbTrans394)
factory446 := thrift.NewTJSONProtocolFactory()
jsProt447 := factory446.GetProtocol(mbTrans444)
argvalue0 := aurora.NewHosts()
err398 := argvalue0.Read(jsProt397)
if err398 != nil {
err448 := argvalue0.Read(context.Background(), jsProt447)
if err448 != nil {
Usage()
return
}
@ -430,42 +429,42 @@ func main() {
fmt.Fprintln(os.Stderr, "SlaDrainHosts requires 3 args")
flag.Usage()
}
arg399 := flag.Arg(1)
mbTrans400 := thrift.NewTMemoryBufferLen(len(arg399))
defer mbTrans400.Close()
_, err401 := mbTrans400.WriteString(arg399)
if err401 != nil {
arg449 := flag.Arg(1)
mbTrans450 := thrift.NewTMemoryBufferLen(len(arg449))
defer mbTrans450.Close()
_, err451 := mbTrans450.WriteString(arg449)
if err451 != nil {
Usage()
return
}
factory402 := thrift.NewTJSONProtocolFactory()
jsProt403 := factory402.GetProtocol(mbTrans400)
factory452 := thrift.NewTJSONProtocolFactory()
jsProt453 := factory452.GetProtocol(mbTrans450)
argvalue0 := aurora.NewHosts()
err404 := argvalue0.Read(jsProt403)
if err404 != nil {
err454 := argvalue0.Read(context.Background(), jsProt453)
if err454 != nil {
Usage()
return
}
value0 := argvalue0
arg405 := flag.Arg(2)
mbTrans406 := thrift.NewTMemoryBufferLen(len(arg405))
defer mbTrans406.Close()
_, err407 := mbTrans406.WriteString(arg405)
if err407 != nil {
arg455 := flag.Arg(2)
mbTrans456 := thrift.NewTMemoryBufferLen(len(arg455))
defer mbTrans456.Close()
_, err457 := mbTrans456.WriteString(arg455)
if err457 != nil {
Usage()
return
}
factory408 := thrift.NewTJSONProtocolFactory()
jsProt409 := factory408.GetProtocol(mbTrans406)
factory458 := thrift.NewTJSONProtocolFactory()
jsProt459 := factory458.GetProtocol(mbTrans456)
argvalue1 := aurora.NewSlaPolicy()
err410 := argvalue1.Read(jsProt409)
if err410 != nil {
err460 := argvalue1.Read(context.Background(), jsProt459)
if err460 != nil {
Usage()
return
}
value1 := argvalue1
argvalue2, err411 := (strconv.ParseInt(flag.Arg(3), 10, 64))
if err411 != nil {
argvalue2, err461 := (strconv.ParseInt(flag.Arg(3), 10, 64))
if err461 != nil {
Usage()
return
}
@ -486,19 +485,19 @@ func main() {
fmt.Fprintln(os.Stderr, "TriggerExplicitTaskReconciliation requires 1 args")
flag.Usage()
}
arg412 := flag.Arg(1)
mbTrans413 := thrift.NewTMemoryBufferLen(len(arg412))
defer mbTrans413.Close()
_, err414 := mbTrans413.WriteString(arg412)
if err414 != nil {
arg462 := flag.Arg(1)
mbTrans463 := thrift.NewTMemoryBufferLen(len(arg462))
defer mbTrans463.Close()
_, err464 := mbTrans463.WriteString(arg462)
if err464 != nil {
Usage()
return
}
factory415 := thrift.NewTJSONProtocolFactory()
jsProt416 := factory415.GetProtocol(mbTrans413)
factory465 := thrift.NewTJSONProtocolFactory()
jsProt466 := factory465.GetProtocol(mbTrans463)
argvalue0 := aurora.NewExplicitReconciliationSettings()
err417 := argvalue0.Read(jsProt416)
if err417 != nil {
err467 := argvalue0.Read(context.Background(), jsProt466)
if err467 != nil {
Usage()
return
}
@ -519,19 +518,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PruneTasks requires 1 args")
flag.Usage()
}
arg418 := flag.Arg(1)
mbTrans419 := thrift.NewTMemoryBufferLen(len(arg418))
defer mbTrans419.Close()
_, err420 := mbTrans419.WriteString(arg418)
if err420 != nil {
arg468 := flag.Arg(1)
mbTrans469 := thrift.NewTMemoryBufferLen(len(arg468))
defer mbTrans469.Close()
_, err470 := mbTrans469.WriteString(arg468)
if err470 != nil {
Usage()
return
}
factory421 := thrift.NewTJSONProtocolFactory()
jsProt422 := factory421.GetProtocol(mbTrans419)
factory471 := thrift.NewTJSONProtocolFactory()
jsProt472 := factory471.GetProtocol(mbTrans469)
argvalue0 := aurora.NewTaskQuery()
err423 := argvalue0.Read(jsProt422)
if err423 != nil {
err473 := argvalue0.Read(context.Background(), jsProt472)
if err473 != nil {
Usage()
return
}
@ -544,19 +543,19 @@ func main() {
fmt.Fprintln(os.Stderr, "CreateJob requires 1 args")
flag.Usage()
}
arg424 := flag.Arg(1)
mbTrans425 := thrift.NewTMemoryBufferLen(len(arg424))
defer mbTrans425.Close()
_, err426 := mbTrans425.WriteString(arg424)
if err426 != nil {
arg474 := flag.Arg(1)
mbTrans475 := thrift.NewTMemoryBufferLen(len(arg474))
defer mbTrans475.Close()
_, err476 := mbTrans475.WriteString(arg474)
if err476 != nil {
Usage()
return
}
factory427 := thrift.NewTJSONProtocolFactory()
jsProt428 := factory427.GetProtocol(mbTrans425)
factory477 := thrift.NewTJSONProtocolFactory()
jsProt478 := factory477.GetProtocol(mbTrans475)
argvalue0 := aurora.NewJobConfiguration()
err429 := argvalue0.Read(jsProt428)
if err429 != nil {
err479 := argvalue0.Read(context.Background(), jsProt478)
if err479 != nil {
Usage()
return
}
@ -569,19 +568,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ScheduleCronJob requires 1 args")
flag.Usage()
}
arg430 := flag.Arg(1)
mbTrans431 := thrift.NewTMemoryBufferLen(len(arg430))
defer mbTrans431.Close()
_, err432 := mbTrans431.WriteString(arg430)
if err432 != nil {
arg480 := flag.Arg(1)
mbTrans481 := thrift.NewTMemoryBufferLen(len(arg480))
defer mbTrans481.Close()
_, err482 := mbTrans481.WriteString(arg480)
if err482 != nil {
Usage()
return
}
factory433 := thrift.NewTJSONProtocolFactory()
jsProt434 := factory433.GetProtocol(mbTrans431)
factory483 := thrift.NewTJSONProtocolFactory()
jsProt484 := factory483.GetProtocol(mbTrans481)
argvalue0 := aurora.NewJobConfiguration()
err435 := argvalue0.Read(jsProt434)
if err435 != nil {
err485 := argvalue0.Read(context.Background(), jsProt484)
if err485 != nil {
Usage()
return
}
@ -594,19 +593,19 @@ func main() {
fmt.Fprintln(os.Stderr, "DescheduleCronJob requires 1 args")
flag.Usage()
}
arg436 := flag.Arg(1)
mbTrans437 := thrift.NewTMemoryBufferLen(len(arg436))
defer mbTrans437.Close()
_, err438 := mbTrans437.WriteString(arg436)
if err438 != nil {
arg486 := flag.Arg(1)
mbTrans487 := thrift.NewTMemoryBufferLen(len(arg486))
defer mbTrans487.Close()
_, err488 := mbTrans487.WriteString(arg486)
if err488 != nil {
Usage()
return
}
factory439 := thrift.NewTJSONProtocolFactory()
jsProt440 := factory439.GetProtocol(mbTrans437)
factory489 := thrift.NewTJSONProtocolFactory()
jsProt490 := factory489.GetProtocol(mbTrans487)
argvalue0 := aurora.NewJobKey()
err441 := argvalue0.Read(jsProt440)
if err441 != nil {
err491 := argvalue0.Read(context.Background(), jsProt490)
if err491 != nil {
Usage()
return
}
@ -619,19 +618,19 @@ func main() {
fmt.Fprintln(os.Stderr, "StartCronJob requires 1 args")
flag.Usage()
}
arg442 := flag.Arg(1)
mbTrans443 := thrift.NewTMemoryBufferLen(len(arg442))
defer mbTrans443.Close()
_, err444 := mbTrans443.WriteString(arg442)
if err444 != nil {
arg492 := flag.Arg(1)
mbTrans493 := thrift.NewTMemoryBufferLen(len(arg492))
defer mbTrans493.Close()
_, err494 := mbTrans493.WriteString(arg492)
if err494 != nil {
Usage()
return
}
factory445 := thrift.NewTJSONProtocolFactory()
jsProt446 := factory445.GetProtocol(mbTrans443)
factory495 := thrift.NewTJSONProtocolFactory()
jsProt496 := factory495.GetProtocol(mbTrans493)
argvalue0 := aurora.NewJobKey()
err447 := argvalue0.Read(jsProt446)
if err447 != nil {
err497 := argvalue0.Read(context.Background(), jsProt496)
if err497 != nil {
Usage()
return
}
@ -644,36 +643,36 @@ func main() {
fmt.Fprintln(os.Stderr, "RestartShards requires 2 args")
flag.Usage()
}
arg448 := flag.Arg(1)
mbTrans449 := thrift.NewTMemoryBufferLen(len(arg448))
defer mbTrans449.Close()
_, err450 := mbTrans449.WriteString(arg448)
if err450 != nil {
arg498 := flag.Arg(1)
mbTrans499 := thrift.NewTMemoryBufferLen(len(arg498))
defer mbTrans499.Close()
_, err500 := mbTrans499.WriteString(arg498)
if err500 != nil {
Usage()
return
}
factory451 := thrift.NewTJSONProtocolFactory()
jsProt452 := factory451.GetProtocol(mbTrans449)
factory501 := thrift.NewTJSONProtocolFactory()
jsProt502 := factory501.GetProtocol(mbTrans499)
argvalue0 := aurora.NewJobKey()
err453 := argvalue0.Read(jsProt452)
if err453 != nil {
err503 := argvalue0.Read(context.Background(), jsProt502)
if err503 != nil {
Usage()
return
}
value0 := argvalue0
arg454 := flag.Arg(2)
mbTrans455 := thrift.NewTMemoryBufferLen(len(arg454))
defer mbTrans455.Close()
_, err456 := mbTrans455.WriteString(arg454)
if err456 != nil {
arg504 := flag.Arg(2)
mbTrans505 := thrift.NewTMemoryBufferLen(len(arg504))
defer mbTrans505.Close()
_, err506 := mbTrans505.WriteString(arg504)
if err506 != nil {
Usage()
return
}
factory457 := thrift.NewTJSONProtocolFactory()
jsProt458 := factory457.GetProtocol(mbTrans455)
containerStruct1 := aurora.NewAuroraAdminRestartShardsArgs()
err459 := containerStruct1.ReadField2(jsProt458)
if err459 != nil {
factory507 := thrift.NewTJSONProtocolFactory()
jsProt508 := factory507.GetProtocol(mbTrans505)
containerStruct1 := aurora.NewAuroraSchedulerManagerRestartShardsArgs()
err509 := containerStruct1.ReadField2(context.Background(), jsProt508)
if err509 != nil {
Usage()
return
}
@ -687,36 +686,36 @@ func main() {
fmt.Fprintln(os.Stderr, "KillTasks requires 3 args")
flag.Usage()
}
arg460 := flag.Arg(1)
mbTrans461 := thrift.NewTMemoryBufferLen(len(arg460))
defer mbTrans461.Close()
_, err462 := mbTrans461.WriteString(arg460)
if err462 != nil {
arg510 := flag.Arg(1)
mbTrans511 := thrift.NewTMemoryBufferLen(len(arg510))
defer mbTrans511.Close()
_, err512 := mbTrans511.WriteString(arg510)
if err512 != nil {
Usage()
return
}
factory463 := thrift.NewTJSONProtocolFactory()
jsProt464 := factory463.GetProtocol(mbTrans461)
factory513 := thrift.NewTJSONProtocolFactory()
jsProt514 := factory513.GetProtocol(mbTrans511)
argvalue0 := aurora.NewJobKey()
err465 := argvalue0.Read(jsProt464)
if err465 != nil {
err515 := argvalue0.Read(context.Background(), jsProt514)
if err515 != nil {
Usage()
return
}
value0 := argvalue0
arg466 := flag.Arg(2)
mbTrans467 := thrift.NewTMemoryBufferLen(len(arg466))
defer mbTrans467.Close()
_, err468 := mbTrans467.WriteString(arg466)
if err468 != nil {
arg516 := flag.Arg(2)
mbTrans517 := thrift.NewTMemoryBufferLen(len(arg516))
defer mbTrans517.Close()
_, err518 := mbTrans517.WriteString(arg516)
if err518 != nil {
Usage()
return
}
factory469 := thrift.NewTJSONProtocolFactory()
jsProt470 := factory469.GetProtocol(mbTrans467)
containerStruct1 := aurora.NewAuroraAdminKillTasksArgs()
err471 := containerStruct1.ReadField2(jsProt470)
if err471 != nil {
factory519 := thrift.NewTJSONProtocolFactory()
jsProt520 := factory519.GetProtocol(mbTrans517)
containerStruct1 := aurora.NewAuroraSchedulerManagerKillTasksArgs()
err521 := containerStruct1.ReadField2(context.Background(), jsProt520)
if err521 != nil {
Usage()
return
}
@ -732,25 +731,25 @@ func main() {
fmt.Fprintln(os.Stderr, "AddInstances requires 2 args")
flag.Usage()
}
arg473 := flag.Arg(1)
mbTrans474 := thrift.NewTMemoryBufferLen(len(arg473))
defer mbTrans474.Close()
_, err475 := mbTrans474.WriteString(arg473)
if err475 != nil {
arg523 := flag.Arg(1)
mbTrans524 := thrift.NewTMemoryBufferLen(len(arg523))
defer mbTrans524.Close()
_, err525 := mbTrans524.WriteString(arg523)
if err525 != nil {
Usage()
return
}
factory476 := thrift.NewTJSONProtocolFactory()
jsProt477 := factory476.GetProtocol(mbTrans474)
factory526 := thrift.NewTJSONProtocolFactory()
jsProt527 := factory526.GetProtocol(mbTrans524)
argvalue0 := aurora.NewInstanceKey()
err478 := argvalue0.Read(jsProt477)
if err478 != nil {
err528 := argvalue0.Read(context.Background(), jsProt527)
if err528 != nil {
Usage()
return
}
value0 := argvalue0
tmp1, err479 := (strconv.Atoi(flag.Arg(2)))
if err479 != nil {
tmp1, err529 := (strconv.Atoi(flag.Arg(2)))
if err529 != nil {
Usage()
return
}
@ -764,19 +763,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ReplaceCronTemplate requires 1 args")
flag.Usage()
}
arg480 := flag.Arg(1)
mbTrans481 := thrift.NewTMemoryBufferLen(len(arg480))
defer mbTrans481.Close()
_, err482 := mbTrans481.WriteString(arg480)
if err482 != nil {
arg530 := flag.Arg(1)
mbTrans531 := thrift.NewTMemoryBufferLen(len(arg530))
defer mbTrans531.Close()
_, err532 := mbTrans531.WriteString(arg530)
if err532 != nil {
Usage()
return
}
factory483 := thrift.NewTJSONProtocolFactory()
jsProt484 := factory483.GetProtocol(mbTrans481)
factory533 := thrift.NewTJSONProtocolFactory()
jsProt534 := factory533.GetProtocol(mbTrans531)
argvalue0 := aurora.NewJobConfiguration()
err485 := argvalue0.Read(jsProt484)
if err485 != nil {
err535 := argvalue0.Read(context.Background(), jsProt534)
if err535 != nil {
Usage()
return
}
@ -789,19 +788,19 @@ func main() {
fmt.Fprintln(os.Stderr, "StartJobUpdate requires 2 args")
flag.Usage()
}
arg486 := flag.Arg(1)
mbTrans487 := thrift.NewTMemoryBufferLen(len(arg486))
defer mbTrans487.Close()
_, err488 := mbTrans487.WriteString(arg486)
if err488 != nil {
arg536 := flag.Arg(1)
mbTrans537 := thrift.NewTMemoryBufferLen(len(arg536))
defer mbTrans537.Close()
_, err538 := mbTrans537.WriteString(arg536)
if err538 != nil {
Usage()
return
}
factory489 := thrift.NewTJSONProtocolFactory()
jsProt490 := factory489.GetProtocol(mbTrans487)
factory539 := thrift.NewTJSONProtocolFactory()
jsProt540 := factory539.GetProtocol(mbTrans537)
argvalue0 := aurora.NewJobUpdateRequest()
err491 := argvalue0.Read(jsProt490)
if err491 != nil {
err541 := argvalue0.Read(context.Background(), jsProt540)
if err541 != nil {
Usage()
return
}
@ -816,19 +815,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PauseJobUpdate requires 2 args")
flag.Usage()
}
arg493 := flag.Arg(1)
mbTrans494 := thrift.NewTMemoryBufferLen(len(arg493))
defer mbTrans494.Close()
_, err495 := mbTrans494.WriteString(arg493)
if err495 != nil {
arg543 := flag.Arg(1)
mbTrans544 := thrift.NewTMemoryBufferLen(len(arg543))
defer mbTrans544.Close()
_, err545 := mbTrans544.WriteString(arg543)
if err545 != nil {
Usage()
return
}
factory496 := thrift.NewTJSONProtocolFactory()
jsProt497 := factory496.GetProtocol(mbTrans494)
factory546 := thrift.NewTJSONProtocolFactory()
jsProt547 := factory546.GetProtocol(mbTrans544)
argvalue0 := aurora.NewJobUpdateKey()
err498 := argvalue0.Read(jsProt497)
if err498 != nil {
err548 := argvalue0.Read(context.Background(), jsProt547)
if err548 != nil {
Usage()
return
}
@ -843,19 +842,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ResumeJobUpdate requires 2 args")
flag.Usage()
}
arg500 := flag.Arg(1)
mbTrans501 := thrift.NewTMemoryBufferLen(len(arg500))
defer mbTrans501.Close()
_, err502 := mbTrans501.WriteString(arg500)
if err502 != nil {
arg550 := flag.Arg(1)
mbTrans551 := thrift.NewTMemoryBufferLen(len(arg550))
defer mbTrans551.Close()
_, err552 := mbTrans551.WriteString(arg550)
if err552 != nil {
Usage()
return
}
factory503 := thrift.NewTJSONProtocolFactory()
jsProt504 := factory503.GetProtocol(mbTrans501)
factory553 := thrift.NewTJSONProtocolFactory()
jsProt554 := factory553.GetProtocol(mbTrans551)
argvalue0 := aurora.NewJobUpdateKey()
err505 := argvalue0.Read(jsProt504)
if err505 != nil {
err555 := argvalue0.Read(context.Background(), jsProt554)
if err555 != nil {
Usage()
return
}
@ -870,19 +869,19 @@ func main() {
fmt.Fprintln(os.Stderr, "AbortJobUpdate requires 2 args")
flag.Usage()
}
arg507 := flag.Arg(1)
mbTrans508 := thrift.NewTMemoryBufferLen(len(arg507))
defer mbTrans508.Close()
_, err509 := mbTrans508.WriteString(arg507)
if err509 != nil {
arg557 := flag.Arg(1)
mbTrans558 := thrift.NewTMemoryBufferLen(len(arg557))
defer mbTrans558.Close()
_, err559 := mbTrans558.WriteString(arg557)
if err559 != nil {
Usage()
return
}
factory510 := thrift.NewTJSONProtocolFactory()
jsProt511 := factory510.GetProtocol(mbTrans508)
factory560 := thrift.NewTJSONProtocolFactory()
jsProt561 := factory560.GetProtocol(mbTrans558)
argvalue0 := aurora.NewJobUpdateKey()
err512 := argvalue0.Read(jsProt511)
if err512 != nil {
err562 := argvalue0.Read(context.Background(), jsProt561)
if err562 != nil {
Usage()
return
}
@ -897,19 +896,19 @@ func main() {
fmt.Fprintln(os.Stderr, "RollbackJobUpdate requires 2 args")
flag.Usage()
}
arg514 := flag.Arg(1)
mbTrans515 := thrift.NewTMemoryBufferLen(len(arg514))
defer mbTrans515.Close()
_, err516 := mbTrans515.WriteString(arg514)
if err516 != nil {
arg564 := flag.Arg(1)
mbTrans565 := thrift.NewTMemoryBufferLen(len(arg564))
defer mbTrans565.Close()
_, err566 := mbTrans565.WriteString(arg564)
if err566 != nil {
Usage()
return
}
factory517 := thrift.NewTJSONProtocolFactory()
jsProt518 := factory517.GetProtocol(mbTrans515)
factory567 := thrift.NewTJSONProtocolFactory()
jsProt568 := factory567.GetProtocol(mbTrans565)
argvalue0 := aurora.NewJobUpdateKey()
err519 := argvalue0.Read(jsProt518)
if err519 != nil {
err569 := argvalue0.Read(context.Background(), jsProt568)
if err569 != nil {
Usage()
return
}
@ -924,19 +923,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PulseJobUpdate requires 1 args")
flag.Usage()
}
arg521 := flag.Arg(1)
mbTrans522 := thrift.NewTMemoryBufferLen(len(arg521))
defer mbTrans522.Close()
_, err523 := mbTrans522.WriteString(arg521)
if err523 != nil {
arg571 := flag.Arg(1)
mbTrans572 := thrift.NewTMemoryBufferLen(len(arg571))
defer mbTrans572.Close()
_, err573 := mbTrans572.WriteString(arg571)
if err573 != nil {
Usage()
return
}
factory524 := thrift.NewTJSONProtocolFactory()
jsProt525 := factory524.GetProtocol(mbTrans522)
factory574 := thrift.NewTJSONProtocolFactory()
jsProt575 := factory574.GetProtocol(mbTrans572)
argvalue0 := aurora.NewJobUpdateKey()
err526 := argvalue0.Read(jsProt525)
if err526 != nil {
err576 := argvalue0.Read(context.Background(), jsProt575)
if err576 != nil {
Usage()
return
}
@ -967,19 +966,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
flag.Usage()
}
arg528 := flag.Arg(1)
mbTrans529 := thrift.NewTMemoryBufferLen(len(arg528))
defer mbTrans529.Close()
_, err530 := mbTrans529.WriteString(arg528)
if err530 != nil {
arg578 := flag.Arg(1)
mbTrans579 := thrift.NewTMemoryBufferLen(len(arg578))
defer mbTrans579.Close()
_, err580 := mbTrans579.WriteString(arg578)
if err580 != nil {
Usage()
return
}
factory531 := thrift.NewTJSONProtocolFactory()
jsProt532 := factory531.GetProtocol(mbTrans529)
factory581 := thrift.NewTJSONProtocolFactory()
jsProt582 := factory581.GetProtocol(mbTrans579)
argvalue0 := aurora.NewTaskQuery()
err533 := argvalue0.Read(jsProt532)
if err533 != nil {
err583 := argvalue0.Read(context.Background(), jsProt582)
if err583 != nil {
Usage()
return
}
@ -992,19 +991,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
flag.Usage()
}
arg534 := flag.Arg(1)
mbTrans535 := thrift.NewTMemoryBufferLen(len(arg534))
defer mbTrans535.Close()
_, err536 := mbTrans535.WriteString(arg534)
if err536 != nil {
arg584 := flag.Arg(1)
mbTrans585 := thrift.NewTMemoryBufferLen(len(arg584))
defer mbTrans585.Close()
_, err586 := mbTrans585.WriteString(arg584)
if err586 != nil {
Usage()
return
}
factory537 := thrift.NewTJSONProtocolFactory()
jsProt538 := factory537.GetProtocol(mbTrans535)
factory587 := thrift.NewTJSONProtocolFactory()
jsProt588 := factory587.GetProtocol(mbTrans585)
argvalue0 := aurora.NewTaskQuery()
err539 := argvalue0.Read(jsProt538)
if err539 != nil {
err589 := argvalue0.Read(context.Background(), jsProt588)
if err589 != nil {
Usage()
return
}
@ -1017,19 +1016,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
flag.Usage()
}
arg540 := flag.Arg(1)
mbTrans541 := thrift.NewTMemoryBufferLen(len(arg540))
defer mbTrans541.Close()
_, err542 := mbTrans541.WriteString(arg540)
if err542 != nil {
arg590 := flag.Arg(1)
mbTrans591 := thrift.NewTMemoryBufferLen(len(arg590))
defer mbTrans591.Close()
_, err592 := mbTrans591.WriteString(arg590)
if err592 != nil {
Usage()
return
}
factory543 := thrift.NewTJSONProtocolFactory()
jsProt544 := factory543.GetProtocol(mbTrans541)
factory593 := thrift.NewTJSONProtocolFactory()
jsProt594 := factory593.GetProtocol(mbTrans591)
argvalue0 := aurora.NewTaskQuery()
err545 := argvalue0.Read(jsProt544)
if err545 != nil {
err595 := argvalue0.Read(context.Background(), jsProt594)
if err595 != nil {
Usage()
return
}
@ -1042,19 +1041,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
flag.Usage()
}
arg546 := flag.Arg(1)
mbTrans547 := thrift.NewTMemoryBufferLen(len(arg546))
defer mbTrans547.Close()
_, err548 := mbTrans547.WriteString(arg546)
if err548 != nil {
arg596 := flag.Arg(1)
mbTrans597 := thrift.NewTMemoryBufferLen(len(arg596))
defer mbTrans597.Close()
_, err598 := mbTrans597.WriteString(arg596)
if err598 != nil {
Usage()
return
}
factory549 := thrift.NewTJSONProtocolFactory()
jsProt550 := factory549.GetProtocol(mbTrans547)
factory599 := thrift.NewTJSONProtocolFactory()
jsProt600 := factory599.GetProtocol(mbTrans597)
argvalue0 := aurora.NewJobKey()
err551 := argvalue0.Read(jsProt550)
if err551 != nil {
err601 := argvalue0.Read(context.Background(), jsProt600)
if err601 != nil {
Usage()
return
}
@ -1087,19 +1086,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
flag.Usage()
}
arg554 := flag.Arg(1)
mbTrans555 := thrift.NewTMemoryBufferLen(len(arg554))
defer mbTrans555.Close()
_, err556 := mbTrans555.WriteString(arg554)
if err556 != nil {
arg604 := flag.Arg(1)
mbTrans605 := thrift.NewTMemoryBufferLen(len(arg604))
defer mbTrans605.Close()
_, err606 := mbTrans605.WriteString(arg604)
if err606 != nil {
Usage()
return
}
factory557 := thrift.NewTJSONProtocolFactory()
jsProt558 := factory557.GetProtocol(mbTrans555)
factory607 := thrift.NewTJSONProtocolFactory()
jsProt608 := factory607.GetProtocol(mbTrans605)
argvalue0 := aurora.NewJobConfiguration()
err559 := argvalue0.Read(jsProt558)
if err559 != nil {
err609 := argvalue0.Read(context.Background(), jsProt608)
if err609 != nil {
Usage()
return
}
@ -1112,19 +1111,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
flag.Usage()
}
arg560 := flag.Arg(1)
mbTrans561 := thrift.NewTMemoryBufferLen(len(arg560))
defer mbTrans561.Close()
_, err562 := mbTrans561.WriteString(arg560)
if err562 != nil {
arg610 := flag.Arg(1)
mbTrans611 := thrift.NewTMemoryBufferLen(len(arg610))
defer mbTrans611.Close()
_, err612 := mbTrans611.WriteString(arg610)
if err612 != nil {
Usage()
return
}
factory563 := thrift.NewTJSONProtocolFactory()
jsProt564 := factory563.GetProtocol(mbTrans561)
factory613 := thrift.NewTJSONProtocolFactory()
jsProt614 := factory613.GetProtocol(mbTrans611)
argvalue0 := aurora.NewJobUpdateQuery()
err565 := argvalue0.Read(jsProt564)
if err565 != nil {
err615 := argvalue0.Read(context.Background(), jsProt614)
if err615 != nil {
Usage()
return
}
@ -1137,19 +1136,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
flag.Usage()
}
arg566 := flag.Arg(1)
mbTrans567 := thrift.NewTMemoryBufferLen(len(arg566))
defer mbTrans567.Close()
_, err568 := mbTrans567.WriteString(arg566)
if err568 != nil {
arg616 := flag.Arg(1)
mbTrans617 := thrift.NewTMemoryBufferLen(len(arg616))
defer mbTrans617.Close()
_, err618 := mbTrans617.WriteString(arg616)
if err618 != nil {
Usage()
return
}
factory569 := thrift.NewTJSONProtocolFactory()
jsProt570 := factory569.GetProtocol(mbTrans567)
factory619 := thrift.NewTJSONProtocolFactory()
jsProt620 := factory619.GetProtocol(mbTrans617)
argvalue0 := aurora.NewJobUpdateQuery()
err571 := argvalue0.Read(jsProt570)
if err571 != nil {
err621 := argvalue0.Read(context.Background(), jsProt620)
if err621 != nil {
Usage()
return
}
@ -1162,19 +1161,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
flag.Usage()
}
arg572 := flag.Arg(1)
mbTrans573 := thrift.NewTMemoryBufferLen(len(arg572))
defer mbTrans573.Close()
_, err574 := mbTrans573.WriteString(arg572)
if err574 != nil {
arg622 := flag.Arg(1)
mbTrans623 := thrift.NewTMemoryBufferLen(len(arg622))
defer mbTrans623.Close()
_, err624 := mbTrans623.WriteString(arg622)
if err624 != nil {
Usage()
return
}
factory575 := thrift.NewTJSONProtocolFactory()
jsProt576 := factory575.GetProtocol(mbTrans573)
factory625 := thrift.NewTJSONProtocolFactory()
jsProt626 := factory625.GetProtocol(mbTrans623)
argvalue0 := aurora.NewJobUpdateRequest()
err577 := argvalue0.Read(jsProt576)
if err577 != nil {
err627 := argvalue0.Read(context.Background(), jsProt626)
if err627 != nil {
Usage()
return
}
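The regenerated Thrift clients above illustrate the main generated-code change from Thrift 0.13 to 0.14: every generated Read and ReadField call now takes a context.Context as its first argument (for example, argvalue0.Read(jsProt...) becomes argvalue0.Read(context.Background(), jsProt...)). The standalone sketch below reproduces that calling convention; the gen-go import path is assumed from the repository layout rather than shown in this diff.

```go
// Minimal sketch of the Thrift 0.14 calling convention visible in the
// regenerated clients above: struct Read methods now take a context.
package thriftcompat

import (
	"context"

	"github.com/apache/thrift/lib/go/thrift"
	"github.com/paypal/gorealis/gen-go/apache/aurora" // assumed gen-go path
)

// decodeTaskQuery parses a JSON-encoded TaskQuery the same way the
// generated -remote helpers above do.
func decodeTaskQuery(jsonArg string) (*aurora.TaskQuery, error) {
	trans := thrift.NewTMemoryBufferLen(len(jsonArg))
	defer trans.Close()
	if _, err := trans.WriteString(jsonArg); err != nil {
		return nil, err
	}
	proto := thrift.NewTJSONProtocolFactory().GetProtocol(trans)
	query := aurora.NewTaskQuery()
	// Thrift 0.13: query.Read(proto)
	// Thrift 0.14: a context.Context is now the first argument.
	if err := query.Read(context.Background(), proto); err != nil {
		return nil, err
	}
	return query, nil
}
```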


@ -1,5 +1,4 @@
// Autogenerated by Thrift Compiler (0.13.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package main
@ -176,19 +175,19 @@ func main() {
fmt.Fprintln(os.Stderr, "CreateJob requires 1 args")
flag.Usage()
}
arg163 := flag.Arg(1)
mbTrans164 := thrift.NewTMemoryBufferLen(len(arg163))
defer mbTrans164.Close()
_, err165 := mbTrans164.WriteString(arg163)
if err165 != nil {
arg213 := flag.Arg(1)
mbTrans214 := thrift.NewTMemoryBufferLen(len(arg213))
defer mbTrans214.Close()
_, err215 := mbTrans214.WriteString(arg213)
if err215 != nil {
Usage()
return
}
factory166 := thrift.NewTJSONProtocolFactory()
jsProt167 := factory166.GetProtocol(mbTrans164)
factory216 := thrift.NewTJSONProtocolFactory()
jsProt217 := factory216.GetProtocol(mbTrans214)
argvalue0 := aurora.NewJobConfiguration()
err168 := argvalue0.Read(jsProt167)
if err168 != nil {
err218 := argvalue0.Read(context.Background(), jsProt217)
if err218 != nil {
Usage()
return
}
@ -201,19 +200,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ScheduleCronJob requires 1 args")
flag.Usage()
}
arg169 := flag.Arg(1)
mbTrans170 := thrift.NewTMemoryBufferLen(len(arg169))
defer mbTrans170.Close()
_, err171 := mbTrans170.WriteString(arg169)
if err171 != nil {
arg219 := flag.Arg(1)
mbTrans220 := thrift.NewTMemoryBufferLen(len(arg219))
defer mbTrans220.Close()
_, err221 := mbTrans220.WriteString(arg219)
if err221 != nil {
Usage()
return
}
factory172 := thrift.NewTJSONProtocolFactory()
jsProt173 := factory172.GetProtocol(mbTrans170)
factory222 := thrift.NewTJSONProtocolFactory()
jsProt223 := factory222.GetProtocol(mbTrans220)
argvalue0 := aurora.NewJobConfiguration()
err174 := argvalue0.Read(jsProt173)
if err174 != nil {
err224 := argvalue0.Read(context.Background(), jsProt223)
if err224 != nil {
Usage()
return
}
@ -226,19 +225,19 @@ func main() {
fmt.Fprintln(os.Stderr, "DescheduleCronJob requires 1 args")
flag.Usage()
}
arg175 := flag.Arg(1)
mbTrans176 := thrift.NewTMemoryBufferLen(len(arg175))
defer mbTrans176.Close()
_, err177 := mbTrans176.WriteString(arg175)
if err177 != nil {
arg225 := flag.Arg(1)
mbTrans226 := thrift.NewTMemoryBufferLen(len(arg225))
defer mbTrans226.Close()
_, err227 := mbTrans226.WriteString(arg225)
if err227 != nil {
Usage()
return
}
factory178 := thrift.NewTJSONProtocolFactory()
jsProt179 := factory178.GetProtocol(mbTrans176)
factory228 := thrift.NewTJSONProtocolFactory()
jsProt229 := factory228.GetProtocol(mbTrans226)
argvalue0 := aurora.NewJobKey()
err180 := argvalue0.Read(jsProt179)
if err180 != nil {
err230 := argvalue0.Read(context.Background(), jsProt229)
if err230 != nil {
Usage()
return
}
@ -251,19 +250,19 @@ func main() {
fmt.Fprintln(os.Stderr, "StartCronJob requires 1 args")
flag.Usage()
}
arg181 := flag.Arg(1)
mbTrans182 := thrift.NewTMemoryBufferLen(len(arg181))
defer mbTrans182.Close()
_, err183 := mbTrans182.WriteString(arg181)
if err183 != nil {
arg231 := flag.Arg(1)
mbTrans232 := thrift.NewTMemoryBufferLen(len(arg231))
defer mbTrans232.Close()
_, err233 := mbTrans232.WriteString(arg231)
if err233 != nil {
Usage()
return
}
factory184 := thrift.NewTJSONProtocolFactory()
jsProt185 := factory184.GetProtocol(mbTrans182)
factory234 := thrift.NewTJSONProtocolFactory()
jsProt235 := factory234.GetProtocol(mbTrans232)
argvalue0 := aurora.NewJobKey()
err186 := argvalue0.Read(jsProt185)
if err186 != nil {
err236 := argvalue0.Read(context.Background(), jsProt235)
if err236 != nil {
Usage()
return
}
@ -276,36 +275,36 @@ func main() {
fmt.Fprintln(os.Stderr, "RestartShards requires 2 args")
flag.Usage()
}
arg187 := flag.Arg(1)
mbTrans188 := thrift.NewTMemoryBufferLen(len(arg187))
defer mbTrans188.Close()
_, err189 := mbTrans188.WriteString(arg187)
if err189 != nil {
arg237 := flag.Arg(1)
mbTrans238 := thrift.NewTMemoryBufferLen(len(arg237))
defer mbTrans238.Close()
_, err239 := mbTrans238.WriteString(arg237)
if err239 != nil {
Usage()
return
}
factory190 := thrift.NewTJSONProtocolFactory()
jsProt191 := factory190.GetProtocol(mbTrans188)
factory240 := thrift.NewTJSONProtocolFactory()
jsProt241 := factory240.GetProtocol(mbTrans238)
argvalue0 := aurora.NewJobKey()
err192 := argvalue0.Read(jsProt191)
if err192 != nil {
err242 := argvalue0.Read(context.Background(), jsProt241)
if err242 != nil {
Usage()
return
}
value0 := argvalue0
arg193 := flag.Arg(2)
mbTrans194 := thrift.NewTMemoryBufferLen(len(arg193))
defer mbTrans194.Close()
_, err195 := mbTrans194.WriteString(arg193)
if err195 != nil {
arg243 := flag.Arg(2)
mbTrans244 := thrift.NewTMemoryBufferLen(len(arg243))
defer mbTrans244.Close()
_, err245 := mbTrans244.WriteString(arg243)
if err245 != nil {
Usage()
return
}
factory196 := thrift.NewTJSONProtocolFactory()
jsProt197 := factory196.GetProtocol(mbTrans194)
factory246 := thrift.NewTJSONProtocolFactory()
jsProt247 := factory246.GetProtocol(mbTrans244)
containerStruct1 := aurora.NewAuroraSchedulerManagerRestartShardsArgs()
err198 := containerStruct1.ReadField2(jsProt197)
if err198 != nil {
err248 := containerStruct1.ReadField2(context.Background(), jsProt247)
if err248 != nil {
Usage()
return
}
@ -319,36 +318,36 @@ func main() {
fmt.Fprintln(os.Stderr, "KillTasks requires 3 args")
flag.Usage()
}
arg199 := flag.Arg(1)
mbTrans200 := thrift.NewTMemoryBufferLen(len(arg199))
defer mbTrans200.Close()
_, err201 := mbTrans200.WriteString(arg199)
if err201 != nil {
arg249 := flag.Arg(1)
mbTrans250 := thrift.NewTMemoryBufferLen(len(arg249))
defer mbTrans250.Close()
_, err251 := mbTrans250.WriteString(arg249)
if err251 != nil {
Usage()
return
}
factory202 := thrift.NewTJSONProtocolFactory()
jsProt203 := factory202.GetProtocol(mbTrans200)
factory252 := thrift.NewTJSONProtocolFactory()
jsProt253 := factory252.GetProtocol(mbTrans250)
argvalue0 := aurora.NewJobKey()
err204 := argvalue0.Read(jsProt203)
if err204 != nil {
err254 := argvalue0.Read(context.Background(), jsProt253)
if err254 != nil {
Usage()
return
}
value0 := argvalue0
arg205 := flag.Arg(2)
mbTrans206 := thrift.NewTMemoryBufferLen(len(arg205))
defer mbTrans206.Close()
_, err207 := mbTrans206.WriteString(arg205)
if err207 != nil {
arg255 := flag.Arg(2)
mbTrans256 := thrift.NewTMemoryBufferLen(len(arg255))
defer mbTrans256.Close()
_, err257 := mbTrans256.WriteString(arg255)
if err257 != nil {
Usage()
return
}
factory208 := thrift.NewTJSONProtocolFactory()
jsProt209 := factory208.GetProtocol(mbTrans206)
factory258 := thrift.NewTJSONProtocolFactory()
jsProt259 := factory258.GetProtocol(mbTrans256)
containerStruct1 := aurora.NewAuroraSchedulerManagerKillTasksArgs()
err210 := containerStruct1.ReadField2(jsProt209)
if err210 != nil {
err260 := containerStruct1.ReadField2(context.Background(), jsProt259)
if err260 != nil {
Usage()
return
}
@ -364,25 +363,25 @@ func main() {
fmt.Fprintln(os.Stderr, "AddInstances requires 2 args")
flag.Usage()
}
arg212 := flag.Arg(1)
mbTrans213 := thrift.NewTMemoryBufferLen(len(arg212))
defer mbTrans213.Close()
_, err214 := mbTrans213.WriteString(arg212)
if err214 != nil {
arg262 := flag.Arg(1)
mbTrans263 := thrift.NewTMemoryBufferLen(len(arg262))
defer mbTrans263.Close()
_, err264 := mbTrans263.WriteString(arg262)
if err264 != nil {
Usage()
return
}
factory215 := thrift.NewTJSONProtocolFactory()
jsProt216 := factory215.GetProtocol(mbTrans213)
factory265 := thrift.NewTJSONProtocolFactory()
jsProt266 := factory265.GetProtocol(mbTrans263)
argvalue0 := aurora.NewInstanceKey()
err217 := argvalue0.Read(jsProt216)
if err217 != nil {
err267 := argvalue0.Read(context.Background(), jsProt266)
if err267 != nil {
Usage()
return
}
value0 := argvalue0
tmp1, err218 := (strconv.Atoi(flag.Arg(2)))
if err218 != nil {
tmp1, err268 := (strconv.Atoi(flag.Arg(2)))
if err268 != nil {
Usage()
return
}
@ -396,19 +395,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ReplaceCronTemplate requires 1 args")
flag.Usage()
}
arg219 := flag.Arg(1)
mbTrans220 := thrift.NewTMemoryBufferLen(len(arg219))
defer mbTrans220.Close()
_, err221 := mbTrans220.WriteString(arg219)
if err221 != nil {
arg269 := flag.Arg(1)
mbTrans270 := thrift.NewTMemoryBufferLen(len(arg269))
defer mbTrans270.Close()
_, err271 := mbTrans270.WriteString(arg269)
if err271 != nil {
Usage()
return
}
factory222 := thrift.NewTJSONProtocolFactory()
jsProt223 := factory222.GetProtocol(mbTrans220)
factory272 := thrift.NewTJSONProtocolFactory()
jsProt273 := factory272.GetProtocol(mbTrans270)
argvalue0 := aurora.NewJobConfiguration()
err224 := argvalue0.Read(jsProt223)
if err224 != nil {
err274 := argvalue0.Read(context.Background(), jsProt273)
if err274 != nil {
Usage()
return
}
@ -421,19 +420,19 @@ func main() {
fmt.Fprintln(os.Stderr, "StartJobUpdate requires 2 args")
flag.Usage()
}
arg225 := flag.Arg(1)
mbTrans226 := thrift.NewTMemoryBufferLen(len(arg225))
defer mbTrans226.Close()
_, err227 := mbTrans226.WriteString(arg225)
if err227 != nil {
arg275 := flag.Arg(1)
mbTrans276 := thrift.NewTMemoryBufferLen(len(arg275))
defer mbTrans276.Close()
_, err277 := mbTrans276.WriteString(arg275)
if err277 != nil {
Usage()
return
}
factory228 := thrift.NewTJSONProtocolFactory()
jsProt229 := factory228.GetProtocol(mbTrans226)
factory278 := thrift.NewTJSONProtocolFactory()
jsProt279 := factory278.GetProtocol(mbTrans276)
argvalue0 := aurora.NewJobUpdateRequest()
err230 := argvalue0.Read(jsProt229)
if err230 != nil {
err280 := argvalue0.Read(context.Background(), jsProt279)
if err280 != nil {
Usage()
return
}
@ -448,19 +447,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PauseJobUpdate requires 2 args")
flag.Usage()
}
arg232 := flag.Arg(1)
mbTrans233 := thrift.NewTMemoryBufferLen(len(arg232))
defer mbTrans233.Close()
_, err234 := mbTrans233.WriteString(arg232)
if err234 != nil {
arg282 := flag.Arg(1)
mbTrans283 := thrift.NewTMemoryBufferLen(len(arg282))
defer mbTrans283.Close()
_, err284 := mbTrans283.WriteString(arg282)
if err284 != nil {
Usage()
return
}
factory235 := thrift.NewTJSONProtocolFactory()
jsProt236 := factory235.GetProtocol(mbTrans233)
factory285 := thrift.NewTJSONProtocolFactory()
jsProt286 := factory285.GetProtocol(mbTrans283)
argvalue0 := aurora.NewJobUpdateKey()
err237 := argvalue0.Read(jsProt236)
if err237 != nil {
err287 := argvalue0.Read(context.Background(), jsProt286)
if err287 != nil {
Usage()
return
}
@ -475,19 +474,19 @@ func main() {
fmt.Fprintln(os.Stderr, "ResumeJobUpdate requires 2 args")
flag.Usage()
}
arg239 := flag.Arg(1)
mbTrans240 := thrift.NewTMemoryBufferLen(len(arg239))
defer mbTrans240.Close()
_, err241 := mbTrans240.WriteString(arg239)
if err241 != nil {
arg289 := flag.Arg(1)
mbTrans290 := thrift.NewTMemoryBufferLen(len(arg289))
defer mbTrans290.Close()
_, err291 := mbTrans290.WriteString(arg289)
if err291 != nil {
Usage()
return
}
factory242 := thrift.NewTJSONProtocolFactory()
jsProt243 := factory242.GetProtocol(mbTrans240)
factory292 := thrift.NewTJSONProtocolFactory()
jsProt293 := factory292.GetProtocol(mbTrans290)
argvalue0 := aurora.NewJobUpdateKey()
err244 := argvalue0.Read(jsProt243)
if err244 != nil {
err294 := argvalue0.Read(context.Background(), jsProt293)
if err294 != nil {
Usage()
return
}
@ -502,19 +501,19 @@ func main() {
fmt.Fprintln(os.Stderr, "AbortJobUpdate requires 2 args")
flag.Usage()
}
arg246 := flag.Arg(1)
mbTrans247 := thrift.NewTMemoryBufferLen(len(arg246))
defer mbTrans247.Close()
_, err248 := mbTrans247.WriteString(arg246)
if err248 != nil {
arg296 := flag.Arg(1)
mbTrans297 := thrift.NewTMemoryBufferLen(len(arg296))
defer mbTrans297.Close()
_, err298 := mbTrans297.WriteString(arg296)
if err298 != nil {
Usage()
return
}
factory249 := thrift.NewTJSONProtocolFactory()
jsProt250 := factory249.GetProtocol(mbTrans247)
factory299 := thrift.NewTJSONProtocolFactory()
jsProt300 := factory299.GetProtocol(mbTrans297)
argvalue0 := aurora.NewJobUpdateKey()
err251 := argvalue0.Read(jsProt250)
if err251 != nil {
err301 := argvalue0.Read(context.Background(), jsProt300)
if err301 != nil {
Usage()
return
}
@ -529,19 +528,19 @@ func main() {
fmt.Fprintln(os.Stderr, "RollbackJobUpdate requires 2 args")
flag.Usage()
}
arg253 := flag.Arg(1)
mbTrans254 := thrift.NewTMemoryBufferLen(len(arg253))
defer mbTrans254.Close()
_, err255 := mbTrans254.WriteString(arg253)
if err255 != nil {
arg303 := flag.Arg(1)
mbTrans304 := thrift.NewTMemoryBufferLen(len(arg303))
defer mbTrans304.Close()
_, err305 := mbTrans304.WriteString(arg303)
if err305 != nil {
Usage()
return
}
factory256 := thrift.NewTJSONProtocolFactory()
jsProt257 := factory256.GetProtocol(mbTrans254)
factory306 := thrift.NewTJSONProtocolFactory()
jsProt307 := factory306.GetProtocol(mbTrans304)
argvalue0 := aurora.NewJobUpdateKey()
err258 := argvalue0.Read(jsProt257)
if err258 != nil {
err308 := argvalue0.Read(context.Background(), jsProt307)
if err308 != nil {
Usage()
return
}
@ -556,19 +555,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PulseJobUpdate requires 1 args")
flag.Usage()
}
arg260 := flag.Arg(1)
mbTrans261 := thrift.NewTMemoryBufferLen(len(arg260))
defer mbTrans261.Close()
_, err262 := mbTrans261.WriteString(arg260)
if err262 != nil {
arg310 := flag.Arg(1)
mbTrans311 := thrift.NewTMemoryBufferLen(len(arg310))
defer mbTrans311.Close()
_, err312 := mbTrans311.WriteString(arg310)
if err312 != nil {
Usage()
return
}
factory263 := thrift.NewTJSONProtocolFactory()
jsProt264 := factory263.GetProtocol(mbTrans261)
factory313 := thrift.NewTJSONProtocolFactory()
jsProt314 := factory313.GetProtocol(mbTrans311)
argvalue0 := aurora.NewJobUpdateKey()
err265 := argvalue0.Read(jsProt264)
if err265 != nil {
err315 := argvalue0.Read(context.Background(), jsProt314)
if err315 != nil {
Usage()
return
}
@ -599,19 +598,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
flag.Usage()
}
arg267 := flag.Arg(1)
mbTrans268 := thrift.NewTMemoryBufferLen(len(arg267))
defer mbTrans268.Close()
_, err269 := mbTrans268.WriteString(arg267)
if err269 != nil {
arg317 := flag.Arg(1)
mbTrans318 := thrift.NewTMemoryBufferLen(len(arg317))
defer mbTrans318.Close()
_, err319 := mbTrans318.WriteString(arg317)
if err319 != nil {
Usage()
return
}
factory270 := thrift.NewTJSONProtocolFactory()
jsProt271 := factory270.GetProtocol(mbTrans268)
factory320 := thrift.NewTJSONProtocolFactory()
jsProt321 := factory320.GetProtocol(mbTrans318)
argvalue0 := aurora.NewTaskQuery()
err272 := argvalue0.Read(jsProt271)
if err272 != nil {
err322 := argvalue0.Read(context.Background(), jsProt321)
if err322 != nil {
Usage()
return
}
@ -624,19 +623,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
flag.Usage()
}
arg273 := flag.Arg(1)
mbTrans274 := thrift.NewTMemoryBufferLen(len(arg273))
defer mbTrans274.Close()
_, err275 := mbTrans274.WriteString(arg273)
if err275 != nil {
arg323 := flag.Arg(1)
mbTrans324 := thrift.NewTMemoryBufferLen(len(arg323))
defer mbTrans324.Close()
_, err325 := mbTrans324.WriteString(arg323)
if err325 != nil {
Usage()
return
}
factory276 := thrift.NewTJSONProtocolFactory()
jsProt277 := factory276.GetProtocol(mbTrans274)
factory326 := thrift.NewTJSONProtocolFactory()
jsProt327 := factory326.GetProtocol(mbTrans324)
argvalue0 := aurora.NewTaskQuery()
err278 := argvalue0.Read(jsProt277)
if err278 != nil {
err328 := argvalue0.Read(context.Background(), jsProt327)
if err328 != nil {
Usage()
return
}
@ -649,19 +648,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
flag.Usage()
}
arg279 := flag.Arg(1)
mbTrans280 := thrift.NewTMemoryBufferLen(len(arg279))
defer mbTrans280.Close()
_, err281 := mbTrans280.WriteString(arg279)
if err281 != nil {
arg329 := flag.Arg(1)
mbTrans330 := thrift.NewTMemoryBufferLen(len(arg329))
defer mbTrans330.Close()
_, err331 := mbTrans330.WriteString(arg329)
if err331 != nil {
Usage()
return
}
factory282 := thrift.NewTJSONProtocolFactory()
jsProt283 := factory282.GetProtocol(mbTrans280)
factory332 := thrift.NewTJSONProtocolFactory()
jsProt333 := factory332.GetProtocol(mbTrans330)
argvalue0 := aurora.NewTaskQuery()
err284 := argvalue0.Read(jsProt283)
if err284 != nil {
err334 := argvalue0.Read(context.Background(), jsProt333)
if err334 != nil {
Usage()
return
}
@ -674,19 +673,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
flag.Usage()
}
arg285 := flag.Arg(1)
mbTrans286 := thrift.NewTMemoryBufferLen(len(arg285))
defer mbTrans286.Close()
_, err287 := mbTrans286.WriteString(arg285)
if err287 != nil {
arg335 := flag.Arg(1)
mbTrans336 := thrift.NewTMemoryBufferLen(len(arg335))
defer mbTrans336.Close()
_, err337 := mbTrans336.WriteString(arg335)
if err337 != nil {
Usage()
return
}
factory288 := thrift.NewTJSONProtocolFactory()
jsProt289 := factory288.GetProtocol(mbTrans286)
factory338 := thrift.NewTJSONProtocolFactory()
jsProt339 := factory338.GetProtocol(mbTrans336)
argvalue0 := aurora.NewJobKey()
err290 := argvalue0.Read(jsProt289)
if err290 != nil {
err340 := argvalue0.Read(context.Background(), jsProt339)
if err340 != nil {
Usage()
return
}
@ -719,19 +718,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
flag.Usage()
}
arg293 := flag.Arg(1)
mbTrans294 := thrift.NewTMemoryBufferLen(len(arg293))
defer mbTrans294.Close()
_, err295 := mbTrans294.WriteString(arg293)
if err295 != nil {
arg343 := flag.Arg(1)
mbTrans344 := thrift.NewTMemoryBufferLen(len(arg343))
defer mbTrans344.Close()
_, err345 := mbTrans344.WriteString(arg343)
if err345 != nil {
Usage()
return
}
factory296 := thrift.NewTJSONProtocolFactory()
jsProt297 := factory296.GetProtocol(mbTrans294)
factory346 := thrift.NewTJSONProtocolFactory()
jsProt347 := factory346.GetProtocol(mbTrans344)
argvalue0 := aurora.NewJobConfiguration()
err298 := argvalue0.Read(jsProt297)
if err298 != nil {
err348 := argvalue0.Read(context.Background(), jsProt347)
if err348 != nil {
Usage()
return
}
@ -744,19 +743,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
flag.Usage()
}
arg299 := flag.Arg(1)
mbTrans300 := thrift.NewTMemoryBufferLen(len(arg299))
defer mbTrans300.Close()
_, err301 := mbTrans300.WriteString(arg299)
if err301 != nil {
arg349 := flag.Arg(1)
mbTrans350 := thrift.NewTMemoryBufferLen(len(arg349))
defer mbTrans350.Close()
_, err351 := mbTrans350.WriteString(arg349)
if err351 != nil {
Usage()
return
}
factory302 := thrift.NewTJSONProtocolFactory()
jsProt303 := factory302.GetProtocol(mbTrans300)
factory352 := thrift.NewTJSONProtocolFactory()
jsProt353 := factory352.GetProtocol(mbTrans350)
argvalue0 := aurora.NewJobUpdateQuery()
err304 := argvalue0.Read(jsProt303)
if err304 != nil {
err354 := argvalue0.Read(context.Background(), jsProt353)
if err354 != nil {
Usage()
return
}
@ -769,19 +768,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
flag.Usage()
}
arg305 := flag.Arg(1)
mbTrans306 := thrift.NewTMemoryBufferLen(len(arg305))
defer mbTrans306.Close()
_, err307 := mbTrans306.WriteString(arg305)
if err307 != nil {
arg355 := flag.Arg(1)
mbTrans356 := thrift.NewTMemoryBufferLen(len(arg355))
defer mbTrans356.Close()
_, err357 := mbTrans356.WriteString(arg355)
if err357 != nil {
Usage()
return
}
factory308 := thrift.NewTJSONProtocolFactory()
jsProt309 := factory308.GetProtocol(mbTrans306)
factory358 := thrift.NewTJSONProtocolFactory()
jsProt359 := factory358.GetProtocol(mbTrans356)
argvalue0 := aurora.NewJobUpdateQuery()
err310 := argvalue0.Read(jsProt309)
if err310 != nil {
err360 := argvalue0.Read(context.Background(), jsProt359)
if err360 != nil {
Usage()
return
}
@ -794,19 +793,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
flag.Usage()
}
arg311 := flag.Arg(1)
mbTrans312 := thrift.NewTMemoryBufferLen(len(arg311))
defer mbTrans312.Close()
_, err313 := mbTrans312.WriteString(arg311)
if err313 != nil {
arg361 := flag.Arg(1)
mbTrans362 := thrift.NewTMemoryBufferLen(len(arg361))
defer mbTrans362.Close()
_, err363 := mbTrans362.WriteString(arg361)
if err363 != nil {
Usage()
return
}
factory314 := thrift.NewTJSONProtocolFactory()
jsProt315 := factory314.GetProtocol(mbTrans312)
factory364 := thrift.NewTJSONProtocolFactory()
jsProt365 := factory364.GetProtocol(mbTrans362)
argvalue0 := aurora.NewJobUpdateRequest()
err316 := argvalue0.Read(jsProt315)
if err316 != nil {
err366 := argvalue0.Read(context.Background(), jsProt365)
if err366 != nil {
Usage()
return
}

View file

@ -1,5 +1,4 @@
// Autogenerated by Thrift Compiler (0.13.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package main
@ -180,19 +179,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
flag.Usage()
}
arg82 := flag.Arg(1)
mbTrans83 := thrift.NewTMemoryBufferLen(len(arg82))
defer mbTrans83.Close()
_, err84 := mbTrans83.WriteString(arg82)
if err84 != nil {
arg132 := flag.Arg(1)
mbTrans133 := thrift.NewTMemoryBufferLen(len(arg132))
defer mbTrans133.Close()
_, err134 := mbTrans133.WriteString(arg132)
if err134 != nil {
Usage()
return
}
factory85 := thrift.NewTJSONProtocolFactory()
jsProt86 := factory85.GetProtocol(mbTrans83)
factory135 := thrift.NewTJSONProtocolFactory()
jsProt136 := factory135.GetProtocol(mbTrans133)
argvalue0 := aurora.NewTaskQuery()
err87 := argvalue0.Read(jsProt86)
if err87 != nil {
err137 := argvalue0.Read(context.Background(), jsProt136)
if err137 != nil {
Usage()
return
}
@ -205,19 +204,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
flag.Usage()
}
arg88 := flag.Arg(1)
mbTrans89 := thrift.NewTMemoryBufferLen(len(arg88))
defer mbTrans89.Close()
_, err90 := mbTrans89.WriteString(arg88)
if err90 != nil {
arg138 := flag.Arg(1)
mbTrans139 := thrift.NewTMemoryBufferLen(len(arg138))
defer mbTrans139.Close()
_, err140 := mbTrans139.WriteString(arg138)
if err140 != nil {
Usage()
return
}
factory91 := thrift.NewTJSONProtocolFactory()
jsProt92 := factory91.GetProtocol(mbTrans89)
factory141 := thrift.NewTJSONProtocolFactory()
jsProt142 := factory141.GetProtocol(mbTrans139)
argvalue0 := aurora.NewTaskQuery()
err93 := argvalue0.Read(jsProt92)
if err93 != nil {
err143 := argvalue0.Read(context.Background(), jsProt142)
if err143 != nil {
Usage()
return
}
@ -230,19 +229,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
flag.Usage()
}
arg94 := flag.Arg(1)
mbTrans95 := thrift.NewTMemoryBufferLen(len(arg94))
defer mbTrans95.Close()
_, err96 := mbTrans95.WriteString(arg94)
if err96 != nil {
arg144 := flag.Arg(1)
mbTrans145 := thrift.NewTMemoryBufferLen(len(arg144))
defer mbTrans145.Close()
_, err146 := mbTrans145.WriteString(arg144)
if err146 != nil {
Usage()
return
}
factory97 := thrift.NewTJSONProtocolFactory()
jsProt98 := factory97.GetProtocol(mbTrans95)
factory147 := thrift.NewTJSONProtocolFactory()
jsProt148 := factory147.GetProtocol(mbTrans145)
argvalue0 := aurora.NewTaskQuery()
err99 := argvalue0.Read(jsProt98)
if err99 != nil {
err149 := argvalue0.Read(context.Background(), jsProt148)
if err149 != nil {
Usage()
return
}
@ -255,19 +254,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
flag.Usage()
}
arg100 := flag.Arg(1)
mbTrans101 := thrift.NewTMemoryBufferLen(len(arg100))
defer mbTrans101.Close()
_, err102 := mbTrans101.WriteString(arg100)
if err102 != nil {
arg150 := flag.Arg(1)
mbTrans151 := thrift.NewTMemoryBufferLen(len(arg150))
defer mbTrans151.Close()
_, err152 := mbTrans151.WriteString(arg150)
if err152 != nil {
Usage()
return
}
factory103 := thrift.NewTJSONProtocolFactory()
jsProt104 := factory103.GetProtocol(mbTrans101)
factory153 := thrift.NewTJSONProtocolFactory()
jsProt154 := factory153.GetProtocol(mbTrans151)
argvalue0 := aurora.NewJobKey()
err105 := argvalue0.Read(jsProt104)
if err105 != nil {
err155 := argvalue0.Read(context.Background(), jsProt154)
if err155 != nil {
Usage()
return
}
@ -300,19 +299,19 @@ func main() {
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
flag.Usage()
}
arg108 := flag.Arg(1)
mbTrans109 := thrift.NewTMemoryBufferLen(len(arg108))
defer mbTrans109.Close()
_, err110 := mbTrans109.WriteString(arg108)
if err110 != nil {
arg158 := flag.Arg(1)
mbTrans159 := thrift.NewTMemoryBufferLen(len(arg158))
defer mbTrans159.Close()
_, err160 := mbTrans159.WriteString(arg158)
if err160 != nil {
Usage()
return
}
factory111 := thrift.NewTJSONProtocolFactory()
jsProt112 := factory111.GetProtocol(mbTrans109)
factory161 := thrift.NewTJSONProtocolFactory()
jsProt162 := factory161.GetProtocol(mbTrans159)
argvalue0 := aurora.NewJobConfiguration()
err113 := argvalue0.Read(jsProt112)
if err113 != nil {
err163 := argvalue0.Read(context.Background(), jsProt162)
if err163 != nil {
Usage()
return
}
@ -325,19 +324,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
flag.Usage()
}
arg114 := flag.Arg(1)
mbTrans115 := thrift.NewTMemoryBufferLen(len(arg114))
defer mbTrans115.Close()
_, err116 := mbTrans115.WriteString(arg114)
if err116 != nil {
arg164 := flag.Arg(1)
mbTrans165 := thrift.NewTMemoryBufferLen(len(arg164))
defer mbTrans165.Close()
_, err166 := mbTrans165.WriteString(arg164)
if err166 != nil {
Usage()
return
}
factory117 := thrift.NewTJSONProtocolFactory()
jsProt118 := factory117.GetProtocol(mbTrans115)
factory167 := thrift.NewTJSONProtocolFactory()
jsProt168 := factory167.GetProtocol(mbTrans165)
argvalue0 := aurora.NewJobUpdateQuery()
err119 := argvalue0.Read(jsProt118)
if err119 != nil {
err169 := argvalue0.Read(context.Background(), jsProt168)
if err169 != nil {
Usage()
return
}
@ -350,19 +349,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
flag.Usage()
}
arg120 := flag.Arg(1)
mbTrans121 := thrift.NewTMemoryBufferLen(len(arg120))
defer mbTrans121.Close()
_, err122 := mbTrans121.WriteString(arg120)
if err122 != nil {
arg170 := flag.Arg(1)
mbTrans171 := thrift.NewTMemoryBufferLen(len(arg170))
defer mbTrans171.Close()
_, err172 := mbTrans171.WriteString(arg170)
if err172 != nil {
Usage()
return
}
factory123 := thrift.NewTJSONProtocolFactory()
jsProt124 := factory123.GetProtocol(mbTrans121)
factory173 := thrift.NewTJSONProtocolFactory()
jsProt174 := factory173.GetProtocol(mbTrans171)
argvalue0 := aurora.NewJobUpdateQuery()
err125 := argvalue0.Read(jsProt124)
if err125 != nil {
err175 := argvalue0.Read(context.Background(), jsProt174)
if err175 != nil {
Usage()
return
}
@ -375,19 +374,19 @@ func main() {
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
flag.Usage()
}
arg126 := flag.Arg(1)
mbTrans127 := thrift.NewTMemoryBufferLen(len(arg126))
defer mbTrans127.Close()
_, err128 := mbTrans127.WriteString(arg126)
if err128 != nil {
arg176 := flag.Arg(1)
mbTrans177 := thrift.NewTMemoryBufferLen(len(arg176))
defer mbTrans177.Close()
_, err178 := mbTrans177.WriteString(arg176)
if err178 != nil {
Usage()
return
}
factory129 := thrift.NewTJSONProtocolFactory()
jsProt130 := factory129.GetProtocol(mbTrans127)
factory179 := thrift.NewTJSONProtocolFactory()
jsProt180 := factory179.GetProtocol(mbTrans177)
argvalue0 := aurora.NewJobUpdateRequest()
err131 := argvalue0.Read(jsProt130)
if err131 != nil {
err181 := argvalue0.Read(context.Background(), jsProt180)
if err181 != nil {
Usage()
return
}

View file

@ -1,6 +1,6 @@
#! /bin/bash
THRIFT_VER=0.13.0
THRIFT_VER=0.14.0
if [[ $(thrift -version | grep -e $THRIFT_VER -c) -ne 1 ]]; then
echo "Warning: This wrapper has only been tested with version" $THRIFT_VER;

10
go.mod
View file

@ -3,12 +3,10 @@ module github.com/paypal/gorealis
go 1.13
require (
github.com/apache/thrift v0.13.0
github.com/davecgh/go-spew v1.1.0
github.com/apache/thrift v0.14.0
github.com/davecgh/go-spew v1.1.0 // indirect
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a
github.com/stretchr/testify v1.2.0
github.com/stretchr/testify v1.7.0
)
replace github.com/apache/thrift v0.13.0 => github.com/ridv/thrift v0.13.2

30
go.sum Normal file
View file

@ -0,0 +1,30 @@
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.14.0 h1:vqZ2DP42i8th2OsgCcYZkirtbzvpZEFx53LiWDJXIAs=
github.com/apache/thrift v0.14.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e h1:+RHxT/gm0O3UF7nLJbdNzAmULvCFt4XfXHWzh3XI/zs=
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/ridv/thrift v0.12.1 h1:b80V1Oa2Mbd++jrlJZbJsIybO5/MCfbXKzd1A5v4aSo=
github.com/ridv/thrift v0.12.1/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
github.com/ridv/thrift v0.13.1 h1:/8XnTRUqJJeiuqoL7mfnJQmXQa4GJn9tUCiP7+i6Y9o=
github.com/ridv/thrift v0.13.1/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
github.com/ridv/thrift v0.13.2 h1:Q3Smr8poXd7VkWZPHvdJZzlQCJO+b5W37ECfoUL4qHc=
github.com/ridv/thrift v0.13.2/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a h1:EYL2xz/Zdo0hyqdZMXR4lmT2O11jDLTPCEqIe/FR6W4=
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.0 h1:LThGCOvhuJic9Gyd1VBCkhyUXmO8vKaBFvBsJ2k03rg=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

21
helpers.go Normal file
View file

@ -0,0 +1,21 @@
package realis
import (
"context"
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
func (r *realisClient) jobExists(key aurora.JobKey) (bool, error) {
resp, err := r.client.GetConfigSummary(context.TODO(), &key)
if err != nil {
return false, err
}
return resp == nil ||
resp.GetResult_() == nil ||
resp.GetResult_().GetConfigSummaryResult_() == nil ||
resp.GetResult_().GetConfigSummaryResult_().GetSummary() == nil ||
resp.GetResponseCode() != aurora.ResponseCode_OK,
nil
}

6
job.go
View file

@ -62,6 +62,7 @@ type Job interface {
PartitionPolicy(policy *aurora.PartitionPolicy) Job
Tier(tier string) Job
SlaPolicy(policy *aurora.SlaPolicy) Job
Priority(priority int32) Job
}
type resourceType int
@ -383,3 +384,8 @@ func (j *AuroraJob) SlaPolicy(policy *aurora.SlaPolicy) Job {
return j
}
func (j *AuroraJob) Priority(priority int32) Job {
j.jobConfig.TaskConfig.Priority = priority
return j
}
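A hedged sketch of the new setter in the fluent builder this package exposes; the names and resource values below are placeholders rather than anything taken from this change, and the other builder methods are assumed to be unchanged:

job := realis.NewJob().
	Environment("prod").
	Role("vagrant").
	Name("hello_world_from_gorealis").
	CPU(0.25).
	RAM(64).
	Disk(100).
	IsService(false).
	InstanceCount(1).
	Priority(100) // stored on the underlying TaskConfig, exactly as the setter above does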

View file

@ -117,7 +117,7 @@ func (m *Monitor) JobUpdateQuery(
}
}
// AutoPaused monitor is a special monitor for auto pause enabled batch updates. This monitor ensures that the update
// AutoPausedUpdateMonitor is a special monitor for auto pause enabled batch updates. This monitor ensures that the update
// being monitored is capable of auto pausing and has auto pausing enabled. After verifying this information,
// the monitor watches for the job to enter the ROLL_FORWARD_PAUSED state and calculates the current batch
// the update is in using information from the update configuration.
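A rough usage sketch of this monitor, assuming r is a connected client, updateKey is the aurora.JobUpdateKey of an update started with auto-pause enabled, and the standard library log package is imported; the interval and timeout values are illustrative:

monitor := &realis.Monitor{Client: r}
// Block until the update reaches ROLL_FORWARD_PAUSED (or finishes as ROLLED_FORWARD),
// then report which batch it stopped at; poll every 5 seconds with a 240 second budget.
curBatch, err := monitor.AutoPausedUpdateMonitor(updateKey, 5, 240)
if err != nil {
	log.Fatal("update never reached a pausable state: ", err)
}
log.Printf("update is currently at batch %d", curBatch)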
@ -168,7 +168,8 @@ func (m *Monitor) AutoPausedUpdateMonitor(key aurora.JobUpdateKey, interval, tim
return -1, err
}
if summary[0].State.Status != aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED {
if !(summary[0].State.Status == aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED ||
summary[0].State.Status == aurora.JobUpdateStatus_ROLLED_FORWARD) {
return -1, errors.Errorf("update is in a terminal state %v", summary[0].State.Status)
}
@ -183,7 +184,7 @@ func (m *Monitor) AutoPausedUpdateMonitor(key aurora.JobUpdateKey, interval, tim
return calculateCurrentBatch(int32(len(updatingInstances)), batchSizes), nil
}
// Monitor a Job until all instances enter one of the LIVE_STATES
// Instances will monitor a Job until all instances enter one of the LIVE_STATES
func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval, timeout int) (bool, error) {
return m.ScheduleStatus(key, instances, LiveStates, interval, timeout)
}
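For completeness, the plain Instances monitor is the one typically paired with CreateJob; a minimal sketch, assuming r is a connected client and job is a realis.Job that was just submitted (polling values are placeholders):

monitor := &realis.Monitor{Client: r}
// Wait (interval 5, timeout 50) until every instance reaches one of the LIVE states.
live, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50)
if err != nil || !live {
	// The job never became fully live; clean up rather than leave a half-scheduled job behind.
	_, _ = r.KillJob(job.JobKey())
}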

233
realis.go
View file

@ -35,7 +35,7 @@ import (
"github.com/paypal/gorealis/response"
)
const version = "1.21.1"
const version = "1.24.1"
// Realis is an interface that defines the various APIs that may be used to communicate with
// the Apache Aurora scheduler.
@ -65,7 +65,6 @@ type Realis interface {
RollbackJobUpdate(key aurora.JobUpdateKey, message string) (*aurora.Response, error)
ScheduleCronJob(auroraJob Job) (*aurora.Response, error)
StartJobUpdate(updateJob *UpdateJob, message string) (*aurora.Response, error)
PauseJobUpdate(key *aurora.JobUpdateKey, message string) (*aurora.Response, error)
ResumeJobUpdate(key *aurora.JobUpdateKey, message string) (*aurora.Response, error)
PulseJobUpdate(key *aurora.JobUpdateKey) (*aurora.Response, error)
@ -130,6 +129,15 @@ var defaultBackoff = Backoff{
Jitter: 0.1,
}
var defaultSlaPolicy = aurora.SlaPolicy{
PercentageSlaPolicy: &aurora.PercentageSlaPolicy{
Percentage: 66,
DurationSecs: 300,
},
}
const defaultSlaDrainTimeoutSecs = 900
// ClientOption is an alias for a function that modifies the realis config object
type ClientOption func(*config)
@ -386,7 +394,7 @@ func NewRealisClient(options ...ClientOption) (Realis, error) {
return nil, errors.New("incomplete Options -- url, cluster.json, or Zookeeper address required")
}
config.logger.Println("Addresss obtained: ", url)
config.logger.Println("Address obtained: ", url)
url, err = validateAuroraURL(url)
if err != nil {
return nil, errors.Wrap(err, "invalid Aurora url")
@ -428,7 +436,8 @@ func NewRealisClient(options ...ClientOption) (Realis, error) {
adminClient: aurora.NewAuroraAdminClientFactory(config.transport, config.protoFactory),
logger: LevelLogger{logger: config.logger, debug: config.debug, trace: config.trace},
lock: &sync.Mutex{},
transport: config.transport}, nil
transport: config.transport,
}, nil
}
// GetDefaultClusterFromZKUrl creates a cluster object from a Zookeeper URL. This is deprecated in favor of using

@ -485,11 +494,11 @@ func defaultTTransport(url string, timeoutMs int, config *config) (thrift.TTrans
})
if err != nil {
return nil, errors.Wrap(err, "Error creating transport")
return nil, errors.Wrap(err, "error creating transport")
}
if err := trans.Open(); err != nil {
return nil, errors.Wrapf(err, "Error opening connection to %s", url)
return nil, errors.Wrapf(err, "error opening connection to %s", url)
}
return trans, nil
@ -532,16 +541,17 @@ func (r *realisClient) ReestablishConn() error {
return nil
}
// Releases resources associated with the realis client.
// Close releases resources associated with the realis client.
func (r *realisClient) Close() {
r.lock.Lock()
defer r.lock.Unlock()
r.transport.Close()
// The return value of Close here is ignored on purpose because there's nothing that can be done if it fails.
_ = r.transport.Close()
}
// Uses predefined set of states to retrieve a set of active jobs in Apache Aurora.
// GetInstanceIds uses a predefined set of states to retrieve the instance IDs of a job's active tasks from the Aurora Scheduler.
func (r *realisClient) GetInstanceIds(key *aurora.JobKey, states []aurora.ScheduleStatus) ([]int32, error) {
taskQ := &aurora.TaskQuery{
JobKeys: []*aurora.JobKey{{Environment: key.Environment, Role: key.Role, Name: key.Name}},
@ -554,7 +564,9 @@ func (r *realisClient) GetInstanceIds(key *aurora.JobKey, states []aurora.Schedu
false,
func() (*aurora.Response, error) {
return r.client.GetTasksWithoutConfigs(context.TODO(), taskQ)
})
},
nil,
)
// If we encountered an error we couldn't recover from by retrying, return an error to the user
if retryErr != nil {
@ -579,10 +591,16 @@ func (r *realisClient) GetJobUpdateSummaries(jobUpdateQuery *aurora.JobUpdateQue
false,
func() (*aurora.Response, error) {
return r.readonlyClient.GetJobUpdateSummaries(context.TODO(), jobUpdateQuery)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error getting job update summaries from Aurora Scheduler")
return resp, errors.Wrap(retryErr, "error getting job update summaries from Aurora Scheduler")
}
if resp.GetResult_() == nil || resp.GetResult_().GetGetJobUpdateSummariesResult_() == nil {
return nil, errors.New("unexpected response from scheduler")
}
return resp, nil
@ -596,7 +614,9 @@ func (r *realisClient) GetJobs(role string) (*aurora.Response, *aurora.GetJobsRe
false,
func() (*aurora.Response, error) {
return r.readonlyClient.GetJobs(context.TODO(), role)
})
},
nil,
)
if retryErr != nil {
return nil, result, errors.Wrap(retryErr, "error getting Jobs from Aurora Scheduler")
@ -609,7 +629,7 @@ func (r *realisClient) GetJobs(role string) (*aurora.Response, *aurora.GetJobsRe
return resp, result, nil
}
// Kill specific instances of a job.
// KillInstances kills specific instances of a job.
func (r *realisClient) KillInstances(key *aurora.JobKey, instances ...int32) (*aurora.Response, error) {
r.logger.debugPrintf("KillTasks Thrift Payload: %+v %v\n", key, instances)
@ -617,7 +637,9 @@ func (r *realisClient) KillInstances(key *aurora.JobKey, instances ...int32) (*a
false,
func() (*aurora.Response, error) {
return r.client.KillTasks(context.TODO(), key, instances, "")
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending Kill command to Aurora Scheduler")
@ -629,7 +651,7 @@ func (r *realisClient) RealisConfig() *config {
return r.config
}
// Sends a kill message to the scheduler for all active tasks under a job.
// KillJob kills all instances of a job.
func (r *realisClient) KillJob(key *aurora.JobKey) (*aurora.Response, error) {
r.logger.debugPrintf("KillTasks Thrift Payload: %+v\n", key)
@ -639,7 +661,9 @@ func (r *realisClient) KillJob(key *aurora.JobKey) (*aurora.Response, error) {
func() (*aurora.Response, error) {
// Giving the KillTasks thrift call an empty set tells the Aurora scheduler to kill all active shards
return r.client.KillTasks(context.TODO(), key, nil, "")
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending Kill command to Aurora Scheduler")
@ -647,7 +671,7 @@ func (r *realisClient) KillJob(key *aurora.JobKey) (*aurora.Response, error) {
return resp, nil
}
// Sends a create job message to the scheduler with a specific job configuration.
// CreateJob sends a create job message to the scheduler with a specific job configuration.
// Although this API is able to create service jobs, it is better to use CreateService instead
// as that API uses the update thrift call which has a few extra features available.
// Use this API to create ad-hoc jobs.
@ -655,19 +679,36 @@ func (r *realisClient) CreateJob(auroraJob Job) (*aurora.Response, error) {
r.logger.debugPrintf("CreateJob Thrift Payload: %+v\n", auroraJob.JobConfig())
// Response is checked by the thrift retry code
resp, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.client.CreateJob(context.TODO(), auroraJob.JobConfig())
})
},
// On a client timeout, attempt to verify that the payload made it to the Scheduler by
// trying to get the config summary for the job key
func() (*aurora.Response, bool) {
exists, err := r.jobExists(*auroraJob.JobKey())
if err != nil {
r.logger.Print("verification failed ", err)
}
if exists {
return &aurora.Response{ResponseCode: aurora.ResponseCode_OK}, true
}
return nil, false
},
)
if retryErr != nil {
return resp, errors.Wrap(retryErr, "error sending Create command to Aurora Scheduler")
}
return resp, nil
}
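With the verification hook in place, a client-side timeout on CreateJob is re-checked against the scheduler before the call is declared failed. A hedged end-to-end sketch; the URL, credentials, and job values are placeholders, and the client options shown are the ones this package is commonly constructed with:

package main

import (
	"log"

	realis "github.com/paypal/gorealis"
)

func main() {
	r, err := realis.NewRealisClient(
		realis.SchedulerUrl("http://localhost:8081"),
		realis.BasicAuth("aurora", "secret"),
		realis.TimeoutMS(20000),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	job := realis.NewJob().
		Environment("prod").
		Role("vagrant").
		Name("create_job_example").
		CPU(0.1).
		RAM(16).
		Disk(32).
		InstanceCount(1)

	// On a client-side timeout the retry layer now re-checks the scheduler (via the job's
	// config summary) before retrying, so an error here means the create could not be confirmed.
	if _, err := r.CreateJob(job); err != nil {
		log.Fatal("create failed: ", err)
	}
}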
// This API uses an update thrift call to create the services giving a few more robust features.
// CreateService uses the scheduler's updating mechanism to create a job.
func (r *realisClient) CreateService(
auroraJob Job,
settings *aurora.JobUpdateSettings) (*aurora.Response, *aurora.StartJobUpdateResult_, error) {
@ -678,17 +719,12 @@ func (r *realisClient) CreateService(
resp, err := r.StartJobUpdate(update, "")
if err != nil {
if IsTimeout(err) {
return resp, nil, err
return nil, nil, err
}
return resp, nil, errors.Wrap(err, "unable to create service")
}
if resp.GetResult_() != nil {
return resp, resp.GetResult_().GetStartJobUpdateResult_(), nil
}
return resp, nil, errors.New("results object is nil")
return resp, resp.GetResult_().StartJobUpdateResult_, nil
}
func (r *realisClient) ScheduleCronJob(auroraJob Job) (*aurora.Response, error) {
@ -698,7 +734,9 @@ func (r *realisClient) ScheduleCronJob(auroraJob Job) (*aurora.Response, error)
false,
func() (*aurora.Response, error) {
return r.client.ScheduleCronJob(context.TODO(), auroraJob.JobConfig())
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending Cron Job Schedule message to Aurora Scheduler")
@ -714,7 +752,9 @@ func (r *realisClient) DescheduleCronJob(key *aurora.JobKey) (*aurora.Response,
false,
func() (*aurora.Response, error) {
return r.client.DescheduleCronJob(context.TODO(), key)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending Cron Job De-schedule message to Aurora Scheduler")
@ -732,7 +772,9 @@ func (r *realisClient) StartCronJob(key *aurora.JobKey) (*aurora.Response, error
false,
func() (*aurora.Response, error) {
return r.client.StartCronJob(context.TODO(), key)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending Start Cron Job message to Aurora Scheduler")
@ -741,7 +783,7 @@ func (r *realisClient) StartCronJob(key *aurora.JobKey) (*aurora.Response, error
}
// Restarts specific instances specified
// RestartInstances restarts the specified instances of a Job.
func (r *realisClient) RestartInstances(key *aurora.JobKey, instances ...int32) (*aurora.Response, error) {
r.logger.debugPrintf("RestartShards Thrift Payload: %+v %v\n", key, instances)
@ -749,7 +791,9 @@ func (r *realisClient) RestartInstances(key *aurora.JobKey, instances ...int32)
false,
func() (*aurora.Response, error) {
return r.client.RestartShards(context.TODO(), key, instances)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending Restart command to Aurora Scheduler")
@ -757,12 +801,12 @@ func (r *realisClient) RestartInstances(key *aurora.JobKey, instances ...int32)
return resp, nil
}
// Restarts all active tasks under a job configuration.
// RestartJob restarts all active instances of a Job.
func (r *realisClient) RestartJob(key *aurora.JobKey) (*aurora.Response, error) {
instanceIds, err1 := r.GetInstanceIds(key, aurora.ACTIVE_STATES)
if err1 != nil {
return nil, errors.Wrap(err1, "Could not retrieve relevant task instance IDs")
return nil, errors.Wrap(err1, "could not retrieve relevant task instance IDs")
}
r.logger.debugPrintf("RestartShards Thrift Payload: %+v %v\n", key, instanceIds)
@ -772,7 +816,9 @@ func (r *realisClient) RestartJob(key *aurora.JobKey) (*aurora.Response, error)
false,
func() (*aurora.Response, error) {
return r.client.RestartShards(context.TODO(), key, instanceIds)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending Restart command to Aurora Scheduler")
@ -784,7 +830,7 @@ func (r *realisClient) RestartJob(key *aurora.JobKey) (*aurora.Response, error)
return nil, errors.New("No tasks in the Active state")
}
// Update all tasks under a job configuration. Currently gorealis doesn't support for canary deployments.
// StartJobUpdate updates all instances under a job configuration.
func (r *realisClient) StartJobUpdate(updateJob *UpdateJob, message string) (*aurora.Response, error) {
r.logger.debugPrintf("StartJobUpdate Thrift Payload: %+v %v\n", updateJob, message)
@ -793,20 +839,56 @@ func (r *realisClient) StartJobUpdate(updateJob *UpdateJob, message string) (*au
true,
func() (*aurora.Response, error) {
return r.client.StartJobUpdate(context.TODO(), updateJob.req, message)
})
},
func() (*aurora.Response, bool) {
summariesResp, err := r.readonlyClient.GetJobUpdateSummaries(
context.TODO(),
&aurora.JobUpdateQuery{
JobKey: updateJob.JobKey(),
UpdateStatuses: aurora.ACTIVE_JOB_UPDATE_STATES,
Limit: 1,
})
if err != nil {
r.logger.Print("verification failed ", err)
return nil, false
}
summaries := response.JobUpdateSummaries(summariesResp)
if len(summaries) == 0 {
return nil, false
}
return &aurora.Response{
ResponseCode: aurora.ResponseCode_OK,
Result_: &aurora.Result_{
StartJobUpdateResult_: &aurora.StartJobUpdateResult_{
UpdateSummary: summaries[0],
Key: summaries[0].Key,
},
},
}, true
},
)
if retryErr != nil {
// A timeout took place when attempting this call, attempt to recover
if IsTimeout(retryErr) {
return resp, retryErr
return nil, retryErr
}
return resp, errors.Wrap(retryErr, "error sending StartJobUpdate command to Aurora Scheduler")
}
if resp.GetResult_() == nil {
return resp, errors.New("no result in response")
}
return resp, nil
}
// Abort Job Update on Aurora. Requires the updateId which can be obtained on the Aurora web UI.
// AbortJobUpdate terminates a job update in the scheduler.
// It requires the updateId which can be obtained on the Aurora web UI.
// This API is meant to be synchronous. It will attempt to wait until the update transitions to the aborted state.
// However, if the job update does not transition to the ABORT state an error will be returned.
func (r *realisClient) AbortJobUpdate(updateKey aurora.JobUpdateKey, message string) (*aurora.Response, error) {
@ -817,7 +899,9 @@ func (r *realisClient) AbortJobUpdate(updateKey aurora.JobUpdateKey, message str
false,
func() (*aurora.Response, error) {
return r.client.AbortJobUpdate(context.TODO(), &updateKey, message)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending AbortJobUpdate command to Aurora Scheduler")
@ -834,7 +918,8 @@ func (r *realisClient) AbortJobUpdate(updateKey aurora.JobUpdateKey, message str
return resp, err
}
// Pause Job Update. UpdateID is returned from StartJobUpdate or the Aurora web UI.
// PauseJobUpdate pauses the progress of an ongoing update.
// The UpdateID value needed for this function is returned from StartJobUpdate or can be obtained from the Aurora web UI.
func (r *realisClient) PauseJobUpdate(updateKey *aurora.JobUpdateKey, message string) (*aurora.Response, error) {
r.logger.debugPrintf("PauseJobUpdate Thrift Payload: %+v %v\n", updateKey, message)
@ -843,7 +928,9 @@ func (r *realisClient) PauseJobUpdate(updateKey *aurora.JobUpdateKey, message st
false,
func() (*aurora.Response, error) {
return r.client.PauseJobUpdate(context.TODO(), updateKey, message)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending PauseJobUpdate command to Aurora Scheduler")
@ -852,7 +939,7 @@ func (r *realisClient) PauseJobUpdate(updateKey *aurora.JobUpdateKey, message st
return resp, nil
}
// Resume Paused Job Update. UpdateID is returned from StartJobUpdate or the Aurora web UI.
// ResumeJobUpdate resumes a previously Paused Job update.
func (r *realisClient) ResumeJobUpdate(updateKey *aurora.JobUpdateKey, message string) (*aurora.Response, error) {
r.logger.debugPrintf("ResumeJobUpdate Thrift Payload: %+v %v\n", updateKey, message)
@ -861,7 +948,9 @@ func (r *realisClient) ResumeJobUpdate(updateKey *aurora.JobUpdateKey, message s
false,
func() (*aurora.Response, error) {
return r.client.ResumeJobUpdate(context.TODO(), updateKey, message)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending ResumeJobUpdate command to Aurora Scheduler")
@ -870,7 +959,7 @@ func (r *realisClient) ResumeJobUpdate(updateKey *aurora.JobUpdateKey, message s
return resp, nil
}
// Pulse Job Update on Aurora. UpdateID is returned from StartJobUpdate or the Aurora web UI.
// PulseJobUpdate sends a pulse to an ongoing Job update.
func (r *realisClient) PulseJobUpdate(updateKey *aurora.JobUpdateKey) (*aurora.Response, error) {
r.logger.debugPrintf("PulseJobUpdate Thrift Payload: %+v\n", updateKey)
@ -879,7 +968,9 @@ func (r *realisClient) PulseJobUpdate(updateKey *aurora.JobUpdateKey) (*aurora.R
false,
func() (*aurora.Response, error) {
return r.client.PulseJobUpdate(context.TODO(), updateKey)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending PulseJobUpdate command to Aurora Scheduler")
@ -888,8 +979,7 @@ func (r *realisClient) PulseJobUpdate(updateKey *aurora.JobUpdateKey) (*aurora.R
return resp, nil
}
// Scale up the number of instances under a job configuration using the configuration for specific
// instance to scale up.
// AddInstances scales up the number of instances for a Job.
func (r *realisClient) AddInstances(instKey aurora.InstanceKey, count int32) (*aurora.Response, error) {
r.logger.debugPrintf("AddInstances Thrift Payload: %+v %v\n", instKey, count)
@ -898,7 +988,9 @@ func (r *realisClient) AddInstances(instKey aurora.InstanceKey, count int32) (*a
false,
func() (*aurora.Response, error) {
return r.client.AddInstances(context.TODO(), &instKey, count)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error sending AddInstances command to Aurora Scheduler")
@ -907,15 +999,15 @@ func (r *realisClient) AddInstances(instKey aurora.InstanceKey, count int32) (*a
}
// Scale down the number of instances under a job configuration using the configuration of a specific instance
// RemoveInstances scales down the number of instances for a Job.
func (r *realisClient) RemoveInstances(key *aurora.JobKey, count int32) (*aurora.Response, error) {
instanceIds, err := r.GetInstanceIds(key, aurora.ACTIVE_STATES)
if err != nil {
return nil, errors.Wrap(err, "RemoveInstances: Could not retrieve relevant instance IDs")
return nil, errors.Wrap(err, "could not retrieve relevant instance IDs")
}
if len(instanceIds) < int(count) {
return nil, errors.Errorf("Insufficient active instances available for killing: "+
return nil, errors.Errorf("insufficient active instances available for killing: "+
" Instances to be killed %d Active instances %d", count, len(instanceIds))
}
@ -928,7 +1020,7 @@ func (r *realisClient) RemoveInstances(key *aurora.JobKey, count int32) (*aurora
return r.KillInstances(key, instanceIds[:count]...)
}
// Get information about task including a fully hydrated task configuration object
// GetTaskStatus gets information about a task, including a fully hydrated task configuration object.
func (r *realisClient) GetTaskStatus(query *aurora.TaskQuery) ([]*aurora.ScheduledTask, error) {
r.logger.debugPrintf("GetTasksStatus Thrift Payload: %+v\n", query)
@ -937,7 +1029,9 @@ func (r *realisClient) GetTaskStatus(query *aurora.TaskQuery) ([]*aurora.Schedul
false,
func() (*aurora.Response, error) {
return r.client.GetTasksStatus(context.TODO(), query)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error querying Aurora Scheduler for task status")
@ -946,7 +1040,7 @@ func (r *realisClient) GetTaskStatus(query *aurora.TaskQuery) ([]*aurora.Schedul
return response.ScheduleStatusResult(resp).GetTasks(), nil
}
// Get pending reason
// GetPendingReason returns the reason why an instance of a Job has not been scheduled.
func (r *realisClient) GetPendingReason(query *aurora.TaskQuery) ([]*aurora.PendingReason, error) {
r.logger.debugPrintf("GetPendingReason Thrift Payload: %+v\n", query)
@ -955,7 +1049,9 @@ func (r *realisClient) GetPendingReason(query *aurora.TaskQuery) ([]*aurora.Pend
false,
func() (*aurora.Response, error) {
return r.client.GetPendingReason(context.TODO(), query)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error querying Aurora Scheduler for pending Reasons")
@ -970,7 +1066,8 @@ func (r *realisClient) GetPendingReason(query *aurora.TaskQuery) ([]*aurora.Pend
return pendingReasons, nil
}
// Get information about task including without a task configuration object
// GetTasksWithoutConfigs gets information about tasks without including the task configuration object.
// This is a more lightweight version of GetTaskStatus but contains less information as a result.
func (r *realisClient) GetTasksWithoutConfigs(query *aurora.TaskQuery) ([]*aurora.ScheduledTask, error) {
r.logger.debugPrintf("GetTasksWithoutConfigs Thrift Payload: %+v\n", query)
@ -979,7 +1076,9 @@ func (r *realisClient) GetTasksWithoutConfigs(query *aurora.TaskQuery) ([]*auror
false,
func() (*aurora.Response, error) {
return r.client.GetTasksWithoutConfigs(context.TODO(), query)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error querying Aurora Scheduler for task status without configs")
@ -989,7 +1088,7 @@ func (r *realisClient) GetTasksWithoutConfigs(query *aurora.TaskQuery) ([]*auror
}
// Get the task configuration from the aurora scheduler for a job
// FetchTaskConfig gets the task configuration from the aurora scheduler for a job.
func (r *realisClient) FetchTaskConfig(instKey aurora.InstanceKey) (*aurora.TaskConfig, error) {
taskQ := &aurora.TaskQuery{
Role: &instKey.JobKey.Role,
@ -1005,7 +1104,9 @@ func (r *realisClient) FetchTaskConfig(instKey aurora.InstanceKey) (*aurora.Task
false,
func() (*aurora.Response, error) {
return r.client.GetTasksStatus(context.TODO(), taskQ)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "error querying Aurora Scheduler for task configuration")
@ -1014,7 +1115,7 @@ func (r *realisClient) FetchTaskConfig(instKey aurora.InstanceKey) (*aurora.Task
tasks := response.ScheduleStatusResult(resp).GetTasks()
if len(tasks) == 0 {
return nil, errors.Errorf("Instance %d for jobkey %s/%s/%s doesn't exist",
return nil, errors.Errorf("instance %d for jobkey %s/%s/%s doesn't exist",
instKey.InstanceId,
instKey.JobKey.Environment,
instKey.JobKey.Role,
@ -1033,10 +1134,12 @@ func (r *realisClient) JobUpdateDetails(updateQuery aurora.JobUpdateQuery) (*aur
false,
func() (*aurora.Response, error) {
return r.client.GetJobUpdateDetails(context.TODO(), &updateQuery)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "Unable to get job update details")
return nil, errors.Wrap(retryErr, "unable to get job update details")
}
return resp, nil
@ -1050,7 +1153,9 @@ func (r *realisClient) RollbackJobUpdate(key aurora.JobUpdateKey, message string
false,
func() (*aurora.Response, error) {
return r.client.RollbackJobUpdate(context.TODO(), &key, message)
})
},
nil,
)
if retryErr != nil {
return nil, errors.Wrap(retryErr, "unable to roll back job update")

View file

@ -30,7 +30,9 @@ func (r *realisClient) DrainHosts(hosts ...string) (*aurora.Response, *aurora.Dr
false,
func() (*aurora.Response, error) {
return r.adminClient.DrainHosts(context.TODO(), drainList)
})
},
nil,
)
if retryErr != nil {
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
@ -56,6 +58,18 @@ func (r *realisClient) SLADrainHosts(
return nil, errors.New("no hosts provided to drain")
}
if policy == nil || policy.CountSetFieldsSlaPolicy() == 0 {
policy = &defaultSlaPolicy
r.logger.Printf("Warning: start draining with default sla policy %v", policy)
}
if timeout < 0 {
r.logger.Printf("Warning: timeout %d secs is invalid, draining with default timeout %d secs",
timeout,
defaultSlaDrainTimeoutSecs)
timeout = defaultSlaDrainTimeoutSecs
}
drainList := aurora.NewHosts()
drainList.HostNames = hosts
@ -65,7 +79,9 @@ func (r *realisClient) SLADrainHosts(
false,
func() (*aurora.Response, error) {
return r.adminClient.SlaDrainHosts(context.TODO(), drainList, policy, timeout)
})
},
nil,
)
if retryErr != nil {
return result, errors.Wrap(retryErr, "Unable to recover connection")
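The practical upshot of the new defaults, as a hedged sketch; the host name is a placeholder, r is a connected client with admin access, and the standard library log package is assumed:

// A nil policy now falls back to the package default PercentageSlaPolicy (66% for 300 seconds),
// and a negative timeout falls back to defaultSlaDrainTimeoutSecs (900 seconds).
result, err := r.SLADrainHosts(nil, 30, "agent-one.example.com")
if err != nil {
	log.Fatal("unable to SLA-drain host: ", err)
}
log.Printf("drain result: %v", result)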
@ -95,7 +111,9 @@ func (r *realisClient) StartMaintenance(hosts ...string) (*aurora.Response, *aur
false,
func() (*aurora.Response, error) {
return r.adminClient.StartMaintenance(context.TODO(), hostList)
})
},
nil,
)
if retryErr != nil {
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
@ -125,7 +143,9 @@ func (r *realisClient) EndMaintenance(hosts ...string) (*aurora.Response, *auror
false,
func() (*aurora.Response, error) {
return r.adminClient.EndMaintenance(context.TODO(), hostList)
})
},
nil,
)
if retryErr != nil {
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
@ -157,7 +177,9 @@ func (r *realisClient) MaintenanceStatus(hosts ...string) (*aurora.Response, *au
false,
func() (*aurora.Response, error) {
return r.adminClient.MaintenanceStatus(context.TODO(), hostList)
})
},
nil,
)
if retryErr != nil {
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
@ -182,7 +204,9 @@ func (r *realisClient) SetQuota(role string, cpu *float64, ramMb *int64, diskMb
false,
func() (*aurora.Response, error) {
return r.adminClient.SetQuota(context.TODO(), role, quota)
})
},
nil,
)
if retryErr != nil {
return resp, errors.Wrap(retryErr, "Unable to set role quota")
@ -198,7 +222,9 @@ func (r *realisClient) GetQuota(role string) (*aurora.Response, error) {
false,
func() (*aurora.Response, error) {
return r.adminClient.GetQuota(context.TODO(), role)
})
},
nil,
)
if retryErr != nil {
return resp, errors.Wrap(retryErr, "Unable to get role quota")
@ -213,7 +239,9 @@ func (r *realisClient) Snapshot() error {
false,
func() (*aurora.Response, error) {
return r.adminClient.Snapshot(context.TODO())
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "Unable to recover connection")
@ -229,7 +257,9 @@ func (r *realisClient) PerformBackup() error {
false,
func() (*aurora.Response, error) {
return r.adminClient.PerformBackup(context.TODO())
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "Unable to recover connection")
@ -244,7 +274,9 @@ func (r *realisClient) ForceImplicitTaskReconciliation() error {
false,
func() (*aurora.Response, error) {
return r.adminClient.TriggerImplicitTaskReconciliation(context.TODO())
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "Unable to recover connection")
@ -265,7 +297,9 @@ func (r *realisClient) ForceExplicitTaskReconciliation(batchSize *int32) error {
_, retryErr := r.thriftCallWithRetries(false,
func() (*aurora.Response, error) {
return r.adminClient.TriggerExplicitTaskReconciliation(context.TODO(), settings)
})
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "Unable to recover connection")

View file

@ -750,6 +750,53 @@ func TestRealisClient_SLADrainHosts(t *testing.T) {
5,
10)
assert.NoError(t, err)
// slaDrainHosts goes with default policy if no policy is specified
_, err = r.SLADrainHosts(nil, 30, hosts...)
require.NoError(t, err, "unable to drain host with SLA policy")
// Monitor change to DRAINING and DRAINED mode
hostResults, err = monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1,
50)
assert.NoError(t, err)
assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
_, _, err = r.EndMaintenance(hosts...)
require.NoError(t, err)
// Monitor change to DRAINING and DRAINED mode
_, err = monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
5,
10)
assert.NoError(t, err)
_, err = r.SLADrainHosts(&aurora.SlaPolicy{}, 30, hosts...)
require.NoError(t, err, "unable to drain host with SLA policy")
// Monitor change to DRAINING and DRAINED mode
hostResults, err = monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1,
50)
assert.NoError(t, err)
assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
_, _, err = r.EndMaintenance(hosts...)
require.NoError(t, err)
// Monitor change to DRAINING and DRAINED mode
_, err = monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
5,
10)
assert.NoError(t, err)
}
// Test multiple go routines using a single connection
@ -1059,8 +1106,10 @@ func TestRealisClient_BatchAwareAutoPause(t *testing.T) {
assert.Equal(t, i, curStep)
_, err = r.ResumeJobUpdate(&key, "auto resuming test")
require.NoError(t, err)
if i != len(updateGroups)-1 {
_, err = r.ResumeJobUpdate(&key, "auto resuming test")
require.NoError(t, err)
}
}
_, err = r.KillJob(job.JobKey())

View file

@ -36,6 +36,10 @@ func ScheduleStatusResult(resp *aurora.Response) *aurora.ScheduleStatusResult_ {
}
func JobUpdateSummaries(resp *aurora.Response) []*aurora.JobUpdateSummary {
if resp.GetResult_() == nil || resp.GetResult_().GetGetJobUpdateSummariesResult_() == nil {
return nil
}
return resp.GetResult_().GetGetJobUpdateSummariesResult_().GetUpdateSummaries()
}

163
retry.go
View file

@ -114,10 +114,19 @@ func ExponentialBackoff(backoff Backoff, logger logger, condition ConditionFunc)
type auroraThriftCall func() (resp *aurora.Response, err error)
// verifyOnTimeout defines the type of function that will be used to verify whether a Thrift call to the Scheduler
// made it to the scheduler or not. In general, these types of functions will have to interact with the scheduler
// through the very same Thrift API which previously encountered a time out from the client.
// This means that the functions themselves should be kept to a minimum number of Thrift calls.
// It should also be noted that this is a best effort mechanism and
// is likely to fail for the same reasons that the original call failed.
type verifyOnTimeout func() (*aurora.Response, bool)
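As a concrete illustration of the shape such a callback takes, here is a hedged sketch modeled on the CreateJob verification added in this change; the jobKey variable is assumed to be captured from the surrounding call, and callers with no cheap way to confirm delivery simply pass nil to keep the previous behaviour:

verify := func() (*aurora.Response, bool) {
	// Ask the scheduler, over the same Thrift API, whether the original payload actually landed.
	exists, err := r.jobExists(jobKey)
	if err != nil || !exists {
		// Could not confirm; let the retry loop carry on as before.
		return nil, false
	}
	// Confirmed: hand back a minimal, mimicked OK response so the retry loop can stop.
	return &aurora.Response{ResponseCode: aurora.ResponseCode_OK}, true
}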
// Duplicates the functionality of ExponentialBackoff but is specifically targeted towards ThriftCalls.
func (r *realisClient) thriftCallWithRetries(
returnOnTimeout bool,
thriftCall auroraThriftCall) (*aurora.Response, error) {
thriftCall auroraThriftCall,
verifyOnTimeout verifyOnTimeout) (*aurora.Response, error) {
var resp *aurora.Response
var clientErr error
@ -157,42 +166,22 @@ func (r *realisClient) thriftCallWithRetries(
r.logger.tracePrintf("Aurora Thrift Call ended resp: %v clientErr: %v", resp, clientErr)
}()
// Check if our thrift call is returning an error. This is a retryable event as we don't know
// if it was caused by network issues.
// Check if our thrift call is returning an error.
if clientErr != nil {
// Print out the error to the user
r.logger.Printf("Client Error: %v", clientErr)
// Determine if error is a temporary URL error by going up the stack
e, ok := clientErr.(thrift.TTransportException)
if ok {
r.logger.debugPrint("Encountered a transport exception")
temporary, timedout := isConnectionError(clientErr)
if !temporary && r.RealisConfig().failOnPermanentErrors {
return nil, errors.Wrap(clientErr, "permanent connection error")
}
e, ok := e.Err().(*url.Error)
if ok {
// EOF error occurs when the server closes the read buffer of the client. This is common
// when the server is overloaded and should be retried. All other errors that are permanent
// will not be retried.
if e.Err != io.EOF && !e.Temporary() && r.RealisConfig().failOnPermanentErrors {
return nil, errors.Wrap(clientErr, "permanent connection error")
}
// Corner case where thrift payload was received by Aurora but connection timedout before Aurora was
// able to reply. In this case we will return whatever response was received and a TimedOut behaving
// error. Users can take special action on a timeout by using IsTimedout and reacting accordingly.
if e.Timeout() {
timeouts++
r.logger.debugPrintf(
"Client closed connection (timedout) %d times before server responded, "+
"consider increasing connection timeout",
timeouts)
if returnOnTimeout {
return resp, newTimedoutError(errors.New("client connection closed before server answer"))
}
}
}
// There exists a corner case where thrift payload was received by Aurora but
// connection timed out before Aurora was able to reply.
// Users can take special action on a timeout by using IsTimeout and reacting accordingly
// if they have configured the client to return on a timeout.
if timedout && returnOnTimeout {
return resp, newTimedoutError(errors.New("client connection closed before server answer"))
}
// In the future, reestablish connection should be able to check if it is actually possible
@ -202,48 +191,71 @@ func (r *realisClient) thriftCallWithRetries(
if reestablishErr != nil {
r.logger.debugPrintf("error re-establishing connection ", reestablishErr)
}
} else {
// If there was no client error, but the response is nil, something went wrong.
// Ideally, we'll never encounter this but we're placing a safeguard here.
if resp == nil {
return nil, errors.New("response from aurora is nil")
// If users did not opt for a return on timeout in order to react to a timedout error,
// attempt to verify that the call made it to the scheduler after the connection was re-established.
if timedout {
timeouts++
r.logger.debugPrintf(
"Client closed connection %d times before server responded, "+
"consider increasing connection timeout",
timeouts)
// Allow caller to provide a function which checks if the original call was successful before
// it timed out.
if verifyOnTimeout != nil {
if verifyResp, ok := verifyOnTimeout(); ok {
r.logger.Print("verified that the call went through successfully after a client timeout")
// Response here might be different than the original as it is no longer constructed
// by the scheduler but mimicked.
// This is OK since the scheduler is very unlikely to change responses at this point in its
// development cycle but we must be careful to not return an incorrectly constructed response.
return verifyResp, nil
}
}
}
// Check Response Code from thrift and make a decision to continue retrying or not.
switch responseCode := resp.GetResponseCode(); responseCode {
// Retry the thrift payload
continue
}
// If the thrift call succeeded, stop retrying
case aurora.ResponseCode_OK:
return resp, nil
// If there was no client error, but the response is nil, something went wrong.
// Ideally, we'll never encounter this but we're placing a safeguard here.
if resp == nil {
return nil, errors.New("response from aurora is nil")
}
// If the response code is transient, continue retrying
case aurora.ResponseCode_ERROR_TRANSIENT:
r.logger.Println("Aurora replied with Transient error code, retrying")
continue
// Check Response Code from thrift and make a decision to continue retrying or not.
switch responseCode := resp.GetResponseCode(); responseCode {
// Failure scenarios, these indicate a bad payload or a bad config. Stop retrying.
case aurora.ResponseCode_INVALID_REQUEST,
aurora.ResponseCode_ERROR,
aurora.ResponseCode_AUTH_FAILED,
aurora.ResponseCode_JOB_UPDATING_ERROR:
r.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
return resp, errors.New(response.CombineMessage(resp))
// If the thrift call succeeded, stop retrying
case aurora.ResponseCode_OK:
return resp, nil
// The only case that should fall down to here is a WARNING response code.
// It is currently not used as a response in the scheduler so it is unknown how to handle it.
default:
r.logger.debugPrintf("unhandled response code %v received from Aurora\n", responseCode)
return nil, errors.Errorf("unhandled response code from Aurora %v", responseCode.String())
}
// If the response code is transient, continue retrying
case aurora.ResponseCode_ERROR_TRANSIENT:
r.logger.Println("Aurora replied with Transient error code, retrying")
continue
// Failure scenarios, these indicate a bad payload or a bad config. Stop retrying.
case aurora.ResponseCode_INVALID_REQUEST,
aurora.ResponseCode_ERROR,
aurora.ResponseCode_AUTH_FAILED,
aurora.ResponseCode_JOB_UPDATING_ERROR:
r.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
return resp, errors.New(response.CombineMessage(resp))
// The only case that should fall down to here is a WARNING response code.
// It is currently not used as a response in the scheduler so it is unknown how to handle it.
default:
r.logger.debugPrintf("unhandled response code %v received from Aurora\n", responseCode)
return nil, errors.Errorf("unhandled response code from Aurora %v", responseCode.String())
}
}
r.logger.debugPrintf("it took %v retries to complete this operation\n", curStep)
if curStep > 1 {
r.config.logger.Printf("retried this thrift call %d time(s)", curStep)
r.config.logger.Printf("this thrift call was retried %d time(s)", curStep)
}
// Provide more information to the user wherever possible.
@ -253,3 +265,30 @@ func (r *realisClient) thriftCallWithRetries(
return nil, newRetryError(errors.New("ran out of retries"), curStep)
}
// isConnectionError processes the error received by the client.
// The return values indicate whether this was determined to be a temporary error
// and whether it was determined to be a timeout error.
func isConnectionError(err error) (bool, bool) {
// Determine if error is a temporary URL error by going up the stack
transportException, ok := err.(thrift.TTransportException)
if !ok {
return false, false
}
urlError, ok := transportException.Err().(*url.Error)
if !ok {
return false, false
}
// EOF error occurs when the server closes the read buffer of the client. This is common
// when the server is overloaded and we consider it temporary.
// All other errors that are not temporary according to the Temporary() member function
// are considered permanent.
if urlError.Err != io.EOF && !urlError.Temporary() {
return false, false
}
return true, urlError.Timeout()
}

View file

@ -1,4 +1,4 @@
#!/bin/bash
# Since we run our docker compose setup in bridge mode to be able to run on MacOS, we have to launch a Docker container within the bridge network in order to avoid any routing issues.
docker run --rm -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10-stretch go test -v github.com/paypal/gorealis $@
docker run --rm -t -w /gorealis -v $GOPATH/pkg:/go/pkg -v $(pwd):/gorealis --network gorealis_aurora_cluster golang:1.16-buster go test -v github.com/paypal/gorealis $@

View file

@ -29,7 +29,7 @@ var TerminalStates = make(map[aurora.ScheduleStatus]bool)
// ActiveJobUpdateStates - States a Job Update may be in where it is considered active.
var ActiveJobUpdateStates = make(map[aurora.JobUpdateStatus]bool)
// TerminalJobUpdateStates returns a slice containing all the terminal states an update may end up in.
// TerminalUpdateStates returns a slice containing all the terminal states an update may be in.
// This is a function in order to avoid having a slice that can be accidentally mutated.
func TerminalUpdateStates() []aurora.JobUpdateStatus {
return []aurora.JobUpdateStatus{