Compare commits

75 commits

7b0c75450b
235f854087
4fc4953ec4
119d1c429b
a8a7cf779f
98f2cab4a2
09628391cc
f72fdacfb0
55cf9bcb70
fe4a0dc06e
d67b8ca1d7
ecd59f7a8d
5d75dcc15e
9a70711537
203f178d68
9584266b71
6f20f5b62f
04471c6918
dbad078d95
b9db36520c
2c795debfd
c553f67d4e
461b23400c
9b3593e9d9
8d67d8c2f3
e13349db26
afcdaa84b8
51597ecb32
acbe9ad9e5
4a0cbcd770
b776bd301d
e4e8a1c0b3
71d41de2e4
84e8762495
11c71b0463
8f9a678b7d
fdd94e9bea
67b37d5a42
56b325ed80
ef421f60c3
c4691c7347
533591ab89
0c00765995
0b43a58b15
992e52eba2
0c32a7e683
e1906542a6
005980fc44
98b4061513
e00e0a0492
5836ede37b
b0c25e9013
76300782ba
c1be2fe62b
133938b307
c071e5ca62
c00b83b14c
47d955d4a4
99b03c1254
7967270b3b
54378b2d8a
59e3a7065e
cec9c001fb
366599fb80
356978cb42
3e4590dcc0
b6effe66b7
848b5f7971
d747a48626
573e45a59c
8a9a97c150
1146736c2b
c65a47f6e2
4471c62659
a23bd1b2cc
81 changed files with 10203 additions and 14531 deletions
.aurora-config/security.ini | 5 (new file)
@@ -0,0 +1,5 @@
+[users]
+aurora = secret, admin
+
+[roles]
+admin = *

.auroraversion | 2
@@ -1 +1 @@
-0.23.0
+0.21.0
.github/main.yml | 25 (vendored, deleted)
@@ -1,25 +0,0 @@
-name: CI
-
-on: [push]
-
-jobs:
-  build:
-
-    runs-on: ubuntu-latest
-
-    steps:
-    - uses: actions/checkout@v2
-    - name: Setup Go for use with actions
-      uses: actions/setup-go@v2
-      with:
-        go-version: 1.16
-    - name: Install goimports
-      run: go get golang.org/x/tools/cmd/goimports
-    - name: Set env with list of directories in repo containin go code
-      run: echo GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/") >> $GITHUB_ENV
-    - name: Run goimports check
-      run: test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
-    - name: Create aurora/mesos docker cluster
-      run: docker-compose up -d
-    - name: Run tests
-      run: go test -timeout 35m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis
.github/workflows/codeql-analysis.yml | 57 (vendored, deleted)
@@ -1,57 +0,0 @@
-# For most projects, this workflow file will not need changing; you simply need
-# to commit it to your repository.
-#
-# You may wish to alter this file to override the set of languages analyzed,
-# or to provide custom queries or build logic.
-#
-# ******** NOTE ********
-# We have attempted to detect the languages in your repository. Please check
-# the `language` matrix defined below to confirm you have the correct set of
-# supported CodeQL languages.
-#
-name: "CodeQL"
-
-on:
-  push:
-    branches: [ main ]
-  pull_request:
-    # The branches below must be a subset of the branches above
-    branches: [ main ]
-  schedule:
-    - cron: '34 4 * * 3'
-
-jobs:
-  analyze:
-    name: Analyze
-    runs-on: ubuntu-latest
-    permissions:
-      actions: read
-      contents: read
-      security-events: write
-
-    strategy:
-      fail-fast: false
-      matrix:
-        language: [ 'go' ]
-        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
-        # Learn more:
-        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
-
-    steps:
-    - name: Checkout repository
-      uses: actions/checkout@v2
-
-    # Initializes the CodeQL tools for scanning.
-    - name: Initialize CodeQL
-      uses: github/codeql-action/init@v1
-      with:
-        languages: ${{ matrix.language }}
-        # If you wish to specify custom queries, you can do so here or in a config file.
-        # By default, queries listed here will override any specified in a config file.
-        # Prefix the list here with "+" to use these queries and those in the config file.
-        # queries: ./path/to/local/query, your-org/your-repo/queries@main
-
-    - run: go build examples/client.go
-
-    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v1
.github/workflows/main.yml | 30 (vendored, deleted)
@@ -1,30 +0,0 @@
-name: CI
-
-on:
-  push:
-    branches:
-      - main
-  pull_request:
-    branches:
-      - main
-jobs:
-  build:
-
-    runs-on: ubuntu-latest
-
-    steps:
-    - uses: actions/checkout@v2
-    - name: Setup Go for use with actions
-      uses: actions/setup-go@v2
-      with:
-        go-version: 1.16
-    - name: Install goimports
-      run: go get golang.org/x/tools/cmd/goimports
-    - name: Set env with list of directories in repo containin go code
-      run: echo GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/") >> $GITHUB_ENV
-    - name: Run goimports check
-      run: test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
-    - name: Create aurora/mesos docker cluster
-      run: docker-compose up -d
-    - name: Run tests
-      run: go test -timeout 35m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis
.golangci.yml | 71 (deleted)
@@ -1,71 +0,0 @@
-# This file contains all available configuration options
-# with their default values.
-
-# options for analysis running
-run:
-  # default concurrency is a available CPU number
-  concurrency: 4
-
-  # timeout for analysis, e.g. 30s, 5m, default is 1m
-  deadline: 1m
-
-  # exit code when at least one issue was found, default is 1
-  issues-exit-code: 1
-
-  # include test files or not, default is true
-  tests: true
-
-  skip-dirs:
-    - gen-go/
-
-# output configuration options
-output:
-  # colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
-  format: colored-line-number
-
-  # print lines of code with issue, default is true
-  print-issued-lines: true
-
-  # print linter name in the end of issue text, default is true
-  print-linter-name: true
-
-
-# all available settings of specific linters
-linters-settings:
-  errcheck:
-    # report about not checking of errors in type assetions: `a := b.(MyStruct)`;
-    # default is false: such cases aren't reported by default.
-    check-type-assertions: true
-
-    # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
-    # default is false: such cases aren't reported by default.
-    check-blank: true
-  govet:
-    # report about shadowed variables
-    check-shadowing: true
-  goconst:
-    # minimal length of string constant, 3 by default
-    min-len: 3
-    # minimal occurrences count to trigger, 3 by default
-    min-occurrences: 2
-  misspell:
-    # Correct spellings using locale preferences for US or UK.
-    # Default is to use a neutral variety of English.
-    # Setting locale to US will correct the British spelling of 'colour' to 'color'.
-    locale: US
-  lll:
-    # max line length, lines longer will be reported. Default is 120.
-    # '\t' is counted as 1 character by default, and can be changed with the tab-width option
-    line-length: 120
-    # tab width in spaces. Default to 1.
-    tab-width: 4
-
-linters:
-  enable:
-    - govet
-    - goimports
-    - golint
-    - lll
-    - goconst
-  enable-all: false
-  fast: false
.travis.yml | 26 (new file)
@@ -0,0 +1,26 @@
+sudo: required
+
+language: go
+
+go:
+  - "1.11.x"
+
+env:
+  global:
+    - GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/")
+
+services:
+  - docker
+
+before_install:
+  - go get golang.org/x/tools/cmd/goimports
+  - test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
+
+install:
+  - docker-compose up -d
+
+script:
+  - go test -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
CHANGELOG.md | 62 (deleted)
@@ -1,62 +0,0 @@
-1.25.1 (unreleased)
-
-1.25.0
-
-* Add priority api
-
-1.24.0
-
-* enable default sla for slaDrain
-* Changes Travis CI badge to Github Actions badge
-* Bug fix for auto paused update monitor
-* Adds support for running CI on github actions
-
-1.23.0
-
-* First release tested against Aurora Scheduler 0.23.0
-
-1.22.5
-
-* Upgrading to thrift 0.14.0
-
-1.22.4
-
-* Updates which result in a no-op now return a response value so that the caller may analyze it to determine what happened
-
-1.22.3
-
-* Contains a monitor timeout fix. Previously an error was being left unchecked which made a specific monitor timining out not be handled properly.
-
-1.22.2
-
-* Bug fix: Change in retry mechanism created a deadlock. This release reverts that particular change.
-
-1.22.1
-
-* Adding safeguards against setting multiple constraints with the same name for a single task.
-
-1.22.0
-
-* CreateService and StartJobUpdate do not continue retrying if a timeout has been encountered
-  by the HTTP client. Instead they now return an error that conforms to the Timedout interface.
-  Users can check for a Timedout error by using `realis.IsTimeout(err)`.
-* New API function VariableBatchStep has been added which returns the current batch at which
-  a Variable Batch Update configured Update is currently in.
-* Added new PauseUpdateMonitor which monitors an update until it is an `ROLL_FORWARD_PAUSED` state.
-* Added variableBatchStep command to sample client to be used for testing new VariableBatchStep api.
-* JobUpdateStatus has changed function signature from:
-  `JobUpdateStatus(updateKey aurora.JobUpdateKey, desiredStatuses map[aurora.JobUpdateStatus]bool, interval, timeout time.Duration) (aurora.JobUpdateStatus, error)`
-  to
-  `JobUpdateStatus(updateKey aurora.JobUpdateKey, desiredStatuses []aurora.JobUpdateStatus, interval, timeout time.Duration) (aurora.JobUpdateStatus, error)`
-* Added TerminalUpdateStates function which returns an slice containing all UpdateStates which are considered terminal states.
-
-1.21.0
-
-* Version numbering change. Future versions will be labled X.Y.Z where X is the major version, Y is the Aurora version the library has been tested against (e.g. 21 -> 0.21.0), and X is the minor revision.
-* Moved to Thrift 0.12.0 code generator and go library.
-* `aurora.ACTIVE_STATES`, `aurora.SLAVE_ASSIGNED_STATES`, `aurora.LIVE_STATES`, `aurora.TERMINAL_STATES`, `aurora.ACTIVE_JOB_UPDATE_STATES`, `aurora.AWAITNG_PULSE_JOB_UPDATE_STATES` are all now generated as a slices.
-* Please use `realis.ActiveStates`, `realis.SlaveAssignedStates`,`realis.LiveStates`, `realis.TerminalStates`, `realis.ActiveJobUpdateStates`, `realis.AwaitingPulseJobUpdateStates` in their places when map representations are needed.
-* `GetInstanceIds(key *aurora.JobKey, states map[aurora.ScheduleStatus]bool) (map[int32]bool, error)` has changed signature to `GetInstanceIds(key *aurora.JobKey, states []aurora.ScheduleStatus) ([]int32, error)`
-* Adding support for GPU as resource.
-* Changing compose environment to Aurora snapshot in order to support staggered update.
-* Adding staggered updates API.
Gopkg.lock | 60 (generated, new file)
@@ -0,0 +1,60 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  branch = "0.12.0"
+  digest = "1:89696c38cec777120b8b1bb5e2d363d655cf2e1e7d8c851919aaa0fd576d9b86"
+  name = "github.com/apache/thrift"
+  packages = ["lib/go/thrift"]
+  pruneopts = ""
+  revision = "384647d290e2e4a55a14b1b7ef1b7e66293a2c33"
+
+[[projects]]
+  digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77"
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  pruneopts = ""
+  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+  version = "v1.1.1"
+
+[[projects]]
+  digest = "1:df48fb76fb2a40edea0c9b3d960bc95e326660d82ff1114e1f88001f7a236b40"
+  name = "github.com/pkg/errors"
+  packages = ["."]
+  pruneopts = ""
+  revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
+
+[[projects]]
+  digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
+  name = "github.com/pmezard/go-difflib"
+  packages = ["difflib"]
+  pruneopts = ""
+  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:78bea5e26e82826dacc5fd64a1013a6711b7075ec8072819b89e6ad76cb8196d"
+  name = "github.com/samuel/go-zookeeper"
+  packages = ["zk"]
+  pruneopts = ""
+  revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
+
+[[projects]]
+  digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75"
+  name = "github.com/stretchr/testify"
+  packages = ["assert"]
+  pruneopts = ""
+  revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
+  version = "v1.2.2"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  input-imports = [
+    "github.com/apache/thrift/lib/go/thrift",
+    "github.com/pkg/errors",
+    "github.com/samuel/go-zookeeper/zk",
+    "github.com/stretchr/testify/assert",
+  ]
+  solver-name = "gps-cdcl"
+  solver-version = 1
Gopkg.toml | 16 (new file)
@@ -0,0 +1,16 @@
+[[constraint]]
+  name = "github.com/apache/thrift"
+  branch = "0.12.0"
+
+[[constraint]]
+  name = "github.com/pkg/errors"
+  revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
+
+[[constraint]]
+  name = "github.com/samuel/go-zookeeper"
+  revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
+
+[[constraint]]
+  name = "github.com/stretchr/testify"
+  version = "1.2.0"
+
README.md
@@ -1,8 +1,6 @@
-# gorealis [](https://godoc.org/github.com/paypal/gorealis)  [](https://codecov.io/gh/paypal/gorealis)
+# gorealis [](https://godoc.org/github.com/paypal/gorealis) [](https://travis-ci.org/paypal/gorealis) [](https://codecov.io/gh/paypal/gorealis/branch/master-v2.0)

-Version 1 of Go library for interacting with [Aurora Scheduler](https://github.com/aurora-scheduler/aurora).
-
-Version 2 of this library can be found [here](https://github.com/aurora-scheduler/gorealis).
+Go library for interacting with [Apache Aurora](https://github.com/apache/aurora).

 ### Aurora version compatibility
 Please see [.auroraversion](./.auroraversion) to see the latest Aurora version against which this
@@ -16,7 +14,7 @@ library has been tested.

 ## Projects using gorealis

-* [australis](https://github.com/aurora-scheduler/australis)
+* [australis](https://github.com/rdelval/australis)

 ## Contributions
 Contributions are always welcome. Please raise an issue to discuss a contribution before it is made.
auroraAPI.thrift
@@ -716,40 +716,9 @@ struct JobUpdateKey {
   2: string id
 }

-/** Limits the amount of active changes being made to instances to groupSize. */
-struct QueueJobUpdateStrategy {
-  1: i32 groupSize
-}
-
-/** Similar to Queue strategy but will not start a new group until all instances in an active
- * group have finished updating.
- */
-struct BatchJobUpdateStrategy {
-  1: i32 groupSize
-  /* Update will pause automatically after each batch completes */
-  2: bool autopauseAfterBatch
-}
-
-/** Same as Batch strategy but each time an active group completes, the size of the next active
- * group may change.
- */
-struct VariableBatchJobUpdateStrategy {
-  1: list<i32> groupSizes
-  /* Update will pause automatically after each batch completes */
-  2: bool autopauseAfterBatch
-}
-
-union JobUpdateStrategy {
-  1: QueueJobUpdateStrategy queueStrategy
-  2: BatchJobUpdateStrategy batchStrategy
-  3: VariableBatchJobUpdateStrategy varBatchStrategy
-}
-
 /** Job update thresholds and limits. */
 struct JobUpdateSettings {
-  /** Deprecated, please set value inside of desired update strategy instead.
-   * Max number of instances being updated at any given moment.
-   */
+  /** Max number of instances being updated at any given moment. */
   1: i32 updateGroupSize

   /** Max number of instance failures to tolerate before marking instance as FAILED. */
@@ -767,7 +736,7 @@ struct JobUpdateSettings {
   /** Instance IDs to act on. All instances will be affected if this is not set. */
   7: set<Range> updateOnlyTheseInstances

-  /** Deprecated, please set updateStrategy to the Batch strategy instead.
+  /**
    * If true, use updateGroupSize as strict batching boundaries, and avoid proceeding to another
    * batch until the preceding batch finishes updating.
    */
@@ -786,9 +755,6 @@ struct JobUpdateSettings {
    * differs between the old and new task configurations, updates will use the newest configuration.
    */
   10: optional bool slaAware
-
-  /** Update strategy to be used for the update. See JobUpdateStrategy for choices. */
-  11: optional JobUpdateStrategy updateStrategy
 }

 /** Event marking a state transition in job update lifecycle. */
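For orientation, the update-strategy structs removed above (present only on the `-` side) surface in Go through the thrift-generated bindings. A minimal sketch, assuming the standard thrift Go codegen naming for the gen-go package referenced elsewhere in this compare:

```go
package main

import (
	"fmt"

	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

func main() {
	// Roll out in growing batches (1, then 2, then 3 instances),
	// pausing automatically after each batch completes.
	settings := aurora.NewJobUpdateSettings()
	settings.UpdateStrategy = &aurora.JobUpdateStrategy{
		VarBatchStrategy: &aurora.VariableBatchJobUpdateStrategy{
			GroupSizes:          []int32{1, 2, 3},
			AutopauseAfterBatch: true,
		},
	}

	fmt.Println(settings)
}
```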
clusters.go | 16
@@ -21,8 +21,6 @@ import (
 	"github.com/pkg/errors"
 )

-// Cluster contains the definition of the clusters.json file used by the default Aurora
-// client for configuration
 type Cluster struct {
 	Name          string `json:"name"`
 	AgentRoot     string `json:"slave_root"`
@@ -35,8 +33,7 @@ type Cluster struct {
 	AuthMechanism string `json:"auth_mechanism"`
 }

-// LoadClusters loads clusters.json file traditionally located at /etc/aurora/clusters.json
-// for use with a gorealis client
+// Loads clusters.json file traditionally located at /etc/aurora/clusters.json
 func LoadClusters(config string) (map[string]Cluster, error) {

 	file, err := os.Open(config)
@@ -57,3 +54,14 @@ func LoadClusters(config string) (map[string]Cluster, error) {

 	return m, nil
 }
+
+func GetDefaultClusterFromZKUrl(zkURL string) *Cluster {
+	return &Cluster{
+		Name:          "defaultCluster",
+		AuthMechanism: "UNAUTHENTICATED",
+		ZK:            zkURL,
+		SchedZKPath:   "/aurora/scheduler",
+		AgentRunDir:   "latest",
+		AgentRoot:     "/var/lib/mesos",
+	}
+}
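A short sketch of the two entry points touched above, using the v2 import path that the `+` side of this compare targets; the clusters.json path is the traditional default and the ZooKeeper address matches the compose cluster later in this diff:

```go
package main

import (
	"fmt"
	"log"

	realis "github.com/paypal/gorealis/v2"
)

func main() {
	// Parse a clusters.json file from its traditional location.
	clusters, err := realis.LoadClusters("/etc/aurora/clusters.json")
	if err != nil {
		log.Fatalln(err)
	}
	for name, cluster := range clusters {
		fmt.Println(name, cluster.ZK)
	}

	// Or synthesize a default cluster definition from a ZooKeeper address.
	cluster := realis.GetDefaultClusterFromZKUrl("192.168.33.2:2181")
	fmt.Println(cluster.Name, cluster.SchedZKPath)
}
```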
clusters_test.go
@@ -18,7 +18,7 @@ import (
 	"fmt"
 	"testing"

-	realis "github.com/paypal/gorealis"
+	realis "github.com/paypal/gorealis/v2"
 	"github.com/stretchr/testify/assert"
 )

container.go | 54
@@ -15,44 +15,31 @@
 package realis

 import (
-	"github.com/paypal/gorealis/gen-go/apache/aurora"
+	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
 )

-// Container is an interface that defines a single function needed to create
-// an Aurora container type. It exists because the code must support both Mesos
-// and Docker containers.
 type Container interface {
 	Build() *aurora.Container
 }

-// MesosContainer is a Mesos style container that can be used by Aurora Jobs.
-type MesosContainer struct {
-	container *aurora.MesosContainer
-}
-
-// DockerContainer is a vanilla Docker style container that can be used by Aurora Jobs.
 type DockerContainer struct {
 	container *aurora.DockerContainer
 }

-// NewDockerContainer creates a new Aurora compatible Docker container configuration.
-func NewDockerContainer() DockerContainer {
-	return DockerContainer{container: aurora.NewDockerContainer()}
+func NewDockerContainer() *DockerContainer {
+	return &DockerContainer{container: aurora.NewDockerContainer()}
 }

-// Build creates an Aurora container based upon the configuration provided.
-func (c DockerContainer) Build() *aurora.Container {
+func (c *DockerContainer) Build() *aurora.Container {
 	return &aurora.Container{Docker: c.container}
 }

-// Image adds the name of a Docker image to be used by the Job when running.
-func (c DockerContainer) Image(image string) DockerContainer {
+func (c *DockerContainer) Image(image string) *DockerContainer {
 	c.container.Image = image
 	return c
 }

-// AddParameter adds a parameter to be passed to Docker when the container is run.
-func (c DockerContainer) AddParameter(name, value string) DockerContainer {
+func (c *DockerContainer) AddParameter(name, value string) *DockerContainer {
 	c.container.Parameters = append(c.container.Parameters, &aurora.DockerParameter{
 		Name:  name,
 		Value: value,
@@ -60,18 +47,19 @@ func (c DockerContainer) AddParameter(name, value string) DockerContainer {
 	return c
 }

-// NewMesosContainer creates a Mesos style container to be configured and built for use by an Aurora Job.
-func NewMesosContainer() MesosContainer {
-	return MesosContainer{container: aurora.NewMesosContainer()}
+type MesosContainer struct {
+	container *aurora.MesosContainer
 }

-// Build creates a Mesos style Aurora container configuration to be passed on to the Aurora Job.
-func (c MesosContainer) Build() *aurora.Container {
+func NewMesosContainer() *MesosContainer {
+	return &MesosContainer{container: aurora.NewMesosContainer()}
+}
+
+func (c *MesosContainer) Build() *aurora.Container {
 	return &aurora.Container{Mesos: c.container}
 }

-// DockerImage configures the Mesos container to use a specific Docker image when being run.
-func (c MesosContainer) DockerImage(name, tag string) MesosContainer {
+func (c *MesosContainer) DockerImage(name, tag string) *MesosContainer {
 	if c.container.Image == nil {
 		c.container.Image = aurora.NewImage()
 	}
@@ -80,12 +68,20 @@ func (c MesosContainer) DockerImage(name, tag string) MesosContainer {
 	return c
 }

-// AppcImage configures the Mesos container to use an image in the Appc format to run the container.
-func (c MesosContainer) AppcImage(name, imageID string) MesosContainer {
+func (c *MesosContainer) AppcImage(name, imageId string) *MesosContainer {
 	if c.container.Image == nil {
 		c.container.Image = aurora.NewImage()
 	}

-	c.container.Image.Appc = &aurora.AppcImage{Name: name, ImageId: imageID}
+	c.container.Image.Appc = &aurora.AppcImage{Name: name, ImageId: imageId}
 	return c
 }
+
+func (c *MesosContainer) AddVolume(hostPath, containerPath string, mode aurora.Mode) *MesosContainer {
+	c.container.Volumes = append(c.container.Volumes, &aurora.Volume{
+		HostPath:      hostPath,
+		ContainerPath: containerPath,
+		Mode:          mode})
+
+	return c
+}
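The builders above are exercised later in this compare by examples/client.go; a condensed, runnable sketch of the `+` (v2) side:

```go
package main

import (
	"fmt"

	realis "github.com/paypal/gorealis/v2"
)

func main() {
	// Vanilla Docker container: an image plus a docker-run parameter.
	docker := realis.NewDockerContainer().
		Image("python:2.7").
		AddParameter("network", "host")

	// Mesos container backed by a Docker image.
	mesos := realis.NewMesosContainer().DockerImage("python", "2.7")

	// Build() produces the aurora.Container attached to a Job.
	fmt.Println(docker.Build(), mesos.Build())
}
```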
docker-compose.yml
@@ -14,7 +14,7 @@ services:
       ipv4_address: 192.168.33.2

   master:
-    image: aurorascheduler/mesos-master:1.7.2
+    image: rdelvalle/mesos-master:1.5.1
     restart: on-failure
     ports:
       - "5050:5050"
@@ -32,13 +32,12 @@ services:
       - zk

   agent-one:
-    image: aurorascheduler/mesos-agent:1.7.2
+    image: rdelvalle/mesos-agent:1.5.1
     pid: host
     restart: on-failure
     ports:
       - "5051:5051"
     environment:
-      MESOS_ATTRIBUTES: 'zone:west'
       MESOS_MASTER: zk://192.168.33.2:2181/mesos
       MESOS_CONTAINERIZERS: docker,mesos
       MESOS_PORT: 5051
@@ -56,33 +55,8 @@ services:
     depends_on:
       - zk

-  agent-two:
-    image: aurorascheduler/mesos-agent:1.7.2
-    pid: host
-    restart: on-failure
-    ports:
-      - "5061:5061"
-    environment:
-      MESOS_ATTRIBUTES: 'zone:east'
-      MESOS_MASTER: zk://192.168.33.2:2181/mesos
-      MESOS_CONTAINERIZERS: docker,mesos
-      MESOS_HOSTNAME: localhost
-      MESOS_PORT: 5061
-      MESOS_RESOURCES: ports(*):[11000-11999]
-      MESOS_SYSTEMD_ENABLE_SUPPORT: 'false'
-      MESOS_WORK_DIR: /tmp/mesos
-    networks:
-      aurora_cluster:
-        ipv4_address: 192.168.33.5
-
-    volumes:
-      - /sys/fs/cgroup:/sys/fs/cgroup
-      - /var/run/docker.sock:/var/run/docker.sock
-    depends_on:
-      - zk
-
   aurora-one:
-    image: aurorascheduler/scheduler:0.23.0
+    image: rdelvalle/aurora:0.21.0
     pid: host
     ports:
       - "8081:8081"
@@ -91,7 +65,12 @@ services:
       CLUSTER_NAME: test-cluster
       ZK_ENDPOINTS: "192.168.33.2:2181"
      MESOS_MASTER: "zk://192.168.33.2:2181/mesos"
-      EXTRA_SCHEDULER_ARGS: "-min_required_instances_for_sla_check=1"
+      EXTRA_SCHEDULER_ARGS: >
+        -http_authentication_mechanism=BASIC
+        -shiro_realm_modules=INI_AUTHNZ
+        -shiro_ini_path=/etc/aurora/security.ini
+    volumes:
+      - ./.aurora-config:/etc/aurora
     networks:
       aurora_cluster:
         ipv4_address: 192.168.33.7
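The `+` side enables Shiro BASIC authentication backed by the security.ini added at the top of this compare (user `aurora`, password `secret`). A sketch of connecting to that scheduler with the v2 client; `realis.SchedulerUrl` is assumed here, mirroring how the sample client wires its `-url` flag:

```go
package main

import (
	"log"

	realis "github.com/paypal/gorealis/v2"
)

func main() {
	// Credentials must match .aurora-config/security.ini.
	r, err := realis.NewClient(
		realis.SchedulerUrl("http://192.168.33.7:8081"),
		realis.BasicAuth("aurora", "secret"),
	)
	if err != nil {
		log.Fatalln(err)
	}
	defer r.Close()
}
```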
docs/getting-started.md
@@ -88,6 +88,12 @@ On Ubuntu, restarting the aurora-scheduler can be achieved by running the follow
 $ sudo service aurora-scheduler restart
 ```

+### Using a custom client
+Pystachio does not yet support launching tasks using custom executors. Therefore, a custom
+client must be used in order to launch tasks using a custom executor. In this case,
+we will be using [gorealis](https://github.com/paypal/gorealis) to launch a task with
+the compose executor on Aurora.
+
 ## Using [dce-go](https://github.com/paypal/dce-go)
 Instead of manually configuring Aurora to run the docker-compose executor, one can follow the instructions provided [here](https://github.com/paypal/dce-go/blob/develop/docs/environment.md) to quickly create a DCE environment that would include mesos, aurora, golang1.7, docker, docker-compose and DCE installed.
@@ -101,12 +107,80 @@ Mesos endpoint --> http://192.168.33.8:5050

 ### Installing Go

-Follow the instructions at the official golang website: [golang.org/doc/install](https://golang.org/doc/install)
+#### Linux
+
+##### Ubuntu
+
+###### Adding a PPA and install via apt-get
+```
+$ sudo add-apt-repository ppa:ubuntu-lxc/lxd-stable
+$ sudo apt-get update
+$ sudo apt-get install golang
+```
+
+###### Configuring the GOPATH
+
+Configure the environment to be able to compile and run Go code.
+```
+$ mkdir $HOME/go
+$ echo export GOPATH=$HOME/go >> $HOME/.bashrc
+$ echo export GOROOT=/usr/lib/go >> $HOME/.bashrc
+$ echo export PATH=$PATH:$GOPATH/bin >> $HOME/.bashrc
+$ echo export PATH=$PATH:$GOROOT/bin >> $HOME/.bashrc
+```
+
+Finally we must reload the .bashrc configuration:
+```
+$ source $HOME/.bashrc
+```
+
+#### OS X
+
+One way to install go on OS X is by using [Homebrew](http://brew.sh/)
+
+##### Installing Homebrew
+Run the following command from the terminal to install Hombrew:
+```
+$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
+```
+
+##### Installing Go using Hombrew
+
+Run the following command from the terminal to install Go:
+```
+$ brew install go
+```
+
+##### Configuring the GOPATH
+
+Configure the environment to be able to compile and run Go code.
+```
+$ mkdir $HOME/go
+$ echo export GOPATH=$HOME/go >> $HOME/.profile
+$ echo export GOROOT=/usr/local/opt/go/libexec >> $HOME/.profile
+$ echo export PATH=$PATH:$GOPATH/bin >> $HOME/.profile
+$ echo export PATH=$PATH:$GOROOT/bin >> $HOME/.profile
+```
+
+Finally we must reload the .profile configuration:
+```
+$ source $HOME/.profile
+```
+
+#### Windows

-### Installing docker-compose
+Download and run the msi installer from https://golang.org/dl/

-Agents which will run dce-go will need docker-compose in order to sucessfully run the executor.
-Instructions for installing docker-compose on various platforms may be found on Docker's webiste: [docs.docker.com/compose/install/](https://docs.docker.com/compose/install/)
+## Installing Docker Compose (if manually configured Aurora)
+To show Aurora's new multi executor feature, we need to use at least one custom executor.
+In this case we will be using the [docker-compose-executor](https://github.com/mesos/docker-compose-executor).
+
+In order to run the docker-compose executor, each agent must have docker-compose installed on it.
+
+This can be done using pip:
+```
+$ sudo pip install docker-compose
+```

 ## Downloading gorealis
 Finally, we must get `gorealis` using the `go get` command:
@@ -118,7 +192,7 @@ go get github.com/paypal/gorealis
 ```

 # Creating Aurora Jobs

 ## Creating a thermos job
 To demonstrate that we are able to run jobs using different executors on the
 same scheduler, we'll first launch a thermos job using the default Aurora Client.

 We can use a sample job for this:
@@ -185,8 +259,8 @@ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compo
 ```

 If everything went according to plan, a new job will be shown in the Aurora UI.
 We can further investigate inside the Mesos task sandbox. Inside the sandbox, under
 the sample-app folder, we can find a docker-compose.yml-generated.yml. If we inspect this file,
 we can find the port at which we can find the web server we launched.

 Under Web->Ports, we find the port Mesos allocated. We can then navigate to:
@@ -195,10 +269,10 @@ Under Web->Ports, we find the port Mesos allocated. We can then navigate to:
 A message from the executor should greet us.

 ## Creating a Thermos job using gorealis
 It is also possible to create a thermos job using gorealis. To do this, however,
 a thermos payload is required. A thermos payload consists of a JSON blob that details
 the entire task as it exists inside the Aurora Scheduler. *Creating the blob is unfortunately
 out of the scope of what gorealis does*, so a thermos payload must be generated beforehand or
 retrieved from the structdump of an existing task for testing purposes.

 A sample thermos JSON payload may be found [here](../examples/thermos_payload.json) in the examples folder.
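To make the thermos-payload workflow concrete, here is a v1-style fragment assembled from the old examples/client.go shown later in this compare (it presumes that file's imports and flags):

```go
// Read a pre-generated thermos payload and attach it to a job (v1 API).
payload, err := ioutil.ReadFile("examples/thermos_payload.json")
if err != nil {
	log.Fatalln("Error reading json config file: ", err)
}

job := realis.NewJob().
	Environment("prod").
	Role("vagrant").
	Name("hello_world_from_gorealis").
	ExecutorName(aurora.AURORA_EXECUTOR_NAME).
	ExecutorData(string(payload)).
	CPU(1).
	RAM(64).
	Disk(100).
	IsService(true).
	InstanceCount(1).
	AddPorts(1)
```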
docs/leveraging-the-library.md
@@ -57,19 +57,4 @@ updateJob := realis.NewUpdateJob(job)
 updateJob.InstanceCount(1)
 updateJob.Ram(128)
 msg, err := r.UpdateJob(updateJob, "")
 ```
-
-* Handling a timeout scenario:
-
-When sending an API call to Aurora, the call may timeout at the client side.
-This means that the time limit has been reached while waiting for the scheduler
-to reply. In such a case it is recommended that the timeout is increased through
-the use of the `realis.TimeoutMS()` option.
-
-As these timeouts cannot be totally avoided, there exists a mechanism to mitigate such
-scenarios. The `StartJobUpdate` and `CreateService` API will return an error that
-implements the Timeout interface.
-
-An error can be checked to see if it is a Timeout error by using the `realis.IsTimeout()`
-function.
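A sketch of the check described in the removed passage, continuing the doc's own update snippet (v1 API):

```go
_, err := r.StartJobUpdate(updateJob, "")
if err != nil {
	if realis.IsTimeout(err) {
		// The scheduler may still have acted on the request; verify its
		// state before retrying instead of blindly resubmitting.
		log.Println("client-side timeout:", err)
	} else {
		log.Fatal(err)
	}
}
```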
docs/using-the-sample-client.md
@@ -1,6 +1,6 @@
 # Using the Sample client

 ## Usage:
 ```
 Usage of ./client:
   -cluster string
errors.go | 23
@@ -17,14 +17,12 @@ package realis
 // Using a pattern described by Dave Cheney to differentiate errors
 // https://dave.cheney.net/2016/04/27/dont-just-check-errors-handle-them-gracefully

-// Timeout errors are returned when a function is unable to continue executing due
+// Timedout errors are returned when a function is unable to continue executing due
 // to a time constraint or meeting a set number of retries.
 type timeout interface {
 	Timedout() bool
 }

-// IsTimeout returns true if the error being passed as an argument implements the Timeout interface
-// and the Timedout function returns true.
 func IsTimeout(err error) bool {
 	temp, ok := err.(timeout)
 	return ok && temp.Timedout()
@@ -63,42 +61,41 @@ func (r *retryErr) RetryCount() int {
 	return r.retryCount
 }

-// ToRetryCount is a helper function for testing verification to avoid whitebox testing
+// Helper function for testing verification to avoid whitebox testing
 // as well as keeping retryErr as a private.
 // Should NOT be used under any other context.
 func ToRetryCount(err error) *retryErr {
 	if retryErr, ok := err.(*retryErr); ok {
 		return retryErr
+	} else {
+		return nil
 	}
-	return nil
 }

 func newRetryError(err error, retryCount int) *retryErr {
 	return &retryErr{error: err, timedout: true, retryCount: retryCount}
 }

-// Temporary errors indicate that the action may or should be retried.
+// Temporary errors indicate that the action may and should be retried.
 type temporary interface {
 	Temporary() bool
 }

-// IsTemporary indicates whether the error passed in as an argument implements the temporary interface
-// and if the Temporary function returns true.
 func IsTemporary(err error) bool {
 	temp, ok := err.(temporary)
 	return ok && temp.Temporary()
 }

-type temporaryErr struct {
+type TemporaryErr struct {
 	error
 	temporary bool
 }

-func (t *temporaryErr) Temporary() bool {
+func (t *TemporaryErr) Temporary() bool {
 	return t.temporary
 }

-// NewTemporaryError creates a new error which satisfies the Temporary interface.
-func NewTemporaryError(err error) *temporaryErr {
-	return &temporaryErr{error: err, temporary: true}
+// Retrying after receiving this error is advised
+func NewTemporaryError(err error) *TemporaryErr {
+	return &TemporaryErr{error: err, temporary: true}
 }
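A runnable sketch of the temporary-error contract above, on the `+` (v2) side of the diff; the wrapped message is illustrative:

```go
package main

import (
	"errors"
	"fmt"

	realis "github.com/paypal/gorealis/v2"
)

func main() {
	// Wrap a transient failure so callers know a retry is reasonable.
	err := realis.NewTemporaryError(errors.New("scheduler not yet elected"))

	if realis.IsTemporary(err) {
		fmt.Println("temporary error, retry advised:", err)
	}
}
```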
@ -17,24 +17,22 @@ package main
|
||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
"log"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
realis "github.com/paypal/gorealis"
|
realis "github.com/paypal/gorealis/v2"
|
||||||
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
|
||||||
"github.com/paypal/gorealis/response"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var cmd, executor, url, clustersConfig, clusterName, updateId, username, password, zkUrl, hostList, role string
|
var cmd, executor, url, clustersConfig, clusterName, updateId, username, password, zkUrl, hostList, role string
|
||||||
var caCertsPath string
|
var caCertsPath string
|
||||||
var clientKey, clientCert string
|
var clientKey, clientCert string
|
||||||
|
|
||||||
var ConnectionTimeout = 20000
|
var ConnectionTimeout = 20 * time.Second
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
flag.StringVar(&cmd, "cmd", "", "Job request type to send to Aurora Scheduler")
|
flag.StringVar(&cmd, "cmd", "", "Aurora Job request type to send to Aurora Scheduler")
|
||||||
flag.StringVar(&executor, "executor", "thermos", "Executor to use")
|
flag.StringVar(&executor, "executor", "thermos", "Executor to use")
|
||||||
flag.StringVar(&url, "url", "", "URL at which the Aurora Scheduler exists as [url]:[port]")
|
flag.StringVar(&url, "url", "", "URL at which the Aurora Scheduler exists as [url]:[port]")
|
||||||
flag.StringVar(&clustersConfig, "clusters", "", "Location of the clusters.json file used by aurora.")
|
flag.StringVar(&clustersConfig, "clusters", "", "Location of the clusters.json file used by aurora.")
|
||||||
|
@ -74,15 +72,14 @@ func init() {
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|
||||||
var job realis.Job
|
var job *realis.AuroraJob
|
||||||
var err error
|
var err error
|
||||||
var monitor *realis.Monitor
|
var r *realis.Client
|
||||||
var r realis.Realis
|
|
||||||
|
|
||||||
clientOptions := []realis.ClientOption{
|
clientOptions := []realis.ClientOption{
|
||||||
realis.BasicAuth(username, password),
|
realis.BasicAuth(username, password),
|
||||||
realis.ThriftJSON(),
|
realis.ThriftJSON(),
|
||||||
realis.TimeoutMS(ConnectionTimeout),
|
realis.Timeout(ConnectionTimeout),
|
||||||
realis.BackOff(realis.Backoff{
|
realis.BackOff(realis.Backoff{
|
||||||
Steps: 2,
|
Steps: 2,
|
||||||
Duration: 10 * time.Second,
|
Duration: 10 * time.Second,
|
||||||
|
@ -100,39 +97,36 @@ func main() {
|
||||||
}
|
}
|
||||||
|
|
||||||
if caCertsPath != "" {
|
if caCertsPath != "" {
|
||||||
clientOptions = append(clientOptions, realis.Certspath(caCertsPath))
|
clientOptions = append(clientOptions, realis.CertsPath(caCertsPath))
|
||||||
}
|
}
|
||||||
|
|
||||||
if clientKey != "" && clientCert != "" {
|
if clientKey != "" && clientCert != "" {
|
||||||
clientOptions = append(clientOptions, realis.ClientCerts(clientKey, clientCert))
|
clientOptions = append(clientOptions, realis.ClientCerts(clientKey, clientCert))
|
||||||
}
|
}
|
||||||
|
|
||||||
r, err = realis.NewRealisClient(clientOptions...)
|
r, err = realis.NewClient(clientOptions...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
monitor = &realis.Monitor{Client: r}
|
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
|
|
||||||
switch executor {
|
switch executor {
|
||||||
case "thermos":
|
case "thermos":
|
||||||
payload, err := ioutil.ReadFile("examples/thermos_payload.json")
|
thermosExec := realis.ThermosExecutor{}
|
||||||
if err != nil {
|
thermosExec.AddProcess(realis.NewThermosProcess("boostrap", "echo bootsrapping")).
|
||||||
log.Fatalln("Error reading json config file: ", err)
|
AddProcess(realis.NewThermosProcess("hello_gorealis", "while true; do echo hello world from gorealis; sleep 10; done"))
|
||||||
}
|
|
||||||
|
|
||||||
job = realis.NewJob().
|
job = realis.NewJob().
|
||||||
Environment("prod").
|
Environment("prod").
|
||||||
Role("vagrant").
|
Role("vagrant").
|
||||||
Name("hello_world_from_gorealis").
|
Name("hello_world_from_gorealis").
|
||||||
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
|
|
||||||
ExecutorData(string(payload)).
|
|
||||||
CPU(1).
|
CPU(1).
|
||||||
RAM(64).
|
RAM(64).
|
||||||
Disk(100).
|
Disk(100).
|
||||||
IsService(true).
|
IsService(true).
|
||||||
InstanceCount(1).
|
InstanceCount(1).
|
||||||
AddPorts(1)
|
AddPorts(1).
|
||||||
|
ThermosExecutor(thermosExec)
|
||||||
case "compose":
|
case "compose":
|
||||||
job = realis.NewJob().
|
job = realis.NewJob().
|
||||||
Environment("prod").
|
Environment("prod").
|
||||||
|
@ -166,14 +160,13 @@ func main() {
|
||||||
switch cmd {
|
switch cmd {
|
||||||
case "create":
|
case "create":
|
||||||
fmt.Println("Creating job")
|
fmt.Println("Creating job")
|
||||||
resp, err := r.CreateJob(job)
|
err := r.CreateJob(job)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
fmt.Println(resp.String())
|
|
||||||
|
|
||||||
if ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50); !ok || mErr != nil {
|
if ok, mErr := r.MonitorInstances(job.JobKey(), job.GetInstanceCount(), 5*time.Second, 50*time.Second); !ok || mErr != nil {
|
||||||
_, err := r.KillJob(job.JobKey())
|
err := r.KillJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
@ -183,18 +176,17 @@ func main() {
|
||||||
case "createService":
|
case "createService":
|
||||||
// Create a service with three instances using the update API instead of the createJob API
|
// Create a service with three instances using the update API instead of the createJob API
|
||||||
fmt.Println("Creating service")
|
fmt.Println("Creating service")
|
||||||
settings := realis.NewUpdateSettings()
|
settings := realis.JobUpdateFromAuroraTask(job.AuroraTask()).InstanceCount(3)
|
||||||
job.InstanceCount(3)
|
|
||||||
resp, result, err := r.CreateService(job, settings)
|
result, err := r.CreateService(settings)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Println("error: ", err)
|
log.Fatal("error: ", err)
|
||||||
log.Fatal("response: ", resp.String())
|
|
||||||
}
|
}
|
||||||
fmt.Println(result.String())
|
fmt.Println(result.String())
|
||||||
|
|
||||||
if ok, mErr := monitor.JobUpdate(*result.GetKey(), 5, 180); !ok || mErr != nil {
|
if ok, mErr := r.MonitorJobUpdate(*result.GetKey(), 5*time.Second, 180*time.Second); !ok || mErr != nil {
|
||||||
_, err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out")
|
err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out")
|
||||||
_, err = r.KillJob(job.JobKey())
|
err = r.KillJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -205,14 +197,13 @@ func main() {
|
||||||
fmt.Println("Creating a docker based job")
|
fmt.Println("Creating a docker based job")
|
||||||
container := realis.NewDockerContainer().Image("python:2.7").AddParameter("network", "host")
|
container := realis.NewDockerContainer().Image("python:2.7").AddParameter("network", "host")
|
||||||
job.Container(container)
|
job.Container(container)
|
||||||
resp, err := r.CreateJob(job)
|
err := r.CreateJob(job)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
fmt.Println(resp.String())
|
|
||||||
|
|
||||||
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
|
if ok, err := r.MonitorInstances(job.JobKey(), job.GetInstanceCount(), 10*time.Second, 300*time.Second); !ok || err != nil {
|
||||||
_, err := r.KillJob(job.JobKey())
|
err := r.KillJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -222,14 +213,13 @@ func main() {
|
||||||
fmt.Println("Creating a docker based job")
|
fmt.Println("Creating a docker based job")
|
||||||
container := realis.NewMesosContainer().DockerImage("python", "2.7")
|
container := realis.NewMesosContainer().DockerImage("python", "2.7")
|
||||||
job.Container(container)
|
job.Container(container)
|
||||||
resp, err := r.CreateJob(job)
|
err := r.CreateJob(job)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
fmt.Println(resp.String())
|
|
||||||
|
|
||||||
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
|
if ok, err := r.MonitorInstances(job.JobKey(), job.GetInstanceCount(), 10*time.Second, 300*time.Second); !ok || err != nil {
|
||||||
_, err := r.KillJob(job.JobKey())
|
err := r.KillJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -240,50 +230,44 @@ func main() {
|
||||||
// Cron config
|
// Cron config
|
||||||
job.CronSchedule("* * * * *")
|
job.CronSchedule("* * * * *")
|
||||||
job.IsService(false)
|
job.IsService(false)
|
||||||
resp, err := r.ScheduleCronJob(job)
|
err := r.ScheduleCronJob(job)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
fmt.Println(resp.String())
|
|
||||||
|
|
||||||
case "startCron":
|
case "startCron":
|
||||||
fmt.Println("Starting a Cron job")
|
fmt.Println("Starting a Cron job")
|
||||||
resp, err := r.StartCronJob(job.JobKey())
|
err := r.StartCronJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
fmt.Println(resp.String())
|
|
||||||
|
|
||||||
case "descheduleCron":
|
case "descheduleCron":
|
||||||
fmt.Println("Descheduling a Cron job")
|
fmt.Println("Descheduling a Cron job")
|
||||||
resp, err := r.DescheduleCronJob(job.JobKey())
|
err := r.DescheduleCronJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
fmt.Println(resp.String())
|
|
||||||
|
|
||||||
case "kill":
|
case "kill":
|
||||||
fmt.Println("Killing job")
|
fmt.Println("Killing job")
|
||||||
|
|
||||||
resp, err := r.KillJob(job.JobKey())
|
err := r.KillJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if ok, err := monitor.Instances(job.JobKey(), 0, 5, 50); !ok || err != nil {
|
if ok, err := r.MonitorInstances(job.JobKey(), 0, 5*time.Second, 50*time.Second); !ok || err != nil {
|
||||||
log.Fatal("Unable to kill all instances of job")
|
log.Fatal("Unable to kill all instances of job")
|
||||||
}
|
}
|
||||||
fmt.Println(resp.String())
|
|
||||||
|
|
||||||
case "restart":
|
case "restart":
|
||||||
fmt.Println("Restarting job")
|
fmt.Println("Restarting job")
|
||||||
resp, err := r.RestartJob(job.JobKey())
|
err := r.RestartJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(resp.String())
|
|
||||||
|
|
||||||
case "liveCount":
|
case "liveCount":
|
||||||
fmt.Println("Getting instance count")
|
fmt.Println("Getting instance count")
|
||||||
|
|
||||||
@@ -302,106 +286,110 @@ func main() {
 log.Fatal(err)
 }

-fmt.Println("Number of live instances: ", len(live))
+fmt.Println("Active instances: ", live)

 case "flexUp":
 fmt.Println("Flexing up job")

-numOfInstances := int32(4)
+numOfInstances := 4

 live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
 if err != nil {
 log.Fatal(err)
 }
-currInstances := int32(len(live))
+currInstances := len(live)
 fmt.Println("Current num of instances: ", currInstances)
-resp, err := r.AddInstances(aurora.InstanceKey{
-JobKey: job.JobKey(),
+key := job.JobKey()
+err = r.AddInstances(aurora.InstanceKey{
+JobKey: &key,
 InstanceId: live[0],
 },
-numOfInstances)
+int32(numOfInstances))

 if err != nil {
 log.Fatal(err)
 }

-if ok, err := monitor.Instances(job.JobKey(), currInstances+numOfInstances, 5, 50); !ok || err != nil {
+if ok, err := r.MonitorInstances(job.JobKey(), int32(currInstances+numOfInstances), 5*time.Second, 50*time.Second); !ok || err != nil {
 fmt.Println("Flexing up failed")
 }
-fmt.Println(resp.String())

 case "flexDown":
 fmt.Println("Flexing down job")

-numOfInstances := int32(2)
+numOfInstances := 2

 live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
 if err != nil {
 log.Fatal(err)
 }
-currInstances := int32(len(live))
+currInstances := len(live)
 fmt.Println("Current num of instances: ", currInstances)

-resp, err := r.RemoveInstances(job.JobKey(), numOfInstances)
+err = r.RemoveInstances(job.JobKey(), numOfInstances)
 if err != nil {
 log.Fatal(err)
 }

-if ok, err := monitor.Instances(job.JobKey(), currInstances-numOfInstances, 5, 100); !ok || err != nil {
+if ok, err := r.MonitorInstances(job.JobKey(), int32(currInstances-numOfInstances), 5*time.Second, 100*time.Second); !ok || err != nil {
 fmt.Println("flexDown failed")
 }

-fmt.Println(resp.String())

 case "update":
 fmt.Println("Updating a job with with more RAM and to 5 instances")
 live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
 if err != nil {
 log.Fatal(err)
 }

+key := job.JobKey()
 taskConfig, err := r.FetchTaskConfig(aurora.InstanceKey{
-JobKey: job.JobKey(),
+JobKey: &key,
 InstanceId: live[0],
 })
 if err != nil {
 log.Fatal(err)
 }
-updateJob := realis.NewDefaultUpdateJob(taskConfig)
-updateJob.InstanceCount(5).RAM(128)
+updateJob := realis.JobUpdateFromConfig(taskConfig).InstanceCount(5).RAM(128)

-resp, err := r.StartJobUpdate(updateJob, "")
+result, err := r.StartJobUpdate(updateJob, "")
 if err != nil {
 log.Fatal(err)
 }

-jobUpdateKey := response.JobUpdateKey(resp)
+jobUpdateKey := result.GetKey()
-monitor.JobUpdate(*jobUpdateKey, 5, 500)
+_, err = r.MonitorJobUpdate(*jobUpdateKey, 5*time.Second, 6*time.Minute)
+if err != nil {
+log.Fatal(err)
+}

 case "pauseJobUpdate":
-resp, err := r.PauseJobUpdate(&aurora.JobUpdateKey{
-Job: job.JobKey(),
+key := job.JobKey()
+err := r.PauseJobUpdate(&aurora.JobUpdateKey{
+Job: &key,
 ID: updateId,
 }, "")

 if err != nil {
 log.Fatal(err)
 }
-fmt.Println("PauseJobUpdate response: ", resp.String())

 case "resumeJobUpdate":
-resp, err := r.ResumeJobUpdate(&aurora.JobUpdateKey{
-Job: job.JobKey(),
+key := job.JobKey()
+err := r.ResumeJobUpdate(aurora.JobUpdateKey{
+Job: &key,
 ID: updateId,
 }, "")

 if err != nil {
 log.Fatal(err)
 }
-fmt.Println("ResumeJobUpdate response: ", resp.String())

 case "pulseJobUpdate":
-resp, err := r.PulseJobUpdate(&aurora.JobUpdateKey{
-Job: job.JobKey(),
+key := job.JobKey()
+resp, err := r.PulseJobUpdate(aurora.JobUpdateKey{
+Job: &key,
 ID: updateId,
 })
 if err != nil {
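The update hunk above is the core of the v2 flow: fetch a live task's config, derive an update from it, start it, and block on the monitor. A condensed sketch follows, using only calls visible in this hunk; the exact return types are inferred from how the diff uses them.

package main

import (
    "time"

    realis "github.com/paypal/gorealis/v2"
    "github.com/paypal/gorealis/v2/gen-go/apache/aurora"
)

// updateSketch grows a running job to five instances with 128 MiB of RAM.
func updateSketch(r *realis.Client, key aurora.JobKey) error {
    // Find a live instance whose config can serve as the update template.
    live, err := r.GetInstanceIds(key, aurora.ACTIVE_STATES)
    if err != nil {
        return err
    }
    taskConfig, err := r.FetchTaskConfig(aurora.InstanceKey{JobKey: &key, InstanceId: live[0]})
    if err != nil {
        return err
    }
    // JobUpdateFromConfig replaces v1's NewDefaultUpdateJob, per the diff.
    update := realis.JobUpdateFromConfig(taskConfig).InstanceCount(5).RAM(128)
    result, err := r.StartJobUpdate(update, "")
    if err != nil {
        return err
    }
    // Poll every 5s; fail if the update has not settled after 6 minutes.
    _, err = r.MonitorJobUpdate(*result.GetKey(), 5*time.Second, 6*time.Minute)
    return err
}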
@@ -411,9 +399,10 @@ func main() {
 fmt.Println("PulseJobUpdate response: ", resp.String())

 case "updateDetails":
-resp, err := r.JobUpdateDetails(aurora.JobUpdateQuery{
+key := job.JobKey()
+result, err := r.JobUpdateDetails(aurora.JobUpdateQuery{
 Key: &aurora.JobUpdateKey{
-Job: job.JobKey(),
+Job: &key,
 ID: updateId,
 },
 Limit: 1,
@@ -423,12 +412,13 @@ func main() {
 log.Fatal(err)
 }

-fmt.Println(response.JobUpdateDetails(resp))
+fmt.Println(result)

 case "abortUpdate":
 fmt.Println("Abort update")
-resp, err := r.AbortJobUpdate(aurora.JobUpdateKey{
-Job: job.JobKey(),
+key := job.JobKey()
+err := r.AbortJobUpdate(aurora.JobUpdateKey{
+Job: &key,
 ID: updateId,
 },
 "")
@@ -436,12 +426,12 @@ func main() {
 if err != nil {
 log.Fatal(err)
 }
-fmt.Println(resp.String())

 case "rollbackUpdate":
 fmt.Println("Abort update")
-resp, err := r.RollbackJobUpdate(aurora.JobUpdateKey{
-Job: job.JobKey(),
+key := job.JobKey()
+err := r.RollbackJobUpdate(aurora.JobUpdateKey{
+Job: &key,
 ID: updateId,
 },
 "")
@@ -449,7 +439,6 @@ func main() {
 if err != nil {
 log.Fatal(err)
 }
-fmt.Println(resp.String())

 case "taskConfig":
 fmt.Println("Getting job info")
@@ -458,8 +447,9 @@ func main() {
 log.Fatal(err)

 }
+key := job.JobKey()
 config, err := r.FetchTaskConfig(aurora.InstanceKey{
-JobKey: job.JobKey(),
+JobKey: &key,
 InstanceId: live[0],
 })

@@ -471,9 +461,10 @@ func main() {

 case "updatesummary":
 fmt.Println("Getting job update summary")
+key := job.JobKey()
 jobquery := &aurora.JobUpdateQuery{
-Role: &job.JobKey().Role,
-JobKey: job.JobKey(),
+Role: &key.Role,
+JobKey: &key,
 }
 updatesummary, err := r.GetJobUpdateSummaries(jobquery)
 if err != nil {
@@ -484,10 +475,11 @@ func main() {

 case "taskStatus":
 fmt.Println("Getting task status")
+key := job.JobKey()
 taskQ := &aurora.TaskQuery{
-Role: &job.JobKey().Role,
-Environment: &job.JobKey().Environment,
-JobName: &job.JobKey().Name,
+Role: &key.Role,
+Environment: &key.Environment,
+JobName: &key.Name,
 }
 tasks, err := r.GetTaskStatus(taskQ)
 if err != nil {
@@ -499,10 +491,11 @@ func main() {

 case "tasksWithoutConfig":
 fmt.Println("Getting task status")
+key := job.JobKey()
 taskQ := &aurora.TaskQuery{
-Role: &job.JobKey().Role,
-Environment: &job.JobKey().Environment,
-JobName: &job.JobKey().Name,
+Role: &key.Role,
+Environment: &key.Environment,
+JobName: &key.Name,
 }
 tasks, err := r.GetTasksWithoutConfigs(taskQ)
 if err != nil {
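The same key-to-pointer change repeats for every TaskQuery in this file: JobKey() returns a value in v2, so a local copy is taken first to get addressable fields. A small sketch of that query pattern, built only from the fields and calls used above:

package main

import (
    "fmt"

    realis "github.com/paypal/gorealis/v2"
    "github.com/paypal/gorealis/v2/gen-go/apache/aurora"
)

// taskStatusSketch lists all known tasks for a job key.
func taskStatusSketch(r *realis.Client, key aurora.JobKey) error {
    taskQ := &aurora.TaskQuery{
        Role:        &key.Role,
        Environment: &key.Environment,
        JobName:     &key.Name,
    }
    tasks, err := r.GetTaskStatus(taskQ)
    if err != nil {
        return err
    }
    fmt.Println("Tasks found: ", len(tasks))
    return nil
}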
@@ -518,17 +511,17 @@ func main() {
 log.Fatal("No hosts specified to drain")
 }
 hosts := strings.Split(hostList, ",")
-_, result, err := r.DrainHosts(hosts...)
+_, err := r.DrainHosts(hosts...)
 if err != nil {
 log.Fatalf("error: %+v\n", err.Error())
 }

 // Monitor change to DRAINING and DRAINED mode
-hostResult, err := monitor.HostMaintenance(
+hostResult, err := r.MonitorHostMaintenance(
 hosts,
 []aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
-5,
+5*time.Second,
-10)
+10*time.Second)
 if err != nil {
 for host, ok := range hostResult {
 if !ok {
@@ -538,8 +531,6 @@ func main() {
 log.Fatalf("error: %+v\n", err.Error())
 }

-fmt.Print(result.String())

 case "SLADrainHosts":
 fmt.Println("Setting hosts to DRAINING using SLA aware draining")
 if hostList == "" {
@@ -549,17 +540,17 @@ func main() {

 policy := aurora.SlaPolicy{PercentageSlaPolicy: &aurora.PercentageSlaPolicy{Percentage: 50.0}}

-result, err := r.SLADrainHosts(&policy, 30, hosts...)
+_, err := r.SLADrainHosts(&policy, 30, hosts...)
 if err != nil {
 log.Fatalf("error: %+v\n", err.Error())
 }

 // Monitor change to DRAINING and DRAINED mode
-hostResult, err := monitor.HostMaintenance(
+hostResult, err := r.MonitorHostMaintenance(
 hosts,
 []aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
-5,
+5*time.Second,
-10)
+10*time.Second)
 if err != nil {
 for host, ok := range hostResult {
 if !ok {
@@ -569,25 +560,23 @@ func main() {
 log.Fatalf("error: %+v\n", err.Error())
 }

-fmt.Print(result.String())

 case "endMaintenance":
 fmt.Println("Setting hosts to ACTIVE")
 if hostList == "" {
 log.Fatal("No hosts specified to drain")
 }
 hosts := strings.Split(hostList, ",")
-_, result, err := r.EndMaintenance(hosts...)
+_, err := r.EndMaintenance(hosts...)
 if err != nil {
 log.Fatalf("error: %+v\n", err.Error())
 }

 // Monitor change to DRAINING and DRAINED mode
-hostResult, err := monitor.HostMaintenance(
+hostResult, err := r.MonitorHostMaintenance(
 hosts,
 []aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
-5,
+5*time.Second,
-10)
+10*time.Second)
 if err != nil {
 for host, ok := range hostResult {
 if !ok {
@@ -597,14 +586,13 @@ func main() {
 log.Fatalf("error: %+v\n", err.Error())
 }

-fmt.Print(result.String())

 case "getPendingReasons":
 fmt.Println("Getting pending reasons")
+key := job.JobKey()
 taskQ := &aurora.TaskQuery{
-Role: &job.JobKey().Role,
-Environment: &job.JobKey().Environment,
-JobName: &job.JobKey().Name,
+Role: &key.Role,
+Environment: &key.Environment,
+JobName: &key.Name,
 }
 reasons, err := r.GetPendingReason(taskQ)
 if err != nil {
@@ -616,7 +604,7 @@ func main() {

 case "getJobs":
 fmt.Println("GetJobs...role: ", role)
-_, result, err := r.GetJobs(role)
+result, err := r.GetJobs(role)
 if err != nil {
 log.Fatalf("error: %+v\n", err.Error())
 }

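Taken together, the hunks above reduce the create/verify/clean-up cycle to three v2 calls. A minimal sketch (the *realis.AuroraJob parameter type is an assumption; CreateJob, MonitorInstances, and KillJob appear verbatim in this diff):

package main

import (
    "time"

    realis "github.com/paypal/gorealis/v2"
)

// lifecycleSketch creates a job, waits for all instances to come up, and
// kills the job again if they never do.
func lifecycleSketch(r *realis.Client, job *realis.AuroraJob) error {
    if err := r.CreateJob(job); err != nil {
        return err
    }
    // v2 monitors take time.Duration values instead of bare second counts.
    if ok, err := r.MonitorInstances(job.JobKey(), job.GetInstanceCount(), 5*time.Second, 50*time.Second); !ok || err != nil {
        return r.KillJob(job.JobKey())
    }
    return nil
}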
@@ -23,8 +23,8 @@ import (
 "os"
 "time"

-realis "github.com/paypal/gorealis"
+realis "github.com/paypal/gorealis/v2"
-"github.com/paypal/gorealis/gen-go/apache/aurora"
+"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
 "github.com/pkg/errors"
 )

@@ -125,7 +125,7 @@ func init() {
 }
 }

-func CreateRealisClient(config *Config) (realis.Realis, error) {
+func CreateRealisClient(config *Config) (*realis.Client, error) {
 var transportOption realis.ClientOption
 // Configuring transport protocol. If not transport is provided, then using JSON as the
 // default transport protocol.
@@ -157,7 +157,7 @@ func CreateRealisClient(config *Config) (realis.Realis, error) {
 clientOptions = append(clientOptions, realis.Debug())
 }

-return realis.NewRealisClient(clientOptions...)
+return realis.NewClient(clientOptions...)
 }

 func main() {
@@ -165,7 +165,6 @@ func main() {
 fmt.Println(clientCreationErr)
 os.Exit(1)
 } else {
-monitor := &realis.Monitor{Client: r}
 defer r.Close()
 uris := job.URIs
 labels := job.Labels
@@ -205,20 +204,18 @@ func main() {
 }

 fmt.Println("Creating Job...")
-if resp, jobCreationErr := r.CreateJob(auroraJob); jobCreationErr != nil {
+if jobCreationErr := r.CreateJob(auroraJob); jobCreationErr != nil {
 fmt.Println("Error creating Aurora job: ", jobCreationErr)
 os.Exit(1)
 } else {
-if resp.ResponseCode == aurora.ResponseCode_OK {
-if ok, monitorErr := monitor.Instances(auroraJob.JobKey(), auroraJob.GetInstanceCount(), 5, 50); !ok || monitorErr != nil {
-if _, jobErr := r.KillJob(auroraJob.JobKey()); jobErr !=
-nil {
-fmt.Println(jobErr)
-os.Exit(1)
-} else {
-fmt.Println("ok: ", ok)
-fmt.Println("jobErr: ", jobErr)
-}
-}
+if ok, monitorErr := r.MonitorInstances(auroraJob.JobKey(), auroraJob.GetInstanceCount(), 5, 50); !ok || monitorErr != nil {
+if jobErr := r.KillJob(auroraJob.JobKey()); jobErr !=
+nil {
+fmt.Println(jobErr)
+os.Exit(1)
+} else {
+fmt.Println("ok: ", ok)
+fmt.Println("jobErr: ", jobErr)
+}
 }
 }
 }

@@ -1,62 +0,0 @@
-{
-  "environment": "prod",
-  "health_check_config": {
-    "initial_interval_secs": 15.0,
-    "health_checker": {
-      "http": {
-        "expected_response_code": 0,
-        "endpoint": "/health",
-        "expected_response": "ok"
-      }
-    },
-    "interval_secs": 10.0,
-    "timeout_secs": 1.0,
-    "max_consecutive_failures": 0
-  },
-  "name": "hello_world_from_gorealis",
-  "service": false,
-  "max_task_failures": 1,
-  "cron_collision_policy": "KILL_EXISTING",
-  "enable_hooks": false,
-  "cluster": "devcluster",
-  "task": {
-    "processes": [
-      {
-        "daemon": false,
-        "name": "hello",
-        "ephemeral": false,
-        "max_failures": 1,
-        "min_duration": 5,
-        "cmdline": "\n while true; do\n echo hello world from gorealis\n sleep 10\n done\n ",
-        "final": false
-      }
-    ],
-    "name": "hello",
-    "finalization_wait": 30,
-    "max_failures": 1,
-    "max_concurrency": 0,
-    "resources": {
-      "gpu": 0,
-      "disk": 134217728,
-      "ram": 134217728,
-      "cpu": 1.0
-    },
-    "constraints": [
-      {
-        "order": [
-          "hello"
-        ]
-      }
-    ]
-  },
-  "production": false,
-  "role": "vagrant",
-  "lifecycle": {
-    "http": {
-      "graceful_shutdown_endpoint": "/quitquitquit",
-      "port": "health",
-      "shutdown_endpoint": "/abortabortabort"
-    }
-  },
-  "priority": 0
-}
28
examples/thermos_payload_minimal.json
Normal file
@@ -0,0 +1,28 @@
+{
+  "task": {
+    "processes": [
+      {
+        "daemon": false,
+        "name": "hello",
+        "ephemeral": false,
+        "max_failures": 1,
+        "min_duration": 5,
+        "cmdline": "\n while true; do\n echo hello world from gorealis\n sleep 10\n done\n ",
+        "final": false
+      }
+    ],
+    "resources": {
+      "gpu": 0,
+      "disk": 134217728,
+      "ram": 134217728,
+      "cpu": 1.1
+    },
+    "constraints": [
+      {
+        "order": [
+          "hello"
+        ]
+      }
+    ]
+  }
+}
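The minimal payload above is raw Thermos executor data. A hedged sketch of how such a file is typically handed to a job in these examples; realis.NewJob, ExecutorName, ExecutorData, and the resource setters are assumptions about the gorealis builder, while aurora.AURORA_EXECUTOR_NAME is defined in the generated code below.

package main

import (
    "os"

    realis "github.com/paypal/gorealis/v2"
    "github.com/paypal/gorealis/v2/gen-go/apache/aurora"
)

// jobFromPayloadSketch wires a Thermos JSON payload into an Aurora job.
func jobFromPayloadSketch(path string) (*realis.AuroraJob, error) {
    payload, err := os.ReadFile(path)
    if err != nil {
        return nil, err
    }
    job := realis.NewJob(). // assumed builder
        Environment("prod").
        Role("vagrant").
        Name("hello_world_from_gorealis_minimal").
        ExecutorName(aurora.AURORA_EXECUTOR_NAME).
        ExecutorData(string(payload)).
        CPU(1.1).
        RAM(64).
        Disk(128).
        InstanceCount(1)
    return job, nil
}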
@@ -1,4 +1,5 @@
-// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
+// Autogenerated by Thrift Compiler (0.12.0)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

 package aurora

@@ -1,12 +1,13 @@
-// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
+// Autogenerated by Thrift Compiler (0.12.0)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

 package aurora

-import(
+import (
 "bytes"
 "context"
+"reflect"
 "fmt"
-"time"
 "github.com/apache/thrift/lib/go/thrift"
 )

@@ -14,7 +15,7 @@ import(
 var _ = thrift.ZERO
 var _ = fmt.Printf
 var _ = context.Background
-var _ = time.Now
+var _ = reflect.DeepEqual
 var _ = bytes.Equal

 const AURORA_EXECUTOR_NAME = "AuroraExecutor"

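Both sides of this hunk pin their imports with blank-identifier assignments; only which packages get pinned changes. The idiom exists because Go rejects unused imports, and generated code imports a fixed set of packages regardless of what a given schema actually uses. A minimal illustration:

package aurora

import (
    "bytes"
    "fmt"
)

// Assigning each import to the blank identifier marks it as used, so the
// generated file compiles even when no emitted struct touches the package.
var _ = fmt.Printf
var _ = bytes.Equal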
File diff suppressed because it is too large
@@ -1,22 +1,22 @@
-// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
+// Autogenerated by Thrift Compiler (0.12.0)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

 package main

 import (
 "context"
 "flag"
 "fmt"
 "math"
 "net"
 "net/url"
 "os"
 "strconv"
 "strings"
 "github.com/apache/thrift/lib/go/thrift"
 "apache/aurora"
 )

-var _ = aurora.GoUnusedProtection__

 func Usage() {
 fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
@@ -175,19 +175,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "CreateJob requires 1 args")
 flag.Usage()
 }
-arg213 := flag.Arg(1)
+arg162 := flag.Arg(1)
-mbTrans214 := thrift.NewTMemoryBufferLen(len(arg213))
+mbTrans163 := thrift.NewTMemoryBufferLen(len(arg162))
-defer mbTrans214.Close()
+defer mbTrans163.Close()
-_, err215 := mbTrans214.WriteString(arg213)
+_, err164 := mbTrans163.WriteString(arg162)
-if err215 != nil {
+if err164 != nil {
 Usage()
 return
 }
-factory216 := thrift.NewTJSONProtocolFactory()
+factory165 := thrift.NewTJSONProtocolFactory()
-jsProt217 := factory216.GetProtocol(mbTrans214)
+jsProt166 := factory165.GetProtocol(mbTrans163)
 argvalue0 := aurora.NewJobConfiguration()
-err218 := argvalue0.Read(context.Background(), jsProt217)
+err167 := argvalue0.Read(jsProt166)
-if err218 != nil {
+if err167 != nil {
 Usage()
 return
 }
@@ -200,19 +200,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "ScheduleCronJob requires 1 args")
 flag.Usage()
 }
-arg219 := flag.Arg(1)
+arg168 := flag.Arg(1)
-mbTrans220 := thrift.NewTMemoryBufferLen(len(arg219))
+mbTrans169 := thrift.NewTMemoryBufferLen(len(arg168))
-defer mbTrans220.Close()
+defer mbTrans169.Close()
-_, err221 := mbTrans220.WriteString(arg219)
+_, err170 := mbTrans169.WriteString(arg168)
-if err221 != nil {
+if err170 != nil {
 Usage()
 return
 }
-factory222 := thrift.NewTJSONProtocolFactory()
+factory171 := thrift.NewTJSONProtocolFactory()
-jsProt223 := factory222.GetProtocol(mbTrans220)
+jsProt172 := factory171.GetProtocol(mbTrans169)
 argvalue0 := aurora.NewJobConfiguration()
-err224 := argvalue0.Read(context.Background(), jsProt223)
+err173 := argvalue0.Read(jsProt172)
-if err224 != nil {
+if err173 != nil {
 Usage()
 return
 }
@@ -225,19 +225,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "DescheduleCronJob requires 1 args")
 flag.Usage()
 }
-arg225 := flag.Arg(1)
+arg174 := flag.Arg(1)
-mbTrans226 := thrift.NewTMemoryBufferLen(len(arg225))
+mbTrans175 := thrift.NewTMemoryBufferLen(len(arg174))
-defer mbTrans226.Close()
+defer mbTrans175.Close()
-_, err227 := mbTrans226.WriteString(arg225)
+_, err176 := mbTrans175.WriteString(arg174)
-if err227 != nil {
+if err176 != nil {
 Usage()
 return
 }
-factory228 := thrift.NewTJSONProtocolFactory()
+factory177 := thrift.NewTJSONProtocolFactory()
-jsProt229 := factory228.GetProtocol(mbTrans226)
+jsProt178 := factory177.GetProtocol(mbTrans175)
 argvalue0 := aurora.NewJobKey()
-err230 := argvalue0.Read(context.Background(), jsProt229)
+err179 := argvalue0.Read(jsProt178)
-if err230 != nil {
+if err179 != nil {
 Usage()
 return
 }
@@ -250,19 +250,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "StartCronJob requires 1 args")
 flag.Usage()
 }
-arg231 := flag.Arg(1)
+arg180 := flag.Arg(1)
-mbTrans232 := thrift.NewTMemoryBufferLen(len(arg231))
+mbTrans181 := thrift.NewTMemoryBufferLen(len(arg180))
-defer mbTrans232.Close()
+defer mbTrans181.Close()
-_, err233 := mbTrans232.WriteString(arg231)
+_, err182 := mbTrans181.WriteString(arg180)
-if err233 != nil {
+if err182 != nil {
 Usage()
 return
 }
-factory234 := thrift.NewTJSONProtocolFactory()
+factory183 := thrift.NewTJSONProtocolFactory()
-jsProt235 := factory234.GetProtocol(mbTrans232)
+jsProt184 := factory183.GetProtocol(mbTrans181)
 argvalue0 := aurora.NewJobKey()
-err236 := argvalue0.Read(context.Background(), jsProt235)
+err185 := argvalue0.Read(jsProt184)
-if err236 != nil {
+if err185 != nil {
 Usage()
 return
 }
@@ -275,36 +275,36 @@ func main() {
 fmt.Fprintln(os.Stderr, "RestartShards requires 2 args")
 flag.Usage()
 }
-arg237 := flag.Arg(1)
+arg186 := flag.Arg(1)
-mbTrans238 := thrift.NewTMemoryBufferLen(len(arg237))
+mbTrans187 := thrift.NewTMemoryBufferLen(len(arg186))
-defer mbTrans238.Close()
+defer mbTrans187.Close()
-_, err239 := mbTrans238.WriteString(arg237)
+_, err188 := mbTrans187.WriteString(arg186)
-if err239 != nil {
+if err188 != nil {
 Usage()
 return
 }
-factory240 := thrift.NewTJSONProtocolFactory()
+factory189 := thrift.NewTJSONProtocolFactory()
-jsProt241 := factory240.GetProtocol(mbTrans238)
+jsProt190 := factory189.GetProtocol(mbTrans187)
 argvalue0 := aurora.NewJobKey()
-err242 := argvalue0.Read(context.Background(), jsProt241)
+err191 := argvalue0.Read(jsProt190)
-if err242 != nil {
+if err191 != nil {
 Usage()
 return
 }
 value0 := argvalue0
-arg243 := flag.Arg(2)
+arg192 := flag.Arg(2)
-mbTrans244 := thrift.NewTMemoryBufferLen(len(arg243))
+mbTrans193 := thrift.NewTMemoryBufferLen(len(arg192))
-defer mbTrans244.Close()
+defer mbTrans193.Close()
-_, err245 := mbTrans244.WriteString(arg243)
+_, err194 := mbTrans193.WriteString(arg192)
-if err245 != nil {
+if err194 != nil {
 Usage()
 return
 }
-factory246 := thrift.NewTJSONProtocolFactory()
+factory195 := thrift.NewTJSONProtocolFactory()
-jsProt247 := factory246.GetProtocol(mbTrans244)
+jsProt196 := factory195.GetProtocol(mbTrans193)
 containerStruct1 := aurora.NewAuroraSchedulerManagerRestartShardsArgs()
-err248 := containerStruct1.ReadField2(context.Background(), jsProt247)
+err197 := containerStruct1.ReadField2(jsProt196)
-if err248 != nil {
+if err197 != nil {
 Usage()
 return
 }
@@ -318,36 +318,36 @@ func main() {
 fmt.Fprintln(os.Stderr, "KillTasks requires 3 args")
 flag.Usage()
 }
-arg249 := flag.Arg(1)
+arg198 := flag.Arg(1)
-mbTrans250 := thrift.NewTMemoryBufferLen(len(arg249))
+mbTrans199 := thrift.NewTMemoryBufferLen(len(arg198))
-defer mbTrans250.Close()
+defer mbTrans199.Close()
-_, err251 := mbTrans250.WriteString(arg249)
+_, err200 := mbTrans199.WriteString(arg198)
-if err251 != nil {
+if err200 != nil {
 Usage()
 return
 }
-factory252 := thrift.NewTJSONProtocolFactory()
+factory201 := thrift.NewTJSONProtocolFactory()
-jsProt253 := factory252.GetProtocol(mbTrans250)
+jsProt202 := factory201.GetProtocol(mbTrans199)
 argvalue0 := aurora.NewJobKey()
-err254 := argvalue0.Read(context.Background(), jsProt253)
+err203 := argvalue0.Read(jsProt202)
-if err254 != nil {
+if err203 != nil {
 Usage()
 return
 }
 value0 := argvalue0
-arg255 := flag.Arg(2)
+arg204 := flag.Arg(2)
-mbTrans256 := thrift.NewTMemoryBufferLen(len(arg255))
+mbTrans205 := thrift.NewTMemoryBufferLen(len(arg204))
-defer mbTrans256.Close()
+defer mbTrans205.Close()
-_, err257 := mbTrans256.WriteString(arg255)
+_, err206 := mbTrans205.WriteString(arg204)
-if err257 != nil {
+if err206 != nil {
 Usage()
 return
 }
-factory258 := thrift.NewTJSONProtocolFactory()
+factory207 := thrift.NewTJSONProtocolFactory()
-jsProt259 := factory258.GetProtocol(mbTrans256)
+jsProt208 := factory207.GetProtocol(mbTrans205)
 containerStruct1 := aurora.NewAuroraSchedulerManagerKillTasksArgs()
-err260 := containerStruct1.ReadField2(context.Background(), jsProt259)
+err209 := containerStruct1.ReadField2(jsProt208)
-if err260 != nil {
+if err209 != nil {
 Usage()
 return
 }
@@ -363,25 +363,25 @@ func main() {
 fmt.Fprintln(os.Stderr, "AddInstances requires 2 args")
 flag.Usage()
 }
-arg262 := flag.Arg(1)
+arg211 := flag.Arg(1)
-mbTrans263 := thrift.NewTMemoryBufferLen(len(arg262))
+mbTrans212 := thrift.NewTMemoryBufferLen(len(arg211))
-defer mbTrans263.Close()
+defer mbTrans212.Close()
-_, err264 := mbTrans263.WriteString(arg262)
+_, err213 := mbTrans212.WriteString(arg211)
-if err264 != nil {
+if err213 != nil {
 Usage()
 return
 }
-factory265 := thrift.NewTJSONProtocolFactory()
+factory214 := thrift.NewTJSONProtocolFactory()
-jsProt266 := factory265.GetProtocol(mbTrans263)
+jsProt215 := factory214.GetProtocol(mbTrans212)
 argvalue0 := aurora.NewInstanceKey()
-err267 := argvalue0.Read(context.Background(), jsProt266)
+err216 := argvalue0.Read(jsProt215)
-if err267 != nil {
+if err216 != nil {
 Usage()
 return
 }
 value0 := argvalue0
-tmp1, err268 := (strconv.Atoi(flag.Arg(2)))
+tmp1, err217 := (strconv.Atoi(flag.Arg(2)))
-if err268 != nil {
+if err217 != nil {
 Usage()
 return
 }
@@ -395,19 +395,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "ReplaceCronTemplate requires 1 args")
 flag.Usage()
 }
-arg269 := flag.Arg(1)
+arg218 := flag.Arg(1)
-mbTrans270 := thrift.NewTMemoryBufferLen(len(arg269))
+mbTrans219 := thrift.NewTMemoryBufferLen(len(arg218))
-defer mbTrans270.Close()
+defer mbTrans219.Close()
-_, err271 := mbTrans270.WriteString(arg269)
+_, err220 := mbTrans219.WriteString(arg218)
-if err271 != nil {
+if err220 != nil {
 Usage()
 return
 }
-factory272 := thrift.NewTJSONProtocolFactory()
+factory221 := thrift.NewTJSONProtocolFactory()
-jsProt273 := factory272.GetProtocol(mbTrans270)
+jsProt222 := factory221.GetProtocol(mbTrans219)
 argvalue0 := aurora.NewJobConfiguration()
-err274 := argvalue0.Read(context.Background(), jsProt273)
+err223 := argvalue0.Read(jsProt222)
-if err274 != nil {
+if err223 != nil {
 Usage()
 return
 }
@@ -420,19 +420,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "StartJobUpdate requires 2 args")
 flag.Usage()
 }
-arg275 := flag.Arg(1)
+arg224 := flag.Arg(1)
-mbTrans276 := thrift.NewTMemoryBufferLen(len(arg275))
+mbTrans225 := thrift.NewTMemoryBufferLen(len(arg224))
-defer mbTrans276.Close()
+defer mbTrans225.Close()
-_, err277 := mbTrans276.WriteString(arg275)
+_, err226 := mbTrans225.WriteString(arg224)
-if err277 != nil {
+if err226 != nil {
 Usage()
 return
 }
-factory278 := thrift.NewTJSONProtocolFactory()
+factory227 := thrift.NewTJSONProtocolFactory()
-jsProt279 := factory278.GetProtocol(mbTrans276)
+jsProt228 := factory227.GetProtocol(mbTrans225)
 argvalue0 := aurora.NewJobUpdateRequest()
-err280 := argvalue0.Read(context.Background(), jsProt279)
+err229 := argvalue0.Read(jsProt228)
-if err280 != nil {
+if err229 != nil {
 Usage()
 return
 }
@@ -447,19 +447,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "PauseJobUpdate requires 2 args")
 flag.Usage()
 }
-arg282 := flag.Arg(1)
+arg231 := flag.Arg(1)
-mbTrans283 := thrift.NewTMemoryBufferLen(len(arg282))
+mbTrans232 := thrift.NewTMemoryBufferLen(len(arg231))
-defer mbTrans283.Close()
+defer mbTrans232.Close()
-_, err284 := mbTrans283.WriteString(arg282)
+_, err233 := mbTrans232.WriteString(arg231)
-if err284 != nil {
+if err233 != nil {
 Usage()
 return
 }
-factory285 := thrift.NewTJSONProtocolFactory()
+factory234 := thrift.NewTJSONProtocolFactory()
-jsProt286 := factory285.GetProtocol(mbTrans283)
+jsProt235 := factory234.GetProtocol(mbTrans232)
 argvalue0 := aurora.NewJobUpdateKey()
-err287 := argvalue0.Read(context.Background(), jsProt286)
+err236 := argvalue0.Read(jsProt235)
-if err287 != nil {
+if err236 != nil {
 Usage()
 return
 }
@@ -474,19 +474,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "ResumeJobUpdate requires 2 args")
 flag.Usage()
 }
-arg289 := flag.Arg(1)
+arg238 := flag.Arg(1)
-mbTrans290 := thrift.NewTMemoryBufferLen(len(arg289))
+mbTrans239 := thrift.NewTMemoryBufferLen(len(arg238))
-defer mbTrans290.Close()
+defer mbTrans239.Close()
-_, err291 := mbTrans290.WriteString(arg289)
+_, err240 := mbTrans239.WriteString(arg238)
-if err291 != nil {
+if err240 != nil {
 Usage()
 return
 }
-factory292 := thrift.NewTJSONProtocolFactory()
+factory241 := thrift.NewTJSONProtocolFactory()
-jsProt293 := factory292.GetProtocol(mbTrans290)
+jsProt242 := factory241.GetProtocol(mbTrans239)
 argvalue0 := aurora.NewJobUpdateKey()
-err294 := argvalue0.Read(context.Background(), jsProt293)
+err243 := argvalue0.Read(jsProt242)
-if err294 != nil {
+if err243 != nil {
 Usage()
 return
 }
@@ -501,19 +501,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "AbortJobUpdate requires 2 args")
 flag.Usage()
 }
-arg296 := flag.Arg(1)
+arg245 := flag.Arg(1)
-mbTrans297 := thrift.NewTMemoryBufferLen(len(arg296))
+mbTrans246 := thrift.NewTMemoryBufferLen(len(arg245))
-defer mbTrans297.Close()
+defer mbTrans246.Close()
-_, err298 := mbTrans297.WriteString(arg296)
+_, err247 := mbTrans246.WriteString(arg245)
-if err298 != nil {
+if err247 != nil {
 Usage()
 return
 }
-factory299 := thrift.NewTJSONProtocolFactory()
+factory248 := thrift.NewTJSONProtocolFactory()
-jsProt300 := factory299.GetProtocol(mbTrans297)
+jsProt249 := factory248.GetProtocol(mbTrans246)
 argvalue0 := aurora.NewJobUpdateKey()
-err301 := argvalue0.Read(context.Background(), jsProt300)
+err250 := argvalue0.Read(jsProt249)
-if err301 != nil {
+if err250 != nil {
 Usage()
 return
 }
@@ -528,19 +528,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "RollbackJobUpdate requires 2 args")
 flag.Usage()
 }
-arg303 := flag.Arg(1)
+arg252 := flag.Arg(1)
-mbTrans304 := thrift.NewTMemoryBufferLen(len(arg303))
+mbTrans253 := thrift.NewTMemoryBufferLen(len(arg252))
-defer mbTrans304.Close()
+defer mbTrans253.Close()
-_, err305 := mbTrans304.WriteString(arg303)
+_, err254 := mbTrans253.WriteString(arg252)
-if err305 != nil {
+if err254 != nil {
 Usage()
 return
 }
-factory306 := thrift.NewTJSONProtocolFactory()
+factory255 := thrift.NewTJSONProtocolFactory()
-jsProt307 := factory306.GetProtocol(mbTrans304)
+jsProt256 := factory255.GetProtocol(mbTrans253)
 argvalue0 := aurora.NewJobUpdateKey()
-err308 := argvalue0.Read(context.Background(), jsProt307)
+err257 := argvalue0.Read(jsProt256)
-if err308 != nil {
+if err257 != nil {
 Usage()
 return
 }
@@ -555,19 +555,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "PulseJobUpdate requires 1 args")
 flag.Usage()
 }
-arg310 := flag.Arg(1)
+arg259 := flag.Arg(1)
-mbTrans311 := thrift.NewTMemoryBufferLen(len(arg310))
+mbTrans260 := thrift.NewTMemoryBufferLen(len(arg259))
-defer mbTrans311.Close()
+defer mbTrans260.Close()
-_, err312 := mbTrans311.WriteString(arg310)
+_, err261 := mbTrans260.WriteString(arg259)
-if err312 != nil {
+if err261 != nil {
 Usage()
 return
 }
-factory313 := thrift.NewTJSONProtocolFactory()
+factory262 := thrift.NewTJSONProtocolFactory()
-jsProt314 := factory313.GetProtocol(mbTrans311)
+jsProt263 := factory262.GetProtocol(mbTrans260)
 argvalue0 := aurora.NewJobUpdateKey()
-err315 := argvalue0.Read(context.Background(), jsProt314)
+err264 := argvalue0.Read(jsProt263)
-if err315 != nil {
+if err264 != nil {
 Usage()
 return
 }
@@ -598,19 +598,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
 flag.Usage()
 }
-arg317 := flag.Arg(1)
+arg266 := flag.Arg(1)
-mbTrans318 := thrift.NewTMemoryBufferLen(len(arg317))
+mbTrans267 := thrift.NewTMemoryBufferLen(len(arg266))
-defer mbTrans318.Close()
+defer mbTrans267.Close()
-_, err319 := mbTrans318.WriteString(arg317)
+_, err268 := mbTrans267.WriteString(arg266)
-if err319 != nil {
+if err268 != nil {
 Usage()
 return
 }
-factory320 := thrift.NewTJSONProtocolFactory()
+factory269 := thrift.NewTJSONProtocolFactory()
-jsProt321 := factory320.GetProtocol(mbTrans318)
+jsProt270 := factory269.GetProtocol(mbTrans267)
 argvalue0 := aurora.NewTaskQuery()
-err322 := argvalue0.Read(context.Background(), jsProt321)
+err271 := argvalue0.Read(jsProt270)
-if err322 != nil {
+if err271 != nil {
 Usage()
 return
 }
@@ -623,19 +623,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
 flag.Usage()
 }
-arg323 := flag.Arg(1)
+arg272 := flag.Arg(1)
-mbTrans324 := thrift.NewTMemoryBufferLen(len(arg323))
+mbTrans273 := thrift.NewTMemoryBufferLen(len(arg272))
-defer mbTrans324.Close()
+defer mbTrans273.Close()
-_, err325 := mbTrans324.WriteString(arg323)
+_, err274 := mbTrans273.WriteString(arg272)
-if err325 != nil {
+if err274 != nil {
 Usage()
 return
 }
-factory326 := thrift.NewTJSONProtocolFactory()
+factory275 := thrift.NewTJSONProtocolFactory()
-jsProt327 := factory326.GetProtocol(mbTrans324)
+jsProt276 := factory275.GetProtocol(mbTrans273)
 argvalue0 := aurora.NewTaskQuery()
-err328 := argvalue0.Read(context.Background(), jsProt327)
+err277 := argvalue0.Read(jsProt276)
-if err328 != nil {
+if err277 != nil {
 Usage()
 return
 }
@@ -648,19 +648,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
 flag.Usage()
 }
-arg329 := flag.Arg(1)
+arg278 := flag.Arg(1)
-mbTrans330 := thrift.NewTMemoryBufferLen(len(arg329))
+mbTrans279 := thrift.NewTMemoryBufferLen(len(arg278))
-defer mbTrans330.Close()
+defer mbTrans279.Close()
-_, err331 := mbTrans330.WriteString(arg329)
+_, err280 := mbTrans279.WriteString(arg278)
-if err331 != nil {
+if err280 != nil {
 Usage()
 return
 }
-factory332 := thrift.NewTJSONProtocolFactory()
+factory281 := thrift.NewTJSONProtocolFactory()
-jsProt333 := factory332.GetProtocol(mbTrans330)
+jsProt282 := factory281.GetProtocol(mbTrans279)
 argvalue0 := aurora.NewTaskQuery()
-err334 := argvalue0.Read(context.Background(), jsProt333)
+err283 := argvalue0.Read(jsProt282)
-if err334 != nil {
+if err283 != nil {
 Usage()
 return
 }
@@ -673,19 +673,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
 flag.Usage()
 }
-arg335 := flag.Arg(1)
+arg284 := flag.Arg(1)
-mbTrans336 := thrift.NewTMemoryBufferLen(len(arg335))
+mbTrans285 := thrift.NewTMemoryBufferLen(len(arg284))
-defer mbTrans336.Close()
+defer mbTrans285.Close()
-_, err337 := mbTrans336.WriteString(arg335)
+_, err286 := mbTrans285.WriteString(arg284)
-if err337 != nil {
+if err286 != nil {
 Usage()
 return
 }
-factory338 := thrift.NewTJSONProtocolFactory()
+factory287 := thrift.NewTJSONProtocolFactory()
-jsProt339 := factory338.GetProtocol(mbTrans336)
+jsProt288 := factory287.GetProtocol(mbTrans285)
 argvalue0 := aurora.NewJobKey()
-err340 := argvalue0.Read(context.Background(), jsProt339)
+err289 := argvalue0.Read(jsProt288)
-if err340 != nil {
+if err289 != nil {
 Usage()
 return
 }
@@ -718,19 +718,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
 flag.Usage()
 }
-arg343 := flag.Arg(1)
+arg292 := flag.Arg(1)
-mbTrans344 := thrift.NewTMemoryBufferLen(len(arg343))
+mbTrans293 := thrift.NewTMemoryBufferLen(len(arg292))
-defer mbTrans344.Close()
+defer mbTrans293.Close()
-_, err345 := mbTrans344.WriteString(arg343)
+_, err294 := mbTrans293.WriteString(arg292)
-if err345 != nil {
+if err294 != nil {
 Usage()
 return
 }
-factory346 := thrift.NewTJSONProtocolFactory()
+factory295 := thrift.NewTJSONProtocolFactory()
-jsProt347 := factory346.GetProtocol(mbTrans344)
+jsProt296 := factory295.GetProtocol(mbTrans293)
 argvalue0 := aurora.NewJobConfiguration()
-err348 := argvalue0.Read(context.Background(), jsProt347)
+err297 := argvalue0.Read(jsProt296)
-if err348 != nil {
+if err297 != nil {
 Usage()
 return
 }
@@ -743,19 +743,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
 flag.Usage()
 }
-arg349 := flag.Arg(1)
+arg298 := flag.Arg(1)
-mbTrans350 := thrift.NewTMemoryBufferLen(len(arg349))
+mbTrans299 := thrift.NewTMemoryBufferLen(len(arg298))
-defer mbTrans350.Close()
+defer mbTrans299.Close()
-_, err351 := mbTrans350.WriteString(arg349)
+_, err300 := mbTrans299.WriteString(arg298)
-if err351 != nil {
+if err300 != nil {
 Usage()
 return
 }
-factory352 := thrift.NewTJSONProtocolFactory()
+factory301 := thrift.NewTJSONProtocolFactory()
-jsProt353 := factory352.GetProtocol(mbTrans350)
+jsProt302 := factory301.GetProtocol(mbTrans299)
 argvalue0 := aurora.NewJobUpdateQuery()
-err354 := argvalue0.Read(context.Background(), jsProt353)
+err303 := argvalue0.Read(jsProt302)
-if err354 != nil {
+if err303 != nil {
 Usage()
 return
 }
@@ -768,19 +768,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
 flag.Usage()
 }
-arg355 := flag.Arg(1)
+arg304 := flag.Arg(1)
-mbTrans356 := thrift.NewTMemoryBufferLen(len(arg355))
+mbTrans305 := thrift.NewTMemoryBufferLen(len(arg304))
-defer mbTrans356.Close()
+defer mbTrans305.Close()
-_, err357 := mbTrans356.WriteString(arg355)
+_, err306 := mbTrans305.WriteString(arg304)
-if err357 != nil {
+if err306 != nil {
 Usage()
 return
 }
-factory358 := thrift.NewTJSONProtocolFactory()
+factory307 := thrift.NewTJSONProtocolFactory()
-jsProt359 := factory358.GetProtocol(mbTrans356)
+jsProt308 := factory307.GetProtocol(mbTrans305)
 argvalue0 := aurora.NewJobUpdateQuery()
-err360 := argvalue0.Read(context.Background(), jsProt359)
+err309 := argvalue0.Read(jsProt308)
-if err360 != nil {
+if err309 != nil {
 Usage()
 return
 }
@@ -793,19 +793,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
 flag.Usage()
 }
-arg361 := flag.Arg(1)
+arg310 := flag.Arg(1)
-mbTrans362 := thrift.NewTMemoryBufferLen(len(arg361))
+mbTrans311 := thrift.NewTMemoryBufferLen(len(arg310))
-defer mbTrans362.Close()
+defer mbTrans311.Close()
-_, err363 := mbTrans362.WriteString(arg361)
+_, err312 := mbTrans311.WriteString(arg310)
-if err363 != nil {
+if err312 != nil {
 Usage()
 return
 }
-factory364 := thrift.NewTJSONProtocolFactory()
+factory313 := thrift.NewTJSONProtocolFactory()
-jsProt365 := factory364.GetProtocol(mbTrans362)
+jsProt314 := factory313.GetProtocol(mbTrans311)
 argvalue0 := aurora.NewJobUpdateRequest()
-err366 := argvalue0.Read(context.Background(), jsProt365)
+err315 := argvalue0.Read(jsProt314)
-if err366 != nil {
+if err315 != nil {
 Usage()
 return
 }

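Every hunk in the generated CLI above repeats the same decode dance; only the numeric suffixes move. A sketch of that pattern on the right-hand (Thrift 0.12) side of this diff, using only calls that appear in it:

package main

import (
    "github.com/apache/thrift/lib/go/thrift"

    "apache/aurora"
)

// readJobKeySketch decodes a JSON-encoded JobKey the way the generated CLI
// does: stage the bytes in a TMemoryBuffer, then read the struct back out
// through a TJSONProtocol. Thrift 0.14 adds a context.Context first argument
// to Read, which is the other change running through these hunks.
func readJobKeySketch(jsonArg string) (*aurora.JobKey, error) {
    trans := thrift.NewTMemoryBufferLen(len(jsonArg))
    defer trans.Close()
    if _, err := trans.WriteString(jsonArg); err != nil {
        return nil, err
    }
    prot := thrift.NewTJSONProtocolFactory().GetProtocol(trans)
    jk := aurora.NewJobKey()
    if err := jk.Read(prot); err != nil {
        return nil, err
    }
    return jk, nil
}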
@@ -1,22 +1,22 @@
-// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
+// Autogenerated by Thrift Compiler (0.12.0)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING

 package main

 import (
 "context"
 "flag"
 "fmt"
 "math"
 "net"
 "net/url"
 "os"
 "strconv"
 "strings"
 "github.com/apache/thrift/lib/go/thrift"
 "apache/aurora"
 )

-var _ = aurora.GoUnusedProtection__

 func Usage() {
 fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
@@ -179,19 +179,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
 flag.Usage()
 }
-arg132 := flag.Arg(1)
+arg81 := flag.Arg(1)
-mbTrans133 := thrift.NewTMemoryBufferLen(len(arg132))
+mbTrans82 := thrift.NewTMemoryBufferLen(len(arg81))
-defer mbTrans133.Close()
+defer mbTrans82.Close()
-_, err134 := mbTrans133.WriteString(arg132)
+_, err83 := mbTrans82.WriteString(arg81)
-if err134 != nil {
+if err83 != nil {
 Usage()
 return
 }
-factory135 := thrift.NewTJSONProtocolFactory()
+factory84 := thrift.NewTJSONProtocolFactory()
-jsProt136 := factory135.GetProtocol(mbTrans133)
+jsProt85 := factory84.GetProtocol(mbTrans82)
 argvalue0 := aurora.NewTaskQuery()
-err137 := argvalue0.Read(context.Background(), jsProt136)
+err86 := argvalue0.Read(jsProt85)
-if err137 != nil {
+if err86 != nil {
 Usage()
 return
 }
@@ -204,19 +204,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
 flag.Usage()
 }
-arg138 := flag.Arg(1)
+arg87 := flag.Arg(1)
-mbTrans139 := thrift.NewTMemoryBufferLen(len(arg138))
+mbTrans88 := thrift.NewTMemoryBufferLen(len(arg87))
-defer mbTrans139.Close()
+defer mbTrans88.Close()
-_, err140 := mbTrans139.WriteString(arg138)
+_, err89 := mbTrans88.WriteString(arg87)
-if err140 != nil {
+if err89 != nil {
 Usage()
 return
 }
-factory141 := thrift.NewTJSONProtocolFactory()
+factory90 := thrift.NewTJSONProtocolFactory()
-jsProt142 := factory141.GetProtocol(mbTrans139)
+jsProt91 := factory90.GetProtocol(mbTrans88)
 argvalue0 := aurora.NewTaskQuery()
-err143 := argvalue0.Read(context.Background(), jsProt142)
+err92 := argvalue0.Read(jsProt91)
-if err143 != nil {
+if err92 != nil {
 Usage()
 return
 }
@@ -229,19 +229,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
 flag.Usage()
 }
-arg144 := flag.Arg(1)
+arg93 := flag.Arg(1)
-mbTrans145 := thrift.NewTMemoryBufferLen(len(arg144))
+mbTrans94 := thrift.NewTMemoryBufferLen(len(arg93))
-defer mbTrans145.Close()
+defer mbTrans94.Close()
-_, err146 := mbTrans145.WriteString(arg144)
+_, err95 := mbTrans94.WriteString(arg93)
-if err146 != nil {
+if err95 != nil {
 Usage()
 return
 }
-factory147 := thrift.NewTJSONProtocolFactory()
+factory96 := thrift.NewTJSONProtocolFactory()
-jsProt148 := factory147.GetProtocol(mbTrans145)
+jsProt97 := factory96.GetProtocol(mbTrans94)
 argvalue0 := aurora.NewTaskQuery()
-err149 := argvalue0.Read(context.Background(), jsProt148)
+err98 := argvalue0.Read(jsProt97)
-if err149 != nil {
+if err98 != nil {
 Usage()
 return
 }
@@ -254,19 +254,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
 flag.Usage()
 }
-arg150 := flag.Arg(1)
+arg99 := flag.Arg(1)
-mbTrans151 := thrift.NewTMemoryBufferLen(len(arg150))
+mbTrans100 := thrift.NewTMemoryBufferLen(len(arg99))
-defer mbTrans151.Close()
+defer mbTrans100.Close()
-_, err152 := mbTrans151.WriteString(arg150)
+_, err101 := mbTrans100.WriteString(arg99)
-if err152 != nil {
+if err101 != nil {
 Usage()
 return
 }
-factory153 := thrift.NewTJSONProtocolFactory()
+factory102 := thrift.NewTJSONProtocolFactory()
-jsProt154 := factory153.GetProtocol(mbTrans151)
+jsProt103 := factory102.GetProtocol(mbTrans100)
 argvalue0 := aurora.NewJobKey()
-err155 := argvalue0.Read(context.Background(), jsProt154)
+err104 := argvalue0.Read(jsProt103)
-if err155 != nil {
+if err104 != nil {
 Usage()
 return
 }
@@ -299,19 +299,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
 flag.Usage()
 }
-arg158 := flag.Arg(1)
+arg107 := flag.Arg(1)
-mbTrans159 := thrift.NewTMemoryBufferLen(len(arg158))
+mbTrans108 := thrift.NewTMemoryBufferLen(len(arg107))
-defer mbTrans159.Close()
+defer mbTrans108.Close()
-_, err160 := mbTrans159.WriteString(arg158)
+_, err109 := mbTrans108.WriteString(arg107)
-if err160 != nil {
+if err109 != nil {
 Usage()
 return
 }
-factory161 := thrift.NewTJSONProtocolFactory()
+factory110 := thrift.NewTJSONProtocolFactory()
-jsProt162 := factory161.GetProtocol(mbTrans159)
+jsProt111 := factory110.GetProtocol(mbTrans108)
 argvalue0 := aurora.NewJobConfiguration()
-err163 := argvalue0.Read(context.Background(), jsProt162)
+err112 := argvalue0.Read(jsProt111)
-if err163 != nil {
+if err112 != nil {
 Usage()
 return
 }
@@ -324,19 +324,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
 flag.Usage()
 }
-arg164 := flag.Arg(1)
+arg113 := flag.Arg(1)
-mbTrans165 := thrift.NewTMemoryBufferLen(len(arg164))
+mbTrans114 := thrift.NewTMemoryBufferLen(len(arg113))
-defer mbTrans165.Close()
+defer mbTrans114.Close()
-_, err166 := mbTrans165.WriteString(arg164)
+_, err115 := mbTrans114.WriteString(arg113)
-if err166 != nil {
+if err115 != nil {
 Usage()
 return
 }
-factory167 := thrift.NewTJSONProtocolFactory()
+factory116 := thrift.NewTJSONProtocolFactory()
-jsProt168 := factory167.GetProtocol(mbTrans165)
+jsProt117 := factory116.GetProtocol(mbTrans114)
 argvalue0 := aurora.NewJobUpdateQuery()
-err169 := argvalue0.Read(context.Background(), jsProt168)
+err118 := argvalue0.Read(jsProt117)
-if err169 != nil {
+if err118 != nil {
 Usage()
 return
 }
@@ -349,19 +349,19 @@ func main() {
 fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
 flag.Usage()
 }
-arg170 := flag.Arg(1)
+arg119 := flag.Arg(1)
-mbTrans171 := thrift.NewTMemoryBufferLen(len(arg170))
+mbTrans120 := thrift.NewTMemoryBufferLen(len(arg119))
-defer mbTrans171.Close()
+defer mbTrans120.Close()
-_, err172 := mbTrans171.WriteString(arg170)
+_, err121 := mbTrans120.WriteString(arg119)
-if err172 != nil {
+if err121 != nil {
 Usage()
 return
 }
-factory173 := thrift.NewTJSONProtocolFactory()
+factory122 := thrift.NewTJSONProtocolFactory()
-jsProt174 := factory173.GetProtocol(mbTrans171)
jsProt174 := factory173.GetProtocol(mbTrans171)
|
jsProt123 := factory122.GetProtocol(mbTrans120)
|
||||||
argvalue0 := aurora.NewJobUpdateQuery()
|
argvalue0 := aurora.NewJobUpdateQuery()
|
||||||
err175 := argvalue0.Read(context.Background(), jsProt174)
|
err124 := argvalue0.Read(jsProt123)
|
||||||
if err175 != nil {
|
if err124 != nil {
|
||||||
Usage()
|
Usage()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -374,19 +374,19 @@ func main() {
|
||||||
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
|
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
|
||||||
flag.Usage()
|
flag.Usage()
|
||||||
}
|
}
|
||||||
arg176 := flag.Arg(1)
|
arg125 := flag.Arg(1)
|
||||||
mbTrans177 := thrift.NewTMemoryBufferLen(len(arg176))
|
mbTrans126 := thrift.NewTMemoryBufferLen(len(arg125))
|
||||||
defer mbTrans177.Close()
|
defer mbTrans126.Close()
|
||||||
_, err178 := mbTrans177.WriteString(arg176)
|
_, err127 := mbTrans126.WriteString(arg125)
|
||||||
if err178 != nil {
|
if err127 != nil {
|
||||||
Usage()
|
Usage()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
factory179 := thrift.NewTJSONProtocolFactory()
|
factory128 := thrift.NewTJSONProtocolFactory()
|
||||||
jsProt180 := factory179.GetProtocol(mbTrans177)
|
jsProt129 := factory128.GetProtocol(mbTrans126)
|
||||||
argvalue0 := aurora.NewJobUpdateRequest()
|
argvalue0 := aurora.NewJobUpdateRequest()
|
||||||
err181 := argvalue0.Read(context.Background(), jsProt180)
|
err130 := argvalue0.Read(jsProt129)
|
||||||
if err181 != nil {
|
if err130 != nil {
|
||||||
Usage()
|
Usage()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
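Note on the hunks above: the renumbered temporaries (err137 → err86, mbTrans139 → mbTrans88, and so on) are artifacts of regenerating the bindings; the substantive change is that structs generated by thrift 0.12 expose Read(protocol) without the context.Context parameter added by later generators. A minimal sketch of decoding a TaskQuery under the 0.12-style API; the payload value and error handling are illustrative, not taken from this diff:

package main

import (
	"log"

	"github.com/apache/thrift/lib/go/thrift"
	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
)

func main() {
	// Assumed example payload: any Thrift-JSON encoded TaskQuery works here.
	payload := `{}`

	mbTrans := thrift.NewTMemoryBufferLen(len(payload))
	if _, err := mbTrans.WriteString(payload); err != nil {
		log.Fatal(err)
	}
	jsProt := thrift.NewTJSONProtocolFactory().GetProtocol(mbTrans)

	query := aurora.NewTaskQuery()
	// thrift 0.12 generated code: Read(prot), not Read(ctx, prot).
	if err := query.Read(jsProt); err != nil {
		log.Fatal(err)
	}
}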
@@ -1,6 +1,6 @@
 #! /bin/bash

-THRIFT_VER=0.14.0
+THRIFT_VER=0.12.0

 if [[ $(thrift -version | grep -e $THRIFT_VER -c) -ne 1 ]]; then
 	echo "Warning: This wrapper has only been tested with version" $THRIFT_VER;
go.mod (10 changes)
@@ -1,12 +1,10 @@
-module github.com/paypal/gorealis
+module github.com/paypal/gorealis/v2

-go 1.13
-
 require (
-	github.com/apache/thrift v0.14.0
+	github.com/apache/thrift v0.12.0
 	github.com/davecgh/go-spew v1.1.0 // indirect
-	github.com/pkg/errors v0.9.1
+	github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a
-	github.com/stretchr/testify v1.7.0
+	github.com/stretchr/testify v1.2.0
 )
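The module path gaining the /v2 suffix follows Go semantic import versioning: consumers import the versioned path even though the package name stays realis. A hedged sketch of what a consumer program would look like against this branch (the alias and job values are placeholders):

package main

import (
	"fmt"

	realis "github.com/paypal/gorealis/v2"
)

func main() {
	// The /v2 import path is required by go.mod above; package name is unchanged.
	job := realis.NewJob().Environment("prod").Role("vagrant").Name("hello_world")
	fmt.Printf("%+v\n", job.JobKey())
}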
go.sum (30 changes, deleted)
@@ -1,30 +0,0 @@
-github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
-github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apache/thrift v0.14.0 h1:vqZ2DP42i8th2OsgCcYZkirtbzvpZEFx53LiWDJXIAs=
-github.com/apache/thrift v0.14.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e h1:+RHxT/gm0O3UF7nLJbdNzAmULvCFt4XfXHWzh3XI/zs=
-github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/ridv/thrift v0.12.1 h1:b80V1Oa2Mbd++jrlJZbJsIybO5/MCfbXKzd1A5v4aSo=
-github.com/ridv/thrift v0.12.1/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
-github.com/ridv/thrift v0.13.1 h1:/8XnTRUqJJeiuqoL7mfnJQmXQa4GJn9tUCiP7+i6Y9o=
-github.com/ridv/thrift v0.13.1/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
-github.com/ridv/thrift v0.13.2 h1:Q3Smr8poXd7VkWZPHvdJZzlQCJO+b5W37ECfoUL4qHc=
-github.com/ridv/thrift v0.13.2/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
-github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a h1:EYL2xz/Zdo0hyqdZMXR4lmT2O11jDLTPCEqIe/FR6W4=
-github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.0 h1:LThGCOvhuJic9Gyd1VBCkhyUXmO8vKaBFvBsJ2k03rg=
-github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
helpers.go (21 changes, deleted)
@@ -1,21 +0,0 @@
-package realis
-
-import (
-	"context"
-
-	"github.com/paypal/gorealis/gen-go/apache/aurora"
-)
-
-func (r *realisClient) jobExists(key aurora.JobKey) (bool, error) {
-	resp, err := r.client.GetConfigSummary(context.TODO(), &key)
-	if err != nil {
-		return false, err
-	}
-
-	return resp == nil ||
-		resp.GetResult_() == nil ||
-		resp.GetResult_().GetConfigSummaryResult_() == nil ||
-		resp.GetResult_().GetConfigSummaryResult_().GetSummary() == nil ||
-		resp.GetResponseCode() != aurora.ResponseCode_OK,
-		nil
-}
job.go (408 changes)
@@ -15,377 +15,203 @@
 package realis

 import (
-	"strconv"
-
-	"github.com/paypal/gorealis/gen-go/apache/aurora"
+	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
 )

-// Job inteface is used to define a set of functions an Aurora Job object
-// must implemement.
-// TODO(rdelvalle): Consider getting rid of the Job interface
-type Job interface {
-	// Set Job Key environment.
-	Environment(env string) Job
-	Role(role string) Job
-	Name(name string) Job
-	CronSchedule(cron string) Job
-	CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job
-	CPU(cpus float64) Job
-	Disk(disk int64) Job
-	RAM(ram int64) Job
-	GPU(gpu int64) Job
-	ExecutorName(name string) Job
-	ExecutorData(data string) Job
-	AddPorts(num int) Job
-	AddLabel(key string, value string) Job
-	AddNamedPorts(names ...string) Job
-	AddLimitConstraint(name string, limit int32) Job
-	AddValueConstraint(name string, negated bool, values ...string) Job
-
-	// From Aurora Docs:
-	// dedicated attribute. Aurora treats this specially, and only allows matching jobs
-	// to run on these machines, and will only schedule matching jobs on these machines.
-	// When a job is created, the scheduler requires that the $role component matches
-	// the role field in the job configuration, and will reject the job creation otherwise.
-	// A wildcard (*) may be used for the role portion of the dedicated attribute, which
-	// will allow any owner to elect for a job to run on the host(s)
-	AddDedicatedConstraint(role, name string) Job
-	AddURIs(extract bool, cache bool, values ...string) Job
-	JobKey() *aurora.JobKey
-	JobConfig() *aurora.JobConfiguration
-	TaskConfig() *aurora.TaskConfig
-	IsService(isService bool) Job
-	InstanceCount(instCount int32) Job
-	GetInstanceCount() int32
-	MaxFailure(maxFail int32) Job
-	Container(container Container) Job
-	PartitionPolicy(policy *aurora.PartitionPolicy) Job
-	Tier(tier string) Job
-	SlaPolicy(policy *aurora.SlaPolicy) Job
-	Priority(priority int32) Job
-}
-
-type resourceType int
-
-const (
-	CPU resourceType = iota
-	RAM
-	DISK
-	GPU
-)
-
-const portNamePrefix = "org.apache.aurora.port."
-
-// AuroraJob is a structure to collect all information pertaining to an Aurora job.
+// Structure to collect all information pertaining to an Aurora job.
 type AuroraJob struct {
 	jobConfig *aurora.JobConfiguration
-	resources   map[resourceType]*aurora.Resource
-	metadata    map[string]*aurora.Metadata
-	constraints map[string]*aurora.Constraint
-	portCount   int
+	task      *AuroraTask
 }

-// NewJob is used to create a Job object with everything initialized.
-func NewJob() Job {
-	jobConfig := aurora.NewJobConfiguration()
-	taskConfig := aurora.NewTaskConfig()
-	jobKey := aurora.NewJobKey()
-
-	// Job Config
-	jobConfig.Key = jobKey
-	jobConfig.TaskConfig = taskConfig
-
-	// Task Config
-	taskConfig.Job = jobKey
-	taskConfig.Container = aurora.NewContainer()
-	taskConfig.Container.Mesos = aurora.NewMesosContainer()
-
-	// Resources
-	numCpus := aurora.NewResource()
-	ramMb := aurora.NewResource()
-	diskMb := aurora.NewResource()
-	resources := map[resourceType]*aurora.Resource{CPU: numCpus, RAM: ramMb, DISK: diskMb}
-	taskConfig.Resources = []*aurora.Resource{numCpus, ramMb, diskMb}
-
-	numCpus.NumCpus = new(float64)
-	ramMb.RamMb = new(int64)
-	diskMb.DiskMb = new(int64)
+// Create a AuroraJob object with everything initialized.
+func NewJob() *AuroraJob {
+	jobKey := &aurora.JobKey{}
+
+	// AuroraTask clientConfig
+	task := NewTask()
+	task.task.Job = jobKey
+
+	// AuroraJob clientConfig
+	jobConfig := &aurora.JobConfiguration{
+		Key:        jobKey,
+		TaskConfig: task.TaskConfig(),
+	}

 	return &AuroraJob{
 		jobConfig: jobConfig,
-		resources:   resources,
-		metadata:    make(map[string]*aurora.Metadata),
-		constraints: make(map[string]*aurora.Constraint),
-		portCount:   0,
+		task:      task,
 	}
 }

-// Environment sets the Job Key environment.
-func (j *AuroraJob) Environment(env string) Job {
+// Set AuroraJob Key environment. Explicit changes to AuroraTask's job key are not needed
+// because they share a pointer to the same JobKey.
+func (j *AuroraJob) Environment(env string) *AuroraJob {
 	j.jobConfig.Key.Environment = env
 	return j
 }

-// Role sets the Job Key role.
-func (j *AuroraJob) Role(role string) Job {
+// Set AuroraJob Key Role.
+func (j *AuroraJob) Role(role string) *AuroraJob {
 	j.jobConfig.Key.Role = role

 	// Will be deprecated
 	identity := &aurora.Identity{User: role}
 	j.jobConfig.Owner = identity
 	j.jobConfig.TaskConfig.Owner = identity

 	return j
 }

-// Name sets the Job Key Name.
-func (j *AuroraJob) Name(name string) Job {
+// Set AuroraJob Key Name.
+func (j *AuroraJob) Name(name string) *AuroraJob {
 	j.jobConfig.Key.Name = name
 	return j
 }

-// ExecutorName sets the name of the executor that will the task will be configured to.
-func (j *AuroraJob) ExecutorName(name string) Job {
-
-	if j.jobConfig.TaskConfig.ExecutorConfig == nil {
-		j.jobConfig.TaskConfig.ExecutorConfig = aurora.NewExecutorConfig()
-	}
-
-	j.jobConfig.TaskConfig.ExecutorConfig.Name = name
-	return j
-}
-
-// ExecutorData sets the data blob that will be passed to the Mesos executor.
-func (j *AuroraJob) ExecutorData(data string) Job {
-
-	if j.jobConfig.TaskConfig.ExecutorConfig == nil {
-		j.jobConfig.TaskConfig.ExecutorConfig = aurora.NewExecutorConfig()
-	}
-
-	j.jobConfig.TaskConfig.ExecutorConfig.Data = data
-	return j
-}
-
-// CPU sets the amount of CPU each task will use in an Aurora Job.
-func (j *AuroraJob) CPU(cpus float64) Job {
-	*j.resources[CPU].NumCpus = cpus
-	return j
-}
-
-// RAM sets the amount of RAM each task will use in an Aurora Job.
-func (j *AuroraJob) RAM(ram int64) Job {
-	*j.resources[RAM].RamMb = ram
-	return j
-}
-
-// Disk sets the amount of Disk each task will use in an Aurora Job.
-func (j *AuroraJob) Disk(disk int64) Job {
-	*j.resources[DISK].DiskMb = disk
-	return j
-}
-
-// GPU sets the amount of GPU each task will use in an Aurora Job.
-func (j *AuroraJob) GPU(gpu int64) Job {
-	// GPU resource must be set explicitly since the scheduler by default
-	// rejects jobs with GPU resources attached to it.
-	if _, ok := j.resources[GPU]; !ok {
-		j.resources[GPU] = &aurora.Resource{}
-		j.JobConfig().GetTaskConfig().Resources = append(
-			j.JobConfig().GetTaskConfig().Resources,
-			j.resources[GPU])
-	}
-
-	j.resources[GPU].NumGpus = &gpu
-	return j
-}
-
-// MaxFailure sets how many failures to tolerate before giving up per Job.
-func (j *AuroraJob) MaxFailure(maxFail int32) Job {
-	j.jobConfig.TaskConfig.MaxTaskFailures = maxFail
-	return j
-}
-
-// InstanceCount sets how many instances of the task to run for this Job.
-func (j *AuroraJob) InstanceCount(instCount int32) Job {
+// How many instances of the job to run
+func (j *AuroraJob) InstanceCount(instCount int32) *AuroraJob {
 	j.jobConfig.InstanceCount = instCount
 	return j
 }

-// CronSchedule allows the user to configure a cron schedule for this job to run in.
-func (j *AuroraJob) CronSchedule(cron string) Job {
+func (j *AuroraJob) CronSchedule(cron string) *AuroraJob {
 	j.jobConfig.CronSchedule = &cron
 	return j
 }

-// CronCollisionPolicy allows the user to decide what happens if two or more instances
-// of the same Cron job need to run.
-func (j *AuroraJob) CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job {
+func (j *AuroraJob) CronCollisionPolicy(policy aurora.CronCollisionPolicy) *AuroraJob {
 	j.jobConfig.CronCollisionPolicy = policy
 	return j
 }

-// GetInstanceCount returns how many tasks this Job contains.
+// How many instances of the job to run
 func (j *AuroraJob) GetInstanceCount() int32 {
 	return j.jobConfig.InstanceCount
 }

-// IsService returns true if the job is a long term running job or false if it is an ad-hoc job.
-func (j *AuroraJob) IsService(isService bool) Job {
-	j.jobConfig.TaskConfig.IsService = isService
-	return j
-}
-
-// JobKey returns the job's configuration key.
-func (j *AuroraJob) JobKey() *aurora.JobKey {
-	return j.jobConfig.Key
-}
-
-// JobConfig returns the job's configuration.
+// Get the current job configurations key to use for some realis calls.
+func (j *AuroraJob) JobKey() aurora.JobKey {
+	return *j.jobConfig.Key
+}
+
+// Get the current job configurations key to use for some realis calls.
 func (j *AuroraJob) JobConfig() *aurora.JobConfiguration {
 	return j.jobConfig
 }

-// TaskConfig returns the job's task(shard) configuration.
+// Get the current job configurations key to use for some realis calls.
+func (j *AuroraJob) AuroraTask() *AuroraTask {
+	return j.task
+}
+
+/*
+   AuroraTask specific API, see task.go for further documentation.
+   These functions are provided for the convenience of chaining API calls.
+*/
+
+func (j *AuroraJob) ExecutorName(name string) *AuroraJob {
+	j.task.ExecutorName(name)
+	return j
+}
+
+func (j *AuroraJob) ExecutorData(data string) *AuroraJob {
+	j.task.ExecutorData(data)
+	return j
+}
+
+func (j *AuroraJob) CPU(cpus float64) *AuroraJob {
+	j.task.CPU(cpus)
+	return j
+}
+
+func (j *AuroraJob) RAM(ram int64) *AuroraJob {
+	j.task.RAM(ram)
+	return j
+}
+
+func (j *AuroraJob) Disk(disk int64) *AuroraJob {
+	j.task.Disk(disk)
+	return j
+}
+
+func (j *AuroraJob) GPU(gpu int64) *AuroraJob {
+	j.task.GPU(gpu)
+	return j
+}
+
+func (j *AuroraJob) Tier(tier string) *AuroraJob {
+	j.task.Tier(tier)
+	return j
+}
+
+func (j *AuroraJob) MaxFailure(maxFail int32) *AuroraJob {
+	j.task.MaxFailure(maxFail)
+	return j
+}
+
+func (j *AuroraJob) IsService(isService bool) *AuroraJob {
+	j.task.IsService(isService)
+	return j
+}
+
 func (j *AuroraJob) TaskConfig() *aurora.TaskConfig {
-	return j.jobConfig.TaskConfig
+	return j.task.TaskConfig()
 }

-// AddURIs adds a list of URIs with the same extract and cache configuration. Scheduler must have
-// --enable_mesos_fetcher flag enabled. Currently there is no duplicate detection.
-func (j *AuroraJob) AddURIs(extract bool, cache bool, values ...string) Job {
-	for _, value := range values {
-		j.jobConfig.TaskConfig.MesosFetcherUris = append(j.jobConfig.TaskConfig.MesosFetcherUris,
-			&aurora.MesosFetcherURI{Value: value, Extract: &extract, Cache: &cache})
-	}
+func (j *AuroraJob) AddURIs(extract bool, cache bool, values ...string) *AuroraJob {
+	j.task.AddURIs(extract, cache, values...)
 	return j
 }

-// AddLabel adds a Mesos label to the job. Note that Aurora will add the
-// prefix "org.apache.aurora.metadata." to the beginning of each key.
-func (j *AuroraJob) AddLabel(key string, value string) Job {
-	if _, ok := j.metadata[key]; !ok {
-		j.metadata[key] = &aurora.Metadata{Key: key}
-		j.jobConfig.TaskConfig.Metadata = append(j.jobConfig.TaskConfig.Metadata, j.metadata[key])
-	}
-
-	j.metadata[key].Value = value
+func (j *AuroraJob) AddLabel(key string, value string) *AuroraJob {
+	j.task.AddLabel(key, value)
 	return j
 }

-// AddNamedPorts adds a named port to the job configuration These are random ports as it's
-// not currently possible to request specific ports using Aurora.
-func (j *AuroraJob) AddNamedPorts(names ...string) Job {
-	j.portCount += len(names)
-	for _, name := range names {
-		j.jobConfig.TaskConfig.Resources = append(
-			j.jobConfig.TaskConfig.Resources,
-			&aurora.Resource{NamedPort: &name})
-	}
-
+func (j *AuroraJob) AddNamedPorts(names ...string) *AuroraJob {
+	j.task.AddNamedPorts(names...)
 	return j
 }

-// AddPorts adds a request for a number of ports to the job configuration. The names chosen for these ports
-// will be org.apache.aurora.port.X, where X is the current port count for the job configuration
-// starting at 0. These are random ports as it's not currently possible to request
-// specific ports using Aurora.
-func (j *AuroraJob) AddPorts(num int) Job {
-	start := j.portCount
-	j.portCount += num
-	for i := start; i < j.portCount; i++ {
-		portName := portNamePrefix + strconv.Itoa(i)
-		j.jobConfig.TaskConfig.Resources = append(
-			j.jobConfig.TaskConfig.Resources,
-			&aurora.Resource{NamedPort: &portName})
-	}
-
+func (j *AuroraJob) AddPorts(num int) *AuroraJob {
+	j.task.AddPorts(num)
+	return j
+}
+
+func (j *AuroraJob) AddValueConstraint(name string, negated bool, values ...string) *AuroraJob {
+	j.task.AddValueConstraint(name, negated, values...)
 	return j
 }

-// AddValueConstraint allows the user to add a value constrain to the job to limit which agents the job's
-// tasks can be run on. If the name matches a constraint that was previously set, the previous value will be
-// overwritten. In case the previous constraint attached to the name was of type limit, the constraint will be clobbered
-// by this new Value constraint.
-// From Aurora Docs:
-// Add a Value constraint
-// name - Mesos slave attribute that the constraint is matched against.
-// If negated = true , treat this as a 'not' - to avoid specific values.
-// Values - list of values we look for in attribute name
-func (j *AuroraJob) AddValueConstraint(name string, negated bool, values ...string) Job {
-	if _, ok := j.constraints[name]; !ok {
-		j.constraints[name] = &aurora.Constraint{Name: name}
-		j.jobConfig.TaskConfig.Constraints = append(j.jobConfig.TaskConfig.Constraints, j.constraints[name])
-	}
-
-	j.constraints[name].Constraint = &aurora.TaskConstraint{
-		Value: &aurora.ValueConstraint{
-			Negated: negated,
-			Values:  values,
-		},
-		Limit: nil,
-	}
-
+func (j *AuroraJob) AddLimitConstraint(name string, limit int32) *AuroraJob {
+	j.task.AddLimitConstraint(name, limit)
 	return j
 }

-// AddLimitConstraint allows the user to limit how many tasks form the same Job are run on a single host.
-// If the name matches a constraint that was previously set, the previous value will be
-// overwritten. In case the previous constraint attached to the name was of type Value, the constraint will be clobbered
-// by this new Limit constraint.
-// From Aurora Docs:
-// A constraint that specifies the maximum number of active tasks on a host with
-// a matching attribute that may be scheduled simultaneously.
-func (j *AuroraJob) AddLimitConstraint(name string, limit int32) Job {
-	if _, ok := j.constraints[name]; !ok {
-		j.constraints[name] = &aurora.Constraint{Name: name}
-		j.jobConfig.TaskConfig.Constraints = append(j.jobConfig.TaskConfig.Constraints, j.constraints[name])
-	}
-
-	j.constraints[name].Constraint = &aurora.TaskConstraint{
-		Value: nil,
-		Limit: &aurora.LimitConstraint{Limit: limit},
-	}
-
+func (j *AuroraJob) AddDedicatedConstraint(role, name string) *AuroraJob {
+	j.task.AddDedicatedConstraint(role, name)
 	return j
 }

-// AddDedicatedConstraint is a convenience function that allows the user to
-// add a dedicated constraint to a Job configuration.
-// In case a previous dedicated constraint was set, it will be clobbered by this new value.
-func (j *AuroraJob) AddDedicatedConstraint(role, name string) Job {
-	j.AddValueConstraint("dedicated", false, role+"/"+name)
-
+func (j *AuroraJob) Container(container Container) *AuroraJob {
+	j.task.Container(container)
 	return j
 }

-// Container sets a container to run for the job configuration to run.
-func (j *AuroraJob) Container(container Container) Job {
-	j.jobConfig.TaskConfig.Container = container.Build()
-
+func (j *AuroraJob) ThermosExecutor(thermos ThermosExecutor) *AuroraJob {
+	j.task.ThermosExecutor(thermos)
 	return j
 }

-// PartitionPolicy sets a partition policy for the job configuration to implement.
-func (j *AuroraJob) PartitionPolicy(policy *aurora.PartitionPolicy) Job {
-	j.jobConfig.TaskConfig.PartitionPolicy = policy
-	return j
+func (j *AuroraJob) BuildThermosPayload() error {
+	return j.task.BuildThermosPayload()
 }

-// Tier sets the Tier for the Job.
-func (j *AuroraJob) Tier(tier string) Job {
-	j.jobConfig.TaskConfig.Tier = &tier
-
-	return j
-}
-
-// SlaPolicy sets an SlaPolicy for the Job.
-func (j *AuroraJob) SlaPolicy(policy *aurora.SlaPolicy) Job {
-	j.jobConfig.TaskConfig.SlaPolicy = policy
-
-	return j
-}
-
-func (j *AuroraJob) Priority(priority int32) Job {
-	j.jobConfig.TaskConfig.Priority = priority
+func (j *AuroraJob) PartitionPolicy(reschedule bool, delay int64) *AuroraJob {
+	j.task.PartitionPolicy(aurora.PartitionPolicy{
+		Reschedule: reschedule,
+		DelaySecs:  &delay,
+	})
 	return j
 }
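With the Job interface gone, every setter returns the concrete *AuroraJob, so configuration still chains and most calls now delegate to the embedded AuroraTask. A sketch of building a job against the new surface; all values below are illustrative:

package main

import (
	realis "github.com/paypal/gorealis/v2"
)

func main() {
	// Each setter returns *AuroraJob, so the old interface is not needed to chain.
	job := realis.NewJob().
		Environment("prod").
		Role("vagrant").
		Name("hello_world").
		CPU(0.5).
		RAM(64).
		Disk(100).
		IsService(true).
		InstanceCount(2).
		AddPorts(1)

	// *aurora.JobConfiguration ready to hand to the client.
	_ = job.JobConfig()
}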
jobUpdate.go (249 changes, new file)
@@ -0,0 +1,249 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package realis
+
+import (
+	"time"
+
+	"github.com/apache/thrift/lib/go/thrift"
+	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
+)
+
+// Structure to collect all information required to create job update
+type JobUpdate struct {
+	task    *AuroraTask
+	request *aurora.JobUpdateRequest
+}
+
+// Create a default JobUpdate object with an empty task and no fields filled in.
+func NewJobUpdate() *JobUpdate {
+	newTask := NewTask()
+
+	req := aurora.JobUpdateRequest{}
+	req.TaskConfig = newTask.TaskConfig()
+	req.Settings = newUpdateSettings()
+
+	return &JobUpdate{task: newTask, request: &req}
+}
+
+func JobUpdateFromAuroraTask(task *AuroraTask) *JobUpdate {
+	newTask := task.Clone()
+
+	req := aurora.JobUpdateRequest{}
+	req.TaskConfig = newTask.TaskConfig()
+	req.Settings = newUpdateSettings()
+
+	return &JobUpdate{task: newTask, request: &req}
+}
+
+func JobUpdateFromConfig(task *aurora.TaskConfig) *JobUpdate {
+	// Perform a deep copy to avoid unexpected behavior
+	newTask := TaskFromThrift(task)
+
+	req := aurora.JobUpdateRequest{}
+	req.TaskConfig = newTask.TaskConfig()
+	req.Settings = newUpdateSettings()
+
+	return &JobUpdate{task: newTask, request: &req}
+}
+
+// Set instance count the job will have after the update.
+func (j *JobUpdate) InstanceCount(inst int32) *JobUpdate {
+	j.request.InstanceCount = inst
+	return j
+}
+
+// Max number of instances being updated at any given moment.
+func (j *JobUpdate) BatchSize(size int32) *JobUpdate {
+	j.request.Settings.UpdateGroupSize = size
+	return j
+}
+
+// Minimum number of seconds a shard must remain in RUNNING state before considered a success.
+func (j *JobUpdate) WatchTime(timeout time.Duration) *JobUpdate {
+	j.request.Settings.MinWaitInInstanceRunningMs = int32(timeout.Seconds() * 1000)
+	return j
+}
+
+// Wait for all instances in a group to be done before moving on.
+func (j *JobUpdate) WaitForBatchCompletion(batchWait bool) *JobUpdate {
+	j.request.Settings.WaitForBatchCompletion = batchWait
+	return j
+}
+
+// Max number of instance failures to tolerate before marking instance as FAILED.
+func (j *JobUpdate) MaxPerInstanceFailures(inst int32) *JobUpdate {
+	j.request.Settings.MaxPerInstanceFailures = inst
+	return j
+}
+
+// Max number of FAILED instances to tolerate before terminating the update.
+func (j *JobUpdate) MaxFailedInstances(inst int32) *JobUpdate {
+	j.request.Settings.MaxFailedInstances = inst
+	return j
+}
+
+// When False, prevents auto rollback of a failed update.
+func (j *JobUpdate) RollbackOnFail(rollback bool) *JobUpdate {
+	j.request.Settings.RollbackOnFailure = rollback
+	return j
+}
+
+// Sets the interval at which pulses should be received by the job update before timing out.
+func (j *JobUpdate) PulseIntervalTimeout(timeout time.Duration) *JobUpdate {
+	j.request.Settings.BlockIfNoPulsesAfterMs = thrift.Int32Ptr(int32(timeout.Seconds() * 1000))
+	return j
+}
+
+func newUpdateSettings() *aurora.JobUpdateSettings {
+	us := aurora.JobUpdateSettings{}
+	// Mirrors defaults set by Pystachio
+	us.UpdateOnlyTheseInstances = []*aurora.Range{}
+	us.UpdateGroupSize = 1
+	us.WaitForBatchCompletion = false
+	us.MinWaitInInstanceRunningMs = 45000
+	us.MaxPerInstanceFailures = 0
+	us.MaxFailedInstances = 0
+	us.RollbackOnFailure = true
+
+	return &us
+}
+
+/*
+   These methods are provided for user convenience in order to chain
+   calls for configuration.
+   API below here are wrappers around modifying an AuroraTask instance.
+   See task.go for further documentation.
+*/
+
+func (j *JobUpdate) Environment(env string) *JobUpdate {
+	j.task.Environment(env)
+	return j
+}
+
+func (j *JobUpdate) Role(role string) *JobUpdate {
+	j.task.Role(role)
+	return j
+}
+
+func (j *JobUpdate) Name(name string) *JobUpdate {
+	j.task.Name(name)
+	return j
+}
+
+func (j *JobUpdate) ExecutorName(name string) *JobUpdate {
+	j.task.ExecutorName(name)
+	return j
+}
+
+func (j *JobUpdate) ExecutorData(data string) *JobUpdate {
+	j.task.ExecutorData(data)
+	return j
+}
+
+func (j *JobUpdate) CPU(cpus float64) *JobUpdate {
+	j.task.CPU(cpus)
+	return j
+}
+
+func (j *JobUpdate) RAM(ram int64) *JobUpdate {
+	j.task.RAM(ram)
+	return j
+}
+
+func (j *JobUpdate) Disk(disk int64) *JobUpdate {
+	j.task.Disk(disk)
+	return j
+}
+
+func (j *JobUpdate) Tier(tier string) *JobUpdate {
+	j.task.Tier(tier)
+	return j
+}
+
+func (j *JobUpdate) MaxFailure(maxFail int32) *JobUpdate {
+	j.task.MaxFailure(maxFail)
+	return j
+}
+
+func (j *JobUpdate) IsService(isService bool) *JobUpdate {
+	j.task.IsService(isService)
+	return j
+}
+
+func (j *JobUpdate) TaskConfig() *aurora.TaskConfig {
+	return j.task.TaskConfig()
+}
+
+func (j *JobUpdate) AddURIs(extract bool, cache bool, values ...string) *JobUpdate {
+	j.task.AddURIs(extract, cache, values...)
+	return j
+}
+
+func (j *JobUpdate) AddLabel(key string, value string) *JobUpdate {
+	j.task.AddLabel(key, value)
+	return j
+}
+
+func (j *JobUpdate) AddNamedPorts(names ...string) *JobUpdate {
+	j.task.AddNamedPorts(names...)
+	return j
+}
+
+func (j *JobUpdate) AddPorts(num int) *JobUpdate {
+	j.task.AddPorts(num)
+	return j
+}
+
+func (j *JobUpdate) AddValueConstraint(name string, negated bool, values ...string) *JobUpdate {
+	j.task.AddValueConstraint(name, negated, values...)
+	return j
+}
+
+func (j *JobUpdate) AddLimitConstraint(name string, limit int32) *JobUpdate {
+	j.task.AddLimitConstraint(name, limit)
+	return j
+}
+
+func (j *JobUpdate) AddDedicatedConstraint(role, name string) *JobUpdate {
+	j.task.AddDedicatedConstraint(role, name)
+	return j
+}
+
+func (j *JobUpdate) Container(container Container) *JobUpdate {
+	j.task.Container(container)
+	return j
+}
+
+func (j *JobUpdate) JobKey() aurora.JobKey {
+	return j.task.JobKey()
+}
+
+func (j *JobUpdate) ThermosExecutor(thermos ThermosExecutor) *JobUpdate {
+	j.task.ThermosExecutor(thermos)
+	return j
+}
+
+func (j *JobUpdate) BuildThermosPayload() error {
+	return j.task.BuildThermosPayload()
+}
+
+func (j *JobUpdate) PartitionPolicy(reschedule bool, delay int64) *JobUpdate {
+	j.task.PartitionPolicy(aurora.PartitionPolicy{
+		Reschedule: reschedule,
+		DelaySecs:  &delay,
+	})
+	return j
+}
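Usage sketch for the new JobUpdate builder; all values are illustrative. NewJobUpdate starts from the Pystachio-like defaults set in newUpdateSettings above (group size 1, 45s minimum running wait, rollback on failure), and each setter returns *JobUpdate:

package main

import (
	"time"

	realis "github.com/paypal/gorealis/v2"
)

func main() {
	// Illustrative values only; JobUpdateFromConfig/JobUpdateFromAuroraTask
	// work the same way when starting from an existing task.
	update := realis.NewJobUpdate().
		Environment("prod").
		Role("vagrant").
		Name("hello_world").
		CPU(0.25).
		RAM(128).
		InstanceCount(4).
		BatchSize(2).
		WatchTime(60 * time.Second).
		MaxPerInstanceFailures(1).
		RollbackOnFail(true)

	_ = update
}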
logger.go (24 changes)
@@ -14,73 +14,65 @@

 package realis

-type logger interface {
+type Logger interface {
 	Println(v ...interface{})
 	Printf(format string, v ...interface{})
 	Print(v ...interface{})
 }

-// NoopLogger is a logger that can be attached to the client which will not print anything.
 type NoopLogger struct{}

-// Printf is a NOOP function here.
 func (NoopLogger) Printf(format string, a ...interface{}) {}

-// Print is a NOOP function here.
 func (NoopLogger) Print(a ...interface{}) {}

-// Println is a NOOP function here.
 func (NoopLogger) Println(a ...interface{}) {}

-// LevelLogger is a logger that can be configured to output different levels of information: Debug and Trace.
-// Trace should only be enabled when very in depth information about the sequence of events a function took is needed.
 type LevelLogger struct {
-	logger
+	Logger
 	debug bool
 	trace bool
 }

-// EnableDebug enables debug level logging for the LevelLogger
 func (l *LevelLogger) EnableDebug(enable bool) {
 	l.debug = enable
 }

-// EnableTrace enables trace level logging for the LevelLogger
 func (l *LevelLogger) EnableTrace(enable bool) {
 	l.trace = enable
 }

-func (l LevelLogger) debugPrintf(format string, a ...interface{}) {
+func (l LevelLogger) DebugPrintf(format string, a ...interface{}) {
 	if l.debug {
 		l.Printf("[DEBUG] "+format, a...)
 	}
 }

-func (l LevelLogger) debugPrint(a ...interface{}) {
+func (l LevelLogger) DebugPrint(a ...interface{}) {
 	if l.debug {
 		l.Print(append([]interface{}{"[DEBUG] "}, a...)...)
 	}
 }

-func (l LevelLogger) debugPrintln(a ...interface{}) {
+func (l LevelLogger) DebugPrintln(a ...interface{}) {
 	if l.debug {
 		l.Println(append([]interface{}{"[DEBUG] "}, a...)...)
 	}
 }

-func (l LevelLogger) tracePrintf(format string, a ...interface{}) {
+func (l LevelLogger) TracePrintf(format string, a ...interface{}) {
 	if l.trace {
 		l.Printf("[TRACE] "+format, a...)
 	}
 }

-func (l LevelLogger) tracePrint(a ...interface{}) {
+func (l LevelLogger) TracePrint(a ...interface{}) {
 	if l.trace {
 		l.Print(append([]interface{}{"[TRACE] "}, a...)...)
 	}
 }

-func (l LevelLogger) tracePrintln(a ...interface{}) {
+func (l LevelLogger) TracePrintln(a ...interface{}) {
 	if l.trace {
 		l.Println(append([]interface{}{"[TRACE] "}, a...)...)
 	}
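Since the interface is now exported as Logger and requires only Print, Printf, and Println, the standard library's *log.Logger satisfies it directly. A sketch of wiring one into LevelLogger; constructing the struct this way externally is an assumption, and the debug/trace flags default to off:

package main

import (
	"log"
	"os"

	realis "github.com/paypal/gorealis/v2"
)

func main() {
	// *log.Logger provides Print/Printf/Println, satisfying realis.Logger.
	l := realis.LevelLogger{Logger: log.New(os.Stdout, "realis: ", log.Ltime)}
	l.EnableDebug(true)
	l.DebugPrintln("debug logging enabled") // emitted with a [DEBUG] prefix
	l.TracePrintln("not printed; trace is still disabled")
}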
232
monitors.go
232
monitors.go
|
@ -12,46 +12,48 @@
|
||||||
* limitations under the License.
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
// Collection of monitors to create synchronicity
|
||||||
package realis
|
package realis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Monitor is a wrapper for the Realis client which allows us to have functions
|
// MonitorJobUpdate polls the scheduler every certain amount of time to see if the update has succeeded.
|
||||||
// with the same name for Monitoring purposes.
|
// If the update entered a terminal update state but it is not ROLLED_FORWARD, this function will return an error.
|
||||||
// TODO(rdelvalle): Deprecate monitors and instead add prefix Monitor to
|
func (c *Client) MonitorJobUpdate(updateKey aurora.JobUpdateKey, interval, timeout time.Duration) (bool, error) {
|
||||||
// all functions in this file like it is done in V2.
|
if interval < 1*time.Second {
|
||||||
type Monitor struct {
|
interval = interval * time.Second
|
||||||
Client Realis
|
|
||||||
}
|
|
||||||
|
|
||||||
// JobUpdate polls the scheduler every certain amount of time to see if the update has entered a terminal state.
|
|
||||||
func (m *Monitor) JobUpdate(
|
|
||||||
updateKey aurora.JobUpdateKey,
|
|
||||||
interval int,
|
|
||||||
timeout int) (bool, error) {
|
|
||||||
|
|
||||||
updateQ := aurora.JobUpdateQuery{
|
|
||||||
Key: &updateKey,
|
|
||||||
Limit: 1,
|
|
||||||
UpdateStatuses: TerminalUpdateStates(),
|
|
||||||
}
|
}
|
||||||
updateSummaries, err := m.JobUpdateQuery(
|
|
||||||
updateQ,
|
|
||||||
time.Duration(interval)*time.Second,
|
|
||||||
time.Duration(timeout)*time.Second)
|
|
||||||
|
|
||||||
status := updateSummaries[0].State.Status
|
if timeout < 1*time.Second {
|
||||||
|
timeout = timeout * time.Second
|
||||||
|
}
|
||||||
|
updateSummaries, err := c.MonitorJobUpdateQuery(
|
||||||
|
aurora.JobUpdateQuery{
|
||||||
|
Key: &updateKey,
|
||||||
|
Limit: 1,
|
||||||
|
UpdateStatuses: []aurora.JobUpdateStatus{
|
||||||
|
aurora.JobUpdateStatus_ROLLED_FORWARD,
|
||||||
|
aurora.JobUpdateStatus_ROLLED_BACK,
|
||||||
|
aurora.JobUpdateStatus_ABORTED,
|
||||||
|
aurora.JobUpdateStatus_ERROR,
|
||||||
|
aurora.JobUpdateStatus_FAILED,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
interval,
|
||||||
|
timeout)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
m.Client.RealisConfig().logger.Printf("job update status: %v\n", status)
|
status := updateSummaries[0].State.Status
|
||||||
|
|
||||||
|
c.RealisConfig().logger.Printf("job update status: %v\n", status)
|
||||||
|
|
||||||
// Rolled forward is the only state in which an update has been successfully updated
|
// Rolled forward is the only state in which an update has been successfully updated
|
||||||
// if we encounter an inactive state and it is not at rolled forward, update failed
|
// if we encounter an inactive state and it is not at rolled forward, update failed
|
||||||
|
@ -68,25 +70,41 @@ func (m *Monitor) JobUpdate(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// JobUpdateStatus polls the scheduler every certain amount of time to see if the update has entered a specified state.
|
// MonitorJobUpdateStatus polls the scheduler for information about an update until the update enters one of the
|
||||||
func (m *Monitor) JobUpdateStatus(updateKey aurora.JobUpdateKey,
|
// desired states or until the function times out.
|
||||||
|
func (c *Client) MonitorJobUpdateStatus(updateKey aurora.JobUpdateKey,
|
||||||
desiredStatuses []aurora.JobUpdateStatus,
|
desiredStatuses []aurora.JobUpdateStatus,
|
||||||
interval, timeout time.Duration) (aurora.JobUpdateStatus, error) {
|
interval, timeout time.Duration) (aurora.JobUpdateStatus, error) {
|
||||||
|
|
||||||
|
if len(desiredStatuses) == 0 {
|
||||||
|
return aurora.JobUpdateStatus(-1), errors.New("no desired statuses provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make deep local copy to avoid side effects from job key being manipulated externally.
|
||||||
|
updateKeyLocal := &aurora.JobUpdateKey{
|
||||||
|
Job: &aurora.JobKey{
|
||||||
|
Role: updateKey.Job.GetRole(),
|
||||||
|
Environment: updateKey.Job.GetEnvironment(),
|
||||||
|
Name: updateKey.Job.GetName(),
|
||||||
|
},
|
||||||
|
ID: updateKey.GetID(),
|
||||||
|
}
|
||||||
|
|
||||||
updateQ := aurora.JobUpdateQuery{
|
updateQ := aurora.JobUpdateQuery{
|
||||||
Key: &updateKey,
|
Key: updateKeyLocal,
|
||||||
Limit: 1,
|
Limit: 1,
|
||||||
UpdateStatuses: desiredStatuses,
|
UpdateStatuses: desiredStatuses,
|
||||||
}
|
}
|
||||||
summary, err := m.JobUpdateQuery(updateQ, interval, timeout)
|
|
||||||
if err != nil {
|
summary, err := c.MonitorJobUpdateQuery(updateQ, interval, timeout)
|
||||||
return 0, err
|
if len(summary) > 0 {
|
||||||
|
return summary[0].State.Status, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return summary[0].State.Status, nil
|
return aurora.JobUpdateStatus(-1), err
|
||||||
}
|
}
|
||||||
|
|
||||||
// JobUpdateQuery polls the scheduler every certain amount of time to see if the query call returns any results.
|
func (c *Client) MonitorJobUpdateQuery(
|
||||||
func (m *Monitor) JobUpdateQuery(
|
|
||||||
updateQuery aurora.JobUpdateQuery,
|
updateQuery aurora.JobUpdateQuery,
|
||||||
interval time.Duration,
|
interval time.Duration,
|
||||||
timeout time.Duration) ([]*aurora.JobUpdateSummary, error) {
|
timeout time.Duration) ([]*aurora.JobUpdateSummary, error) {
|
||||||
|
@ -95,20 +113,16 @@ func (m *Monitor) JobUpdateQuery(
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
timer := time.NewTimer(timeout)
|
timer := time.NewTimer(timeout)
|
||||||
defer timer.Stop()
|
defer timer.Stop()
|
||||||
|
|
||||||
var cliErr error
|
|
||||||
var respDetail *aurora.Response
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
respDetail, cliErr = m.Client.GetJobUpdateSummaries(&updateQuery)
|
updateSummaryResults, cliErr := c.GetJobUpdateSummaries(&updateQuery)
|
||||||
if cliErr != nil {
|
if cliErr != nil {
|
||||||
return nil, cliErr
|
return nil, cliErr
|
||||||
}
|
}
|
||||||
|
|
||||||
updateSummaries := respDetail.Result_.GetJobUpdateSummariesResult_.UpdateSummaries
|
if len(updateSummaryResults.GetUpdateSummaries()) >= 1 {
|
||||||
if len(updateSummaries) >= 1 {
|
return updateSummaryResults.GetUpdateSummaries(), nil
|
||||||
return updateSummaries, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
case <-timer.C:
|
case <-timer.C:
|
||||||
|
@ -117,105 +131,37 @@ func (m *Monitor) JobUpdateQuery(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// AutoPausedUpdateMonitor is a special monitor for auto pause enabled batch updates. This monitor ensures that the update
|
// Monitor a AuroraJob until all instances enter one of the LiveStates
|
||||||
// being monitored is capable of auto pausing and has auto pausing enabled. After verifying this information,
|
func (c *Client) MonitorInstances(key aurora.JobKey, instances int32, interval, timeout time.Duration) (bool, error) {
|
||||||
// the monitor watches for the job to enter the ROLL_FORWARD_PAUSED state and calculates the current batch
|
return c.MonitorScheduleStatus(key, instances, aurora.LIVE_STATES, interval, timeout)
|
||||||
// the update is in using information from the update configuration.
|
|
||||||
func (m *Monitor) AutoPausedUpdateMonitor(key aurora.JobUpdateKey, interval, timeout time.Duration) (int, error) {
|
|
||||||
key.Job = &aurora.JobKey{
|
|
||||||
Role: key.Job.Role,
|
|
||||||
Environment: key.Job.Environment,
|
|
||||||
Name: key.Job.Name,
|
|
||||||
}
|
|
||||||
query := aurora.JobUpdateQuery{
|
|
||||||
UpdateStatuses: aurora.ACTIVE_JOB_UPDATE_STATES,
|
|
||||||
Limit: 1,
|
|
||||||
Key: &key,
|
|
||||||
}
|
|
||||||
|
|
||||||
response, err := m.Client.JobUpdateDetails(query)
|
|
||||||
if err != nil {
|
|
||||||
return -1, errors.Wrap(err, "unable to get information about update")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO (rdelvalle): check for possible nil values when going down the list of structs
|
|
||||||
updateDetails := response.Result_.GetJobUpdateDetailsResult_.DetailsList
|
|
||||||
if len(updateDetails) == 0 {
|
|
||||||
return -1, errors.Errorf("details for update could not be found")
|
|
||||||
}
|
|
||||||
|
|
||||||
updateStrategy := updateDetails[0].Update.Instructions.Settings.UpdateStrategy
|
|
||||||
|
|
||||||
var batchSizes []int32
|
|
||||||
switch {
|
|
||||||
case updateStrategy.IsSetVarBatchStrategy():
|
|
||||||
batchSizes = updateStrategy.VarBatchStrategy.GroupSizes
|
|
||||||
if !updateStrategy.VarBatchStrategy.AutopauseAfterBatch {
|
|
||||||
return -1, errors.Errorf("update does not have auto pause enabled")
|
|
||||||
}
|
|
||||||
case updateStrategy.IsSetBatchStrategy():
|
|
||||||
batchSizes = []int32{updateStrategy.BatchStrategy.GroupSize}
|
|
||||||
if !updateStrategy.BatchStrategy.AutopauseAfterBatch {
|
|
||||||
return -1, errors.Errorf("update does not have auto pause enabled")
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return -1, errors.Errorf("update is not using a batch update strategy")
|
|
||||||
}
|
|
||||||
|
|
||||||
query.UpdateStatuses = append(TerminalUpdateStates(), aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED)
|
|
||||||
summary, err := m.JobUpdateQuery(query, interval, timeout)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !(summary[0].State.Status == aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED ||
|
|
||||||
summary[0].State.Status == aurora.JobUpdateStatus_ROLLED_FORWARD) {
|
|
||||||
return -1, errors.Errorf("update is in a terminal state %v", summary[0].State.Status)
|
|
||||||
}
|
|
||||||
|
|
||||||
updatingInstances := make(map[int32]struct{})
|
|
||||||
for _, e := range updateDetails[0].InstanceEvents {
|
|
||||||
// We only care about INSTANCE_UPDATING actions because we only care that they've been attempted
|
|
||||||
if e != nil && e.GetAction() == aurora.JobUpdateAction_INSTANCE_UPDATING {
|
|
||||||
updatingInstances[e.GetInstanceId()] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return calculateCurrentBatch(int32(len(updatingInstances)), batchSizes), nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
-// Instances will monitor a Job until all instances enter one of the LIVE_STATES
-func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval, timeout int) (bool, error) {
-	return m.ScheduleStatus(key, instances, LiveStates, interval, timeout)
-}
-
-// ScheduleStatus will monitor a Job until all instances enter a desired status.
+// Monitor an AuroraJob until all instances enter a desired status.
 // Default sets of desired statuses provided by the thrift API include:
-// ACTIVE_STATES, SLAVE_ASSIGNED_STATES, LIVE_STATES, and TERMINAL_STATES
-func (m *Monitor) ScheduleStatus(
-	key *aurora.JobKey,
-	instanceCount int32,
-	desiredStatuses map[aurora.ScheduleStatus]bool,
-	interval int,
-	timeout int) (bool, error) {
-
-	ticker := time.NewTicker(time.Second * time.Duration(interval))
-	defer ticker.Stop()
-	timer := time.NewTimer(time.Second * time.Duration(timeout))
-	defer timer.Stop()
-
-	wantedStatuses := make([]aurora.ScheduleStatus, 0)
-
-	for status := range desiredStatuses {
-		wantedStatuses = append(wantedStatuses, status)
-	}
+// ActiveStates, SlaveAssignedStates, LiveStates, and TerminalStates
+func (c *Client) MonitorScheduleStatus(key aurora.JobKey,
+	instanceCount int32,
+	desiredStatuses []aurora.ScheduleStatus,
+	interval, timeout time.Duration) (bool, error) {
+	if interval < 1*time.Second {
+		interval = interval * time.Second
+	}
+
+	if timeout < 1*time.Second {
+		timeout = timeout * time.Second
+	}
+
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+	timer := time.NewTimer(timeout)
+	defer timer.Stop()

 	for {
 		select {
 		case <-ticker.C:

 			// Query Aurora for the state of the job key every interval
-			instCount, cliErr := m.Client.GetInstanceIds(key, wantedStatuses)
+			instCount, cliErr := c.GetInstanceIds(key, desiredStatuses)
 			if cliErr != nil {
 				return false, errors.Wrap(cliErr, "Unable to communicate with Aurora")
 			}

@@ -225,18 +171,23 @@ func (m *Monitor) ScheduleStatus(

 		case <-timer.C:

 			// If the timer runs out, return a timeout error to user
-			return false, newTimedoutError(errors.New("schedule status monitor timed out"))
+			return false, newTimedoutError(errors.New("schedule status monitor timedout"))
 		}
 	}
 }
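A usage sketch of the new polling monitor (not part of this diff), assuming the v2 constructor is named NewClient; the scheduler address and job key are placeholders:

package main

import (
	"fmt"
	"log"
	"time"

	realis "github.com/paypal/gorealis/v2"
	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
)

func main() {
	// Assumption: a scheduler is reachable at this address.
	client, err := realis.NewClient(realis.SchedulerUrl("http://192.168.33.7:8081"))
	if err != nil {
		log.Fatal(err)
	}

	key := aurora.JobKey{Role: "vagrant", Environment: "prod", Name: "hello_world"}
	running := []aurora.ScheduleStatus{aurora.ScheduleStatus_RUNNING}

	// Block until all 4 instances are RUNNING, polling every 5 seconds
	// and giving up after 2 minutes.
	ok, err := client.MonitorScheduleStatus(key, 4, running, 5*time.Second, 2*time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("all instances running:", ok)
}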
-// HostMaintenance will monitor host status until all hosts match the status provided.
-// Returns a map where the value is true if the host
+// Monitor host status until all hosts match the status provided. Returns a map where the value is true if the host
 // is in one of the desired mode(s) or false if it is not as of the time when the monitor exited.
-func (m *Monitor) HostMaintenance(
-	hosts []string,
+func (c *Client) MonitorHostMaintenance(hosts []string,
 	modes []aurora.MaintenanceMode,
-	interval, timeout int) (map[string]bool, error) {
+	interval, timeout time.Duration) (map[string]bool, error) {
+	if interval < 1*time.Second {
+		interval = interval * time.Second
+	}
+
+	if timeout < 1*time.Second {
+		timeout = timeout * time.Second
+	}

 	// Transform modes to monitor for into a set for easy lookup
 	desiredMode := make(map[aurora.MaintenanceMode]struct{})

@@ -245,8 +196,7 @@ func (m *Monitor) HostMaintenance(
 	}

 	// Turn slice into a host set to eliminate duplicates.
-	// We also can't use a simple count because multiple modes means
-	// we can have multiple matches for a single host.
+	// We also can't use a simple count because multiple modes means we can have multiple matches for a single host.
 	// I.e. host A transitions from ACTIVE to DRAINING to DRAINED while monitored
 	remainingHosts := make(map[string]struct{})
 	for _, host := range hosts {

@@ -255,16 +205,16 @@ func (m *Monitor) HostMaintenance(

 	hostResult := make(map[string]bool)

-	ticker := time.NewTicker(time.Second * time.Duration(interval))
+	ticker := time.NewTicker(interval)
 	defer ticker.Stop()
-	timer := time.NewTimer(time.Second * time.Duration(timeout))
+	timer := time.NewTimer(timeout)
 	defer timer.Stop()

 	for {
 		select {
 		case <-ticker.C:
 			// Client call has multiple retries internally
-			_, result, err := m.Client.MaintenanceStatus(hosts...)
+			result, err := c.MaintenanceStatus(hosts...)
 			if err != nil {
 				// Error is either a payload error or a severe connection error
 				for host := range remainingHosts {

@@ -290,7 +240,7 @@ func (m *Monitor) HostMaintenance(
 				hostResult[host] = false
 			}

-			return hostResult, newTimedoutError(errors.New("host maintenance monitor timed out"))
+			return hostResult, newTimedoutError(errors.New("host maintenance monitor timedout"))
 		}
 	}
 }
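Putting this monitor together with the admin calls in realis_admin.go below, a hedged sketch of a drain workflow (not part of this diff; hostnames are placeholders and client is assumed to be a connected *realis.Client):

hosts := []string{"agent-1.example.com", "agent-2.example.com"}

// Ask the scheduler to start draining the agents.
if _, err := client.DrainHosts(hosts...); err != nil {
	log.Fatal(err)
}

// Wait until both agents report DRAINED, polling every 5 seconds
// for up to 10 minutes.
drained, err := client.MonitorHostMaintenance(
	hosts,
	[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED},
	5*time.Second,
	10*time.Minute)
if err != nil {
	log.Fatal(err)
}
fmt.Println(drained) // map[host]bool: true if the host reached DRAINED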
realis_admin.go (301)
@@ -1,308 +1,267 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package realis

 import (
 	"context"

-	"github.com/paypal/gorealis/gen-go/apache/aurora"
+	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
 	"github.com/pkg/errors"
 )

-// TODO(rdelvalle): Consider moving these functions to another interface. It would be a backwards incompatible change,
-// but would add safety.
-
 // Set a list of nodes to DRAINING. This means nothing will be able to be scheduled on them and any existing
 // tasks will be killed and re-scheduled elsewhere in the cluster. Tasks from DRAINING nodes are not guaranteed
 // to return to running unless there is enough capacity in the cluster to run them.
-func (r *realisClient) DrainHosts(hosts ...string) (*aurora.Response, *aurora.DrainHostsResult_, error) {
-
-	var result *aurora.DrainHostsResult_
-
-	if len(hosts) == 0 {
-		return nil, nil, errors.New("no hosts provided to drain")
-	}
-
-	drainList := aurora.NewHosts()
-	drainList.HostNames = hosts
-
-	r.logger.debugPrintf("DrainHosts Thrift Payload: %v\n", drainList)
-
-	resp, retryErr := r.thriftCallWithRetries(
-		false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.DrainHosts(context.TODO(), drainList)
-		},
-		nil,
-	)
-
-	if retryErr != nil {
-		return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
-	}
-
-	if resp.GetResult_() != nil {
-		result = resp.GetResult_().GetDrainHostsResult_()
-	}
-
-	return resp, result, nil
-}
-
-// Start SLA Aware Drain.
-// defaultSlaPolicy is the fallback SlaPolicy to use if a task does not have an SlaPolicy.
-// After timeoutSecs, tasks will be forcefully drained without checking SLA.
-func (r *realisClient) SLADrainHosts(
-	policy *aurora.SlaPolicy,
-	timeout int64,
-	hosts ...string) (*aurora.DrainHostsResult_, error) {
-	var result *aurora.DrainHostsResult_
-
+func (c *Client) DrainHosts(hosts ...string) ([]*aurora.HostStatus, error) {
 	if len(hosts) == 0 {
 		return nil, errors.New("no hosts provided to drain")
 	}

-	if policy == nil || policy.CountSetFieldsSlaPolicy() == 0 {
-		policy = &defaultSlaPolicy
-		r.logger.Printf("Warning: start draining with default sla policy %v", policy)
-	}
-
-	if timeout < 0 {
-		r.logger.Printf("Warning: timeout %d secs is invalid, draining with default timeout %d secs",
-			timeout,
-			defaultSlaDrainTimeoutSecs)
-		timeout = defaultSlaDrainTimeoutSecs
-	}
+	drainList := aurora.NewHosts()
+	drainList.HostNames = hosts
+
+	c.logger.DebugPrintf("DrainHosts Thrift Payload: %v\n", drainList)
+
+	resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.DrainHosts(context.TODO(), drainList)
+	})
+
+	if retryErr != nil {
+		return nil, errors.Wrap(retryErr, "unable to recover connection")
+	}
+
+	if resp.GetResult_() != nil && resp.GetResult_().GetDrainHostsResult_() != nil {
+		return resp.GetResult_().GetDrainHostsResult_().GetStatuses(), nil
+	} else {
+		return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
+	}
+}
+
+// Start SLA Aware Drain.
+// defaultSlaPolicy is the fallback SlaPolicy to use if a task does not have an SlaPolicy.
+// After timeoutSecs, tasks will be forcefully drained without checking SLA.
+func (c *Client) SLADrainHosts(policy *aurora.SlaPolicy, timeout int64, hosts ...string) ([]*aurora.HostStatus, error) {
+	if len(hosts) == 0 {
+		return nil, errors.New("no hosts provided to drain")
+	}

 	drainList := aurora.NewHosts()
 	drainList.HostNames = hosts

-	r.logger.debugPrintf("SLADrainHosts Thrift Payload: %v\n", drainList)
+	c.logger.DebugPrintf("SLADrainHosts Thrift Payload: %v\n", drainList)

-	resp, retryErr := r.thriftCallWithRetries(
-		false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.SlaDrainHosts(context.TODO(), drainList, policy, timeout)
-		},
-		nil,
-	)
+	resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.SlaDrainHosts(context.TODO(), drainList, policy, timeout)
+	})

 	if retryErr != nil {
-		return result, errors.Wrap(retryErr, "Unable to recover connection")
+		return nil, errors.Wrap(retryErr, "unable to recover connection")
 	}

-	if resp.GetResult_() != nil {
-		result = resp.GetResult_().GetDrainHostsResult_()
+	if resp.GetResult_() != nil && resp.GetResult_().GetDrainHostsResult_() != nil {
+		return resp.GetResult_().GetDrainHostsResult_().GetStatuses(), nil
+	} else {
+		return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
 	}
-
-	return result, nil
 }
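The SLA-aware variant takes a fallback policy plus a hard deadline in seconds. A sketch under the same assumptions as earlier (not part of this diff; the CountSlaPolicy values are illustrative):

// Require at least 1 instance of each affected task to stay active over any
// 30-second window while draining; force the drain after one hour regardless.
policy := &aurora.SlaPolicy{
	CountSlaPolicy: &aurora.CountSlaPolicy{Count: 1, DurationSecs: 30},
}

statuses, err := client.SLADrainHosts(policy, 3600, "agent-1.example.com")
if err != nil {
	log.Fatal(err)
}
for _, s := range statuses {
	fmt.Println(s.GetHost(), s.GetMode())
}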
-func (r *realisClient) StartMaintenance(hosts ...string) (*aurora.Response, *aurora.StartMaintenanceResult_, error) {
-
-	var result *aurora.StartMaintenanceResult_
-
+func (c *Client) StartMaintenance(hosts ...string) ([]*aurora.HostStatus, error) {
 	if len(hosts) == 0 {
-		return nil, nil, errors.New("no hosts provided to start maintenance on")
+		return nil, errors.New("no hosts provided to start maintenance on")
 	}

 	hostList := aurora.NewHosts()
 	hostList.HostNames = hosts

-	r.logger.debugPrintf("StartMaintenance Thrift Payload: %v\n", hostList)
+	c.logger.DebugPrintf("StartMaintenance Thrift Payload: %v\n", hostList)

-	resp, retryErr := r.thriftCallWithRetries(
-		false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.StartMaintenance(context.TODO(), hostList)
-		},
-		nil,
-	)
+	resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.StartMaintenance(context.TODO(), hostList)
+	})

 	if retryErr != nil {
-		return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
+		return nil, errors.Wrap(retryErr, "unable to recover connection")
 	}

-	if resp.GetResult_() != nil {
-		result = resp.GetResult_().GetStartMaintenanceResult_()
+	if resp.GetResult_() != nil && resp.GetResult_().GetStartMaintenanceResult_() != nil {
+		return resp.GetResult_().GetStartMaintenanceResult_().GetStatuses(), nil
+	} else {
+		return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
 	}
-
-	return resp, result, nil
 }

-func (r *realisClient) EndMaintenance(hosts ...string) (*aurora.Response, *aurora.EndMaintenanceResult_, error) {
-
-	var result *aurora.EndMaintenanceResult_
-
+func (c *Client) EndMaintenance(hosts ...string) ([]*aurora.HostStatus, error) {
 	if len(hosts) == 0 {
-		return nil, nil, errors.New("no hosts provided to end maintenance on")
+		return nil, errors.New("no hosts provided to end maintenance on")
 	}

 	hostList := aurora.NewHosts()
 	hostList.HostNames = hosts

-	r.logger.debugPrintf("EndMaintenance Thrift Payload: %v\n", hostList)
+	c.logger.DebugPrintf("EndMaintenance Thrift Payload: %v\n", hostList)

-	resp, retryErr := r.thriftCallWithRetries(
-		false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.EndMaintenance(context.TODO(), hostList)
-		},
-		nil,
-	)
+	resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.EndMaintenance(context.TODO(), hostList)
+	})

 	if retryErr != nil {
-		return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
+		return nil, errors.Wrap(retryErr, "unable to recover connection")
 	}

-	if resp.GetResult_() != nil {
-		result = resp.GetResult_().GetEndMaintenanceResult_()
+	if resp.GetResult_() != nil && resp.GetResult_().GetEndMaintenanceResult_() != nil {
+		return resp.GetResult_().GetEndMaintenanceResult_().GetStatuses(), nil
+	} else {
+		return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
 	}
-
-	return resp, result, nil
 }

-func (r *realisClient) MaintenanceStatus(hosts ...string) (*aurora.Response, *aurora.MaintenanceStatusResult_, error) {
-
+func (c *Client) MaintenanceStatus(hosts ...string) (*aurora.MaintenanceStatusResult_, error) {
 	var result *aurora.MaintenanceStatusResult_

 	if len(hosts) == 0 {
-		return nil, nil, errors.New("no hosts provided to get maintenance status from")
+		return nil, errors.New("no hosts provided to get maintenance status from")
 	}

 	hostList := aurora.NewHosts()
 	hostList.HostNames = hosts

-	r.logger.debugPrintf("MaintenanceStatus Thrift Payload: %v\n", hostList)
+	c.logger.DebugPrintf("MaintenanceStatus Thrift Payload: %v\n", hostList)

 	// Make thrift call. If we encounter an error sending the call, attempt to reconnect
 	// and continue trying to resend command until we run out of retries.
-	resp, retryErr := r.thriftCallWithRetries(
-		false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.MaintenanceStatus(context.TODO(), hostList)
-		},
-		nil,
-	)
+	resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.MaintenanceStatus(context.TODO(), hostList)
+	})

 	if retryErr != nil {
-		return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
+		return result, errors.Wrap(retryErr, "unable to recover connection")
 	}

 	if resp.GetResult_() != nil {
 		result = resp.GetResult_().GetMaintenanceStatusResult_()
 	}

-	return resp, result, nil
+	return result, nil
 }
 // SetQuota sets a quota aggregate for the given role
-// TODO(zircote) Currently investigating an error that is returned
-// from thrift calls that include resources for `NamedPort` and `NumGpu`
-func (r *realisClient) SetQuota(role string, cpu *float64, ramMb *int64, diskMb *int64) (*aurora.Response, error) {
-	quota := &aurora.ResourceAggregate{
-		Resources: []*aurora.Resource{{NumCpus: cpu}, {RamMb: ramMb}, {DiskMb: diskMb}},
-	}
-
-	resp, retryErr := r.thriftCallWithRetries(
-		false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.SetQuota(context.TODO(), role, quota)
-		},
-		nil,
-	)
+// TODO(zircote) Currently investigating an error that is returned from thrift calls that include resources for `NamedPort` and `NumGpu`
+func (c *Client) SetQuota(role string, cpu *float64, ramMb *int64, diskMb *int64) error {
+	ramResource := aurora.NewResource()
+	ramResource.RamMb = ramMb
+	cpuResource := aurora.NewResource()
+	cpuResource.NumCpus = cpu
+	diskResource := aurora.NewResource()
+	diskResource.DiskMb = diskMb
+
+	quota := aurora.NewResourceAggregate()
+	quota.Resources = []*aurora.Resource{ramResource, cpuResource, diskResource}
+
+	_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.SetQuota(context.TODO(), role, quota)
+	})

 	if retryErr != nil {
-		return resp, errors.Wrap(retryErr, "Unable to set role quota")
+		return errors.Wrap(retryErr, "unable to set role quota")
 	}
-	return resp, retryErr
+	return retryErr
 }

 // GetQuota returns the resource aggregate for the given role
-func (r *realisClient) GetQuota(role string) (*aurora.Response, error) {
-
-	resp, retryErr := r.thriftCallWithRetries(
-		false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.GetQuota(context.TODO(), role)
-		},
-		nil,
-	)
+func (c *Client) GetQuota(role string) (*aurora.GetQuotaResult_, error) {
+	resp, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.GetQuota(context.TODO(), role)
+	})

 	if retryErr != nil {
-		return resp, errors.Wrap(retryErr, "Unable to get role quota")
+		return nil, errors.Wrap(retryErr, "unable to get role quota")
+	}
+
+	if resp.GetResult_() != nil {
+		return resp.GetResult_().GetGetQuotaResult_(), nil
+	} else {
+		return nil, errors.New("thrift error: Field in response is nil unexpectedly.")
 	}
-	return resp, retryErr
 }
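A quota round-trip under the same assumptions (not part of this diff; role name and sizes are placeholders, and the GetQuota/GetResources accessors are assumed to be the standard thrift-generated getters):

cpu, ram, disk := 8.0, int64(16384), int64(65536)
if err := client.SetQuota("vagrant", &cpu, &ram, &disk); err != nil {
	log.Fatal(err)
}

quota, err := client.GetQuota("vagrant")
if err != nil {
	log.Fatal(err)
}
for _, r := range quota.GetQuota().GetResources() {
	fmt.Printf("%+v\n", r)
}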
 // Force Aurora Scheduler to perform a snapshot and write to Mesos log
-func (r *realisClient) Snapshot() error {
-
-	_, retryErr := r.thriftCallWithRetries(
-		false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.Snapshot(context.TODO())
-		},
-		nil,
-	)
+func (c *Client) Snapshot() error {
+	_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.Snapshot(context.TODO())
+	})

 	if retryErr != nil {
-		return errors.Wrap(retryErr, "Unable to recover connection")
+		return errors.Wrap(retryErr, "unable to recover connection")
 	}

 	return nil
 }

 // Force Aurora Scheduler to write backup file to a file in the backup directory
-func (r *realisClient) PerformBackup() error {
-
-	_, retryErr := r.thriftCallWithRetries(
-		false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.PerformBackup(context.TODO())
-		},
-		nil,
-	)
+func (c *Client) PerformBackup() error {
+	_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.PerformBackup(context.TODO())
+	})

 	if retryErr != nil {
-		return errors.Wrap(retryErr, "Unable to recover connection")
+		return errors.Wrap(retryErr, "unable to recover connection")
 	}

 	return nil
 }

-func (r *realisClient) ForceImplicitTaskReconciliation() error {
-
-	_, retryErr := r.thriftCallWithRetries(
-		false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.TriggerImplicitTaskReconciliation(context.TODO())
-		},
-		nil,
-	)
+// Force an Implicit reconciliation between Mesos and Aurora
+func (c *Client) ForceImplicitTaskReconciliation() error {
+	_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.TriggerImplicitTaskReconciliation(context.TODO())
+	})

 	if retryErr != nil {
-		return errors.Wrap(retryErr, "Unable to recover connection")
+		return errors.Wrap(retryErr, "unable to recover connection")
 	}

 	return nil
 }

-func (r *realisClient) ForceExplicitTaskReconciliation(batchSize *int32) error {
-
+// Force an Explicit reconciliation between Mesos and Aurora
+func (c *Client) ForceExplicitTaskReconciliation(batchSize *int32) error {
 	if batchSize != nil && *batchSize < 1 {
-		return errors.New("invalid batch size")
+		return errors.New("invalid batch size.")
 	}
 	settings := aurora.NewExplicitReconciliationSettings()

 	settings.BatchSize = batchSize

-	_, retryErr := r.thriftCallWithRetries(false,
-		func() (*aurora.Response, error) {
-			return r.adminClient.TriggerExplicitTaskReconciliation(context.TODO(), settings)
-		},
-		nil,
-	)
+	_, retryErr := c.thriftCallWithRetries(false, func() (*aurora.Response, error) {
+		return c.adminClient.TriggerExplicitTaskReconciliation(context.TODO(), settings)
+	})

 	if retryErr != nil {
-		return errors.Wrap(retryErr, "Unable to recover connection")
+		return errors.Wrap(retryErr, "unable to recover connection")
 	}

 	return nil
 }
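The explicit variant above takes an optional batch size. A usage sketch (not part of this diff; client is assumed to be a connected *realis.Client from the v2 API):

// Kick off an explicit reconciliation in batches of 64 tasks.
batch := int32(64)
if err := client.ForceExplicitTaskReconciliation(&batch); err != nil {
	log.Fatal(err)
}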
realis_config.go (new file, 171 lines)
@@ -0,0 +1,171 @@
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package realis

import (
	"strings"
	"time"

	"github.com/apache/thrift/lib/go/thrift"
)

type clientConfig struct {
	username, password    string
	url                   string
	timeout               time.Duration
	transportProtocol     TransportProtocol
	cluster               *Cluster
	backoff               Backoff
	transport             thrift.TTransport
	protoFactory          thrift.TProtocolFactory
	logger                *LevelLogger
	insecureSkipVerify    bool
	certsPath             string
	clientKey, clientCert string
	options               []ClientOption
	debug                 bool
	trace                 bool
	zkOptions             []ZKOpt
	failOnPermanentErrors bool
}

var defaultBackoff = Backoff{
	Steps:    3,
	Duration: 10 * time.Second,
	Factor:   5.0,
	Jitter:   0.1,
}

type TransportProtocol int

const (
	unsetProtocol TransportProtocol = iota
	jsonProtocol
	binaryProtocol
)

type ClientOption func(*clientConfig)

// ClientOption functions set fields on a clientConfig.
func BasicAuth(username, password string) ClientOption {
	return func(config *clientConfig) {
		config.username = username
		config.password = password
	}
}

func SchedulerUrl(url string) ClientOption {
	return func(config *clientConfig) {
		config.url = url
	}
}

func Timeout(timeout time.Duration) ClientOption {
	return func(config *clientConfig) {
		config.timeout = timeout
	}
}

func ZKCluster(cluster *Cluster) ClientOption {
	return func(config *clientConfig) {
		config.cluster = cluster
	}
}

func ZKUrl(url string) ClientOption {
	opts := []ZKOpt{ZKEndpoints(strings.Split(url, ",")...), ZKPath("/aurora/scheduler")}

	return func(config *clientConfig) {
		if config.zkOptions == nil {
			config.zkOptions = opts
		} else {
			config.zkOptions = append(config.zkOptions, opts...)
		}
	}
}

func ThriftJSON() ClientOption {
	return func(config *clientConfig) {
		config.transportProtocol = jsonProtocol
	}
}

func ThriftBinary() ClientOption {
	return func(config *clientConfig) {
		config.transportProtocol = binaryProtocol
	}
}

func BackOff(b Backoff) ClientOption {
	return func(config *clientConfig) {
		config.backoff = b
	}
}

func InsecureSkipVerify(InsecureSkipVerify bool) ClientOption {
	return func(config *clientConfig) {
		config.insecureSkipVerify = InsecureSkipVerify
	}
}

func CertsPath(certspath string) ClientOption {
	return func(config *clientConfig) {
		config.certsPath = certspath
	}
}

func ClientCerts(clientKey, clientCert string) ClientOption {
	return func(config *clientConfig) {
		config.clientKey, config.clientCert = clientKey, clientCert
	}
}

// Use this option if you'd like to override default settings for connecting to Zookeeper.
// See zk.go for what is possible to set as an option.
func ZookeeperOptions(opts ...ZKOpt) ClientOption {
	return func(config *clientConfig) {
		config.zkOptions = opts
	}
}

// Using the word set to avoid name collision with Interface.
func SetLogger(l Logger) ClientOption {
	return func(config *clientConfig) {
		config.logger = &LevelLogger{Logger: l}
	}
}

// Enable debug statements.
func Debug() ClientOption {
	return func(config *clientConfig) {
		config.debug = true
	}
}

// Enable trace statements.
func Trace() ClientOption {
	return func(config *clientConfig) {
		config.trace = true
	}
}

// FailOnPermanentErrors - If the client encounters a connection error the standard library
// considers permanent, stop retrying and return an error to the user.
func FailOnPermanentErrors() ClientOption {
	return func(config *clientConfig) {
		config.failOnPermanentErrors = true
	}
}
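These options compose into a client at construction time. A sketch (not part of this diff), assuming the v2 constructor is named NewClient and that the client exposes a Close method mirroring the v1 client; endpoint and credentials are placeholders:

client, err := realis.NewClient(
	realis.SchedulerUrl("https://aurora.example.com:8081"),
	realis.BasicAuth("aurora", "secret"),
	realis.ThriftJSON(),
	realis.Timeout(20*time.Second),
	realis.BackOff(realis.Backoff{Steps: 2, Duration: 10 * time.Second, Factor: 2.0, Jitter: 0.1}),
	realis.Debug(),
)
if err != nil {
	log.Fatal(err)
}
defer client.Close() // assumed to exist, mirroring the v1 client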
File diff suppressed because it is too large
realis_test.go (new file, 72 lines)
@@ -0,0 +1,72 @@
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package realis

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetCACerts(t *testing.T) {
	certs, err := GetCerts("./examples/certs")
	require.NoError(t, err)
	assert.Equal(t, len(certs.Subjects()), 2)
}

func TestAuroraURLValidator(t *testing.T) {
	t.Run("badURL", func(t *testing.T) {
		url, err := validateAuroraAddress("http://badurl.com/badpath")
		assert.Empty(t, url)
		assert.Error(t, err)
	})

	t.Run("URLHttp", func(t *testing.T) {
		url, err := validateAuroraAddress("http://goodurl.com:8081/api")
		assert.Equal(t, "http://goodurl.com:8081/api", url)
		assert.NoError(t, err)
	})

	t.Run("URLHttps", func(t *testing.T) {
		url, err := validateAuroraAddress("https://goodurl.com:8081/api")
		assert.Equal(t, "https://goodurl.com:8081/api", url)
		assert.NoError(t, err)
	})

	t.Run("URLNoPath", func(t *testing.T) {
		url, err := validateAuroraAddress("http://goodurl.com:8081")
		assert.Equal(t, "http://goodurl.com:8081/api", url)
		assert.NoError(t, err)
	})

	t.Run("ipAddrNoPath", func(t *testing.T) {
		url, err := validateAuroraAddress("http://192.168.1.33:8081")
		assert.Equal(t, "http://192.168.1.33:8081/api", url)
		assert.NoError(t, err)
	})

	t.Run("URLNoProtocol", func(t *testing.T) {
		url, err := validateAuroraAddress("goodurl.com:8081/api")
		assert.Equal(t, "http://goodurl.com:8081/api", url)
		assert.NoError(t, err)
	})

	t.Run("URLNoProtocolNoPathNoPort", func(t *testing.T) {
		url, err := validateAuroraAddress("goodurl.com")
		assert.Equal(t, "http://goodurl.com:8081/api", url)
		assert.NoError(t, err)
	})
}
@@ -17,9 +17,8 @@ package response

 import (
 	"bytes"
-	"errors"

-	"github.com/paypal/gorealis/gen-go/apache/aurora"
+	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
 )

 // Get key from a response created by a StartJobUpdate call

@@ -36,25 +35,9 @@ func ScheduleStatusResult(resp *aurora.Response) *aurora.ScheduleStatusResult_ {
 }

 func JobUpdateSummaries(resp *aurora.Response) []*aurora.JobUpdateSummary {
-	if resp.GetResult_() == nil || resp.GetResult_().GetGetJobUpdateSummariesResult_() == nil {
-		return nil
-	}
-
 	return resp.GetResult_().GetGetJobUpdateSummariesResult_().GetUpdateSummaries()
 }

-// Deprecated: Replaced by checks done inside of thriftCallHelper
-func ResponseCodeCheck(resp *aurora.Response) (*aurora.Response, error) {
-	if resp == nil {
-		return resp, errors.New("Response is nil")
-	}
-	if resp.GetResponseCode() != aurora.ResponseCode_OK {
-		return resp, errors.New(CombineMessage(resp))
-	}
-
-	return resp, nil
-}
-
 // Based on aurora client: src/main/python/apache/aurora/client/base.py
 func CombineMessage(resp *aurora.Response) string {
 	var buffer bytes.Buffer
retry.go (239)
@@ -17,20 +17,21 @@ package realis
 import (
 	"io"
 	"math/rand"
+	"net/http"
 	"net/url"
+	"strconv"
+	"strings"
 	"time"

 	"github.com/apache/thrift/lib/go/thrift"
-	"github.com/paypal/gorealis/gen-go/apache/aurora"
-	"github.com/paypal/gorealis/response"
+	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
+	"github.com/paypal/gorealis/v2/response"
 	"github.com/pkg/errors"
 )

-// Backoff determines how the retry mechanism should react after each failure and how many failures it should
-// tolerate.
 type Backoff struct {
 	Duration time.Duration // the base duration
-	Factor   float64       // Duration is multiplied by a factor each iteration
+	Factor   float64       // Duration is multipled by factor each iteration
 	Jitter   float64       // The amount of jitter applied each iteration
 	Steps    int           // Exit with error after this many steps
 }
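With the defaultBackoff that realis_config.go above sets (Steps 3, Duration 10s, Factor 5, Jitter 0.1), the waits grow roughly 10s, 50s, 250s before the retry loop gives up. A small self-contained sketch of that arithmetic (not part of this diff):

package main

import (
	"fmt"
	"time"

	realis "github.com/paypal/gorealis/v2"
)

func main() {
	// Mirrors the defaultBackoff values from realis_config.go in this compare.
	b := realis.Backoff{Steps: 3, Duration: 10 * time.Second, Factor: 5.0, Jitter: 0.1}

	d := b.Duration
	for step := 0; step < b.Steps; step++ {
		// Each retry waits d plus up to Jitter*d of random slack.
		fmt.Printf("attempt %d: wait between %v and %v\n", step+1, d, d+time.Duration(b.Jitter*float64(d)))
		d = time.Duration(float64(d) * b.Factor)
	}
	// attempt 1: wait between 10s and 11s
	// attempt 2: wait between 50s and 55s
	// attempt 3: wait between 4m10s and 4m35s
}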
@@ -52,19 +53,23 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration {
 // if the loop should be aborted.
 type ConditionFunc func() (done bool, err error)

-// ExponentialBackoff is a modified version of the Kubernetes exponential-backoff code.
-// It repeats a condition check with exponential backoff and checks the condition up to
-// Steps times, increasing the wait by multiplying the previous duration by Factor.
+// Modified version of the Kubernetes exponential-backoff code.
+// ExponentialBackoff repeats a condition check with exponential backoff.
+//
+// It checks the condition up to Steps times, increasing the wait by multiplying
+// the previous duration by Factor.
 //
 // If Jitter is greater than zero, a random amount of each duration is added
 // (between duration and duration*(1+jitter)).
 //
 // If the condition never returns true, ErrWaitTimeout is returned. Errors
 // do not cause the function to return.
-func ExponentialBackoff(backoff Backoff, logger logger, condition ConditionFunc) error {
+func ExponentialBackoff(backoff Backoff, logger Logger, condition ConditionFunc) error {
 	var err error
 	var ok bool
 	var curStep int

 	duration := backoff.Duration

 	for curStep = 0; curStep < backoff.Steps; curStep++ {

@@ -76,8 +81,7 @@ func ExponentialBackoff(backoff Backoff, logger logger, condition ConditionFunc)
 			adjusted = Jitter(duration, backoff.Jitter)
 		}

-		logger.Printf(
-			"A retryable error occurred during function call, backing off for %v before retrying\n", adjusted)
+		logger.Printf("A retryable error occurred during function call, backing off for %v before retrying\n", adjusted)
 		time.Sleep(adjusted)
 		duration = time.Duration(float64(duration) * backoff.Factor)
 	}

@@ -94,9 +98,10 @@ func ExponentialBackoff(backoff Backoff, logger logger, condition ConditionFunc)
 		// If the error is temporary, continue retrying.
 		if !IsTemporary(err) {
 			return err
+		} else {
+			// Print out the temporary error we experienced.
+			logger.Println(err)
 		}
-		// Print out the temporary error we experienced.
-		logger.Println(err)
 	}
 }

@@ -107,33 +112,21 @@ func ExponentialBackoff(backoff Backoff, logger logger, condition ConditionFunc)
 	// Provide more information to the user wherever possible
 	if err != nil {
 		return newRetryError(errors.Wrap(err, "ran out of retries"), curStep)
+	} else {
+		return newRetryError(errors.New("ran out of retries"), curStep)
 	}
-
-	return newRetryError(errors.New("ran out of retries"), curStep)
 }

 type auroraThriftCall func() (resp *aurora.Response, err error)

-// verifyOntimeout defines the type of function that will be used to verify whether a Thirft call to the Scheduler
-// made it to the scheduler or not. In general, these types of functions will have to interact with the scheduler
-// through the very same Thrift API which previously encountered a time out from the client.
-// This means that the functions themselves should be kept to a minimum number of Thrift calls.
-// It should also be noted that this is a best effort mechanism and
-// is likely to fail for the same reasons that the original call failed.
-type verifyOnTimeout func() (*aurora.Response, bool)
-
 // Duplicates the functionality of ExponentialBackoff but is specifically targeted towards ThriftCalls.
-func (r *realisClient) thriftCallWithRetries(
-	returnOnTimeout bool,
-	thriftCall auroraThriftCall,
-	verifyOnTimeout verifyOnTimeout) (*aurora.Response, error) {
-
+func (c *Client) thriftCallWithRetries(returnOnTimeout bool, thriftCall auroraThriftCall) (*aurora.Response, error) {
 	var resp *aurora.Response
 	var clientErr error
 	var curStep int
-	timeouts := 0
+	var timeouts int

-	backoff := r.config.backoff
+	backoff := c.config.backoff
 	duration := backoff.Duration

 	for curStep = 0; curStep < backoff.Steps; curStep++ {

@@ -145,10 +138,7 @@ func (r *realisClient) thriftCallWithRetries(
 			adjusted = Jitter(duration, backoff.Jitter)
 		}

-		r.logger.Printf(
-			"A retryable error occurred during thrift call, backing off for %v before retry %v",
-			adjusted,
-			curStep)
+		c.logger.Printf("A retryable error occurred during thrift call, backing off for %v before retry %v\n", adjusted, curStep)

 		time.Sleep(adjusted)
 		duration = time.Duration(float64(duration) * backoff.Factor)

@@ -158,137 +148,110 @@ func (r *realisClient) thriftCallWithRetries(
 		// Placing this in an anonymous function in order to create a new, short-lived stack allowing unlock
 		// to be run in case of a panic inside of thriftCall.
 		func() {
-			r.lock.Lock()
-			defer r.lock.Unlock()
+			c.lock.Lock()
+			defer c.lock.Unlock()

 			resp, clientErr = thriftCall()

-			r.logger.tracePrintf("Aurora Thrift Call ended resp: %v clientErr: %v", resp, clientErr)
+			c.logger.TracePrintf("Aurora Thrift Call ended resp: %v clientErr: %v\n", resp, clientErr)
 		}()

-		// Check if our thrift call is returning an error.
+		// Check if our thrift call is returning an error. This is a retryable event as we don't know
+		// if it was caused by network issues.
 		if clientErr != nil {

 			// Print out the error to the user
-			r.logger.Printf("Client Error: %v", clientErr)
-
-			temporary, timedout := isConnectionError(clientErr)
-			if !temporary && r.RealisConfig().failOnPermanentErrors {
-				return nil, errors.Wrap(clientErr, "permanent connection error")
-			}
-
-			// There exists a corner case where thrift payload was received by Aurora but
-			// connection timed out before Aurora was able to reply.
-			// Users can take special action on a timeout by using IsTimedout and reacting accordingly
-			// if they have configured the client to return on a timeout.
-			if timedout && returnOnTimeout {
-				return resp, newTimedoutError(errors.New("client connection closed before server answer"))
-			}
+			c.logger.Printf("Client Error: %v\n", clientErr)
+
+			// Determine if error is a temporary URL error by going up the stack
+			e, ok := clientErr.(thrift.TTransportException)
+			if ok {
+				c.logger.DebugPrint("Encountered a transport exception")
+
+				// TODO(rdelvalle): Figure out a better way to obtain the error code as this is a very brittle solution
+				// 401 Unauthorized means the wrong username and password were provided
+				if strings.Contains(e.Error(), strconv.Itoa(http.StatusUnauthorized)) {
+					return nil, errors.Wrap(clientErr, "wrong username or password provided")
+				}
+
+				e, ok := e.Err().(*url.Error)
+				if ok {
+					// EOF error occurs when the server closes the read buffer of the client. This is common
+					// when the server is overloaded and should be retried. All other errors that are permanent
+					// will not be retried.
+					if e.Err != io.EOF && !e.Temporary() && c.RealisConfig().failOnPermanentErrors {
+						return nil, errors.Wrap(clientErr, "permanent connection error")
+					}
+					// Corner case where thrift payload was received by Aurora but connection timedout before Aurora was
+					// able to reply. In this case we will return whatever response was received and a TimedOut behaving
+					// error. Users can take special action on a timeout by using IsTimedout and reacting accordingly.
+					if e.Timeout() {
+						timeouts++
+						c.logger.DebugPrintf(
+							"Client closed connection (timedout) %d times before server responded,"+
+								" consider increasing connection timeout",
+							timeouts)
+						if returnOnTimeout {
+							return resp,
+								newTimedoutError(errors.New("client connection closed before server answer"))
+						}
+					}
+				}
+			}

 			// In the future, reestablish connection should be able to check if it is actually possible
 			// to make a thrift call to Aurora. For now, a reconnect should always lead to a retry.
 			// Ignoring error due to the fact that an error should be retried regardless
-			reestablishErr := r.ReestablishConn()
-			if reestablishErr != nil {
-				r.logger.debugPrintf("error re-establishing connection ", reestablishErr)
-			}
-
-			// If users did not opt for a return on timeout in order to react to a timedout error,
-			// attempt to verify that the call made it to the scheduler after the connection was re-established.
-			if timedout {
-				timeouts++
-				r.logger.debugPrintf(
-					"Client closed connection %d times before server responded, "+
-						"consider increasing connection timeout",
-					timeouts)
-
-				// Allow caller to provide a function which checks if the original call was successful before
-				// it timed out.
-				if verifyOnTimeout != nil {
-					if verifyResp, ok := verifyOnTimeout(); ok {
-						r.logger.Print("verified that the call went through successfully after a client timeout")
-						// Response here might be different than the original as it is no longer constructed
-						// by the scheduler but mimicked.
-						// This is OK since the scheduler is very unlikely to change responses at this point in its
-						// development cycle but we must be careful to not return an incorrectly constructed response.
-						return verifyResp, nil
-					}
-				}
-			}
-
-			// Retry the thrift payload
-			continue
-		}
-
-		// If there was no client error, but the response is nil, something went wrong.
-		// Ideally, we'll never encounter this but we're placing a safeguard here.
-		if resp == nil {
-			return nil, errors.New("response from aurora is nil")
-		}
-
-		// Check Response Code from thrift and make a decision to continue retrying or not.
-		switch responseCode := resp.GetResponseCode(); responseCode {
-
-		// If the thrift call succeeded, stop retrying
-		case aurora.ResponseCode_OK:
-			return resp, nil
-
-		// If the response code is transient, continue retrying
-		case aurora.ResponseCode_ERROR_TRANSIENT:
-			r.logger.Println("Aurora replied with Transient error code, retrying")
-			continue
-
-		// Failure scenarios, these indicate a bad payload or a bad config. Stop retrying.
-		case aurora.ResponseCode_INVALID_REQUEST,
-			aurora.ResponseCode_ERROR,
-			aurora.ResponseCode_AUTH_FAILED,
-			aurora.ResponseCode_JOB_UPDATING_ERROR:
-			r.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
-			return resp, errors.New(response.CombineMessage(resp))
-
-		// The only case that should fall down to here is a WARNING response code.
-		// It is currently not used as a response in the scheduler so it is unknown how to handle it.
-		default:
-			r.logger.debugPrintf("unhandled response code %v received from Aurora\n", responseCode)
-			return nil, errors.Errorf("unhandled response code from Aurora %v", responseCode.String())
-		}
+			_ = c.ReestablishConn()
+		} else {
+
+			// If there was no client error, but the response is nil, something went wrong.
+			// Ideally, we'll never encounter this but we're placing a safeguard here.
+			if resp == nil {
+				return nil, errors.New("response from aurora is nil")
+			}
+
+			// Check Response Code from thrift and make a decision to continue retrying or not.
+			switch responseCode := resp.GetResponseCode(); responseCode {
+
+			// If the thrift call succeeded, stop retrying
+			case aurora.ResponseCode_OK:
+				return resp, nil
+
+			// If the response code is transient, continue retrying
+			case aurora.ResponseCode_ERROR_TRANSIENT:
+				c.logger.Println("Aurora replied with Transient error code, retrying")
+				continue
+
+			// Failure scenarios, these indicate a bad payload or a bad clientConfig. Stop retrying.
+			case aurora.ResponseCode_INVALID_REQUEST,
+				aurora.ResponseCode_ERROR,
+				aurora.ResponseCode_AUTH_FAILED,
+				aurora.ResponseCode_JOB_UPDATING_ERROR:
+				c.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
+				return resp, errors.New(response.CombineMessage(resp))
+
+			// The only case that should fall down to here is a WARNING response code.
+			// It is currently not used as a response in the scheduler so it is unknown how to handle it.
+			default:
+				c.logger.DebugPrintf("unhandled response code %v received from Aurora\n", responseCode)
+				return nil, errors.Errorf("unhandled response code from Aurora %v", responseCode.String())
+			}
+		}
 	}

+	c.logger.DebugPrintf("it took %v retries to complete this operation\n", curStep)
+
 	if curStep > 1 {
-		r.config.logger.Printf("this thrift call was retried %d time(s)", curStep)
+		c.config.logger.Printf("retried this thrift call %d time(s)", curStep)
 	}

 	// Provide more information to the user wherever possible.
 	if clientErr != nil {
 		return nil, newRetryError(errors.Wrap(clientErr, "ran out of retries, including latest error"), curStep)
+	} else {
+		return nil, newRetryError(errors.New("ran out of retries"), curStep)
 	}
-
-	return nil, newRetryError(errors.New("ran out of retries"), curStep)
 }
-
-// isConnectionError processes the error received by the client.
-// The return values indicate weather this was determined to be a temporary error
-// and weather it was determined to be a timeout error
-func isConnectionError(err error) (bool, bool) {
-
-	// Determine if error is a temporary URL error by going up the stack
-	transportException, ok := err.(thrift.TTransportException)
-	if !ok {
-		return false, false
-	}
-
-	urlError, ok := transportException.Err().(*url.Error)
-	if !ok {
-		return false, false
-	}
-
-	// EOF error occurs when the server closes the read buffer of the client. This is common
-	// when the server is overloaded and we consider it temporary.
-	// All other which are not temporary as per the member function Temporary(),
-	// are considered not temporary (permanent).
-	if urlError.Err != io.EOF && !urlError.Temporary() {
-		return false, false
-	}
-
-	return true, urlError.Timeout()
-}
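The comments in this hunk tell callers to react to timedout-behaving errors with IsTimedout. A hedged caller-side sketch (not part of this diff; client is assumed to be a connected *realis.Client and the hostname is a placeholder):

statuses, err := client.DrainHosts("agent-1.example.com")
if err != nil {
	if realis.IsTimedout(err) {
		// The payload may have reached the scheduler before the connection
		// closed; verify host state before re-issuing the same mutation.
		log.Println("drain call timed out; verify host state before retrying")
	} else {
		log.Fatal(err)
	}
}
fmt.Println(statuses)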
runTests.sh (13)
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-docker-compose up -d
-
-# If running docker-compose up gives any error, don't do anything.
-if [ $? -ne 0 ]; then
-	exit
-fi
-
-# Since we run our docker compose setup in bridge mode to be able to run on MacOS, we have to launch a Docker container within the bridge network in order to avoid any routing issues.
-docker run --rm -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10-stretch go test -v github.com/paypal/gorealis $@
-
-docker-compose down

@@ -1,4 +1,4 @@
 #!/bin/bash

 # Since we run our docker compose setup in bridge mode to be able to run on MacOS, we have to launch a Docker container within the bridge network in order to avoid any routing issues.
-docker run --rm -t -w /gorealis -v $GOPATH/pkg:/go/pkg -v $(pwd):/gorealis --network gorealis_aurora_cluster golang:1.16-buster go test -v github.com/paypal/gorealis $@
+docker run --rm -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10-stretch go test -v github.com/paypal/gorealis $@
task.go (new file, 449 lines)
@@ -0,0 +1,449 @@
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package realis

import (
	"encoding/json"
	"strconv"

	"github.com/apache/thrift/lib/go/thrift"
	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
)

type ResourceType int

const (
	CPU ResourceType = iota
	RAM
	DISK
	GPU
)

const (
	dedicated  = "dedicated"
	portPrefix = "org.apache.aurora.port."
)

type AuroraTask struct {
	task      *aurora.TaskConfig
	resources map[ResourceType]*aurora.Resource
	portCount int
	thermos   *ThermosExecutor
}

func NewTask() *AuroraTask {
	numCpus := &aurora.Resource{}
	ramMb := &aurora.Resource{}
	diskMb := &aurora.Resource{}

	numCpus.NumCpus = new(float64)
	ramMb.RamMb = new(int64)
	diskMb.DiskMb = new(int64)

	resources := map[ResourceType]*aurora.Resource{CPU: numCpus, RAM: ramMb, DISK: diskMb}

	return &AuroraTask{task: &aurora.TaskConfig{
		Job:              &aurora.JobKey{},
		MesosFetcherUris: make([]*aurora.MesosFetcherURI, 0),
		Metadata:         make([]*aurora.Metadata, 0),
		Constraints:      make([]*aurora.Constraint, 0),
		// Container is a Union so one container field must be set. Set Mesos by default.
		Container: NewMesosContainer().Build(),
		Resources: []*aurora.Resource{numCpus, ramMb, diskMb},
	},
		resources: resources,
		portCount: 0}
}
|
// Helper method to convert aurora.TaskConfig to gorealis AuroraTask type
|
||||||
|
func TaskFromThrift(config *aurora.TaskConfig) *AuroraTask {
|
||||||
|
|
||||||
|
newTask := NewTask()
|
||||||
|
|
||||||
|
// Pass values using receivers as much as possible
|
||||||
|
newTask.
|
||||||
|
Environment(config.Job.Environment).
|
||||||
|
Role(config.Job.Role).
|
||||||
|
Name(config.Job.Name).
|
||||||
|
MaxFailure(config.MaxTaskFailures).
|
||||||
|
IsService(config.IsService)
|
||||||
|
|
||||||
|
if config.Tier != nil {
|
||||||
|
newTask.Tier(*config.Tier)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.ExecutorConfig != nil {
|
||||||
|
newTask.
|
||||||
|
ExecutorName(config.ExecutorConfig.Name).
|
||||||
|
ExecutorData(config.ExecutorConfig.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.PartitionPolicy != nil {
|
||||||
|
newTask.PartitionPolicy(
|
||||||
|
aurora.PartitionPolicy{
|
||||||
|
Reschedule: config.PartitionPolicy.Reschedule,
|
||||||
|
DelaySecs: thrift.Int64Ptr(*config.PartitionPolicy.DelaySecs),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a deep copy of the task's container
|
||||||
|
if config.Container != nil {
|
||||||
|
if config.Container.Mesos != nil {
|
||||||
|
mesosContainer := NewMesosContainer()
|
||||||
|
|
||||||
|
if config.Container.Mesos.Image != nil {
|
||||||
|
if config.Container.Mesos.Image.Appc != nil {
|
||||||
|
mesosContainer.AppcImage(config.Container.Mesos.Image.Appc.Name, config.Container.Mesos.Image.Appc.ImageId)
|
||||||
|
} else if config.Container.Mesos.Image.Docker != nil {
|
||||||
|
mesosContainer.DockerImage(config.Container.Mesos.Image.Docker.Name, config.Container.Mesos.Image.Docker.Tag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, vol := range config.Container.Mesos.Volumes {
|
||||||
|
mesosContainer.AddVolume(vol.ContainerPath, vol.HostPath, vol.Mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
newTask.Container(mesosContainer)
|
||||||
|
} else if config.Container.Docker != nil {
|
||||||
|
dockerContainer := NewDockerContainer()
|
||||||
|
dockerContainer.Image(config.Container.Docker.Image)
|
||||||
|
|
||||||
|
for _, param := range config.Container.Docker.Parameters {
|
||||||
|
dockerContainer.AddParameter(param.Name, param.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
newTask.Container(dockerContainer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy all ports
|
||||||
|
for _, resource := range config.Resources {
|
||||||
|
// Copy only ports. Set CPU, RAM, DISK, and GPU
|
||||||
|
if resource != nil {
|
||||||
|
if resource.NamedPort != nil {
|
||||||
|
newTask.task.Resources = append(
|
||||||
|
newTask.task.Resources,
|
||||||
|
&aurora.Resource{NamedPort: thrift.StringPtr(*resource.NamedPort)},
|
||||||
|
)
|
||||||
|
newTask.portCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
if resource.RamMb != nil {
|
||||||
|
newTask.RAM(*resource.RamMb)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resource.NumCpus != nil {
|
||||||
|
newTask.CPU(*resource.NumCpus)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resource.DiskMb != nil {
|
||||||
|
newTask.Disk(*resource.DiskMb)
|
||||||
|
}
|
||||||
|
|
||||||
|
if resource.NumGpus != nil {
|
||||||
|
newTask.GPU(*resource.NumGpus)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy constraints
|
||||||
|
for _, constraint := range config.Constraints {
|
||||||
|
if constraint != nil && constraint.Constraint != nil {
|
||||||
|
|
||||||
|
newConstraint := aurora.Constraint{Name: constraint.Name}
|
||||||
|
|
||||||
|
taskConstraint := constraint.Constraint
|
||||||
|
if taskConstraint.Limit != nil {
|
||||||
|
newConstraint.Constraint = &aurora.TaskConstraint{
|
||||||
|
Limit: &aurora.LimitConstraint{Limit: taskConstraint.Limit.Limit},
|
||||||
|
}
|
||||||
|
newTask.task.Constraints = append(newTask.task.Constraints, &newConstraint)
|
||||||
|
|
||||||
|
} else if taskConstraint.Value != nil {
|
||||||
|
|
||||||
|
values := make([]string, 0)
|
||||||
|
for _, val := range taskConstraint.Value.Values {
|
||||||
|
values = append(values, val)
|
||||||
|
}
|
||||||
|
|
||||||
|
newConstraint.Constraint = &aurora.TaskConstraint{
|
||||||
|
Value: &aurora.ValueConstraint{Negated: taskConstraint.Value.Negated, Values: values}}
|
||||||
|
|
||||||
|
newTask.task.Constraints = append(newTask.task.Constraints, &newConstraint)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy labels
|
||||||
|
for _, label := range config.Metadata {
|
||||||
|
newTask.task.Metadata = append(newTask.task.Metadata, &aurora.Metadata{Key: label.Key, Value: label.Value})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy Mesos fetcher URIs
|
||||||
|
for _, uri := range config.MesosFetcherUris {
|
||||||
|
newTask.task.MesosFetcherUris = append(
|
||||||
|
newTask.task.MesosFetcherUris,
|
||||||
|
&aurora.MesosFetcherURI{
|
||||||
|
Value: uri.Value,
|
||||||
|
Extract: thrift.BoolPtr(*uri.Extract),
|
||||||
|
Cache: thrift.BoolPtr(*uri.Cache),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return newTask
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set AuroraTask Key environment.
|
||||||
|
func (t *AuroraTask) Environment(env string) *AuroraTask {
|
||||||
|
t.task.Job.Environment = env
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set AuroraTask Key Role.
|
||||||
|
func (t *AuroraTask) Role(role string) *AuroraTask {
|
||||||
|
t.task.Job.Role = role
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set AuroraTask Key Name.
|
||||||
|
func (t *AuroraTask) Name(name string) *AuroraTask {
|
||||||
|
t.task.Job.Name = name
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set name of the executor that will the task will be configured to.
|
||||||
|
func (t *AuroraTask) ExecutorName(name string) *AuroraTask {
|
||||||
|
if t.task.ExecutorConfig == nil {
|
||||||
|
t.task.ExecutorConfig = aurora.NewExecutorConfig()
|
||||||
|
}
|
||||||
|
|
||||||
|
t.task.ExecutorConfig.Name = name
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Will be included as part of entire task inside the scheduler that will be serialized.
|
||||||
|
func (t *AuroraTask) ExecutorData(data string) *AuroraTask {
|
||||||
|
if t.task.ExecutorConfig == nil {
|
||||||
|
t.task.ExecutorConfig = aurora.NewExecutorConfig()
|
||||||
|
}
|
||||||
|
|
||||||
|
t.task.ExecutorConfig.Data = data
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AuroraTask) CPU(cpus float64) *AuroraTask {
|
||||||
|
*t.resources[CPU].NumCpus = cpus
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AuroraTask) RAM(ram int64) *AuroraTask {
|
||||||
|
*t.resources[RAM].RamMb = ram
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AuroraTask) Disk(disk int64) *AuroraTask {
|
||||||
|
*t.resources[DISK].DiskMb = disk
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AuroraTask) GPU(gpu int64) *AuroraTask {
|
||||||
|
// GPU resource must be set explicitly since the scheduler by default
|
||||||
|
// rejects jobs with GPU resources attached to it.
|
||||||
|
if _, ok := t.resources[GPU]; !ok {
|
||||||
|
t.resources[GPU] = &aurora.Resource{}
|
||||||
|
t.task.Resources = append(t.task.Resources, t.resources[GPU])
|
||||||
|
}
|
||||||
|
|
||||||
|
t.resources[GPU].NumGpus = &gpu
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AuroraTask) Tier(tier string) *AuroraTask {
|
||||||
|
t.task.Tier = &tier
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// How many failures to tolerate before giving up.
|
||||||
|
func (t *AuroraTask) MaxFailure(maxFail int32) *AuroraTask {
|
||||||
|
t.task.MaxTaskFailures = maxFail
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Restart the job's tasks if they fail
|
||||||
|
func (t *AuroraTask) IsService(isService bool) *AuroraTask {
|
||||||
|
t.task.IsService = isService
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a list of URIs with the same extract and cache configuration. Scheduler must have
|
||||||
|
// --enable_mesos_fetcher flag enabled. Currently there is no duplicate detection.
|
||||||
|
func (t *AuroraTask) AddURIs(extract bool, cache bool, values ...string) *AuroraTask {
|
||||||
|
for _, value := range values {
|
||||||
|
t.task.MesosFetcherUris = append(
|
||||||
|
t.task.MesosFetcherUris,
|
||||||
|
&aurora.MesosFetcherURI{Value: value, Extract: &extract, Cache: &cache})
|
||||||
|
}
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adds a Mesos label to the job. Note that Aurora will add the
|
||||||
|
// prefix "org.apache.aurora.metadata." to the beginning of each key.
|
||||||
|
func (t *AuroraTask) AddLabel(key string, value string) *AuroraTask {
|
||||||
|
t.task.Metadata = append(t.task.Metadata, &aurora.Metadata{Key: key, Value: value})
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add a named port to the job configuration These are random ports as it's
|
||||||
|
// not currently possible to request specific ports using Aurora.
|
||||||
|
func (t *AuroraTask) AddNamedPorts(names ...string) *AuroraTask {
|
||||||
|
t.portCount += len(names)
|
||||||
|
for _, name := range names {
|
||||||
|
t.task.Resources = append(t.task.Resources, &aurora.Resource{NamedPort: &name})
|
||||||
|
}
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adds a request for a number of ports to the job configuration. The names chosen for these ports
|
||||||
|
// will be org.apache.aurora.port.X, where X is the current port count for the job configuration
|
||||||
|
// starting at 0. These are random ports as it's not currently possible to request
|
||||||
|
// specific ports using Aurora.
|
||||||
|
func (t *AuroraTask) AddPorts(num int) *AuroraTask {
|
||||||
|
start := t.portCount
|
||||||
|
t.portCount += num
|
||||||
|
for i := start; i < t.portCount; i++ {
|
||||||
|
portName := portPrefix + strconv.Itoa(i)
|
||||||
|
t.task.Resources = append(t.task.Resources, &aurora.Resource{NamedPort: &portName})
|
||||||
|
}
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// From Aurora Docs:
|
||||||
|
// Add a Value constraint
|
||||||
|
// name - Mesos slave attribute that the constraint is matched against.
|
||||||
|
// If negated = true , treat this as a 'not' - to avoid specific values.
|
||||||
|
// Values - list of values we look for in attribute name
|
||||||
|
func (t *AuroraTask) AddValueConstraint(name string, negated bool, values ...string) *AuroraTask {
|
||||||
|
t.task.Constraints = append(t.task.Constraints,
|
||||||
|
&aurora.Constraint{
|
||||||
|
Name: name,
|
||||||
|
Constraint: &aurora.TaskConstraint{
|
||||||
|
Value: &aurora.ValueConstraint{
|
||||||
|
Negated: negated,
|
||||||
|
Values: values,
|
||||||
|
},
|
||||||
|
Limit: nil,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// From Aurora Docs:
|
||||||
|
// A constraint that specifies the maximum number of active tasks on a host with
|
||||||
|
// a matching attribute that may be scheduled simultaneously.
|
||||||
|
func (t *AuroraTask) AddLimitConstraint(name string, limit int32) *AuroraTask {
|
||||||
|
t.task.Constraints = append(t.task.Constraints,
|
||||||
|
&aurora.Constraint{
|
||||||
|
Name: name,
|
||||||
|
Constraint: &aurora.TaskConstraint{
|
||||||
|
Value: nil,
|
||||||
|
Limit: &aurora.LimitConstraint{Limit: limit},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// From Aurora Docs:
|
||||||
|
// dedicated attribute. Aurora treats this specially, and only allows matching jobs
|
||||||
|
// to run on these machines, and will only schedule matching jobs on these machines.
|
||||||
|
// When a job is created, the scheduler requires that the $role component matches
|
||||||
|
// the role field in the job configuration, and will reject the job creation otherwise.
|
||||||
|
// A wildcard (*) may be used for the role portion of the dedicated attribute, which
|
||||||
|
// will allow any owner to elect for a job to run on the host(s)
|
||||||
|
func (t *AuroraTask) AddDedicatedConstraint(role, name string) *AuroraTask {
|
||||||
|
t.AddValueConstraint(dedicated, false, role+"/"+name)
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set a container to run for the job configuration to run.
|
||||||
|
func (t *AuroraTask) Container(container Container) *AuroraTask {
|
||||||
|
t.task.Container = container.Build()
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AuroraTask) TaskConfig() *aurora.TaskConfig {
|
||||||
|
return t.task
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AuroraTask) JobKey() aurora.JobKey {
|
||||||
|
return *t.task.Job
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AuroraTask) Clone() *AuroraTask {
|
||||||
|
newTask := TaskFromThrift(t.task)
|
||||||
|
|
||||||
|
if t.thermos != nil {
|
||||||
|
newTask.ThermosExecutor(*t.thermos.Clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
return newTask
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AuroraTask) ThermosExecutor(thermos ThermosExecutor) *AuroraTask {
|
||||||
|
t.thermos = &thermos
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *AuroraTask) BuildThermosPayload() error {
|
||||||
|
if t.thermos != nil {
|
||||||
|
|
||||||
|
// Set the correct resources
|
||||||
|
if t.resources[CPU].NumCpus != nil {
|
||||||
|
t.thermos.cpu(*t.resources[CPU].NumCpus)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.resources[RAM].RamMb != nil {
|
||||||
|
t.thermos.ram(*t.resources[RAM].RamMb)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.resources[DISK].DiskMb != nil {
|
||||||
|
t.thermos.disk(*t.resources[DISK].DiskMb)
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.resources[GPU] != nil && t.resources[GPU].NumGpus != nil {
|
||||||
|
t.thermos.gpu(*t.resources[GPU].NumGpus)
|
||||||
|
}
|
||||||
|
|
||||||
|
payload, err := json.Marshal(t.thermos)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
t.ExecutorName(aurora.AURORA_EXECUTOR_NAME)
|
||||||
|
t.ExecutorData(string(payload))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set a partition policy for the job configuration to implement.
|
||||||
|
func (t *AuroraTask) PartitionPolicy(policy aurora.PartitionPolicy) *AuroraTask {
|
||||||
|
t.task.PartitionPolicy = &policy
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
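Taken together, the receivers above form a fluent builder over aurora.TaskConfig. A minimal sketch of how the pieces combine, using only the API defined in this file and the v2 import path the tests already use (the role, name, and resource values are arbitrary):

package realis_test

import (
	"fmt"

	realis "github.com/paypal/gorealis/v2"
)

func ExampleAuroraTask() {
	// Chain the receivers to build up a TaskConfig without touching thrift structs directly.
	task := realis.NewTask().
		Environment("prod").
		Role("vagrant").
		Name("hello_world").
		CPU(0.25).
		RAM(64).
		Disk(128).
		IsService(true).
		AddPorts(2) // named org.apache.aurora.port.0 and org.apache.aurora.port.1

	fmt.Println(task.JobKey().Role)
	// Output: vagrant
}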
57 task_test.go Normal file
@@ -0,0 +1,57 @@
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package realis_test

import (
	"testing"

	realis "github.com/paypal/gorealis/v2"
	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
	"github.com/stretchr/testify/assert"
)

func TestAuroraTask_Clone(t *testing.T) {

	task0 := realis.NewTask().
		Environment("development").
		Role("ubuntu").
		Name("this_is_a_test").
		ExecutorName(aurora.AURORA_EXECUTOR_NAME).
		ExecutorData("{fake:payload}").
		CPU(10).
		RAM(643).
		Disk(1000).
		IsService(true).
		AddPorts(10).
		Tier("preferred").
		MaxFailure(23).
		AddURIs(true, true, "testURI").
		AddLabel("Test", "Value").
		AddNamedPorts("test").
		AddValueConstraint("test", false, "testing").
		AddLimitConstraint("test_limit", 1).
		AddDedicatedConstraint("ubuntu", "name").
		Container(realis.NewDockerContainer().AddParameter("hello", "world").Image("testImg"))

	task1 := task0.Clone()

	assert.EqualValues(t, task0, task1, "Clone does not return the correct deep copy of AuroraTask")

	task0.Container(realis.NewMesosContainer().
		AppcImage("test", "testing").
		AddVolume("test", "test", aurora.Mode_RW))
	task2 := task0.Clone()
	assert.EqualValues(t, task0, task2, "Clone does not return the correct deep copy of AuroraTask")
}
195 thermos.go Normal file
@@ -0,0 +1,195 @@
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package realis

import "encoding/json"

type ThermosExecutor struct {
	Task  ThermosTask        `json:"task"`
	order *ThermosConstraint `json:"-"`
}

type ThermosTask struct {
	Processes   map[string]*ThermosProcess `json:"processes"`
	Constraints []*ThermosConstraint       `json:"constraints"`
	Resources   thermosResources           `json:"resources"`
}

type ThermosConstraint struct {
	Order []string `json:"order,omitempty"`
}

// thermosResources should always be controlled by the Aurora job struct.
// Therefore it is private.
type thermosResources struct {
	CPU  *float64 `json:"cpu,omitempty"`
	Disk *int64   `json:"disk,omitempty"`
	RAM  *int64   `json:"ram,omitempty"`
	GPU  *int64   `json:"gpu,omitempty"`
}

type ThermosProcess struct {
	Name        string `json:"name"`
	Cmdline     string `json:"cmdline"`
	Daemon      bool   `json:"daemon"`
	Ephemeral   bool   `json:"ephemeral"`
	MaxFailures int    `json:"max_failures"`
	MinDuration int    `json:"min_duration"`
	Final       bool   `json:"final"`
}

func NewThermosProcess(name, command string) ThermosProcess {
	return ThermosProcess{
		Name:        name,
		Cmdline:     command,
		MaxFailures: 1,
		Daemon:      false,
		Ephemeral:   false,
		MinDuration: 5,
		Final:       false}
}

// AddProcess adds a process to the task. Processes must have unique names; adding a process
// whose name already exists will overwrite the previous version of the process.
func (t *ThermosExecutor) AddProcess(process ThermosProcess) *ThermosExecutor {
	if len(t.Task.Processes) == 0 {
		t.Task.Processes = make(map[string]*ThermosProcess)
	}

	t.Task.Processes[process.Name] = &process

	// Add Process to order
	t.addToOrder(process.Name)
	return t
}

// The only constraint that should be added for now is the order of execution, therefore this
// receiver is private.
func (t *ThermosExecutor) addConstraint(constraint *ThermosConstraint) *ThermosExecutor {
	if len(t.Task.Constraints) == 0 {
		t.Task.Constraints = make([]*ThermosConstraint, 0)
	}

	t.Task.Constraints = append(t.Task.Constraints, constraint)
	return t
}

// ProcessOrder sets the order in which the processes should be executed. Index 0 will be executed first; index N will be executed last.
func (t *ThermosExecutor) ProcessOrder(order ...string) *ThermosExecutor {
	if t.order == nil {
		t.order = &ThermosConstraint{}
		t.addConstraint(t.order)
	}

	t.order.Order = order
	return t
}

// addToOrder adds a process to the execution order. By default this is a FIFO setup. A custom order can be
// given by overriding it with ProcessOrder.
func (t *ThermosExecutor) addToOrder(name string) {
	if t.order == nil {
		t.order = &ThermosConstraint{Order: make([]string, 0)}
		t.addConstraint(t.order)
	}

	t.order.Order = append(t.order.Order, name)
}

// RAM is determined by the job object.
func (t *ThermosExecutor) ram(ram int64) {
	// Convert from MiB to bytes
	ram *= 1024 * 1024
	t.Task.Resources.RAM = &ram
}

// Disk is determined by the job object.
func (t *ThermosExecutor) disk(disk int64) {
	// Convert from MiB to bytes
	disk *= 1024 * 1024
	t.Task.Resources.Disk = &disk
}

// CPU is determined by the job object.
func (t *ThermosExecutor) cpu(cpu float64) {
	t.Task.Resources.CPU = &cpu
}

// GPU is determined by the job object.
func (t *ThermosExecutor) gpu(gpu int64) {
	t.Task.Resources.GPU = &gpu
}

// Clone returns a deep copy of the Thermos executor.
func (t *ThermosExecutor) Clone() *ThermosExecutor {
	tNew := ThermosExecutor{}

	if t.order != nil {
		tNew.order = &ThermosConstraint{Order: t.order.Order}

		tNew.addConstraint(tNew.order)
	}

	tNew.Task.Processes = make(map[string]*ThermosProcess)

	for name, process := range t.Task.Processes {
		newProcess := *process
		tNew.Task.Processes[name] = &newProcess
	}

	tNew.Task.Resources = t.Task.Resources

	return &tNew
}

type thermosTaskJSON struct {
	Processes   []*ThermosProcess    `json:"processes"`
	Constraints []*ThermosConstraint `json:"constraints"`
	Resources   thermosResources     `json:"resources"`
}

// MarshalJSON provides custom marshaling for ThermosTask to match what Thermos expects.
func (t *ThermosTask) MarshalJSON() ([]byte, error) {

	// Convert map to array to match what Thermos expects
	processes := make([]*ThermosProcess, 0)
	for _, process := range t.Processes {
		processes = append(processes, process)
	}

	return json.Marshal(&thermosTaskJSON{
		Processes:   processes,
		Constraints: t.Constraints,
		Resources:   t.Resources,
	})
}

// UnmarshalJSON provides custom unmarshaling to match what Thermos would contain.
func (t *ThermosTask) UnmarshalJSON(data []byte) error {

	// Thermos format
	aux := &thermosTaskJSON{}

	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}

	processes := make(map[string]*ThermosProcess)
	for _, process := range aux.Processes {
		processes[process.Name] = process
	}

	// Copy the decoded fields back onto the receiver; without these
	// assignments the parsed payload would be discarded.
	t.Processes = processes
	t.Constraints = aux.Constraints
	t.Resources = aux.Resources

	return nil
}
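The custom (un)marshalers above are what let a ThermosExecutor round-trip through the JSON layout the Thermos executor consumes. A small sketch of the intended flow, using only the exported receivers from this file (the process names, commands, and the helper name `exampleThermosRoundTrip` are illustrative, not part of the package):

package realis

import (
	"encoding/json"
	"fmt"
)

func exampleThermosRoundTrip() error {
	executor := ThermosExecutor{}
	executor.AddProcess(NewThermosProcess("setup", "echo preparing")).
		AddProcess(NewThermosProcess("run", "echo hello world"))

	// MarshalJSON flattens the process map into the array layout Thermos expects.
	payload, err := json.Marshal(&executor)
	if err != nil {
		return err
	}

	// UnmarshalJSON rebuilds the process map keyed by process name.
	decoded := ThermosExecutor{}
	if err := json.Unmarshal(payload, &decoded); err != nil {
		return err
	}

	fmt.Println(len(decoded.Task.Processes)) // 2
	return nil
}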
71 thermos_test.go Normal file
@@ -0,0 +1,71 @@
package realis

import (
	"encoding/json"
	"testing"

	"github.com/apache/thrift/lib/go/thrift"
	"github.com/stretchr/testify/assert"
)

func TestThermosTask(t *testing.T) {

	// Test that we can successfully deserialize a minimum subset of an Aurora generated thermos payload
	thermosJSON := []byte(
		`{
  "task": {
    "processes": [
      {
        "daemon": false,
        "name": "hello",
        "ephemeral": false,
        "max_failures": 1,
        "min_duration": 5,
        "cmdline": "\n while true; do\n echo hello world from gorealis\n sleep 10\n done\n ",
        "final": false
      }
    ],
    "resources": {
      "gpu": 0,
      "disk": 134217728,
      "ram": 134217728,
      "cpu": 1.1
    },
    "constraints": [
      {
        "order": [
          "hello"
        ]
      }
    ]
  }
}`)
	thermos := ThermosExecutor{}

	err := json.Unmarshal(thermosJSON, &thermos)

	assert.NoError(t, err)

	process := &ThermosProcess{
		Daemon:      false,
		Name:        "hello",
		Ephemeral:   false,
		MaxFailures: 1,
		MinDuration: 5,
		Cmdline:     "\n while true; do\n echo hello world from gorealis\n sleep 10\n done\n ",
		Final:       false,
	}

	constraint := &ThermosConstraint{Order: []string{process.Name}}

	thermosExpected := ThermosExecutor{
		Task: ThermosTask{
			Processes:   map[string]*ThermosProcess{process.Name: process},
			Constraints: []*ThermosConstraint{constraint},
			Resources: thermosResources{CPU: thrift.Float64Ptr(1.1),
				Disk: thrift.Int64Ptr(134217728),
				RAM:  thrift.Int64Ptr(134217728),
				GPU:  thrift.Int64Ptr(0)}}}

	assert.True(t, assert.ObjectsAreEqualValues(thermosExpected, thermos),
		"expected and decoded Thermos executors should be equal")
}
188 updatejob.go
@@ -1,188 +0,0 @@
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package realis

import (
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

// UpdateJob is a structure to collect all information required to create a job update.
type UpdateJob struct {
	Job // SetInstanceCount for job is hidden, access via full qualifier
	req *aurora.JobUpdateRequest
}

// NewDefaultUpdateJob creates an UpdateJob object with opinionated default settings.
func NewDefaultUpdateJob(config *aurora.TaskConfig) *UpdateJob {

	req := aurora.NewJobUpdateRequest()
	req.TaskConfig = config
	req.Settings = NewUpdateSettings()

	job, ok := NewJob().(*AuroraJob)
	if !ok {
		// This should never happen but it is here as a safeguard
		return nil
	}

	job.jobConfig.TaskConfig = config

	// Rebuild resource map from TaskConfig
	for _, ptr := range config.Resources {
		if ptr.NumCpus != nil {
			job.resources[CPU].NumCpus = ptr.NumCpus
			continue // Guard against Union violations that Go won't enforce
		}

		if ptr.RamMb != nil {
			job.resources[RAM].RamMb = ptr.RamMb
			continue
		}

		if ptr.DiskMb != nil {
			job.resources[DISK].DiskMb = ptr.DiskMb
			continue
		}

		if ptr.NumGpus != nil {
			job.resources[GPU] = &aurora.Resource{NumGpus: ptr.NumGpus}
			continue
		}
	}

	// Mirrors defaults set by Pystachio
	req.Settings.UpdateGroupSize = 1
	req.Settings.WaitForBatchCompletion = false
	req.Settings.MinWaitInInstanceRunningMs = 45000
	req.Settings.MaxPerInstanceFailures = 0
	req.Settings.MaxFailedInstances = 0
	req.Settings.RollbackOnFailure = true

	//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
	return &UpdateJob{Job: job, req: req}
}

// NewUpdateJob creates an UpdateJob object without default settings.
func NewUpdateJob(config *aurora.TaskConfig, settings *aurora.JobUpdateSettings) *UpdateJob {

	req := aurora.NewJobUpdateRequest()
	req.TaskConfig = config
	req.Settings = settings

	job, ok := NewJob().(*AuroraJob)
	if !ok {
		// This should never happen but it is here as a safeguard
		return nil
	}
	job.jobConfig.TaskConfig = config

	// Rebuild resource map from TaskConfig
	for _, ptr := range config.Resources {
		if ptr.NumCpus != nil {
			job.resources[CPU].NumCpus = ptr.NumCpus
			continue // Guard against Union violations that Go won't enforce
		}

		if ptr.RamMb != nil {
			job.resources[RAM].RamMb = ptr.RamMb
			continue
		}

		if ptr.DiskMb != nil {
			job.resources[DISK].DiskMb = ptr.DiskMb
			continue
		}

		if ptr.NumGpus != nil {
			job.resources[GPU] = &aurora.Resource{}
			job.resources[GPU].NumGpus = ptr.NumGpus
			continue // Guard against Union violations that Go won't enforce
		}
	}

	//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
	return &UpdateJob{Job: job, req: req}
}

// InstanceCount sets the instance count the job will have after the update.
func (u *UpdateJob) InstanceCount(inst int32) *UpdateJob {
	u.req.InstanceCount = inst
	return u
}

// BatchSize sets the max number of instances being updated at any given moment.
func (u *UpdateJob) BatchSize(size int32) *UpdateJob {
	u.req.Settings.UpdateGroupSize = size
	return u
}

// WatchTime sets the minimum number of milliseconds a shard must remain in the RUNNING state before it is considered a success.
func (u *UpdateJob) WatchTime(ms int32) *UpdateJob {
	u.req.Settings.MinWaitInInstanceRunningMs = ms
	return u
}

// WaitForBatchCompletion configures the job update to wait for all instances in a group to be done before moving on.
func (u *UpdateJob) WaitForBatchCompletion(batchWait bool) *UpdateJob {
	u.req.Settings.WaitForBatchCompletion = batchWait
	return u
}

// MaxPerInstanceFailures sets the max number of instance failures to tolerate before marking the instance as FAILED.
func (u *UpdateJob) MaxPerInstanceFailures(inst int32) *UpdateJob {
	u.req.Settings.MaxPerInstanceFailures = inst
	return u
}

// MaxFailedInstances sets the max number of FAILED instances to tolerate before terminating the update.
func (u *UpdateJob) MaxFailedInstances(inst int32) *UpdateJob {
	u.req.Settings.MaxFailedInstances = inst
	return u
}

// RollbackOnFail configures the job to roll back automatically after a job update fails.
func (u *UpdateJob) RollbackOnFail(rollback bool) *UpdateJob {
	u.req.Settings.RollbackOnFailure = rollback
	return u
}

// BatchUpdateStrategy sets the update strategy to a batch strategy.
func (u *UpdateJob) BatchUpdateStrategy(strategy aurora.BatchJobUpdateStrategy) *UpdateJob {
	u.req.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{BatchStrategy: &strategy}
	return u
}

// QueueUpdateStrategy sets the update strategy to a queue strategy.
func (u *UpdateJob) QueueUpdateStrategy(strategy aurora.QueueJobUpdateStrategy) *UpdateJob {
	u.req.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{QueueStrategy: &strategy}
	return u
}

// VariableBatchStrategy sets the update strategy to a variable batch strategy.
func (u *UpdateJob) VariableBatchStrategy(strategy aurora.VariableBatchJobUpdateStrategy) *UpdateJob {
	u.req.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{VarBatchStrategy: &strategy}
	return u
}

// NewUpdateSettings returns an opinionated set of job update settings.
func NewUpdateSettings() *aurora.JobUpdateSettings {
	us := new(aurora.JobUpdateSettings)
	// Mirrors defaults set by Pystachio
	us.UpdateGroupSize = 1
	us.WaitForBatchCompletion = false
	us.MinWaitInInstanceRunningMs = 45000
	us.MaxPerInstanceFailures = 0
	us.MaxFailedInstances = 0
	us.RollbackOnFailure = true

	return us
}
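For reference, the builder removed here composed in the usual fluent style: defaults first, then per-update overrides layered on top. A brief sketch against this old API (the helper name `buildRollingUpdate` and all the numeric values are illustrative only):

package realis

import (
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

// buildRollingUpdate sketches how the removed UpdateJob builder was driven.
func buildRollingUpdate(config *aurora.TaskConfig) *UpdateJob {
	return NewDefaultUpdateJob(config).
		InstanceCount(6).      // scale to six instances as part of the update
		BatchSize(2).          // update two instances at a time
		WatchTime(60000).      // a shard must stay RUNNING for 60s to count as healthy
		MaxFailedInstances(1). // abort the update after one FAILED instance
		RollbackOnFail(true)   // roll back automatically on failure
}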
104 util.go
@@ -1,47 +1,18 @@
 package realis
 
 import (
-	"crypto/x509"
-	"io/ioutil"
 	"net/url"
-	"os"
-	"path/filepath"
 	"strings"
 
-	"github.com/paypal/gorealis/gen-go/apache/aurora"
+	"github.com/paypal/gorealis/v2/gen-go/apache/aurora"
 	"github.com/pkg/errors"
 )
 
-const apiPath = "/api"
-
-// ActiveStates - States a task may be in when active.
 var ActiveStates = make(map[aurora.ScheduleStatus]bool)
-
-// SlaveAssignedStates - States a task may be in when it has already been assigned to a Mesos agent.
 var SlaveAssignedStates = make(map[aurora.ScheduleStatus]bool)
-
-// LiveStates - States a task may be in when it is live (e.g. able to take traffic)
 var LiveStates = make(map[aurora.ScheduleStatus]bool)
-
-// TerminalStates - Set of states a task may not transition away from.
 var TerminalStates = make(map[aurora.ScheduleStatus]bool)
-
-// ActiveJobUpdateStates - States a Job Update may be in where it is considered active.
 var ActiveJobUpdateStates = make(map[aurora.JobUpdateStatus]bool)
-
-// TerminalUpdateStates returns a slice containing all the terminal states an update may be in.
-// This is a function in order to avoid having a slice that can be accidentally mutated.
-func TerminalUpdateStates() []aurora.JobUpdateStatus {
-	return []aurora.JobUpdateStatus{
-		aurora.JobUpdateStatus_ROLLED_FORWARD,
-		aurora.JobUpdateStatus_ROLLED_BACK,
-		aurora.JobUpdateStatus_ABORTED,
-		aurora.JobUpdateStatus_ERROR,
-		aurora.JobUpdateStatus_FAILED,
-	}
-}
-
-// AwaitingPulseJobUpdateStates - States a job update may be in where it is waiting for a pulse.
 var AwaitingPulseJobUpdateStates = make(map[aurora.JobUpdateStatus]bool)
 
 func init() {
@@ -69,57 +40,14 @@ func init() {
 	}
 }
 
-// createCertPool will attempt to load certificates into a certificate pool from a given directory.
-// Only files with an extension contained in the extension map are considered.
-// This function ignores any files that cannot be read successfully or cannot be added to the certPool
-// successfully.
-func createCertPool(path string, extensions map[string]struct{}) (*x509.CertPool, error) {
-	_, err := os.Stat(path)
-	if err != nil {
-		return nil, errors.Wrap(err, "unable to load certificates")
-	}
-
-	caFiles, err := ioutil.ReadDir(path)
-	if err != nil {
-		return nil, err
-	}
-
-	certPool := x509.NewCertPool()
-	loadedCerts := 0
-	for _, cert := range caFiles {
-		// Skip directories
-		if cert.IsDir() {
-			continue
-		}
-
-		// Skip any files that do not contain the right extension
-		if _, ok := extensions[filepath.Ext(cert.Name())]; !ok {
-			continue
-		}
-
-		pem, err := ioutil.ReadFile(filepath.Join(path, cert.Name()))
-		if err != nil {
-			continue
-		}
-
-		if certPool.AppendCertsFromPEM(pem) {
-			loadedCerts++
-		}
-	}
-	if loadedCerts == 0 {
-		return nil, errors.New("no certificates were able to be successfully loaded")
-	}
-	return certPool, nil
-}
-
-func validateAuroraURL(location string) (string, error) {
-
+func validateAuroraAddress(address string) (string, error) {
 	// If no protocol defined, assume http
-	if !strings.Contains(location, "://") {
-		location = "http://" + location
+	if !strings.Contains(address, "://") {
+		address = "http://" + address
 	}
 
-	u, err := url.Parse(location)
+	u, err := url.Parse(address)
 
 	if err != nil {
 		return "", errors.Wrap(err, "error parsing url")
@@ -139,29 +67,9 @@ func validateAuroraURL(location string) (string, error) {
 		return "", errors.Errorf("only protocols http and https are supported %v\n", u.Scheme)
 	}
 
-	// This could theoretically be elsewhere but we'll be strict for the sake of simplicity
-	if u.Path != apiPath {
+	if u.Path != "/api" {
 		return "", errors.Errorf("expected /api path %v\n", u.Path)
 	}
 
 	return u.String(), nil
 }
 
-func calculateCurrentBatch(updatingInstances int32, batchSizes []int32) int {
-	for i, size := range batchSizes {
-		updatingInstances -= size
-		if updatingInstances <= 0 {
-			return i
-		}
-	}
-
-	// Overflow batches
-	batchCount := len(batchSizes) - 1
-	lastBatchIndex := len(batchSizes) - 1
-	batchCount += int(updatingInstances / batchSizes[lastBatchIndex])
-	if updatingInstances%batchSizes[lastBatchIndex] != 0 {
-		batchCount++
-	}
-	return batchCount
-}
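The overflow arithmetic in the removed calculateCurrentBatch helper is easiest to see with concrete numbers. Here is a standalone sketch of the same logic (renamed `currentBatch`, since the original is package-private and deleted in this change), exercised with the same inputs the old tests used:

package main

import "fmt"

// currentBatch mirrors the removed helper: walk the explicit batch sizes first,
// then spill any remaining instances into repeats of the last batch size.
func currentBatch(updatingInstances int32, batchSizes []int32) int {
	for i, size := range batchSizes {
		updatingInstances -= size
		if updatingInstances <= 0 {
			return i
		}
	}

	last := batchSizes[len(batchSizes)-1]
	batchCount := len(batchSizes) - 1
	batchCount += int(updatingInstances / last)
	if updatingInstances%last != 0 {
		batchCount++
	}
	return batchCount
}

func main() {
	// Sizes [1, 2] cover batches 0 and 1; with 5 instances updating,
	// 5-1-2 = 2 remain, which spill into one extra repeat of size 2, giving index 2.
	fmt.Println(currentBatch(5, []int32{1, 2})) // 2

	// A single size of 2 with 10 instances: 10-2 = 8 remain, 8/2 = 4 extra, giving index 4.
	fmt.Println(currentBatch(10, []int32{2})) // 4
}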
114 util_test.go
@@ -1,114 +0,0 @@
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package realis

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAuroraURLValidator(t *testing.T) {
	t.Run("badURL", func(t *testing.T) {
		url, err := validateAuroraURL("http://badurl.com/badpath")
		assert.Empty(t, url)
		assert.Error(t, err)
	})

	t.Run("URLHttp", func(t *testing.T) {
		url, err := validateAuroraURL("http://goodurl.com:8081/api")
		assert.Equal(t, "http://goodurl.com:8081/api", url)
		assert.NoError(t, err)
	})

	t.Run("URLHttps", func(t *testing.T) {
		url, err := validateAuroraURL("https://goodurl.com:8081/api")
		assert.Equal(t, "https://goodurl.com:8081/api", url)
		assert.NoError(t, err)
	})

	t.Run("URLNoPath", func(t *testing.T) {
		url, err := validateAuroraURL("http://goodurl.com:8081")
		assert.Equal(t, "http://goodurl.com:8081/api", url)
		assert.NoError(t, err)
	})

	t.Run("ipAddrNoPath", func(t *testing.T) {
		url, err := validateAuroraURL("http://192.168.1.33:8081")
		assert.Equal(t, "http://192.168.1.33:8081/api", url)
		assert.NoError(t, err)
	})

	t.Run("URLNoProtocol", func(t *testing.T) {
		url, err := validateAuroraURL("goodurl.com:8081/api")
		assert.Equal(t, "http://goodurl.com:8081/api", url)
		assert.NoError(t, err)
	})

	t.Run("URLNoProtocolNoPathNoPort", func(t *testing.T) {
		url, err := validateAuroraURL("goodurl.com")
		assert.Equal(t, "http://goodurl.com:8081/api", url)
		assert.NoError(t, err)
	})
}

func TestCurrentBatchCalculator(t *testing.T) {
	t.Run("singleBatchOverflow", func(t *testing.T) {
		curBatch := calculateCurrentBatch(10, []int32{2})
		assert.Equal(t, 4, curBatch)
	})

	t.Run("noInstancesUpdating", func(t *testing.T) {
		curBatch := calculateCurrentBatch(0, []int32{2})
		assert.Equal(t, 0, curBatch)
	})

	t.Run("evenMatchSingleBatch", func(t *testing.T) {
		curBatch := calculateCurrentBatch(2, []int32{2})
		assert.Equal(t, 0, curBatch)
	})

	t.Run("moreInstancesThanBatches", func(t *testing.T) {
		curBatch := calculateCurrentBatch(5, []int32{1, 2})
		assert.Equal(t, 2, curBatch)
	})

	t.Run("moreInstancesThanBatchesDecreasing", func(t *testing.T) {
		curBatch := calculateCurrentBatch(5, []int32{2, 1})
		assert.Equal(t, 3, curBatch)
	})

	t.Run("unevenFit", func(t *testing.T) {
		curBatch := calculateCurrentBatch(2, []int32{1, 2})
		assert.Equal(t, 1, curBatch)
	})

	t.Run("halfWay", func(t *testing.T) {
		curBatch := calculateCurrentBatch(1, []int32{1, 2})
		assert.Equal(t, 0, curBatch)
	})
}

func TestCertPoolCreator(t *testing.T) {
	extensions := map[string]struct{}{".crt": {}}

	_, err := createCertPool("examples/certs", extensions)
	assert.NoError(t, err)

	t.Run("badDir", func(t *testing.T) {
		_, err := createCertPool("idontexist", extensions)
		assert.Error(t, err)
	})
}
28 vendor/github.com/davecgh/go-spew/.travis.yml generated vendored
@@ -1,14 +1,28 @@
 language: go
+go_import_path: github.com/davecgh/go-spew
 go:
-    - 1.5.4
-    - 1.6.3
-    - 1.7
+    - 1.6.x
+    - 1.7.x
+    - 1.8.x
+    - 1.9.x
+    - 1.10.x
+    - tip
+sudo: false
 install:
-    - go get -v golang.org/x/tools/cmd/cover
+    - go get -v github.com/alecthomas/gometalinter
+    - gometalinter --install
 script:
-    - go test -v -tags=safe ./spew
-    - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
+    - export PATH=$PATH:$HOME/gopath/bin
+    - export GORACE="halt_on_error=1"
+    - test -z "$(gometalinter --disable-all
+      --enable=gofmt
+      --enable=golint
+      --enable=vet
+      --enable=gosimple
+      --enable=unconvert
+      --deadline=4m ./spew | tee /dev/stderr)"
+    - go test -v -race -tags safe ./spew
+    - go test -v -race -tags testcgo ./spew -covermode=atomic -coverprofile=profile.cov
 after_success:
     - go get -v github.com/mattn/goveralls
-    - export PATH=$PATH:$HOME/gopath/bin
     - goveralls -coverprofile=profile.cov -service=travis-ci
2 vendor/github.com/davecgh/go-spew/LICENSE generated vendored
@@ -2,7 +2,7 @@ ISC License
 
 Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
 
-Permission to use, copy, modify, and distribute this software for any
+Permission to use, copy, modify, and/or distribute this software for any
 purpose with or without fee is hereby granted, provided that the above
 copyright notice and this permission notice appear in all copies.
12 vendor/github.com/davecgh/go-spew/README.md generated vendored
@@ -1,12 +1,9 @@
 go-spew
 =======
 
-[![Build Status](https://travis-ci.org/davecgh/go-spew.svg?branch=master)]
-(https://travis-ci.org/davecgh/go-spew) [![ISC License]
-(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status]
-(https://img.shields.io/coveralls/davecgh/go-spew.svg)]
-(https://coveralls.io/r/davecgh/go-spew?branch=master)
+[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)](https://travis-ci.org/davecgh/go-spew)
+[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
+[![Coverage Status](https://img.shields.io/coveralls/davecgh/go-spew.svg)](https://coveralls.io/r/davecgh/go-spew?branch=master)
 
 Go-spew implements a deep pretty printer for Go data structures to aid in
 debugging. A comprehensive suite of tests with 100% test coverage is provided
@@ -21,8 +18,7 @@ post about it
 
 ## Documentation
 
-[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)]
-(http://godoc.org/github.com/davecgh/go-spew/spew)
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/davecgh/go-spew/spew)
 
 Full `go doc` style documentation for the project can be viewed online without
 installing this package by using the excellent GoDoc site here:
187 vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored
@@ -16,7 +16,9 @@
|
||||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||||
// tag is deprecated and thus should not be used.
|
// tag is deprecated and thus should not be used.
|
||||||
// +build !js,!appengine,!safe,!disableunsafe
|
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||||
|
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||||
|
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||||
|
|
||||||
package spew
|
package spew
|
||||||
|
|
||||||
|
@ -34,80 +36,49 @@ const (
|
||||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
type flag uintptr
|
||||||
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
|
|
||||||
// internal reflect.Value fields. These values are valid before golang
|
|
||||||
// commit ecccf07e7f9d which changed the format. The are also valid
|
|
||||||
// after commit 82f48826c6c7 which changed the format again to mirror
|
|
||||||
// the original format. Code in the init function updates these offsets
|
|
||||||
// as necessary.
|
|
||||||
offsetPtr = uintptr(ptrSize)
|
|
||||||
offsetScalar = uintptr(0)
|
|
||||||
offsetFlag = uintptr(ptrSize * 2)
|
|
||||||
|
|
||||||
// flagKindWidth and flagKindShift indicate various bits that the
|
var (
|
||||||
// reflect package uses internally to track kind information.
|
// flagRO indicates whether the value field of a reflect.Value
|
||||||
//
|
// is read-only.
|
||||||
// flagRO indicates whether or not the value field of a reflect.Value is
|
flagRO flag
|
||||||
// read-only.
|
|
||||||
//
|
// flagAddr indicates whether the address of the reflect.Value's
|
||||||
// flagIndir indicates whether the value field of a reflect.Value is
|
// value may be taken.
|
||||||
// the actual data or a pointer to the data.
|
flagAddr flag
|
||||||
//
|
|
||||||
// These values are valid before golang commit 90a7c3c86944 which
|
|
||||||
// changed their positions. Code in the init function updates these
|
|
||||||
// flags as necessary.
|
|
||||||
flagKindWidth = uintptr(5)
|
|
||||||
flagKindShift = uintptr(flagKindWidth - 1)
|
|
||||||
flagRO = uintptr(1 << 0)
|
|
||||||
flagIndir = uintptr(1 << 1)
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
// flagKindMask holds the bits that make up the kind
|
||||||
// Older versions of reflect.Value stored small integers directly in the
|
// part of the flags field. In all the supported versions,
|
||||||
// ptr field (which is named val in the older versions). Versions
|
// it is in the lower 5 bits.
|
||||||
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
|
const flagKindMask = flag(0x1f)
|
||||||
// scalar for this purpose which unfortunately came before the flag
|
|
||||||
// field, so the offset of the flag field is different for those
|
|
||||||
// versions.
|
|
||||||
//
|
|
||||||
// This code constructs a new reflect.Value from a known small integer
|
|
||||||
// and checks if the size of the reflect.Value struct indicates it has
|
|
||||||
// the scalar field. When it does, the offsets are updated accordingly.
|
|
||||||
vv := reflect.ValueOf(0xf00)
|
|
||||||
if unsafe.Sizeof(vv) == (ptrSize * 4) {
|
|
||||||
offsetScalar = ptrSize * 2
|
|
||||||
offsetFlag = ptrSize * 3
|
|
||||||
}
|
|
||||||
|
|
||||||
-	// Commit 90a7c3c86944 changed the flag positions such that the low
-	// order bits are the kind. This code extracts the kind from the flags
-	// field and ensures it's the correct type. When it's not, the flag
-	// order has been changed to the newer format, so the flags are updated
-	// accordingly.
-	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
-	upfv := *(*uintptr)(upf)
-	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
-	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
-		flagKindShift = 0
-		flagRO = 1 << 5
-		flagIndir = 1 << 6
-
-		// Commit adf9b30e5594 modified the flags to separate the
-		// flagRO flag into two bits which specifies whether or not the
-		// field is embedded. This causes flagIndir to move over a bit
-		// and means that flagRO is the combination of either of the
-		// original flagRO bit and the new bit.
-		//
-		// This code detects the change by extracting what used to be
-		// the indirect bit to ensure it's set. When it's not, the flag
-		// order has been changed to the newer format, so the flags are
-		// updated accordingly.
-		if upfv&flagIndir == 0 {
-			flagRO = 3 << 5
-			flagIndir = 1 << 7
-		}
-	}
-}
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+	ro, addr flag
+}{{
+	// From Go 1.4 to 1.5
+	ro:   1 << 5,
+	addr: 1 << 7,
+}, {
+	// Up to Go tip.
+	ro:   1<<5 | 1<<6,
+	addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}

 // unsafeReflectValue converts the passed reflect.Value into a one that bypasses

@@ -119,34 +90,56 @@ func init() {
 // This allows us to check for implementations of the Stringer and error
 // interfaces to be used for pretty printing ordinarily unaddressable and
 // inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
-	indirects := 1
-	vt := v.Type()
-	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
-	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
-	if rvf&flagIndir != 0 {
-		vt = reflect.PtrTo(v.Type())
-		indirects++
-	} else if offsetScalar != 0 {
-		// The value is in the scalar field when it's not one of the
-		// reference types.
-		switch vt.Kind() {
-		case reflect.Uintptr:
-		case reflect.Chan:
-		case reflect.Func:
-		case reflect.Map:
-		case reflect.Ptr:
-		case reflect.UnsafePointer:
-		default:
-			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
-				offsetScalar)
-		}
-	}
-	pv := reflect.NewAt(vt, upv)
-	rv = pv
-	for i := 0; i < indirects; i++ {
-		rv = rv.Elem()
-	}
-	return rv
-}
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+		return v
+	}
+	flagFieldPtr := flagField(&v)
+	*flagFieldPtr &^= flagRO
+	*flagFieldPtr |= flagAddr
+	return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+		panic("reflect.Value flag field has changed kind")
+	}
+	type t0 int
+	var t struct {
+		A t0
+		// t0 will have flagEmbedRO set.
+		t0
+		// a will have flagStickyRO set
+		a t0
+	}
+	vA := reflect.ValueOf(t).FieldByName("A")
+	va := reflect.ValueOf(t).FieldByName("a")
+	vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+	// Infer flagRO from the difference between the flags
+	// for the (otherwise identical) fields in t.
+	flagPublic := *flagField(&vA)
+	flagWithRO := *flagField(&va) | *flagField(&vt0)
+	flagRO = flagPublic ^ flagWithRO
+
+	// Infer flagAddr from the difference between a value
+	// taken from a pointer and not.
+	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+	flagNoPtr := *flagField(&vA)
+	flagPtr := *flagField(&vPtrA)
+	flagAddr = flagNoPtr ^ flagPtr
+
+	// Check that the inferred flags tally with one of the known versions.
+	for _, f := range okFlags {
+		if flagRO == f.ro && flagAddr == f.addr {
+			return
+		}
+	}
+	panic("reflect.Value read-only flag has changed semantics")
+}
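The replacement code above no longer rebuilds a value through reflect.NewAt; it clears reflect's read-only flag bits in place and marks the value addressable, after the init() check has confirmed the bit layout. Below is a minimal, self-contained sketch of the same trick. The flagRO and flagAddr constants are hard-coded here for a recent Go layout (the second okFlags entry above); the vendored code instead infers them at init and panics if the layout is unrecognized, which is the safer approach.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// flag mirrors the unexported reflect.Value flag field. The bit positions
// below assume a modern toolchain; this is exactly the assumption that the
// okFlags table in the diff above exists to verify.
type flag uintptr

const (
	flagRO   flag = 1<<5 | 1<<6
	flagAddr flag = 1 << 8
)

var flagOffset = func() uintptr {
	f, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	return f.Offset
}()

// bypass clears the read-only bits on v so Interface() works on it.
func bypass(v reflect.Value) reflect.Value {
	fl := (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + flagOffset))
	*fl &^= flagRO
	*fl |= flagAddr
	return v
}

type secret struct{ hidden string }

func main() {
	s := secret{hidden: "not exported"}
	v := reflect.ValueOf(&s).Elem().Field(0) // v.CanInterface() is false here
	fmt.Println(bypass(v).Interface())       // prints: not exported
}

This is inherently fragile against future reflect changes, which is why the vendored version pairs the trick with the init() sanity checks rather than trusting hard-coded bits.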
2  vendor/github.com/davecgh/go-spew/spew/bypasssafe.go  generated vendored

@@ -16,7 +16,7 @@
 // when the code is running on Google App Engine, compiled by GopherJS, or
 // "-tags safe" is added to the go build command line. The "disableunsafe"
 // tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe
+// +build js appengine safe disableunsafe !go1.4

 package spew
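The only change here is the added !go1.4 term. In the old // +build constraint syntax, space-separated terms on one line OR together, while comma-separated terms and additional +build lines AND together, so the safe fallback file is now also selected on toolchains too old to define the go1.4 release tag. A generic sketch of those rules, with hypothetical file, package, and tag names:

// Hypothetical demo_fast.go: built on Linux OR Darwin (terms on the first
// line OR together), AND only when -tags fast is passed (separate +build
// lines AND together).

// +build linux darwin
// +build fast

package demo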
2  vendor/github.com/davecgh/go-spew/spew/common.go  generated vendored

@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
 	w.Write(closeParenBytes)
 }

-// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
 // prefix to Writer w.
 func printHexPtr(w io.Writer, p uintptr) {
 	// Null pointer.
10  vendor/github.com/davecgh/go-spew/spew/dump.go  generated vendored

@@ -35,16 +35,16 @@ var (

 	// cCharRE is a regular expression that matches a cgo char.
 	// It is used to detect character arrays to hexdump them.
-	cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)

 	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
 	// char. It is used to detect unsigned character arrays to hexdump
 	// them.
-	cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)

 	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
 	// It is used to detect uint8_t arrays to hexdump them.
-	cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
 )

 // dumpState contains information about the state of a dump operation.

@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
 	// Display dereferenced value.
 	d.w.Write(openParenBytes)
 	switch {
-	case nilFound == true:
+	case nilFound:
 		d.w.Write(nilAngleBytes)

-	case cycleFound == true:
+	case cycleFound:
 		d.w.Write(circularBytes)

 	default:
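The regexp changes above swap interpreted string literals for raw (backtick) literals; the compiled patterns are identical, only the escaping burden changes. A small runnable comparison:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Interpreted literal: the backslash must be doubled so the regexp
	// engine receives \. (an escaped, literal dot).
	interpreted := regexp.MustCompile("^.*\\._Ctype_char$")

	// Raw literal: what you write is what the engine gets, so a single
	// backslash suffices. Same automaton, easier to read.
	raw := regexp.MustCompile(`^.*\._Ctype_char$`)

	fmt.Println(interpreted.MatchString("main._Ctype_char")) // true
	fmt.Println(raw.MatchString("main._Ctype_char"))         // true
}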
2  vendor/github.com/davecgh/go-spew/spew/dump_test.go  generated vendored

@@ -768,7 +768,7 @@ func addUintptrDumpTests() {

 func addUnsafePointerDumpTests() {
 	// Null pointer.
-	v := unsafe.Pointer(uintptr(0))
+	v := unsafe.Pointer(nil)
 	nv := (*unsafe.Pointer)(nil)
 	pv := &v
 	vAddr := fmt.Sprintf("%p", pv)
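A note on this one-line change: unsafe.Pointer(uintptr(0)) converts an integer to a pointer, a pattern that, to my understanding, go vet's unsafeptr check can flag as a possibly invalid conversion; unsafe.Pointer(nil) expresses the same null pointer directly. A trivial sketch:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Preferred: a nil unsafe.Pointer, no integer-to-pointer conversion.
	var p unsafe.Pointer = unsafe.Pointer(nil)
	fmt.Println(p == nil) // true
}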
6  vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go  generated vendored

@@ -82,18 +82,20 @@ func addCgoDumpTests() {
 	v5Len := fmt.Sprintf("%d", v5l)
 	v5Cap := fmt.Sprintf("%d", v5c)
 	v5t := "[6]testdata._Ctype_uint8_t"
+	v5t2 := "[6]testdata._Ctype_uchar"
 	v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
 		"{\n 00000000 74 65 73 74 35 00 " +
 		" |test5.|\n}"
-	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+	addDumpTest(v5, "("+v5t+") "+v5s+"\n", "("+v5t2+") "+v5s+"\n")

 	// C typedefed unsigned char array.
 	v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
 	v6Len := fmt.Sprintf("%d", v6l)
 	v6Cap := fmt.Sprintf("%d", v6c)
 	v6t := "[6]testdata._Ctype_custom_uchar_t"
+	v6t2 := "[6]testdata._Ctype_uchar"
 	v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
 		"{\n 00000000 74 65 73 74 36 00 " +
 		" |test6.|\n}"
-	addDumpTest(v6, "("+v6t+") "+v6s+"\n")
+	addDumpTest(v6, "("+v6t+") "+v6s+"\n", "("+v6t2+") "+v6s+"\n")
 }
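The v5t2/v6t2 additions accept a second rendering of the same dump because cgo's generated type names differ across Go releases: judging from the expected strings, a typedef'd uint8_t can surface as either _Ctype_uint8_t or _Ctype_uchar. addDumpTest evidently takes a variadic list of acceptable outputs. A hypothetical helper in the same spirit:

package spew_test

import "testing"

// assertAnyEqual passes if got matches any of the accepted renderings,
// mirroring the variadic addDumpTest call above. The name and signature
// are illustrative, not part of the vendored package.
func assertAnyEqual(t *testing.T, got string, wants ...string) {
	for _, w := range wants {
		if got == w {
			return
		}
	}
	t.Errorf("got %q, want one of %q", got, wants)
}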
4  vendor/github.com/davecgh/go-spew/spew/format.go  generated vendored

@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {

 	// Display dereferenced value.
 	switch {
-	case nilFound == true:
+	case nilFound:
 		f.fs.Write(nilAngleBytes)

-	case cycleFound == true:
+	case cycleFound:
 		f.fs.Write(circularShortBytes)

 	default:
6  vendor/github.com/davecgh/go-spew/spew/format_test.go  generated vendored

@@ -1083,7 +1083,7 @@ func addUintptrFormatterTests() {

 func addUnsafePointerFormatterTests() {
 	// Null pointer.
-	v := unsafe.Pointer(uintptr(0))
+	v := unsafe.Pointer(nil)
 	nv := (*unsafe.Pointer)(nil)
 	pv := &v
 	vAddr := fmt.Sprintf("%p", pv)

@@ -1536,14 +1536,14 @@ func TestPrintSortedKeys(t *testing.T) {
 		t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected)
 	}

-	s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2})
+	s = cfg.Sprint(map[testStruct]int{{1}: 1, {3}: 3, {2}: 2})
 	expected = "map[ts.1:1 ts.2:2 ts.3:3]"
 	if s != expected {
 		t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected)
 	}

 	if !spew.UnsafeDisabled {
-		s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2})
+		s = cfg.Sprint(map[testStructP]int{{1}: 1, {3}: 3, {2}: 2})
 		expected = "map[ts.1:1 ts.2:2 ts.3:3]"
 		if s != expected {
 			t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected)
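The map literal changes above rely on Go's elided composite literal keys: when a map key type is itself a composite type, the inner type name may be omitted (this relaxation landed around Go 1.5, if memory serves). A tiny sketch:

package main

import "fmt"

type testStruct struct{ x int }

func main() {
	// Equivalent to map[testStruct]int{testStruct{1}: 1, ...}; the key
	// type is elided inside the literal because it is already known.
	m := map[testStruct]int{{1}: 1, {3}: 3, {2}: 2}
	fmt.Println(len(m), m[testStruct{2}]) // 3 2
}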
5  vendor/github.com/davecgh/go-spew/spew/internal_test.go  generated vendored

@@ -36,10 +36,7 @@ type dummyFmtState struct {
 }

 func (dfs *dummyFmtState) Flag(f int) bool {
-	if f == int('+') {
-		return true
-	}
-	return false
+	return f == int('+')
 }

 func (dfs *dummyFmtState) Precision() (int, bool) {
11  vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go  generated vendored

@@ -16,7 +16,7 @@
 // when the code is not running on Google App Engine, compiled by GopherJS, and
 // "-tags safe" is not added to the go build command line. The "disableunsafe"
 // tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
+// +build !js,!appengine,!safe,!disableunsafe,go1.4

 /*
 This test file is part of the spew package rather than than the spew_test

@@ -30,7 +30,6 @@ import (
 	"bytes"
 	"reflect"
 	"testing"
-	"unsafe"
 )

 // changeKind uses unsafe to intentionally change the kind of a reflect.Value to

@@ -38,13 +37,13 @@ import (
 // fallback code which punts to the standard fmt library for new types that
 // might get added to the language.
 func changeKind(v *reflect.Value, readOnly bool) {
-	rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
-	*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
+	flags := flagField(v)
 	if readOnly {
-		*rvf |= flagRO
+		*flags |= flagRO
 	} else {
-		*rvf &= ^uintptr(flagRO)
+		*flags &^= flagRO
 	}
+	*flags |= flagKindMask
 }

 // TestAddedReflectValue tests functionaly of the dump and formatter code which
26  vendor/github.com/stretchr/testify/.travis.yml  generated vendored

@@ -2,18 +2,14 @@ language: go

 sudo: false

-matrix:
-  include:
-    - go: "1.8.x"
-    - go: "1.9.x"
-    - go: "1.10.x"
-    - go: "1.11.x"
-      env: GO111MODULE=off
-    - go: "1.11.x"
-      env: GO111MODULE=on
-    - go: tip
-script:
-  - ./.travis.gogenerate.sh
-  - ./.travis.gofmt.sh
-  - ./.travis.govet.sh
-  - go test -v -race $(go list ./... | grep -v vendor)
+go:
+  - "1.8"
+  - "1.9"
+  - "1.10"
+  - tip
+
+script:
+  - ./.travis.gogenerate.sh
+  - ./.travis.gofmt.sh
+  - ./.travis.govet.sh
+  - go test -v -race $(go list ./... | grep -v vendor)
35  vendor/github.com/stretchr/testify/LICENSE  generated vendored

@@ -1,21 +1,22 @@
-MIT License
-
-Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
+
+Please consider promoting this project if you find it useful.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of the Software,
+and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
+OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
13  vendor/github.com/stretchr/testify/README.md  generated vendored

@@ -287,10 +287,8 @@ To install Testify, use `go get`:
 This will then make the following packages available to you:

     github.com/stretchr/testify/assert
-    github.com/stretchr/testify/require
     github.com/stretchr/testify/mock
-    github.com/stretchr/testify/suite
-    github.com/stretchr/testify/http (deprecated)
+    github.com/stretchr/testify/http

 Import the `testify/assert` package into your code using this template:

@@ -321,7 +319,7 @@ To update Testify to the latest version, use `go get -u github.com/stretchr/testify`.
 Supported go versions
 ==================

-We support the three major Go versions, which are 1.9, 1.10, and 1.11 at the moment.
+We support the three major Go versions, which are 1.8, 1.9 and 1.10 at the moment.

 ------

@@ -331,10 +329,3 @@ Contributing
 Please feel free to submit issues, fork the repository and send pull requests!

 When submitting an issue, we ask that you please include a complete test function that demonstrates the issue. Extra credit for those using Testify to write the test code that demonstrates it.

-------
-
-License
-=======
-
-This project is licensed under the terms of the MIT license.
32  vendor/github.com/stretchr/testify/assert/assertions.go  generated vendored

@@ -39,7 +39,7 @@ type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool
 // for table driven tests.
 type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool

-// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
+// ValuesAssertionFunc is a common function prototype when validating an error value. Can be useful
 // for table driven tests.
 type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool

@@ -179,11 +179,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
 		return ""
 	}
 	if len(msgAndArgs) == 1 {
-		msg := msgAndArgs[0]
-		if msgAsStr, ok := msg.(string); ok {
-			return msgAsStr
-		}
-		return fmt.Sprintf("%+v", msg)
+		return msgAndArgs[0].(string)
 	}
 	if len(msgAndArgs) > 1 {
 		return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)

@@ -419,17 +415,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 	return Fail(t, "Expected value not to be nil.", msgAndArgs...)
 }

-// containsKind checks if a specified kind in the slice of kinds.
-func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
-	for i := 0; i < len(kinds); i++ {
-		if kind == kinds[i] {
-			return true
-		}
-	}
-
-	return false
-}
-
 // isNil checks if a specified object is nil or not, without Failing.
 func isNil(object interface{}) bool {
 	if object == nil {

@@ -438,14 +423,7 @@ func isNil(object interface{}) bool {

 	value := reflect.ValueOf(object)
 	kind := value.Kind()
-	isNilableKind := containsKind(
-		[]reflect.Kind{
-			reflect.Chan, reflect.Func,
-			reflect.Interface, reflect.Map,
-			reflect.Ptr, reflect.Slice},
-		kind)
-
-	if isNilableKind && value.IsNil() {
+	if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
 		return true
 	}

@@ -1349,7 +1327,7 @@ func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
 }

 // diff returns a diff of both values as long as both are of the same type and
-// are a struct, map, slice, array or string. Otherwise it returns an empty string.
+// are a struct, map, slice or array. Otherwise it returns an empty string.
 func diff(expected interface{}, actual interface{}) string {
 	if expected == nil || actual == nil {
 		return ""

@@ -1367,7 +1345,7 @@ func diff(expected interface{}, actual interface{}) string {
 	}

 	var e, a string
-	if et != reflect.TypeOf("") {
+	if ek != reflect.String {
 		e = spewConfig.Sdump(expected)
 		a = spewConfig.Sdump(actual)
 	} else {
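The messageFromMsgAndArgs change is worth pausing on: the version on the right does a bare type assertion, so a single non-string message argument panics the test helper itself, while the version on the left falls back to %+v formatting. A runnable sketch of the guarded version (the function body is copied from the left side of the diff; only the wrapper around it is illustrative):

package main

import "fmt"

// messageFromMsgAndArgs mirrors the guarded version on the left of the
// diff: a lone non-string argument is formatted with %+v instead of
// panicking on msgAndArgs[0].(string).
func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
	if len(msgAndArgs) == 0 || msgAndArgs == nil {
		return ""
	}
	if len(msgAndArgs) == 1 {
		msg := msgAndArgs[0]
		if msgAsStr, ok := msg.(string); ok {
			return msgAsStr
		}
		return fmt.Sprintf("%+v", msg)
	}
	return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
}

func main() {
	fmt.Println(messageFromMsgAndArgs("plain message"))
	fmt.Println(messageFromMsgAndArgs("hello, %v!", "world"))
	fmt.Println(messageFromMsgAndArgs(struct{ a string }{"hello"})) // {a:hello}
}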
10  vendor/github.com/stretchr/testify/assert/assertions_test.go  generated vendored

@@ -175,8 +175,6 @@ func TestIsType(t *testing.T) {

 }

-type myType string
-
 func TestEqual(t *testing.T) {

 	mockT := new(testing.T)

@@ -202,9 +200,6 @@ func TestEqual(t *testing.T) {
 	if !Equal(mockT, uint64(123), uint64(123)) {
 		t.Error("Equal should return true")
 	}
-	if !Equal(mockT, myType("1"), myType("1")) {
-		t.Error("Equal should return true")
-	}
 	if !Equal(mockT, &struct{}{}, &struct{}{}) {
 		t.Error("Equal should return true (pointer equality is based on equality of underlying value)")
 	}

@@ -212,9 +207,6 @@ func TestEqual(t *testing.T) {
 	if Equal(mockT, m["bar"], "something") {
 		t.Error("Equal should return false")
 	}
-	if Equal(mockT, myType("1"), myType("2")) {
-		t.Error("Equal should return false")
-	}
 }

 // bufferT implements TestingT. Its implementation of Errorf writes the output that would be produced by

@@ -283,8 +275,6 @@ func TestEqualFormatting(t *testing.T) {
 	}{
 		{equalWant: "want", equalGot: "got", want: "\tassertions.go:\\d+: \n\t+Error Trace:\t\n\t+Error:\\s+Not equal:\\s+\n\\s+expected: \"want\"\n\\s+actual\\s+: \"got\"\n\\s+Diff:\n\\s+-+ Expected\n\\s+\\++ Actual\n\\s+@@ -1 \\+1 @@\n\\s+-want\n\\s+\\+got\n"},
 		{equalWant: "want", equalGot: "got", msgAndArgs: []interface{}{"hello, %v!", "world"}, want: "\tassertions.go:[0-9]+: \n\t+Error Trace:\t\n\t+Error:\\s+Not equal:\\s+\n\\s+expected: \"want\"\n\\s+actual\\s+: \"got\"\n\\s+Diff:\n\\s+-+ Expected\n\\s+\\++ Actual\n\\s+@@ -1 \\+1 @@\n\\s+-want\n\\s+\\+got\n\\s+Messages:\\s+hello, world!\n"},
-		{equalWant: "want", equalGot: "got", msgAndArgs: []interface{}{123}, want: "\tassertions.go:[0-9]+: \n\t+Error Trace:\t\n\t+Error:\\s+Not equal:\\s+\n\\s+expected: \"want\"\n\\s+actual\\s+: \"got\"\n\\s+Diff:\n\\s+-+ Expected\n\\s+\\++ Actual\n\\s+@@ -1 \\+1 @@\n\\s+-want\n\\s+\\+got\n\\s+Messages:\\s+123\n"},
-		{equalWant: "want", equalGot: "got", msgAndArgs: []interface{}{struct{ a string }{"hello"}}, want: "\tassertions.go:[0-9]+: \n\t+Error Trace:\t\n\t+Error:\\s+Not equal:\\s+\n\\s+expected: \"want\"\n\\s+actual\\s+: \"got\"\n\\s+Diff:\n\\s+-+ Expected\n\\s+\\++ Actual\n\\s+@@ -1 \\+1 @@\n\\s+-want\n\\s+\\+got\n\\s+Messages:\\s+{a:hello}\n"},
 	} {
 		mockT := &bufferT{}
 		Equal(mockT, currCase.equalWant, currCase.equalGot, currCase.msgAndArgs...)
7  vendor/github.com/stretchr/testify/go.mod  generated vendored

@@ -1,7 +0,0 @@
-module github.com/stretchr/testify
-
-require (
-	github.com/davecgh/go-spew v1.1.0
-	github.com/pmezard/go-difflib v1.0.0
-	github.com/stretchr/objx v0.1.0
-)
6  vendor/github.com/stretchr/testify/go.sum  generated vendored

@@ -1,6 +0,0 @@
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
3  vendor/github.com/stretchr/testify/mock/mock.go  generated vendored

@@ -176,7 +176,6 @@ func (c *Call) Maybe() *Call {
 //	Mock.
 //		On("MyMethod", 1).Return(nil).
 //		On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error"))
-//go:noinline
 func (c *Call) On(methodName string, arguments ...interface{}) *Call {
 	return c.Parent.On(methodName, arguments...)
 }

@@ -692,7 +691,7 @@ func (args Arguments) Diff(objects []interface{}) (string, int) {
 			output = fmt.Sprintf("%s\t%d: PASS: %s matched by %s\n", output, i, actualFmt, matcher)
 		} else {
 			differences++
-			output = fmt.Sprintf("%s\t%d: FAIL: %s not matched by %s\n", output, i, actualFmt, matcher)
+			output = fmt.Sprintf("%s\t%d: PASS: %s not matched by %s\n", output, i, actualFmt, matcher)
 		}
 	} else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() {
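The one-word Arguments.Diff change fixes a mislabeled line: when a custom matcher rejects an argument it is a FAIL, yet the right-hand version prints PASS. The "matcher" in that output refers to mock.MatchedBy predicates. A small sketch of that path, with hypothetical type and method names (Service and Save are illustrative, not from this repo):

package demo

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

type Service struct{ mock.Mock }

func (s *Service) Save(id int) error {
	args := s.Called(id)
	return args.Error(0)
}

func TestMatcher(t *testing.T) {
	s := new(Service)
	// MatchedBy builds the matcher that Arguments.Diff reports on:
	// the expectation only matches when the predicate returns true.
	s.On("Save", mock.MatchedBy(func(id int) bool { return id > 0 })).Return(nil)

	s.Save(42) // predicate accepts 42, so the Diff line reads PASS
	s.AssertExpectations(t)
}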
2  vendor/github.com/stretchr/testify/mock/mock_test.go  generated vendored

@@ -32,7 +32,6 @@ func (i *TestExampleImplementation) TheExampleMethod(a, b, c int) (int, error) {
 	return args.Int(0), errors.New("Whoops")
 }

-//go:noinline
 func (i *TestExampleImplementation) TheExampleMethod2(yesorno bool) {
 	i.Called(yesorno)
 }

@@ -1493,7 +1492,6 @@ func unexpectedCallRegex(method, calledArg, expectedArg, diff string) string {
 		rMethod, calledArg, rMethod, expectedArg, diff)
 }

-//go:noinline
 func ConcurrencyTestMethod(m *Mock) {
 	m.Called()
 }
2  vendor/github.com/stretchr/testify/require/requirements.go  generated vendored

@@ -22,7 +22,7 @@ type ValueAssertionFunc func(TestingT, interface{}, ...interface{})
 // for table driven tests.
 type BoolAssertionFunc func(TestingT, bool, ...interface{})

-// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
+// ValuesAssertionFunc is a common function prototype when validating an error value. Can be useful
 // for table driven tests.
 type ErrorAssertionFunc func(TestingT, error, ...interface{})
24  vendor/github.com/stretchr/testify/suite/suite.go  generated vendored

@@ -55,32 +55,10 @@ func (suite *Suite) Assert() *assert.Assertions {
 	return suite.Assertions
 }

-func failOnPanic(t *testing.T) {
-	r := recover()
-	if r != nil {
-		t.Errorf("test panicked: %v", r)
-		t.FailNow()
-	}
-}
-
-// Run provides suite functionality around golang subtests. It should be
-// called in place of t.Run(name, func(t *testing.T)) in test suite code.
-// The passed-in func will be executed as a subtest with a fresh instance of t.
-// Provides compatibility with go test pkg -run TestSuite/TestName/SubTestName.
-func (suite *Suite) Run(name string, subtest func()) bool {
-	oldT := suite.T()
-	defer suite.SetT(oldT)
-	return oldT.Run(name, func(t *testing.T) {
-		suite.SetT(t)
-		subtest()
-	})
-}
-
 // Run takes a testing suite and runs all of the tests attached
 // to it.
 func Run(t *testing.T, suite TestingSuite) {
 	suite.SetT(t)
-	defer failOnPanic(t)

 	if setupAllSuite, ok := suite.(SetupAllSuite); ok {
 		setupAllSuite.SetupSuite()

@@ -106,8 +84,6 @@ func Run(t *testing.T, suite TestingSuite) {
 		F: func(t *testing.T) {
 			parentT := suite.T()
 			suite.SetT(t)
-			defer failOnPanic(t)

 			if setupTestSuite, ok := suite.(SetupTestSuite); ok {
 				setupTestSuite.SetupTest()
 			}
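The left side of this hunk (the testify version being dropped here) carries two features: failOnPanic, which converts a panic anywhere in the suite lifecycle into a test failure instead of aborting the whole run, and Suite.Run, which makes suite methods able to spawn real `go test` subtests. A sketch of how Suite.Run is called from a suite method, assuming a testify version that includes it; the suite and method names are illustrative:

package demo

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

type ExampleSuite struct {
	suite.Suite
}

func (s *ExampleSuite) TestWithSubtests() {
	for _, name := range []string{"case1", "case2"} {
		s.Run(name, func() {
			// Inside the subtest, s.T() returns the subtest's own
			// *testing.T; the parent T is restored when it returns.
			// Each case is addressable as, e.g.,
			// -run TestExampleSuite/TestWithSubtests/case1.
			s.NotNil(s.T())
		})
	}
}

func TestExampleSuite(t *testing.T) {
	suite.Run(t, new(ExampleSuite))
}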
134  vendor/github.com/stretchr/testify/suite/suite_test.go  generated vendored

@@ -42,99 +42,6 @@ func (s *SuiteRequireTwice) TestRequireTwo() {
 	r.Equal(1, 2)
 }

-type panickingSuite struct {
-	Suite
-	panicInSetupSuite    bool
-	panicInSetupTest     bool
-	panicInBeforeTest    bool
-	panicInTest          bool
-	panicInAfterTest     bool
-	panicInTearDownTest  bool
-	panicInTearDownSuite bool
-}
-
-func (s *panickingSuite) SetupSuite() {
-	if s.panicInSetupSuite {
-		panic("oops in setup suite")
-	}
-}
-
-func (s *panickingSuite) SetupTest() {
-	if s.panicInSetupTest {
-		panic("oops in setup test")
-	}
-}
-
-func (s *panickingSuite) BeforeTest(_, _ string) {
-	if s.panicInBeforeTest {
-		panic("oops in before test")
-	}
-}
-
-func (s *panickingSuite) Test() {
-	if s.panicInTest {
-		panic("oops in test")
-	}
-}
-
-func (s *panickingSuite) AfterTest(_, _ string) {
-	if s.panicInAfterTest {
-		panic("oops in after test")
-	}
-}
-
-func (s *panickingSuite) TearDownTest() {
-	if s.panicInTearDownTest {
-		panic("oops in tear down test")
-	}
-}
-
-func (s *panickingSuite) TearDownSuite() {
-	if s.panicInTearDownSuite {
-		panic("oops in tear down suite")
-	}
-}
-
-func TestSuiteRecoverPanic(t *testing.T) {
-	ok := true
-	panickingTests := []testing.InternalTest{
-		{
-			Name: "TestPanicInSetupSuite",
-			F:    func(t *testing.T) { Run(t, &panickingSuite{panicInSetupSuite: true}) },
-		},
-		{
-			Name: "TestPanicInSetupTest",
-			F:    func(t *testing.T) { Run(t, &panickingSuite{panicInSetupTest: true}) },
-		},
-		{
-			Name: "TestPanicInBeforeTest",
-			F:    func(t *testing.T) { Run(t, &panickingSuite{panicInBeforeTest: true}) },
-		},
-		{
-			Name: "TestPanicInTest",
-			F:    func(t *testing.T) { Run(t, &panickingSuite{panicInTest: true}) },
-		},
-		{
-			Name: "TestPanicInAfterTest",
-			F:    func(t *testing.T) { Run(t, &panickingSuite{panicInAfterTest: true}) },
-		},
-		{
-			Name: "TestPanicInTearDownTest",
-			F:    func(t *testing.T) { Run(t, &panickingSuite{panicInTearDownTest: true}) },
-		},
-		{
-			Name: "TestPanicInTearDownSuite",
-			F:    func(t *testing.T) { Run(t, &panickingSuite{panicInTearDownSuite: true}) },
-		},
-	}
-
-	require.NotPanics(t, func() {
-		ok = testing.RunTests(allTestsFilter, panickingTests)
-	})
-
-	assert.False(t, ok)
-}
-
 // This suite is intended to store values to make sure that only
 // testing-suite-related methods are run. It's also a fully
 // functional example of a testing suite, using setup/teardown methods

@@ -152,7 +59,6 @@ type SuiteTester struct {
 	TearDownTestRunCount  int
 	TestOneRunCount       int
 	TestTwoRunCount       int
-	TestSubtestRunCount   int
 	NonTestMethodRunCount int

 	SuiteNameBefore []string

@@ -247,27 +153,6 @@ func (suite *SuiteTester) NonTestMethod() {
 	suite.NonTestMethodRunCount++
 }

-func (suite *SuiteTester) TestSubtest() {
-	suite.TestSubtestRunCount++
-
-	for _, t := range []struct {
-		testName string
-	}{
-		{"first"},
-		{"second"},
-	} {
-		suiteT := suite.T()
-		suite.Run(t.testName, func() {
-			// We should get a different *testing.T for subtests, so that
-			// go test recognizes them as proper subtests for output formatting
-			// and running individual subtests
-			subTestT := suite.T()
-			suite.NotEqual(subTestT, suiteT)
-		})
-		suite.Equal(suiteT, suite.T())
-	}
-}
-
 // TestRunSuite will be run by the 'go test' command, so within it, we
 // can run our suite using the Run(*testing.T, TestingSuite) function.
 func TestRunSuite(t *testing.T) {

@@ -283,20 +168,18 @@ func TestRunSuite(t *testing.T) {
 	assert.Equal(t, suiteTester.SetupSuiteRunCount, 1)
 	assert.Equal(t, suiteTester.TearDownSuiteRunCount, 1)

-	assert.Equal(t, len(suiteTester.SuiteNameAfter), 4)
-	assert.Equal(t, len(suiteTester.SuiteNameBefore), 4)
-	assert.Equal(t, len(suiteTester.TestNameAfter), 4)
-	assert.Equal(t, len(suiteTester.TestNameBefore), 4)
+	assert.Equal(t, len(suiteTester.SuiteNameAfter), 3)
+	assert.Equal(t, len(suiteTester.SuiteNameBefore), 3)
+	assert.Equal(t, len(suiteTester.TestNameAfter), 3)
+	assert.Equal(t, len(suiteTester.TestNameBefore), 3)

 	assert.Contains(t, suiteTester.TestNameAfter, "TestOne")
 	assert.Contains(t, suiteTester.TestNameAfter, "TestTwo")
 	assert.Contains(t, suiteTester.TestNameAfter, "TestSkip")
-	assert.Contains(t, suiteTester.TestNameAfter, "TestSubtest")

 	assert.Contains(t, suiteTester.TestNameBefore, "TestOne")
 	assert.Contains(t, suiteTester.TestNameBefore, "TestTwo")
 	assert.Contains(t, suiteTester.TestNameBefore, "TestSkip")
-	assert.Contains(t, suiteTester.TestNameBefore, "TestSubtest")

 	for _, suiteName := range suiteTester.SuiteNameAfter {
 		assert.Equal(t, "SuiteTester", suiteName)

@@ -314,16 +197,15 @@ func TestRunSuite(t *testing.T) {
 		assert.False(t, when.IsZero())
 	}

-	// There are four test methods (TestOne, TestTwo, TestSkip, and TestSubtest), so
+	// There are three test methods (TestOne, TestTwo, and TestSkip), so
 	// the SetupTest and TearDownTest methods (which should be run once for
-	// each test) should have been run four times.
-	assert.Equal(t, suiteTester.SetupTestRunCount, 4)
-	assert.Equal(t, suiteTester.TearDownTestRunCount, 4)
+	// each test) should have been run three times.
+	assert.Equal(t, suiteTester.SetupTestRunCount, 3)
+	assert.Equal(t, suiteTester.TearDownTestRunCount, 3)

 	// Each test should have been run once.
 	assert.Equal(t, suiteTester.TestOneRunCount, 1)
 	assert.Equal(t, suiteTester.TestTwoRunCount, 1)
-	assert.Equal(t, suiteTester.TestSubtestRunCount, 1)

 	// Methods that don't match the test method identifier shouldn't
 	// have been run at all.
42  zk.go

@@ -24,14 +24,14 @@ import (
 	"github.com/samuel/go-zookeeper/zk"
 )

-type endpoint struct {
+type Endpoint struct {
 	Host string `json:"host"`
 	Port int    `json:"port"`
 }

-type serviceInstance struct {
-	Service             endpoint            `json:"serviceEndpoint"`
-	AdditionalEndpoints map[string]endpoint `json:"additionalEndpoints"`
+type ServiceInstance struct {
+	Service             Endpoint            `json:"serviceEndpoint"`
+	AdditionalEndpoints map[string]Endpoint `json:"additionalEndpoints"`
 	Status              string              `json:"status"`
 }

@@ -40,54 +40,47 @@ type zkConfig struct {
 	path      string
 	backoff   Backoff
 	timeout   time.Duration
-	logger    logger
+	logger    Logger
 }

-// ZKOpt - Configuration option for the Zookeeper client used.
 type ZKOpt func(z *zkConfig)

-// ZKEndpoints - Endpoints on which a Zookeeper instance is running to be used by the client.
 func ZKEndpoints(endpoints ...string) ZKOpt {
 	return func(z *zkConfig) {
 		z.endpoints = endpoints
 	}
 }

-// ZKPath - Path to look for information in when connected to Zookeeper.
 func ZKPath(path string) ZKOpt {
 	return func(z *zkConfig) {
 		z.path = path
 	}
 }

-// ZKBackoff - Configuration for Retry mechanism used when connecting to Zookeeper.
-// TODO(rdelvalle): Determine if this is really necessary as the ZK library already has a retry built in.
 func ZKBackoff(b Backoff) ZKOpt {
 	return func(z *zkConfig) {
 		z.backoff = b
 	}
 }

-// ZKTimeout - How long to wait on a response from the Zookeeper instance before considering it dead.
 func ZKTimeout(d time.Duration) ZKOpt {
 	return func(z *zkConfig) {
 		z.timeout = d
 	}
 }

-// ZKLogger - Attach a logger to the Zookeeper client in order to debug issues.
-func ZKLogger(l logger) ZKOpt {
+func ZKLogger(l Logger) ZKOpt {
 	return func(z *zkConfig) {
 		z.logger = l
 	}
 }

-// LeaderFromZK - Retrieves current Aurora leader from ZK.
+// Retrieves current Aurora leader from ZK.
 func LeaderFromZK(cluster Cluster) (string, error) {
 	return LeaderFromZKOpts(ZKEndpoints(strings.Split(cluster.ZK, ",")...), ZKPath(cluster.SchedZKPath))
 }

-// LeaderFromZKOpts - Retrieves current Aurora leader from ZK with a custom configuration.
+// Retrieves current Aurora leader from ZK with a custom configuration.
 func LeaderFromZKOpts(options ...ZKOpt) (string, error) {
 	var leaderURL string

@@ -110,7 +103,7 @@ func LeaderFromZKOpts(options ...ZKOpt) (string, error) {

 	c, _, err := zk.Connect(config.endpoints, config.timeout, func(c *zk.Conn) { c.SetLogger(config.logger) })
 	if err != nil {
-		return false, NewTemporaryError(errors.Wrap(err, "failed to connect to Zookeeper"))
+		return false, NewTemporaryError(errors.Wrap(err, "Failed to connect to Zookeeper"))
 	}

 	defer c.Close()

@@ -124,10 +117,12 @@ func LeaderFromZKOpts(options ...ZKOpt) (string, error) {
 			return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", config.path)
 		}

-		return false, NewTemporaryError(errors.Wrapf(err, "path %s doesn't exist on Zookeeper ", config.path))
+		return false,
+			NewTemporaryError(errors.Wrapf(err, "path %s doesn't exist on Zookeeper ", config.path))
 	}

 	// Search for the leader through all the children in the given path
+	serviceInst := new(ServiceInstance)
 	for _, child := range children {

 		// Only the leader will start with member_

@@ -140,13 +135,13 @@ func LeaderFromZKOpts(options ...ZKOpt) (string, error) {
 			return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", childPath)
 		}

-		return false, NewTemporaryError(errors.Wrap(err, "unable to fetch contents of leader"))
+		return false, NewTemporaryError(errors.Wrap(err, "error fetching contents of leader"))
 	}

-	var serviceInst serviceInstance
-	err = json.Unmarshal([]byte(data), &serviceInst)
+	err = json.Unmarshal([]byte(data), serviceInst)
 	if err != nil {
-		return false, NewTemporaryError(errors.Wrap(err, "unable to unmarshal contents of leader"))
+		return false,
+			NewTemporaryError(errors.Wrap(err, "unable to unmarshal contents of leader"))
 	}

 	// Should only be one endpoint.

@@ -154,7 +149,8 @@ func LeaderFromZKOpts(options ...ZKOpt) (string, error) {
 	// writing bad info into Zookeeper but is kept here as a safety net.
 	if len(serviceInst.AdditionalEndpoints) > 1 {
 		return false,
-			NewTemporaryError(errors.New("ambiguous endpoints in json blob, Aurora wrote bad info to ZK"))
+			NewTemporaryError(
+				errors.New("ambiguous endpoints in json blob, Aurora wrote bad info to ZK"))
 	}

 	var scheme, host, port string

@@ -174,7 +170,7 @@ func LeaderFromZKOpts(options ...ZKOpt) (string, error) {
 	})

 	if retryErr != nil {
-		config.logger.Printf("failed to determine leader after %v attempts", config.backoff.Steps)
+		config.logger.Printf("Failed to determine leader after %v attempts", config.backoff.Steps)
 		return "", retryErr
 	}
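zk.go is built around the functional options pattern: each ZK* function returns a closure that mutates the private zkConfig, so callers compose only the settings they care about. A usage sketch for the leader lookup, using the v1 import path from the left side of the diff; the endpoints and ZK path are hypothetical placeholders for your own ensemble and the Aurora scheduler's node:

package main

import (
	"fmt"
	"time"

	realis "github.com/paypal/gorealis"
)

func main() {
	// Compose only the options we need; anything omitted keeps its default.
	leader, err := realis.LeaderFromZKOpts(
		realis.ZKEndpoints("192.168.33.2:2181", "192.168.33.3:2181"),
		realis.ZKPath("/aurora/scheduler"),
		realis.ZKTimeout(5*time.Second),
		realis.ZKBackoff(realis.Backoff{Steps: 3, Duration: time.Second, Factor: 2.0, Jitter: 0.1}),
	)
	if err != nil {
		fmt.Println("leader lookup failed:", err)
		return
	}
	fmt.Println("current Aurora leader:", leader)
}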
@@ -20,16 +20,15 @@ import (
 	"testing"
 	"time"

-	realis "github.com/paypal/gorealis"
+	realis "github.com/paypal/gorealis/v2"
 	"github.com/stretchr/testify/assert"
 )

-var backoff = realis.Backoff{ // Reduce penalties for this test to make it quick
+var backoff realis.Backoff = realis.Backoff{ // Reduce penalties for this test to make it quick
 	Steps:    5,
 	Duration: 1 * time.Second,
 	Factor:   1.0,
-	Jitter:   0.1,
-}
+	Jitter:   0.1}

 // Test for behavior when no endpoints are given to the ZK leader finding function.
 func TestZKNoEndpoints(t *testing.T) {