Compare commits
208 commits
Author | SHA1 | Date | |
---|---|---|---|
|
db10285368 | ||
|
8454a6ebf3 | ||
|
c318042e96 | ||
|
db9bebb802 | ||
|
fff2c16751 | ||
|
c59d01ab51 | ||
|
62df98a3c8 | ||
|
5c39a23eb2 | ||
|
dbc396b0db | ||
|
86eb045808 | ||
|
c7e309f421 | ||
|
49877b7d41 | ||
|
82b40a53f0 | ||
|
a9d99067ee | ||
|
e7f9c0cba9 | ||
|
fbaf218dfb | ||
|
6a1cf25c87 | ||
|
4aaec87d32 | ||
|
895d810b6c | ||
|
511e76a0ac | ||
|
8be66da88a | ||
|
6d20f347f7 | ||
|
74b12c36b1 | ||
|
269e0208c1 | ||
|
4acb0d54a9 | ||
|
5f667555dc | ||
|
2b6025e67d | ||
|
5ec22fab98 | ||
|
f196aa9ed7 | ||
|
bb5408f5e2 | ||
|
ea8e48f3b8 | ||
|
3dc3b09a8e | ||
|
3fa2a20fe4 | ||
|
c6a2a23ddb | ||
|
9da3b96b1f | ||
|
976dc26dcc | ||
|
fe692040aa | ||
|
0b2dd44d94 | ||
|
df8fc2fba1 | ||
|
6dc4bf93b9 | ||
|
4ffb509939 | ||
|
1a15c4a5aa | ||
|
e16e390afe | ||
|
f7bd7cc20f | ||
|
c997b90720 | ||
|
773d842b03 | ||
|
1f459dd56a | ||
|
79fa7ba16d | ||
|
2b7eb3a852 | ||
|
10c620de7b | ||
|
1d3854aa5f | ||
|
73e7ab2671 | ||
|
22b1d82d88 | ||
|
2f7015571c | ||
|
296af622d1 | ||
|
9a835631b2 | ||
|
b100158080 | ||
|
45a4416830 | ||
|
2eaa60f681 | ||
|
a09a18ea3b | ||
|
6762c1784b | ||
|
fa5133c13d | ||
|
5de913493c | ||
|
2306d6180f | ||
|
231793df71 | ||
|
e0f33ab60e | ||
|
9dcb7a8969 | ||
|
4395c2ae1a | ||
|
70252ffacf | ||
|
4963bbb922 | ||
|
149d03988c | ||
|
037c636d6d | ||
|
9ebf118e71 | ||
|
e85781e6d4 | ||
|
5099d7e6ec | ||
|
0f2ece10ac | ||
|
ad0da8c867 | ||
|
48318e026c | ||
|
98d2fa2dd7 | ||
|
1c2b1c5079 | ||
|
0e4a0d726b | ||
|
fe567ee966 | ||
|
6c8ab10b64 | ||
|
8ca953f925 | ||
|
800efccb31 | ||
|
5d12029227 | ||
|
4f6a5e9741 | ||
|
e6b204b9da | ||
|
d03a7b61e4 | ||
|
4f5766b443 | ||
|
c0d2969976 | ||
|
66809c55f7 | ||
|
acc54c1015 | ||
|
0bb23cec71 | ||
|
3d62df1684 | ||
|
dc327bebad | ||
|
a43dc81ea8 | ||
|
64948c3712 | ||
|
a6b077d1fd | ||
|
8bd3957247 | ||
|
dbb08ded90 | ||
|
a941bcb679 | ||
|
b2ffb73183 | ||
|
1c426dd363 | ||
|
8d445c1c77 | ||
|
9631aa3aab | ||
|
ff545e8aa6 | ||
|
c338c03355 | ||
|
d4027bc95c | ||
|
dd804af0a8 | ||
|
06cfa214ec | ||
|
e614e04f27 | ||
|
72b746e431 | ||
|
23430cbf30 | ||
|
a1350c6d55 | ||
|
8a4a9bdb8c | ||
|
65398fdfd6 | ||
|
bd008dbb39 | ||
|
1fd07b5007 | ||
|
fa7833a749 | ||
|
922e8d6b5a | ||
|
3111b358fc | ||
|
430764f025 | ||
|
7db2395df1 | ||
|
bf354bcc0a | ||
|
8334dde12f | ||
|
dc6848f804 | ||
|
c03e8bf79c | ||
|
8fe3780949 | ||
|
f59940f9a7 | ||
|
f59f0bbdc3 | ||
|
0d3126c468 | ||
|
d9f4086853 | ||
|
f301affdd0 | ||
|
13cc103faa | ||
|
ef49df747f | ||
|
4bc0a694ae | ||
|
dcab5e698f | ||
|
7e578f80bd | ||
|
211319238e | ||
|
0524238605 | ||
|
40fcbb3d08 | ||
|
719a121ed5 | ||
|
14483e13a5 | ||
|
99ea6aa094 | ||
|
68e64682d7 | ||
|
8c29330e06 | ||
|
dd334d3a08 | ||
|
900f054633 | ||
|
e12370ffbb | ||
|
68d5b9c8fc | ||
|
89337f9c7f | ||
|
d08c0c637e | ||
|
0c6fc794ed | ||
|
e57dc98d65 | ||
|
ff8f10a004 | ||
|
48ca520eaa | ||
|
64dcf55be4 | ||
|
b10df0603e | ||
|
148b7baf4c | ||
|
e6e4742f64 | ||
|
d27d8a4706 | ||
|
cb6100e690 | ||
|
8f505815d5 | ||
|
764cfac099 | ||
|
9c4c2fce44 | ||
|
0d9b869583 | ||
|
3add32a585 | ||
|
d97e59b9e6 | ||
|
1b475175db | ||
|
f77379dd63 | ||
|
cc739f96aa | ||
|
58088a139c | ||
|
40781fab82 | ||
|
811169a266 | ||
|
97fbec9eaf | ||
|
51699eda4e | ||
|
d7efa913b1 | ||
|
ac40251f00 | ||
|
58c560061f | ||
|
10c12d5a13 | ||
|
75c87f34b3 | ||
|
5f155f4337 | ||
|
2c7dd3468f | ||
|
05a8c838db | ||
|
f4fb52c237 | ||
|
da0f181b96 | ||
|
3b10c10dd1 | ||
|
841ef09bbb | ||
|
8905233375 | ||
|
a715282c00 | ||
|
4c3bbc5079 | ||
|
3bf2e8a831 | ||
|
b3e55be98b | ||
|
76b404e087 | ||
|
66a2868aab | ||
|
c83e5d268a | ||
|
8f524eeec5 | ||
|
a8fa05869c | ||
|
087a9e5fba | ||
|
9a6051c089 | ||
|
15c2472ffd | ||
|
ca8b9359cf | ||
|
97f9c05026 | ||
|
3fd957fe5c | ||
|
a8522cbe63 | ||
|
3a78e32e27 | ||
|
4408aefaad |
228 changed files with 69267 additions and 33932 deletions
|
@ -1 +1 @@
|
||||||
0.16.0-SNAPSHOT
|
0.23.0
|
||||||
|
|
3
.gitattributes
vendored
Normal file
3
.gitattributes
vendored
Normal file
|
@ -0,0 +1,3 @@
|
||||||
|
gen-go/ linguist-generated=true
|
||||||
|
vendor/ linguist-generated=true
|
||||||
|
Gopkg.lock linguist-generated=true
|
25
.github/main.yml
vendored
Normal file
25
.github/main.yml
vendored
Normal file
|
@ -0,0 +1,25 @@
|
||||||
|
name: CI
|
||||||
|
|
||||||
|
on: [push]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
- name: Setup Go for use with actions
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: 1.16
|
||||||
|
- name: Install goimports
|
||||||
|
run: go get golang.org/x/tools/cmd/goimports
|
||||||
|
- name: Set env with list of directories in repo containin go code
|
||||||
|
run: echo GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/") >> $GITHUB_ENV
|
||||||
|
- name: Run goimports check
|
||||||
|
run: test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
|
||||||
|
- name: Create aurora/mesos docker cluster
|
||||||
|
run: docker-compose up -d
|
||||||
|
- name: Run tests
|
||||||
|
run: go test -timeout 35m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis
|
57
.github/workflows/codeql-analysis.yml
vendored
Normal file
57
.github/workflows/codeql-analysis.yml
vendored
Normal file
|
@ -0,0 +1,57 @@
|
||||||
|
# For most projects, this workflow file will not need changing; you simply need
|
||||||
|
# to commit it to your repository.
|
||||||
|
#
|
||||||
|
# You may wish to alter this file to override the set of languages analyzed,
|
||||||
|
# or to provide custom queries or build logic.
|
||||||
|
#
|
||||||
|
# ******** NOTE ********
|
||||||
|
# We have attempted to detect the languages in your repository. Please check
|
||||||
|
# the `language` matrix defined below to confirm you have the correct set of
|
||||||
|
# supported CodeQL languages.
|
||||||
|
#
|
||||||
|
name: "CodeQL"
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ main ]
|
||||||
|
pull_request:
|
||||||
|
# The branches below must be a subset of the branches above
|
||||||
|
branches: [ main ]
|
||||||
|
schedule:
|
||||||
|
- cron: '34 4 * * 3'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
analyze:
|
||||||
|
name: Analyze
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
actions: read
|
||||||
|
contents: read
|
||||||
|
security-events: write
|
||||||
|
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
language: [ 'go' ]
|
||||||
|
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
|
||||||
|
# Learn more:
|
||||||
|
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
# Initializes the CodeQL tools for scanning.
|
||||||
|
- name: Initialize CodeQL
|
||||||
|
uses: github/codeql-action/init@v1
|
||||||
|
with:
|
||||||
|
languages: ${{ matrix.language }}
|
||||||
|
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||||
|
# By default, queries listed here will override any specified in a config file.
|
||||||
|
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||||
|
# queries: ./path/to/local/query, your-org/your-repo/queries@main
|
||||||
|
|
||||||
|
- run: go build examples/client.go
|
||||||
|
|
||||||
|
- name: Perform CodeQL Analysis
|
||||||
|
uses: github/codeql-action/analyze@v1
|
30
.github/workflows/main.yml
vendored
Normal file
30
.github/workflows/main.yml
vendored
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
name: CI
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v2
|
||||||
|
- name: Setup Go for use with actions
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: 1.16
|
||||||
|
- name: Install goimports
|
||||||
|
run: go get golang.org/x/tools/cmd/goimports
|
||||||
|
- name: Set env with list of directories in repo containin go code
|
||||||
|
run: echo GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/") >> $GITHUB_ENV
|
||||||
|
- name: Run goimports check
|
||||||
|
run: test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
|
||||||
|
- name: Create aurora/mesos docker cluster
|
||||||
|
run: docker-compose up -d
|
||||||
|
- name: Run tests
|
||||||
|
run: go test -timeout 35m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis
|
12
.gitignore
vendored
12
.gitignore
vendored
|
@ -6,6 +6,18 @@
|
||||||
# Folders
|
# Folders
|
||||||
_obj
|
_obj
|
||||||
_test
|
_test
|
||||||
|
.idea
|
||||||
|
|
||||||
|
# Thrift library comes with a lot of other files we don't need.
|
||||||
|
# Ignore everything but the files we do need
|
||||||
|
vendor/github.com/apache/thrift/*
|
||||||
|
!vendor/github.com/apache/thrift/lib/
|
||||||
|
|
||||||
|
vendor/github.com/apache/thrift/lib/*
|
||||||
|
!vendor/github.com/apache/thrift/lib/go/
|
||||||
|
|
||||||
|
vendor/github.com/apache/thrift/lib/go/*
|
||||||
|
!vendor/github.com/apache/thrift/lib/go/thrift/
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
# Architecture specific extensions/prefixes
|
||||||
*.[568vq]
|
*.[568vq]
|
||||||
|
|
71
.golangci.yml
Normal file
71
.golangci.yml
Normal file
|
@ -0,0 +1,71 @@
|
||||||
|
# This file contains all available configuration options
|
||||||
|
# with their default values.
|
||||||
|
|
||||||
|
# options for analysis running
|
||||||
|
run:
|
||||||
|
# default concurrency is a available CPU number
|
||||||
|
concurrency: 4
|
||||||
|
|
||||||
|
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||||
|
deadline: 1m
|
||||||
|
|
||||||
|
# exit code when at least one issue was found, default is 1
|
||||||
|
issues-exit-code: 1
|
||||||
|
|
||||||
|
# include test files or not, default is true
|
||||||
|
tests: true
|
||||||
|
|
||||||
|
skip-dirs:
|
||||||
|
- gen-go/
|
||||||
|
|
||||||
|
# output configuration options
|
||||||
|
output:
|
||||||
|
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
|
||||||
|
format: colored-line-number
|
||||||
|
|
||||||
|
# print lines of code with issue, default is true
|
||||||
|
print-issued-lines: true
|
||||||
|
|
||||||
|
# print linter name in the end of issue text, default is true
|
||||||
|
print-linter-name: true
|
||||||
|
|
||||||
|
|
||||||
|
# all available settings of specific linters
|
||||||
|
linters-settings:
|
||||||
|
errcheck:
|
||||||
|
# report about not checking of errors in type assetions: `a := b.(MyStruct)`;
|
||||||
|
# default is false: such cases aren't reported by default.
|
||||||
|
check-type-assertions: true
|
||||||
|
|
||||||
|
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
||||||
|
# default is false: such cases aren't reported by default.
|
||||||
|
check-blank: true
|
||||||
|
govet:
|
||||||
|
# report about shadowed variables
|
||||||
|
check-shadowing: true
|
||||||
|
goconst:
|
||||||
|
# minimal length of string constant, 3 by default
|
||||||
|
min-len: 3
|
||||||
|
# minimal occurrences count to trigger, 3 by default
|
||||||
|
min-occurrences: 2
|
||||||
|
misspell:
|
||||||
|
# Correct spellings using locale preferences for US or UK.
|
||||||
|
# Default is to use a neutral variety of English.
|
||||||
|
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
|
||||||
|
locale: US
|
||||||
|
lll:
|
||||||
|
# max line length, lines longer will be reported. Default is 120.
|
||||||
|
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
|
||||||
|
line-length: 120
|
||||||
|
# tab width in spaces. Default to 1.
|
||||||
|
tab-width: 4
|
||||||
|
|
||||||
|
linters:
|
||||||
|
enable:
|
||||||
|
- govet
|
||||||
|
- goimports
|
||||||
|
- golint
|
||||||
|
- lll
|
||||||
|
- goconst
|
||||||
|
enable-all: false
|
||||||
|
fast: false
|
62
CHANGELOG.md
Normal file
62
CHANGELOG.md
Normal file
|
@ -0,0 +1,62 @@
|
||||||
|
1.25.1 (unreleased)
|
||||||
|
|
||||||
|
1.25.0
|
||||||
|
|
||||||
|
* Add priority api
|
||||||
|
|
||||||
|
1.24.0
|
||||||
|
|
||||||
|
* enable default sla for slaDrain
|
||||||
|
* Changes Travis CI badge to Github Actions badge
|
||||||
|
* Bug fix for auto paused update monitor
|
||||||
|
* Adds support for running CI on github actions
|
||||||
|
|
||||||
|
1.23.0
|
||||||
|
|
||||||
|
* First release tested against Aurora Scheduler 0.23.0
|
||||||
|
|
||||||
|
1.22.5
|
||||||
|
|
||||||
|
* Upgrading to thrift 0.14.0
|
||||||
|
|
||||||
|
1.22.4
|
||||||
|
|
||||||
|
* Updates which result in a no-op now return a response value so that the caller may analyze it to determine what happened
|
||||||
|
|
||||||
|
1.22.3
|
||||||
|
|
||||||
|
* Contains a monitor timeout fix. Previously an error was being left unchecked which made a specific monitor timining out not be handled properly.
|
||||||
|
|
||||||
|
1.22.2
|
||||||
|
|
||||||
|
* Bug fix: Change in retry mechanism created a deadlock. This release reverts that particular change.
|
||||||
|
|
||||||
|
1.22.1
|
||||||
|
|
||||||
|
* Adding safeguards against setting multiple constraints with the same name for a single task.
|
||||||
|
|
||||||
|
1.22.0
|
||||||
|
|
||||||
|
* CreateService and StartJobUpdate do not continue retrying if a timeout has been encountered
|
||||||
|
by the HTTP client. Instead they now return an error that conforms to the Timedout interface.
|
||||||
|
Users can check for a Timedout error by using `realis.IsTimeout(err)`.
|
||||||
|
* New API function VariableBatchStep has been added which returns the current batch at which
|
||||||
|
a Variable Batch Update configured Update is currently in.
|
||||||
|
* Added new PauseUpdateMonitor which monitors an update until it is an `ROLL_FORWARD_PAUSED` state.
|
||||||
|
* Added variableBatchStep command to sample client to be used for testing new VariableBatchStep api.
|
||||||
|
* JobUpdateStatus has changed function signature from:
|
||||||
|
`JobUpdateStatus(updateKey aurora.JobUpdateKey, desiredStatuses map[aurora.JobUpdateStatus]bool, interval, timeout time.Duration) (aurora.JobUpdateStatus, error)`
|
||||||
|
to
|
||||||
|
`JobUpdateStatus(updateKey aurora.JobUpdateKey, desiredStatuses []aurora.JobUpdateStatus, interval, timeout time.Duration) (aurora.JobUpdateStatus, error)`
|
||||||
|
* Added TerminalUpdateStates function which returns an slice containing all UpdateStates which are considered terminal states.
|
||||||
|
|
||||||
|
1.21.0
|
||||||
|
|
||||||
|
* Version numbering change. Future versions will be labled X.Y.Z where X is the major version, Y is the Aurora version the library has been tested against (e.g. 21 -> 0.21.0), and X is the minor revision.
|
||||||
|
* Moved to Thrift 0.12.0 code generator and go library.
|
||||||
|
* `aurora.ACTIVE_STATES`, `aurora.SLAVE_ASSIGNED_STATES`, `aurora.LIVE_STATES`, `aurora.TERMINAL_STATES`, `aurora.ACTIVE_JOB_UPDATE_STATES`, `aurora.AWAITNG_PULSE_JOB_UPDATE_STATES` are all now generated as a slices.
|
||||||
|
* Please use `realis.ActiveStates`, `realis.SlaveAssignedStates`,`realis.LiveStates`, `realis.TerminalStates`, `realis.ActiveJobUpdateStates`, `realis.AwaitingPulseJobUpdateStates` in their places when map representations are needed.
|
||||||
|
* `GetInstanceIds(key *aurora.JobKey, states map[aurora.ScheduleStatus]bool) (map[int32]bool, error)` has changed signature to ` GetInstanceIds(key *aurora.JobKey, states []aurora.ScheduleStatus) ([]int32, error)`
|
||||||
|
* Adding support for GPU as resource.
|
||||||
|
* Changing compose environment to Aurora snapshot in order to support staggered update.
|
||||||
|
* Adding staggered updates API.
|
18
README.md
18
README.md
|
@ -1,11 +1,12 @@
|
||||||
# gorealis [](https://godoc.org/github.com/rdelval/gorealis)
|
# gorealis [](https://godoc.org/github.com/paypal/gorealis)  [](https://codecov.io/gh/paypal/gorealis)
|
||||||
|
|
||||||
Go library for communicating with [Apache Aurora](https://github.com/apache/aurora).
|
Version 1 of Go library for interacting with [Aurora Scheduler](https://github.com/aurora-scheduler/aurora).
|
||||||
Named after the northern lights (Aurora Borealis).
|
|
||||||
|
Version 2 of this library can be found [here](https://github.com/aurora-scheduler/gorealis).
|
||||||
|
|
||||||
### Aurora version compatibility
|
### Aurora version compatibility
|
||||||
Please see [.auroraversion](./.auroraversion) to see the latest Aurora version against which this
|
Please see [.auroraversion](./.auroraversion) to see the latest Aurora version against which this
|
||||||
library has been tested. Vendoring a working version of this library is highly recommended.
|
library has been tested.
|
||||||
|
|
||||||
## Usage
|
## Usage
|
||||||
|
|
||||||
|
@ -13,9 +14,10 @@ library has been tested. Vendoring a working version of this library is highly r
|
||||||
* [Using the sample client](docs/using-the-sample-client.md)
|
* [Using the sample client](docs/using-the-sample-client.md)
|
||||||
* [Leveraging the library](docs/leveraging-the-library.md)
|
* [Leveraging the library](docs/leveraging-the-library.md)
|
||||||
|
|
||||||
## To Do
|
## Projects using gorealis
|
||||||
* Create or import a custom transport that uses https://github.com/jmcvetta/napping to improve efficiency
|
|
||||||
* End to end testing with Vagrant setup
|
* [australis](https://github.com/aurora-scheduler/australis)
|
||||||
|
|
||||||
## Contributions
|
## Contributions
|
||||||
Contributions are very much welcome. Please raise an issue so that the contribution may be discussed before it's made.
|
Contributions are always welcome. Please raise an issue to discuss a contribution before it is made.
|
||||||
|
|
||||||
|
|
229
auroraAPI.thrift
229
auroraAPI.thrift
|
@ -27,8 +27,8 @@ enum ResponseCode {
|
||||||
ERROR = 2,
|
ERROR = 2,
|
||||||
WARNING = 3,
|
WARNING = 3,
|
||||||
AUTH_FAILED = 4,
|
AUTH_FAILED = 4,
|
||||||
/** Raised when a Lock-protected operation failed due to lock validation. */
|
/** Raised when an operation was unable to proceed due to an in-progress job update. */
|
||||||
LOCK_ERROR = 5,
|
JOB_UPDATING_ERROR = 5,
|
||||||
/** Raised when a scheduler is transiently unavailable and later retry is recommended. */
|
/** Raised when a scheduler is transiently unavailable and later retry is recommended. */
|
||||||
ERROR_TRANSIENT = 6
|
ERROR_TRANSIENT = 6
|
||||||
}
|
}
|
||||||
|
@ -115,11 +115,13 @@ struct JobKey {
|
||||||
3: string name
|
3: string name
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO(jly): Deprecated, remove in 0.21. See AURORA-1959.
|
||||||
/** A unique lock key. */
|
/** A unique lock key. */
|
||||||
union LockKey {
|
union LockKey {
|
||||||
1: JobKey job
|
1: JobKey job
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO(jly): Deprecated, remove in 0.21. See AURORA-1959.
|
||||||
/** A generic lock struct to facilitate context specific resource/operation serialization. */
|
/** A generic lock struct to facilitate context specific resource/operation serialization. */
|
||||||
struct Lock {
|
struct Lock {
|
||||||
/** ID of the lock - unique per storage */
|
/** ID of the lock - unique per storage */
|
||||||
|
@ -203,6 +205,8 @@ union Image {
|
||||||
struct MesosContainer {
|
struct MesosContainer {
|
||||||
/** the optional filesystem image to use when launching this task. */
|
/** the optional filesystem image to use when launching this task. */
|
||||||
1: optional Image image
|
1: optional Image image
|
||||||
|
/** the optional list of volumes to mount into the task. */
|
||||||
|
2: optional list<Volume> volumes
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Describes a parameter passed to docker cli */
|
/** Describes a parameter passed to docker cli */
|
||||||
|
@ -236,6 +240,42 @@ union Resource {
|
||||||
5: i64 numGpus
|
5: i64 numGpus
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct PartitionPolicy {
|
||||||
|
1: bool reschedule
|
||||||
|
2: optional i64 delaySecs
|
||||||
|
}
|
||||||
|
|
||||||
|
/** SLA requirements expressed as the percentage of instances to be RUNNING every durationSecs */
|
||||||
|
struct PercentageSlaPolicy {
|
||||||
|
/* The percentage of active instances required every `durationSecs`. */
|
||||||
|
1: double percentage
|
||||||
|
/** Minimum time duration a task needs to be `RUNNING` to be treated as active */
|
||||||
|
2: i64 durationSecs
|
||||||
|
}
|
||||||
|
|
||||||
|
/** SLA requirements expressed as the number of instances to be RUNNING every durationSecs */
|
||||||
|
struct CountSlaPolicy {
|
||||||
|
/** The number of active instances required every `durationSecs` */
|
||||||
|
1: i64 count
|
||||||
|
/** Minimum time duration a task needs to be `RUNNING` to be treated as active */
|
||||||
|
2: i64 durationSecs
|
||||||
|
}
|
||||||
|
|
||||||
|
/** SLA requirements to be delegated to an external coordinator */
|
||||||
|
struct CoordinatorSlaPolicy {
|
||||||
|
/** URL for the coordinator service that needs to be contacted for SLA checks */
|
||||||
|
1: string coordinatorUrl
|
||||||
|
/** Field in the Coordinator response json indicating if the action is allowed or not */
|
||||||
|
2: string statusKey
|
||||||
|
}
|
||||||
|
|
||||||
|
/** SLA requirements expressed in one of the many types */
|
||||||
|
union SlaPolicy {
|
||||||
|
1: PercentageSlaPolicy percentageSlaPolicy
|
||||||
|
2: CountSlaPolicy countSlaPolicy
|
||||||
|
3: CoordinatorSlaPolicy coordinatorSlaPolicy
|
||||||
|
}
|
||||||
|
|
||||||
/** Description of the tasks contained within a job. */
|
/** Description of the tasks contained within a job. */
|
||||||
struct TaskConfig {
|
struct TaskConfig {
|
||||||
/** Job task belongs to. */
|
/** Job task belongs to. */
|
||||||
|
@ -244,12 +284,6 @@ struct TaskConfig {
|
||||||
/** contains the role component of JobKey */
|
/** contains the role component of JobKey */
|
||||||
17: Identity owner
|
17: Identity owner
|
||||||
7: bool isService
|
7: bool isService
|
||||||
// TODO(maxim): Deprecated. See AURORA-1707.
|
|
||||||
8: double numCpus
|
|
||||||
// TODO(maxim): Deprecated. See AURORA-1707.
|
|
||||||
9: i64 ramMb
|
|
||||||
// TODO(maxim): Deprecated. See AURORA-1707.
|
|
||||||
10: i64 diskMb
|
|
||||||
11: i32 priority
|
11: i32 priority
|
||||||
13: i32 maxTaskFailures
|
13: i32 maxTaskFailures
|
||||||
// TODO(mnurolahzade): Deprecated. See AURORA-1708.
|
// TODO(mnurolahzade): Deprecated. See AURORA-1708.
|
||||||
|
@ -261,8 +295,6 @@ struct TaskConfig {
|
||||||
32: set<Resource> resources
|
32: set<Resource> resources
|
||||||
|
|
||||||
20: set<Constraint> constraints
|
20: set<Constraint> constraints
|
||||||
/** a list of named ports this task requests */
|
|
||||||
21: set<string> requestedPorts
|
|
||||||
/** Resources to retrieve with Mesos Fetcher */
|
/** Resources to retrieve with Mesos Fetcher */
|
||||||
33: optional set<MesosFetcherURI> mesosFetcherUris
|
33: optional set<MesosFetcherURI> mesosFetcherUris
|
||||||
/**
|
/**
|
||||||
|
@ -276,6 +308,10 @@ struct TaskConfig {
|
||||||
25: optional ExecutorConfig executorConfig
|
25: optional ExecutorConfig executorConfig
|
||||||
/** Used to display additional details in the UI. */
|
/** Used to display additional details in the UI. */
|
||||||
27: optional set<Metadata> metadata
|
27: optional set<Metadata> metadata
|
||||||
|
/** Policy for how to deal with task partitions */
|
||||||
|
34: optional PartitionPolicy partitionPolicy
|
||||||
|
/** SLA requirements to be met during maintenance */
|
||||||
|
35: optional SlaPolicy slaPolicy
|
||||||
|
|
||||||
// This field is deliberately placed at the end to work around a bug in the immutable wrapper
|
// This field is deliberately placed at the end to work around a bug in the immutable wrapper
|
||||||
// code generator. See AURORA-1185 for details.
|
// code generator. See AURORA-1185 for details.
|
||||||
|
@ -284,15 +320,6 @@ struct TaskConfig {
|
||||||
}
|
}
|
||||||
|
|
||||||
struct ResourceAggregate {
|
struct ResourceAggregate {
|
||||||
// TODO(maxim): Deprecated. See AURORA-1707.
|
|
||||||
/** Number of CPU cores allotted. */
|
|
||||||
1: double numCpus
|
|
||||||
// TODO(maxim): Deprecated. See AURORA-1707.
|
|
||||||
/** Megabytes of RAM allotted. */
|
|
||||||
2: i64 ramMb
|
|
||||||
// TODO(maxim): Deprecated. See AURORA-1707.
|
|
||||||
/** Megabytes of disk space allotted. */
|
|
||||||
3: i64 diskMb
|
|
||||||
/** Aggregated resource values. */
|
/** Aggregated resource values. */
|
||||||
4: set<Resource> resources
|
4: set<Resource> resources
|
||||||
}
|
}
|
||||||
|
@ -324,7 +351,6 @@ struct JobConfiguration {
|
||||||
7: Identity owner
|
7: Identity owner
|
||||||
/**
|
/**
|
||||||
* If present, the job will be handled as a cron job with this crontab-syntax schedule.
|
* If present, the job will be handled as a cron job with this crontab-syntax schedule.
|
||||||
* This currently differs from the thrift API found in the main repo where it is not optional
|
|
||||||
*/
|
*/
|
||||||
4: optional string cronSchedule
|
4: optional string cronSchedule
|
||||||
/** Collision policy to use when handling overlapping cron runs. Default is KILL_EXISTING. */
|
/** Collision policy to use when handling overlapping cron runs. Default is KILL_EXISTING. */
|
||||||
|
@ -421,7 +447,11 @@ enum ScheduleStatus {
|
||||||
/** A fault in the task environment has caused the system to believe the task no longer exists.
|
/** A fault in the task environment has caused the system to believe the task no longer exists.
|
||||||
* This can happen, for example, when a slave process disappears.
|
* This can happen, for example, when a slave process disappears.
|
||||||
*/
|
*/
|
||||||
LOST = 7
|
LOST = 7,
|
||||||
|
/**
|
||||||
|
* The task is currently partitioned and in an unknown state.
|
||||||
|
**/
|
||||||
|
PARTITIONED = 18
|
||||||
}
|
}
|
||||||
|
|
||||||
// States that a task may be in while still considered active.
|
// States that a task may be in while still considered active.
|
||||||
|
@ -433,6 +463,7 @@ const set<ScheduleStatus> ACTIVE_STATES = [ScheduleStatus.ASSIGNED,
|
||||||
ScheduleStatus.RESTARTING
|
ScheduleStatus.RESTARTING
|
||||||
ScheduleStatus.RUNNING,
|
ScheduleStatus.RUNNING,
|
||||||
ScheduleStatus.STARTING,
|
ScheduleStatus.STARTING,
|
||||||
|
ScheduleStatus.PARTITIONED,
|
||||||
ScheduleStatus.THROTTLED]
|
ScheduleStatus.THROTTLED]
|
||||||
|
|
||||||
// States that a task may be in while associated with a slave machine and non-terminal.
|
// States that a task may be in while associated with a slave machine and non-terminal.
|
||||||
|
@ -442,6 +473,7 @@ const set<ScheduleStatus> SLAVE_ASSIGNED_STATES = [ScheduleStatus.ASSIGNED,
|
||||||
ScheduleStatus.PREEMPTING,
|
ScheduleStatus.PREEMPTING,
|
||||||
ScheduleStatus.RESTARTING,
|
ScheduleStatus.RESTARTING,
|
||||||
ScheduleStatus.RUNNING,
|
ScheduleStatus.RUNNING,
|
||||||
|
ScheduleStatus.PARTITIONED,
|
||||||
ScheduleStatus.STARTING]
|
ScheduleStatus.STARTING]
|
||||||
|
|
||||||
// States that a task may be in while in an active sandbox.
|
// States that a task may be in while in an active sandbox.
|
||||||
|
@ -449,6 +481,7 @@ const set<ScheduleStatus> LIVE_STATES = [ScheduleStatus.KILLING,
|
||||||
ScheduleStatus.PREEMPTING,
|
ScheduleStatus.PREEMPTING,
|
||||||
ScheduleStatus.RESTARTING,
|
ScheduleStatus.RESTARTING,
|
||||||
ScheduleStatus.DRAINING,
|
ScheduleStatus.DRAINING,
|
||||||
|
ScheduleStatus.PARTITIONED,
|
||||||
ScheduleStatus.RUNNING]
|
ScheduleStatus.RUNNING]
|
||||||
|
|
||||||
// States a completed task may be in.
|
// States a completed task may be in.
|
||||||
|
@ -517,6 +550,11 @@ struct ScheduledTask {
|
||||||
* this task.
|
* this task.
|
||||||
*/
|
*/
|
||||||
3: i32 failureCount
|
3: i32 failureCount
|
||||||
|
/**
|
||||||
|
* The number of partitions this task has accumulated over its lifetime.
|
||||||
|
*/
|
||||||
|
6: i32 timesPartitioned
|
||||||
|
|
||||||
/** State change history for this task. */
|
/** State change history for this task. */
|
||||||
4: list<TaskEvent> taskEvents
|
4: list<TaskEvent> taskEvents
|
||||||
/**
|
/**
|
||||||
|
@ -539,16 +577,16 @@ struct GetJobsResult {
|
||||||
* (terms are AND'ed together).
|
* (terms are AND'ed together).
|
||||||
*/
|
*/
|
||||||
struct TaskQuery {
|
struct TaskQuery {
|
||||||
14: string role
|
14: optional string role
|
||||||
9: string environment
|
9: optional string environment
|
||||||
2: string jobName
|
2: optional string jobName
|
||||||
4: set<string> taskIds
|
4: optional set<string> taskIds
|
||||||
5: set<ScheduleStatus> statuses
|
5: optional set<ScheduleStatus> statuses
|
||||||
7: set<i32> instanceIds
|
7: optional set<i32> instanceIds
|
||||||
10: set<string> slaveHosts
|
10: optional set<string> slaveHosts
|
||||||
11: set<JobKey> jobKeys
|
11: optional set<JobKey> jobKeys
|
||||||
12: i32 offset
|
12: optional i32 offset
|
||||||
13: i32 limit
|
13: optional i32 limit
|
||||||
}
|
}
|
||||||
|
|
||||||
struct HostStatus {
|
struct HostStatus {
|
||||||
|
@ -618,6 +656,9 @@ const set<JobUpdateStatus> ACTIVE_JOB_UPDATE_STATES = [JobUpdateStatus.ROLLING_F
|
||||||
JobUpdateStatus.ROLL_BACK_PAUSED,
|
JobUpdateStatus.ROLL_BACK_PAUSED,
|
||||||
JobUpdateStatus.ROLL_FORWARD_AWAITING_PULSE,
|
JobUpdateStatus.ROLL_FORWARD_AWAITING_PULSE,
|
||||||
JobUpdateStatus.ROLL_BACK_AWAITING_PULSE]
|
JobUpdateStatus.ROLL_BACK_AWAITING_PULSE]
|
||||||
|
/** States the job update can be in while waiting for a pulse. */
|
||||||
|
const set<JobUpdateStatus> AWAITNG_PULSE_JOB_UPDATE_STATES = [JobUpdateStatus.ROLL_FORWARD_AWAITING_PULSE,
|
||||||
|
JobUpdateStatus.ROLL_BACK_AWAITING_PULSE]
|
||||||
|
|
||||||
/** Job update actions that can be applied to job instances. */
|
/** Job update actions that can be applied to job instances. */
|
||||||
enum JobUpdateAction {
|
enum JobUpdateAction {
|
||||||
|
@ -675,9 +716,40 @@ struct JobUpdateKey {
|
||||||
2: string id
|
2: string id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Limits the amount of active changes being made to instances to groupSize. */
|
||||||
|
struct QueueJobUpdateStrategy {
|
||||||
|
1: i32 groupSize
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Similar to Queue strategy but will not start a new group until all instances in an active
|
||||||
|
* group have finished updating.
|
||||||
|
*/
|
||||||
|
struct BatchJobUpdateStrategy {
|
||||||
|
1: i32 groupSize
|
||||||
|
/* Update will pause automatically after each batch completes */
|
||||||
|
2: bool autopauseAfterBatch
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Same as Batch strategy but each time an active group completes, the size of the next active
|
||||||
|
* group may change.
|
||||||
|
*/
|
||||||
|
struct VariableBatchJobUpdateStrategy {
|
||||||
|
1: list<i32> groupSizes
|
||||||
|
/* Update will pause automatically after each batch completes */
|
||||||
|
2: bool autopauseAfterBatch
|
||||||
|
}
|
||||||
|
|
||||||
|
union JobUpdateStrategy {
|
||||||
|
1: QueueJobUpdateStrategy queueStrategy
|
||||||
|
2: BatchJobUpdateStrategy batchStrategy
|
||||||
|
3: VariableBatchJobUpdateStrategy varBatchStrategy
|
||||||
|
}
|
||||||
|
|
||||||
/** Job update thresholds and limits. */
|
/** Job update thresholds and limits. */
|
||||||
struct JobUpdateSettings {
|
struct JobUpdateSettings {
|
||||||
/** Max number of instances being updated at any given moment. */
|
/** Deprecated, please set value inside of desired update strategy instead.
|
||||||
|
* Max number of instances being updated at any given moment.
|
||||||
|
*/
|
||||||
1: i32 updateGroupSize
|
1: i32 updateGroupSize
|
||||||
|
|
||||||
/** Max number of instance failures to tolerate before marking instance as FAILED. */
|
/** Max number of instance failures to tolerate before marking instance as FAILED. */
|
||||||
|
@ -695,7 +767,7 @@ struct JobUpdateSettings {
|
||||||
/** Instance IDs to act on. All instances will be affected if this is not set. */
|
/** Instance IDs to act on. All instances will be affected if this is not set. */
|
||||||
7: set<Range> updateOnlyTheseInstances
|
7: set<Range> updateOnlyTheseInstances
|
||||||
|
|
||||||
/**
|
/** Deprecated, please set updateStrategy to the Batch strategy instead.
|
||||||
* If true, use updateGroupSize as strict batching boundaries, and avoid proceeding to another
|
* If true, use updateGroupSize as strict batching boundaries, and avoid proceeding to another
|
||||||
* batch until the preceding batch finishes updating.
|
* batch until the preceding batch finishes updating.
|
||||||
*/
|
*/
|
||||||
|
@ -708,6 +780,15 @@ struct JobUpdateSettings {
|
||||||
* unblocked by a fresh pulseJobUpdate call.
|
* unblocked by a fresh pulseJobUpdate call.
|
||||||
*/
|
*/
|
||||||
9: optional i32 blockIfNoPulsesAfterMs
|
9: optional i32 blockIfNoPulsesAfterMs
|
||||||
|
|
||||||
|
/**
|
||||||
|
* If true, updates will obey the SLA requirements of the tasks being updated. If the SLA policy
|
||||||
|
* differs between the old and new task configurations, updates will use the newest configuration.
|
||||||
|
*/
|
||||||
|
10: optional bool slaAware
|
||||||
|
|
||||||
|
/** Update strategy to be used for the update. See JobUpdateStrategy for choices. */
|
||||||
|
11: optional JobUpdateStrategy updateStrategy
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Event marking a state transition in job update lifecycle. */
|
/** Event marking a state transition in job update lifecycle. */
|
||||||
|
@ -738,6 +819,9 @@ struct JobInstanceUpdateEvent {
|
||||||
|
|
||||||
/** Job update action taken on the instance. */
|
/** Job update action taken on the instance. */
|
||||||
3: JobUpdateAction action
|
3: JobUpdateAction action
|
||||||
|
|
||||||
|
/** Optional message explaining the instance update event. */
|
||||||
|
4: optional string message
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Maps instance IDs to TaskConfigs it. */
|
/** Maps instance IDs to TaskConfigs it. */
|
||||||
|
@ -771,6 +855,9 @@ struct JobUpdateSummary {
|
||||||
|
|
||||||
/** Current job update state. */
|
/** Current job update state. */
|
||||||
4: JobUpdateState state
|
4: JobUpdateState state
|
||||||
|
|
||||||
|
/** Update metadata supplied by the client. */
|
||||||
|
6: optional set<Metadata> metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Update configuration and setting details. */
|
/** Update configuration and setting details. */
|
||||||
|
@ -815,6 +902,9 @@ struct JobUpdateRequest {
|
||||||
|
|
||||||
/** Update settings and limits. */
|
/** Update settings and limits. */
|
||||||
3: JobUpdateSettings settings
|
3: JobUpdateSettings settings
|
||||||
|
|
||||||
|
/** Update metadata supplied by the client issuing the JobUpdateRequest. */
|
||||||
|
4: optional set<Metadata> metadata
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -823,19 +913,19 @@ struct JobUpdateRequest {
|
||||||
*/
|
*/
|
||||||
struct JobUpdateQuery {
|
struct JobUpdateQuery {
|
||||||
/** Job role. */
|
/** Job role. */
|
||||||
2: string role
|
2: optional string role
|
||||||
|
|
||||||
/** Unique identifier for a job update. */
|
/** Unique identifier for a job update. */
|
||||||
8: JobUpdateKey key
|
8: optional JobUpdateKey key
|
||||||
|
|
||||||
/** Job key. */
|
/** Job key. */
|
||||||
3: JobKey jobKey
|
3: optional JobKey jobKey
|
||||||
|
|
||||||
/** User who created the update. */
|
/** User who created the update. */
|
||||||
4: string user
|
4: optional string user
|
||||||
|
|
||||||
/** Set of update statuses. */
|
/** Set of update statuses. */
|
||||||
5: set<JobUpdateStatus> updateStatuses
|
5: optional set<JobUpdateStatus> updateStatuses
|
||||||
|
|
||||||
/** Offset to serve data from. Used by pagination. */
|
/** Offset to serve data from. Used by pagination. */
|
||||||
6: i32 offset
|
6: i32 offset
|
||||||
|
@ -844,6 +934,13 @@ struct JobUpdateQuery {
|
||||||
7: i32 limit
|
7: i32 limit
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct HostMaintenanceRequest {
|
||||||
|
1: string host
|
||||||
|
2: SlaPolicy defaultSlaPolicy
|
||||||
|
3: i64 timeoutSecs
|
||||||
|
4: i64 createdTimestampMs
|
||||||
|
}
|
||||||
|
|
||||||
struct ListBackupsResult {
|
struct ListBackupsResult {
|
||||||
1: set<string> backups
|
1: set<string> backups
|
||||||
}
|
}
|
||||||
|
@ -888,6 +985,9 @@ struct GetPendingReasonResult {
|
||||||
struct StartJobUpdateResult {
|
struct StartJobUpdateResult {
|
||||||
/** Unique identifier for the job update. */
|
/** Unique identifier for the job update. */
|
||||||
1: JobUpdateKey key
|
1: JobUpdateKey key
|
||||||
|
|
||||||
|
/** Summary of the update that is in progress for the given JobKey. */
|
||||||
|
2: optional JobUpdateSummary updateSummary
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Result of the getJobUpdateSummaries call. */
|
/** Result of the getJobUpdateSummaries call. */
|
||||||
|
@ -897,7 +997,9 @@ struct GetJobUpdateSummariesResult {
|
||||||
|
|
||||||
/** Result of the getJobUpdateDetails call. */
|
/** Result of the getJobUpdateDetails call. */
|
||||||
struct GetJobUpdateDetailsResult {
|
struct GetJobUpdateDetailsResult {
|
||||||
|
// TODO(zmanji): Remove this once we complete AURORA-1765
|
||||||
1: JobUpdateDetails details
|
1: JobUpdateDetails details
|
||||||
|
2: list<JobUpdateDetails> detailsList
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Result of the pulseJobUpdate call. */
|
/** Result of the pulseJobUpdate call. */
|
||||||
|
@ -1023,7 +1125,7 @@ service ReadOnlyScheduler {
|
||||||
Response getJobUpdateSummaries(1: JobUpdateQuery jobUpdateQuery)
|
Response getJobUpdateSummaries(1: JobUpdateQuery jobUpdateQuery)
|
||||||
|
|
||||||
/** Gets job update details. */
|
/** Gets job update details. */
|
||||||
Response getJobUpdateDetails(1: JobUpdateKey key)
|
Response getJobUpdateDetails(2: JobUpdateQuery query)
|
||||||
|
|
||||||
/** Gets the diff between client (desired) and server (current) job states. */
|
/** Gets the diff between client (desired) and server (current) job states. */
|
||||||
Response getJobUpdateDiff(1: JobUpdateRequest request)
|
Response getJobUpdateDiff(1: JobUpdateRequest request)
|
||||||
|
@ -1062,7 +1164,7 @@ service AuroraSchedulerManager extends ReadOnlyScheduler {
|
||||||
Response restartShards(5: JobKey job, 3: set<i32> shardIds)
|
Response restartShards(5: JobKey job, 3: set<i32> shardIds)
|
||||||
|
|
||||||
/** Initiates a kill on tasks. */
|
/** Initiates a kill on tasks. */
|
||||||
Response killTasks(4: JobKey job, 5: set<i32> instances)
|
Response killTasks(4: JobKey job, 5: set<i32> instances, 6: string message)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Adds new instances with the TaskConfig of the existing instance pointed by the key.
|
* Adds new instances with the TaskConfig of the existing instance pointed by the key.
|
||||||
|
@ -1123,29 +1225,8 @@ service AuroraSchedulerManager extends ReadOnlyScheduler {
|
||||||
Response pulseJobUpdate(1: JobUpdateKey key)
|
Response pulseJobUpdate(1: JobUpdateKey key)
|
||||||
}
|
}
|
||||||
|
|
||||||
struct InstanceConfigRewrite {
|
struct ExplicitReconciliationSettings {
|
||||||
/** Key for the task to rewrite. */
|
1: optional i32 batchSize
|
||||||
1: InstanceKey instanceKey
|
|
||||||
/** The original configuration. */
|
|
||||||
2: TaskConfig oldTask
|
|
||||||
/** The rewritten configuration. */
|
|
||||||
3: TaskConfig rewrittenTask
|
|
||||||
}
|
|
||||||
|
|
||||||
struct JobConfigRewrite {
|
|
||||||
/** The original job configuration. */
|
|
||||||
1: JobConfiguration oldJob
|
|
||||||
/** The rewritten job configuration. */
|
|
||||||
2: JobConfiguration rewrittenJob
|
|
||||||
}
|
|
||||||
|
|
||||||
union ConfigRewrite {
|
|
||||||
1: JobConfigRewrite jobRewrite
|
|
||||||
2: InstanceConfigRewrite instanceRewrite
|
|
||||||
}
|
|
||||||
|
|
||||||
struct RewriteConfigsRequest {
|
|
||||||
1: list<ConfigRewrite> rewriteCommands
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// It would be great to compose these services rather than extend, but that won't be possible until
|
// It would be great to compose these services rather than extend, but that won't be possible until
|
||||||
|
@ -1196,17 +1277,27 @@ service AuroraAdmin extends AuroraSchedulerManager {
|
||||||
/** Set the given hosts back into serving mode. */
|
/** Set the given hosts back into serving mode. */
|
||||||
Response endMaintenance(1: Hosts hosts)
|
Response endMaintenance(1: Hosts hosts)
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ask scheduler to put hosts into DRAINING mode and move scheduled tasks off of the hosts
|
||||||
|
* such that its SLA requirements are satisfied. Use defaultSlaPolicy if it is not set for a task.
|
||||||
|
**/
|
||||||
|
Response slaDrainHosts(1: Hosts hosts, 2: SlaPolicy defaultSlaPolicy, 3: i64 timeoutSecs)
|
||||||
|
|
||||||
/** Start a storage snapshot and block until it completes. */
|
/** Start a storage snapshot and block until it completes. */
|
||||||
Response snapshot()
|
Response snapshot()
|
||||||
|
|
||||||
|
/** Tell scheduler to trigger an explicit task reconciliation with the given settings. */
|
||||||
|
Response triggerExplicitTaskReconciliation(1: ExplicitReconciliationSettings settings)
|
||||||
|
|
||||||
|
/** Tell scheduler to trigger an implicit task reconciliation. */
|
||||||
|
Response triggerImplicitTaskReconciliation()
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Forcibly rewrites the stored definition of user configurations. This is intended to be used
|
* Force prune any (terminal) tasks that match the query. If no statuses are supplied with the
|
||||||
* in a controlled setting, primarily to migrate pieces of configurations that are opaque to the
|
* query, it will default to all terminal task states. If statuses are supplied, they must be
|
||||||
* scheduler (e.g. executorConfig).
|
* terminal states.
|
||||||
* The scheduler may do some validation of the rewritten configurations, but it is important
|
|
||||||
* that the caller take care to provide valid input and alter only necessary fields.
|
|
||||||
*/
|
*/
|
||||||
Response rewriteConfigs(1: RewriteConfigsRequest request)
|
Response pruneTasks(1: TaskQuery query)
|
||||||
}
|
}
|
||||||
|
|
||||||
// The name of the header that should be sent to bypass leader redirection in the Scheduler.
|
// The name of the header that should be sent to bypass leader redirection in the Scheduler.
|
||||||
|
|
|
@ -16,10 +16,13 @@ package realis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"github.com/pkg/errors"
|
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Cluster contains the definition of the clusters.json file used by the default Aurora
|
||||||
|
// client for configuration
|
||||||
type Cluster struct {
|
type Cluster struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
AgentRoot string `json:"slave_root"`
|
AgentRoot string `json:"slave_root"`
|
||||||
|
@ -32,7 +35,8 @@ type Cluster struct {
|
||||||
AuthMechanism string `json:"auth_mechanism"`
|
AuthMechanism string `json:"auth_mechanism"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Loads clusters.json file traditionally located at /etc/aurora/clusters.json
|
// LoadClusters loads clusters.json file traditionally located at /etc/aurora/clusters.json
|
||||||
|
// for use with a gorealis client
|
||||||
func LoadClusters(config string) (map[string]Cluster, error) {
|
func LoadClusters(config string) (map[string]Cluster, error) {
|
||||||
|
|
||||||
file, err := os.Open(config)
|
file, err := os.Open(config)
|
||||||
|
|
|
@ -12,25 +12,27 @@
|
||||||
* limitations under the License.
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package realis
|
package realis_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
realis "github.com/paypal/gorealis"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestLoadClusters(t *testing.T) {
|
func TestLoadClusters(t *testing.T) {
|
||||||
|
|
||||||
clusters, err := LoadClusters("examples/clusters.json")
|
clusters, err := realis.LoadClusters("examples/clusters.json")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Print(err)
|
fmt.Print(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.Equal(t, clusters[0].Name, "devcluster")
|
assert.Equal(t, clusters["devcluster"].Name, "devcluster")
|
||||||
assert.Equal(t, clusters[0].ZK, "192.168.33.7")
|
assert.Equal(t, clusters["devcluster"].ZK, "192.168.33.7")
|
||||||
assert.Equal(t, clusters[0].SchedZKPath, "/aurora/scheduler")
|
assert.Equal(t, clusters["devcluster"].SchedZKPath, "/aurora/scheduler")
|
||||||
assert.Equal(t, clusters[0].AuthMechanism, "UNAUTHENTICATED")
|
assert.Equal(t, clusters["devcluster"].AuthMechanism, "UNAUTHENTICATED")
|
||||||
assert.Equal(t, clusters[0].AgentRunDir, "latest")
|
assert.Equal(t, clusters["devcluster"].AgentRunDir, "latest")
|
||||||
assert.Equal(t, clusters[0].AgentRoot, "/var/lib/mesos")
|
assert.Equal(t, clusters["devcluster"].AgentRoot, "/var/lib/mesos")
|
||||||
}
|
}
|
||||||
|
|
91
container.go
Normal file
91
container.go
Normal file
|
@ -0,0 +1,91 @@
|
||||||
|
/**
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package realis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Container is an interface that defines a single function needed to create
|
||||||
|
// an Aurora container type. It exists because the code must support both Mesos
|
||||||
|
// and Docker containers.
|
||||||
|
type Container interface {
|
||||||
|
Build() *aurora.Container
|
||||||
|
}
|
||||||
|
|
||||||
|
// MesosContainer is a Mesos style container that can be used by Aurora Jobs.
|
||||||
|
type MesosContainer struct {
|
||||||
|
container *aurora.MesosContainer
|
||||||
|
}
|
||||||
|
|
||||||
|
// DockerContainer is a vanilla Docker style container that can be used by Aurora Jobs.
|
||||||
|
type DockerContainer struct {
|
||||||
|
container *aurora.DockerContainer
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDockerContainer creates a new Aurora compatible Docker container configuration.
|
||||||
|
func NewDockerContainer() DockerContainer {
|
||||||
|
return DockerContainer{container: aurora.NewDockerContainer()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build creates an Aurora container based upon the configuration provided.
|
||||||
|
func (c DockerContainer) Build() *aurora.Container {
|
||||||
|
return &aurora.Container{Docker: c.container}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Image adds the name of a Docker image to be used by the Job when running.
|
||||||
|
func (c DockerContainer) Image(image string) DockerContainer {
|
||||||
|
c.container.Image = image
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddParameter adds a parameter to be passed to Docker when the container is run.
|
||||||
|
func (c DockerContainer) AddParameter(name, value string) DockerContainer {
|
||||||
|
c.container.Parameters = append(c.container.Parameters, &aurora.DockerParameter{
|
||||||
|
Name: name,
|
||||||
|
Value: value,
|
||||||
|
})
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMesosContainer creates a Mesos style container to be configured and built for use by an Aurora Job.
|
||||||
|
func NewMesosContainer() MesosContainer {
|
||||||
|
return MesosContainer{container: aurora.NewMesosContainer()}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build creates a Mesos style Aurora container configuration to be passed on to the Aurora Job.
|
||||||
|
func (c MesosContainer) Build() *aurora.Container {
|
||||||
|
return &aurora.Container{Mesos: c.container}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DockerImage configures the Mesos container to use a specific Docker image when being run.
|
||||||
|
func (c MesosContainer) DockerImage(name, tag string) MesosContainer {
|
||||||
|
if c.container.Image == nil {
|
||||||
|
c.container.Image = aurora.NewImage()
|
||||||
|
}
|
||||||
|
|
||||||
|
c.container.Image.Docker = &aurora.DockerImage{Name: name, Tag: tag}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppcImage configures the Mesos container to use an image in the Appc format to run the container.
|
||||||
|
func (c MesosContainer) AppcImage(name, imageID string) MesosContainer {
|
||||||
|
if c.container.Image == nil {
|
||||||
|
c.container.Image = aurora.NewImage()
|
||||||
|
}
|
||||||
|
|
||||||
|
c.container.Image.Appc = &aurora.AppcImage{Name: name, ImageId: imageID}
|
||||||
|
return c
|
||||||
|
}
|
109
docker-compose.yml
Normal file
109
docker-compose.yml
Normal file
|
@ -0,0 +1,109 @@
|
||||||
|
version: "2"
|
||||||
|
|
||||||
|
services:
|
||||||
|
zk:
|
||||||
|
image: rdelvalle/zookeeper
|
||||||
|
restart: on-failure
|
||||||
|
ports:
|
||||||
|
- "2181:2181"
|
||||||
|
environment:
|
||||||
|
ZK_CONFIG: tickTime=2000,initLimit=10,syncLimit=5,maxClientCnxns=128,forceSync=no,clientPort=2181
|
||||||
|
ZK_ID: 1
|
||||||
|
networks:
|
||||||
|
aurora_cluster:
|
||||||
|
ipv4_address: 192.168.33.2
|
||||||
|
|
||||||
|
master:
|
||||||
|
image: aurorascheduler/mesos-master:1.7.2
|
||||||
|
restart: on-failure
|
||||||
|
ports:
|
||||||
|
- "5050:5050"
|
||||||
|
environment:
|
||||||
|
MESOS_ZK: zk://192.168.33.2:2181/mesos
|
||||||
|
MESOS_QUORUM: 1
|
||||||
|
MESOS_HOSTNAME: localhost
|
||||||
|
MESOS_CLUSTER: test-cluster
|
||||||
|
MESOS_REGISTRY: replicated_log
|
||||||
|
MESOS_WORK_DIR: /tmp/mesos
|
||||||
|
networks:
|
||||||
|
aurora_cluster:
|
||||||
|
ipv4_address: 192.168.33.3
|
||||||
|
depends_on:
|
||||||
|
- zk
|
||||||
|
|
||||||
|
agent-one:
|
||||||
|
image: aurorascheduler/mesos-agent:1.7.2
|
||||||
|
pid: host
|
||||||
|
restart: on-failure
|
||||||
|
ports:
|
||||||
|
- "5051:5051"
|
||||||
|
environment:
|
||||||
|
MESOS_ATTRIBUTES: 'zone:west'
|
||||||
|
MESOS_MASTER: zk://192.168.33.2:2181/mesos
|
||||||
|
MESOS_CONTAINERIZERS: docker,mesos
|
||||||
|
MESOS_PORT: 5051
|
||||||
|
MESOS_HOSTNAME: localhost
|
||||||
|
MESOS_RESOURCES: ports(*):[11000-11999]
|
||||||
|
MESOS_SYSTEMD_ENABLE_SUPPORT: 'false'
|
||||||
|
MESOS_WORK_DIR: /tmp/mesos
|
||||||
|
networks:
|
||||||
|
aurora_cluster:
|
||||||
|
ipv4_address: 192.168.33.4
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
- /sys/fs/cgroup:/sys/fs/cgroup
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
depends_on:
|
||||||
|
- zk
|
||||||
|
|
||||||
|
agent-two:
|
||||||
|
image: aurorascheduler/mesos-agent:1.7.2
|
||||||
|
pid: host
|
||||||
|
restart: on-failure
|
||||||
|
ports:
|
||||||
|
- "5061:5061"
|
||||||
|
environment:
|
||||||
|
MESOS_ATTRIBUTES: 'zone:east'
|
||||||
|
MESOS_MASTER: zk://192.168.33.2:2181/mesos
|
||||||
|
MESOS_CONTAINERIZERS: docker,mesos
|
||||||
|
MESOS_HOSTNAME: localhost
|
||||||
|
MESOS_PORT: 5061
|
||||||
|
MESOS_RESOURCES: ports(*):[11000-11999]
|
||||||
|
MESOS_SYSTEMD_ENABLE_SUPPORT: 'false'
|
||||||
|
MESOS_WORK_DIR: /tmp/mesos
|
||||||
|
networks:
|
||||||
|
aurora_cluster:
|
||||||
|
ipv4_address: 192.168.33.5
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
- /sys/fs/cgroup:/sys/fs/cgroup
|
||||||
|
- /var/run/docker.sock:/var/run/docker.sock
|
||||||
|
depends_on:
|
||||||
|
- zk
|
||||||
|
|
||||||
|
aurora-one:
|
||||||
|
image: aurorascheduler/scheduler:0.23.0
|
||||||
|
pid: host
|
||||||
|
ports:
|
||||||
|
- "8081:8081"
|
||||||
|
restart: on-failure
|
||||||
|
environment:
|
||||||
|
CLUSTER_NAME: test-cluster
|
||||||
|
ZK_ENDPOINTS: "192.168.33.2:2181"
|
||||||
|
MESOS_MASTER: "zk://192.168.33.2:2181/mesos"
|
||||||
|
EXTRA_SCHEDULER_ARGS: "-min_required_instances_for_sla_check=1"
|
||||||
|
networks:
|
||||||
|
aurora_cluster:
|
||||||
|
ipv4_address: 192.168.33.7
|
||||||
|
depends_on:
|
||||||
|
- zk
|
||||||
|
- master
|
||||||
|
- agent-one
|
||||||
|
|
||||||
|
networks:
|
||||||
|
aurora_cluster:
|
||||||
|
driver: bridge
|
||||||
|
ipam:
|
||||||
|
config:
|
||||||
|
- subnet: 192.168.33.0/16
|
||||||
|
gateway: 192.168.33.1
|
90
docs/developing.md
Normal file
90
docs/developing.md
Normal file
|
@ -0,0 +1,90 @@
|
||||||
|
|
||||||
|
# Developing gorealis
|
||||||
|
|
||||||
|
|
||||||
|
### Installing Docker
|
||||||
|
|
||||||
|
For our developer environment we leverage of Docker containers.
|
||||||
|
|
||||||
|
First you must have Docker installed. Instructions on how to install Docker
|
||||||
|
vary from platform to platform and can be found [here](https://docs.docker.com/install/).
|
||||||
|
|
||||||
|
### Installing docker-compose
|
||||||
|
|
||||||
|
To make the creation of our developer environment as simple as possible, we leverage
|
||||||
|
docker-compose to bring up all independent components up separately.
|
||||||
|
|
||||||
|
This also allows us to delete and recreate our development cluster very quickly.
|
||||||
|
|
||||||
|
To install docker-compose please follow the instructions for your platform
|
||||||
|
[here](https://docs.docker.com/compose/install/).
|
||||||
|
|
||||||
|
|
||||||
|
### Getting the source code
|
||||||
|
|
||||||
|
As of go 1.10.x, GOPATH is still relevant. This may change in the future but
|
||||||
|
for the sake of making development less error prone, it is suggested that the following
|
||||||
|
directories be created:
|
||||||
|
|
||||||
|
`$ mkdir -p $GOPATH/src/github.com/paypal`
|
||||||
|
|
||||||
|
And then clone the master branch into the newly created folder:
|
||||||
|
|
||||||
|
`$ cd $GOPATH/src/github.com/paypal; git clone git@github.com:paypal/gorealis.git`
|
||||||
|
|
||||||
|
Since we check in our vendor folder, gorealis no further set up is needed.
|
||||||
|
|
||||||
|
### Bringing up the cluster
|
||||||
|
|
||||||
|
To develop gorealis, you will need a fully functioning Mesos cluster along with
|
||||||
|
Apache Aurora.
|
||||||
|
|
||||||
|
In order to bring up our docker-compose set up execute the following command from the root
|
||||||
|
of the git repository:
|
||||||
|
|
||||||
|
`$ docker-compose up -d`
|
||||||
|
|
||||||
|
### Testing code
|
||||||
|
|
||||||
|
Since Docker does not work well using host mode under MacOS, a workaround has been employed:
|
||||||
|
docker-compose brings up a bridged network.
|
||||||
|
|
||||||
|
* The ports 8081 is exposed for Aurora. http://localhost:8081 will load the Aurora Web UI.
|
||||||
|
* The port 5050 is exposed for Mesos. http://localhost:5050 will load the Mesos Web UI.
|
||||||
|
|
||||||
|
#### Note for developers on MacOS:
|
||||||
|
Running the cluster using a bridged network on MacOS has some side effects.
|
||||||
|
Since Aurora exposes it's internal IP location through Zookeeper, gorealis will determine
|
||||||
|
the address to be 192.168.33.7. The address 192.168.33.7 is valid when running in a Linux
|
||||||
|
environment but not when running under MacOS. To run code involving the ZK leader fetcher
|
||||||
|
(such as the tests), a container connected to the network needs to be launched.
|
||||||
|
|
||||||
|
For example, running the tests in a container can be done through the following command from
|
||||||
|
the root of the git repository:
|
||||||
|
|
||||||
|
`$ docker run -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10.3-alpine go test github.com/paypal/gorealis`
|
||||||
|
|
||||||
|
Or
|
||||||
|
|
||||||
|
`$ ./runTestsMac.sh`
|
||||||
|
|
||||||
|
Alternatively, if an interactive shell is necessary, the following command may be used:
|
||||||
|
`$ docker run -it -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10.3-alpine /bin/sh`
|
||||||
|
|
||||||
|
### Cleaning up the cluster
|
||||||
|
|
||||||
|
If something went wrong while developing and a clean environment is desired, perform the
|
||||||
|
following command from the root of the git directory:
|
||||||
|
|
||||||
|
`$ docker-compose down && docker-compose up -d`
|
||||||
|
|
||||||
|
|
||||||
|
### Tearing down the cluster
|
||||||
|
|
||||||
|
Once development is done, the environment may be torn down by executing (from the root of the
|
||||||
|
git directory):
|
||||||
|
|
||||||
|
`$ docker-compose down`
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,27 +1,11 @@
|
||||||
# Running custom executors on Aurora
|
# Running custom executors on Aurora
|
||||||
|
|
||||||
In this document, we will be using the docker-compose executor to demonstrate
|
In this document we will be using the docker-compose executor to demonstrate
|
||||||
how Aurora can use multiple executors on a single Scheduler. For this guide,
|
how Aurora can use multiple executors on a single Scheduler. For this guide,
|
||||||
we will be using a vagrant instance to demonstrate the setup process. Many of the same
|
we will be using a vagrant instance to demonstrate the setup process. Many of the same
|
||||||
steps also apply to an Aurora installation made via a package manager. Differences in how to configure
|
steps also apply to an Aurora installation made via a package manager. Differences in how to configure
|
||||||
the cluster between the vagrant image and the package manager will be clarified when necessary.
|
the cluster between the vagrant image and the package manager will be clarified when necessary.
|
||||||
|
|
||||||
## Pre-configured Aurora Vagrant box
|
|
||||||
Alternatively, if Vagrant and VirtualBox are already configured your machine,
|
|
||||||
you may use a pre-configured vagrant image and skip to the [Creating Aurora Jobs](#creating-aurora-jobs).
|
|
||||||
|
|
||||||
To take this path, start by cloning the following repository and checking out the DockerComposeExecutor branch:
|
|
||||||
```
|
|
||||||
$ git clone -b DockerComposeExecutor git@github.com:rdelval/aurora.git
|
|
||||||
```
|
|
||||||
|
|
||||||
And bringing the vagrant box
|
|
||||||
```
|
|
||||||
$ cd aurora
|
|
||||||
$ vagrant up
|
|
||||||
```
|
|
||||||
**The pre-configured Vagrant box will most likely run on a stale version of Aurora (compared to the master)**
|
|
||||||
|
|
||||||
## Configuring Aurora manually
|
## Configuring Aurora manually
|
||||||
### Spinning up an Aurora instance with Vagrant
|
### Spinning up an Aurora instance with Vagrant
|
||||||
Follow the guide at http://aurora.apache.org/documentation/latest/getting-started/vagrant/
|
Follow the guide at http://aurora.apache.org/documentation/latest/getting-started/vagrant/
|
||||||
|
@ -31,7 +15,7 @@ until the end of step 4 (Start the local cluster) and skip to configuring Docker
|
||||||
Follow the guide at http://aurora.apache.org/documentation/latest/operations/installation/
|
Follow the guide at http://aurora.apache.org/documentation/latest/operations/installation/
|
||||||
|
|
||||||
### Configuring Scheduler to use Docker-Compose executor
|
### Configuring Scheduler to use Docker-Compose executor
|
||||||
In order use the docker compose executor with Aurora, we must first give the scheduler
|
In order to use the docker compose executor with Aurora, we must first give the scheduler
|
||||||
a configuration file that contains information on how to run the executor.
|
a configuration file that contains information on how to run the executor.
|
||||||
|
|
||||||
#### Configuration file
|
#### Configuration file
|
||||||
|
@ -63,10 +47,8 @@ A sample config file for the docker-compose executor looks like this:
|
||||||
"task_prefix":"compose-"
|
"task_prefix":"compose-"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
#### Configuring the Scheduler to run a custom executor
|
#### Configuring the Scheduler to run a custom executor
|
||||||
##### Setting the proper flags
|
##### Setting the proper flags
|
||||||
Some flags need to be set on the Aurora scheduler in order for custom executors to work properly.
|
Some flags need to be set on the Aurora scheduler in order for custom executors to work properly.
|
||||||
|
@ -106,96 +88,31 @@ On Ubuntu, restarting the aurora-scheduler can be achieved by running the follow
|
||||||
$ sudo service aurora-scheduler restart
|
$ sudo service aurora-scheduler restart
|
||||||
```
|
```
|
||||||
|
|
||||||
### Using a custom client
|
## Using [dce-go](https://github.com/paypal/dce-go)
|
||||||
Pystachio does yet support launching tasks using custom executors. Therefore, a custom
|
Instead of manually configuring Aurora to run the docker-compose executor, one can follow the instructions provided [here](https://github.com/paypal/dce-go/blob/develop/docs/environment.md) to quickly create a DCE environment that would include mesos, aurora, golang1.7, docker, docker-compose and DCE installed.
|
||||||
client must be used in order to launch tasks using a custom executor. In this case,
|
|
||||||
we will be using [gorealis](https://github.com/rdelval/gorealis) to launch a task with
|
Please note that when using dce-go, the endpoints are going to be as shown below,
|
||||||
the compose executor on Aurora.
|
```
|
||||||
|
Aurora endpoint --> http://192.168.33.8:8081
|
||||||
|
Mesos endpoint --> http://192.168.33.8:5050
|
||||||
|
```
|
||||||
|
|
||||||
## Configuring the system to run a custom client and docker-compose executor
|
## Configuring the system to run a custom client and docker-compose executor
|
||||||
|
|
||||||
### Installing Go
|
### Installing Go
|
||||||
|
|
||||||
#### Linux
|
Follow the instructions at the official golang website: [golang.org/doc/install](https://golang.org/doc/install)
|
||||||
|
|
||||||
##### Ubuntu
|
### Installing docker-compose
|
||||||
|
|
||||||
###### Adding a PPA and install via apt-get
|
Agents which will run dce-go will need docker-compose in order to sucessfully run the executor.
|
||||||
```
|
Instructions for installing docker-compose on various platforms may be found on Docker's webiste: [docs.docker.com/compose/install/](https://docs.docker.com/compose/install/)
|
||||||
$ sudo add-apt-repository ppa:ubuntu-lxc/lxd-stable
|
|
||||||
$ sudo apt-get update
|
|
||||||
$ sudo apt-get install golang
|
|
||||||
```
|
|
||||||
|
|
||||||
###### Configuring the GOPATH
|
|
||||||
|
|
||||||
Configure the environment to be able to compile and run Go code.
|
|
||||||
```
|
|
||||||
$ mkdir $HOME/go
|
|
||||||
$ echo export GOPATH=$HOME/go >> $HOME/.bashrc
|
|
||||||
$ echo export GOROOT=/usr/lib/go >> $HOME/.bashrc
|
|
||||||
$ echo export PATH=$PATH:$GOPATH/bin >> $HOME/.bashrc
|
|
||||||
$ echo export PATH=$PATH:$GOROOT/bin >> $HOME/.bashrc
|
|
||||||
```
|
|
||||||
|
|
||||||
Finally we must reload the .bashrc configuration:
|
|
||||||
```
|
|
||||||
$ source $HOME/.bashrc
|
|
||||||
```
|
|
||||||
|
|
||||||
#### OS X
|
|
||||||
|
|
||||||
One way to install go on OS X is by using [Homebrew](http://brew.sh/)
|
|
||||||
|
|
||||||
##### Installing Homebrew
|
|
||||||
Run the following command from the terminal to install Hombrew:
|
|
||||||
```
|
|
||||||
$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
|
|
||||||
```
|
|
||||||
|
|
||||||
##### Installing Go using Hombrew
|
|
||||||
|
|
||||||
Run the following command from the terminal to install Go:
|
|
||||||
```
|
|
||||||
$ brew install go
|
|
||||||
```
|
|
||||||
|
|
||||||
##### Configuring the GOPATH
|
|
||||||
|
|
||||||
Configure the environment to be able to compile and run Go code.
|
|
||||||
```
|
|
||||||
$ mkdir $HOME/go
|
|
||||||
$ echo export GOPATH=$HOME/go >> $HOME/.profile
|
|
||||||
$ echo export GOROOT=/usr/local/opt/go/libexec >> $HOME/.profile
|
|
||||||
$ echo export PATH=$PATH:$GOPATH/bin >> $HOME/.profile
|
|
||||||
$ echo export PATH=$PATH:$GOROOT/bin >> $HOME/.profile
|
|
||||||
```
|
|
||||||
|
|
||||||
Finally we must reload the .profile configuration:
|
|
||||||
```
|
|
||||||
$ source $HOME/.profile
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Windows
|
|
||||||
|
|
||||||
Download and run the msi installer from https://golang.org/dl/
|
|
||||||
|
|
||||||
## Installing Docker Compose
|
|
||||||
To show Aurora's new multi executor feature, we need to use at least one custom executor.
|
|
||||||
In this case we will be using the [docker-compose-executor](https://github.com/mesos/docker-compose-executor).
|
|
||||||
|
|
||||||
In order to run the docker-compose executor, each agent must have docker-compose installed on it.
|
|
||||||
|
|
||||||
This can be done using pip:
|
|
||||||
```
|
|
||||||
$ sudo pip install docker-compose
|
|
||||||
```
|
|
||||||
|
|
||||||
## Downloading gorealis
|
## Downloading gorealis
|
||||||
Finally, we must get `gorealis` using the `go get` command:
|
Finally, we must get `gorealis` using the `go get` command:
|
||||||
|
|
||||||
```
|
```
|
||||||
go get github.com/rdelval/gorealis
|
go get github.com/paypal/gorealis
|
||||||
```
|
```
|
||||||
|
|
||||||
# Creating Aurora Jobs
|
# Creating Aurora Jobs
|
||||||
|
@ -264,7 +181,7 @@ job = realis.NewJob().
|
||||||
|
|
||||||
Using a vagrant setup as an example, we can run the following command to create a compose job:
|
Using a vagrant setup as an example, we can run the following command to create a compose job:
|
||||||
```
|
```
|
||||||
go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
|
go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
|
||||||
```
|
```
|
||||||
|
|
||||||
If everything went according to plan, a new job will be shown in the Aurora UI.
|
If everything went according to plan, a new job will be shown in the Aurora UI.
|
||||||
|
@ -281,7 +198,7 @@ A message from the executor should greet us.
|
||||||
It is also possible to create a thermos job using gorealis. To do this, however,
|
It is also possible to create a thermos job using gorealis. To do this, however,
|
||||||
a thermos payload is required. A thermos payload consists of a JSON blob that details
|
a thermos payload is required. A thermos payload consists of a JSON blob that details
|
||||||
the entire task as it exists inside the Aurora Scheduler. *Creating the blob is unfortunately
|
the entire task as it exists inside the Aurora Scheduler. *Creating the blob is unfortunately
|
||||||
out of the scope of was gorealis does*, so a thermos payload must be generated beforehand or
|
out of the scope of what gorealis does*, so a thermos payload must be generated beforehand or
|
||||||
retrieved from the structdump of an existing task for testing purposes.
|
retrieved from the structdump of an existing task for testing purposes.
|
||||||
|
|
||||||
A sample thermos JSON payload may be found [here](../examples/thermos_payload.json) in the examples folder.
|
A sample thermos JSON payload may be found [here](../examples/thermos_payload.json) in the examples folder.
|
||||||
|
@ -306,17 +223,36 @@ job = realis.NewJob().
|
||||||
|
|
||||||
Using a vagrant setup as an example, we can run the following command to create a Thermos job:
|
Using a vagrant setup as an example, we can run the following command to create a Thermos job:
|
||||||
```
|
```
|
||||||
$ cd $GOPATH/src/github.com/rdelval/gorealis
|
$ cd $GOPATH/src/github.com/paypal/gorealis
|
||||||
$ go run examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=create -executor=thermos
|
$ go run examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=create -executor=thermos
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Creating jobs using gorealis JSON client
|
||||||
|
We can also use the [JSON client](../examples/jsonClient.go) to create Aurora jobs using gorealis.
|
||||||
|
|
||||||
|
If using _dce-go_, then use `http://192.168.33.8:8081` as the scheduler URL.
|
||||||
|
|
||||||
|
```
|
||||||
|
$ cd $GOPATH/src/github.com/paypal/gorealis/examples
|
||||||
|
```
|
||||||
|
|
||||||
|
To launch a job using the Thermos executor,
|
||||||
|
```
|
||||||
|
$ go run jsonClient.go -job=job_thermos.json -config=config.json
|
||||||
|
```
|
||||||
|
|
||||||
|
To launch a job using docker-compose executor,
|
||||||
|
```
|
||||||
|
$ go run jsonClient.go -job=job_dce.json -config=config.json
|
||||||
|
```
|
||||||
|
|
||||||
# Cleaning up
|
# Cleaning up
|
||||||
|
|
||||||
To stop the jobs we've launched, we need to send a job kill request to Aurora.
|
To stop the jobs we've launched, we need to send a job kill request to Aurora.
|
||||||
It should be noted that although we can't create jobs using a custom executor using the default Aurora client,
|
It should be noted that although we can't create jobs using a custom executor using the default Aurora client,
|
||||||
we ~can~ use the default Aurora client to kill them. Additionally, we can use gorealis perform the clean up as well.
|
we ~can~ use the default Aurora client to kill them. Additionally, we can use gorealis perform the clean up as well.
|
||||||
|
|
||||||
## Using the Default Client
|
## Using the Default Client (if manually configured Aurora)
|
||||||
|
|
||||||
```
|
```
|
||||||
$ aurora job killall devcluster/www-data/prod/hello
|
$ aurora job killall devcluster/www-data/prod/hello
|
||||||
|
@ -326,6 +262,6 @@ $ aurora job killall devcluster/vagrant/prod/docker-compose
|
||||||
## Using gorealis
|
## Using gorealis
|
||||||
|
|
||||||
```
|
```
|
||||||
$ go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
|
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
|
||||||
$ go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
|
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
|
||||||
```
|
```
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
# How to leverage the library (based on the [sample client](../examples/client.go))
|
# How to leverage the library (based on the [sample client](../examples/client.go))
|
||||||
|
|
||||||
For a more complete look at the API, please visit https://godoc.org/github.com/rdelval/gorealis
|
For a more complete look at the API, please visit https://godoc.org/github.com/paypal/gorealis
|
||||||
|
|
||||||
* Create a default configuration file (alternatively, manually create your own Config):
|
* Create a default configuration file (alternatively, manually create your own Config):
|
||||||
```
|
```
|
||||||
|
@ -58,3 +58,18 @@ updateJob.InstanceCount(1)
|
||||||
updateJob.Ram(128)
|
updateJob.Ram(128)
|
||||||
msg, err := r.UpdateJob(updateJob, "")
|
msg, err := r.UpdateJob(updateJob, "")
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
|
* Handling a timeout scenario:
|
||||||
|
|
||||||
|
When sending an API call to Aurora, the call may timeout at the client side.
|
||||||
|
This means that the time limit has been reached while waiting for the scheduler
|
||||||
|
to reply. In such a case it is recommended that the timeout is increased through
|
||||||
|
the use of the `realis.TimeoutMS()` option.
|
||||||
|
|
||||||
|
As these timeouts cannot be totally avoided, there exists a mechanism to mitigate such
|
||||||
|
scenarios. The `StartJobUpdate` and `CreateService` API will return an error that
|
||||||
|
implements the Timeout interface.
|
||||||
|
|
||||||
|
An error can be checked to see if it is a Timeout error by using the `realis.IsTimeout()`
|
||||||
|
function.
|
|
@ -29,21 +29,21 @@ executor examples, the vagrant box must be configured properly to use the docker
|
||||||
|
|
||||||
#### Creating a Thermos job
|
#### Creating a Thermos job
|
||||||
```
|
```
|
||||||
$ cd $GOPATH/src/github.com/rdelval/gorealis
|
$ cd $GOPATH/src/github.com/paypal/gorealis/examples
|
||||||
$ go run examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=create
|
$ go run client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=create
|
||||||
```
|
```
|
||||||
#### Kill a Thermos job
|
#### Kill a Thermos job
|
||||||
```
|
```
|
||||||
$ go run $GOPATH/src/github.com/rdelval/gorealis.git/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
|
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
|
||||||
```
|
```
|
||||||
|
|
||||||
### Docker Compose executor (custom executor)
|
### Docker Compose executor (custom executor)
|
||||||
|
|
||||||
#### Creating Docker Compose executor job
|
#### Creating Docker Compose executor job
|
||||||
```
|
```
|
||||||
$ go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
|
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
|
||||||
```
|
```
|
||||||
#### Kill a Docker Compose executor job
|
#### Kill a Docker Compose executor job
|
||||||
```
|
```
|
||||||
$ go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
|
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
|
||||||
```
|
```
|
||||||
|
|
104
errors.go
Normal file
104
errors.go
Normal file
|
@ -0,0 +1,104 @@
|
||||||
|
/**
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package realis
|
||||||
|
|
||||||
|
// Using a pattern described by Dave Cheney to differentiate errors
|
||||||
|
// https://dave.cheney.net/2016/04/27/dont-just-check-errors-handle-them-gracefully
|
||||||
|
|
||||||
|
// Timeout errors are returned when a function is unable to continue executing due
|
||||||
|
// to a time constraint or meeting a set number of retries.
|
||||||
|
type timeout interface {
|
||||||
|
Timedout() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsTimeout returns true if the error being passed as an argument implements the Timeout interface
|
||||||
|
// and the Timedout function returns true.
|
||||||
|
func IsTimeout(err error) bool {
|
||||||
|
temp, ok := err.(timeout)
|
||||||
|
return ok && temp.Timedout()
|
||||||
|
}
|
||||||
|
|
||||||
|
type timeoutErr struct {
|
||||||
|
error
|
||||||
|
timedout bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *timeoutErr) Timedout() bool {
|
||||||
|
return r.timedout
|
||||||
|
}
|
||||||
|
|
||||||
|
func newTimedoutError(err error) *timeoutErr {
|
||||||
|
return &timeoutErr{error: err, timedout: true}
|
||||||
|
}
|
||||||
|
|
||||||
|
// retryErr is a superset of timeout which includes extra context
|
||||||
|
// with regards to our retry mechanism. This is done in order to make sure
|
||||||
|
// that our retry mechanism works as expected through our tests and should
|
||||||
|
// never be relied on or used directly. It is not made part of the public API
|
||||||
|
// on purpose.
|
||||||
|
type retryErr struct {
|
||||||
|
error
|
||||||
|
timedout bool
|
||||||
|
retryCount int // How many times did the mechanism retry the command
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retry error is a timeout type error with added context.
|
||||||
|
func (r *retryErr) Timedout() bool {
|
||||||
|
return r.timedout
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *retryErr) RetryCount() int {
|
||||||
|
return r.retryCount
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToRetryCount is a helper function for testing verification to avoid whitebox testing
|
||||||
|
// as well as keeping retryErr as a private.
|
||||||
|
// Should NOT be used under any other context.
|
||||||
|
func ToRetryCount(err error) *retryErr {
|
||||||
|
if retryErr, ok := err.(*retryErr); ok {
|
||||||
|
return retryErr
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func newRetryError(err error, retryCount int) *retryErr {
|
||||||
|
return &retryErr{error: err, timedout: true, retryCount: retryCount}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Temporary errors indicate that the action may or should be retried.
|
||||||
|
type temporary interface {
|
||||||
|
Temporary() bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsTemporary indicates whether the error passed in as an argument implements the temporary interface
|
||||||
|
// and if the Temporary function returns true.
|
||||||
|
func IsTemporary(err error) bool {
|
||||||
|
temp, ok := err.(temporary)
|
||||||
|
return ok && temp.Temporary()
|
||||||
|
}
|
||||||
|
|
||||||
|
type temporaryErr struct {
|
||||||
|
error
|
||||||
|
temporary bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *temporaryErr) Temporary() bool {
|
||||||
|
return t.temporary
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTemporaryError creates a new error which satisfies the Temporary interface.
|
||||||
|
func NewTemporaryError(err error) *temporaryErr {
|
||||||
|
return &temporaryErr{error: err, temporary: true}
|
||||||
|
}
|
22
examples/certs/client.cert
Normal file
22
examples/certs/client.cert
Normal file
|
@ -0,0 +1,22 @@
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIDrTCCApWgAwIBAgIJAM+bKx50CY9JMA0GCSqGSIb3DQEBCwUAMG0xCzAJBgNV
|
||||||
|
BAYTAkdCMQ8wDQYDVQQIDAZMb25kb24xDzANBgNVBAcMBkxvbmRvbjEYMBYGA1UE
|
||||||
|
CgwPR2xvYmFsIFNlY3VyaXR5MRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MQowCAYD
|
||||||
|
VQQDDAEqMB4XDTE3MTIwODIwNTMwMVoXDTI3MTIwNjIwNTMwMVowbTELMAkGA1UE
|
||||||
|
BhMCR0IxDzANBgNVBAgMBkxvbmRvbjEPMA0GA1UEBwwGTG9uZG9uMRgwFgYDVQQK
|
||||||
|
DA9HbG9iYWwgU2VjdXJpdHkxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxCjAIBgNV
|
||||||
|
BAMMASowggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhdN0KH80BF3Dk
|
||||||
|
RQqAARcf7F87uNhQM05HXK8ffpESvKhzrO9BHuDZ0yS3il0BK9XpTyTtHSLIbphk
|
||||||
|
rO3BOsmPj0zhaM20LsPtwy8GmMCym3hVNSYYyP5XCdjA3uZIYq2R8ruk+vZTe4Zr
|
||||||
|
F8GHV/xGYU4zKPMGzsQbICjZhj0yiYF9UQ2J+xw79nsqPTmo8+EdVuunLz39dt2o
|
||||||
|
SbDA01g/kPTIg9K2CAUH0mm4zegiqytwpn2JKVoemmgrDYECWnhLprWlvN9t/fX9
|
||||||
|
IgprDAHN1BsMrzfmfQXZpVmbIlTriVSdYVeTwG8rT7Tg8soIHqBrnJ1ykTpY4VrO
|
||||||
|
6tc2z4kTAgMBAAGjUDBOMB0GA1UdDgQWBBSLvwax1Zd6ZiE7TjRklWYNPwgZ2zAf
|
||||||
|
BgNVHSMEGDAWgBSLvwax1Zd6ZiE7TjRklWYNPwgZ2zAMBgNVHRMEBTADAQH/MA0G
|
||||||
|
CSqGSIb3DQEBCwUAA4IBAQCJY/EJxlyiSrnO82QcsWm9cT/ciU/G7Y4vX/tGs74C
|
||||||
|
tNxuBpc0vMfW4a9u6tmi3cW3EXD/KRvPwKZXxzTOhoQY9ZpbZLZ6VvCQ+aWQaXWT
|
||||||
|
664IS/mrEUZ/p3pgqTNtifdpPAZqVqNdS+Od8/B3/nWUn6JBkDZ4WaFQgfsSulxK
|
||||||
|
yzYN6UbwhLHfQUupFFhPfvYIVLH9ErGzcv5ZCHX9FornCc0W/8hL4EdjmpTW2ML2
|
||||||
|
hM5aTKynMiR1GuGSdSpJ+BOeiUI7Go1jGwjV+H9Pw/kfmooq2wuuUGti5dr0Qq7h
|
||||||
|
CQx1a14BmDBwGoMIOdjFATRwnami5e188fAJozL++i+s
|
||||||
|
-----END CERTIFICATE-----
|
28
examples/certs/client.key
Normal file
28
examples/certs/client.key
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
-----BEGIN PRIVATE KEY-----
|
||||||
|
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDhdN0KH80BF3Dk
|
||||||
|
RQqAARcf7F87uNhQM05HXK8ffpESvKhzrO9BHuDZ0yS3il0BK9XpTyTtHSLIbphk
|
||||||
|
rO3BOsmPj0zhaM20LsPtwy8GmMCym3hVNSYYyP5XCdjA3uZIYq2R8ruk+vZTe4Zr
|
||||||
|
F8GHV/xGYU4zKPMGzsQbICjZhj0yiYF9UQ2J+xw79nsqPTmo8+EdVuunLz39dt2o
|
||||||
|
SbDA01g/kPTIg9K2CAUH0mm4zegiqytwpn2JKVoemmgrDYECWnhLprWlvN9t/fX9
|
||||||
|
IgprDAHN1BsMrzfmfQXZpVmbIlTriVSdYVeTwG8rT7Tg8soIHqBrnJ1ykTpY4VrO
|
||||||
|
6tc2z4kTAgMBAAECggEBAMZL7SY8dikhnu+HMgcH7njrg4+ZsthHZ/AoOvcucRbT
|
||||||
|
zC2ByyWxrP6pUUAFeGvRTGHadJYA7FjxvSO/XZZ4yFN2LJ6NeW+jOjzjUXcx3zq4
|
||||||
|
t4vqJUnjbqDLTlPFOTItaJBXuGcRPJqMqNuEl3kdEAwvBYLF34r7TUy2and4NFc/
|
||||||
|
JziGljkiucoNBk62TCDrffnvxMJXht+ab6PMWO87PzMVs4xUFPe0ezv4O54btUcV
|
||||||
|
EJsU58013EHeCai8AnxjcIPlMlB+lg4Y3C4VXf0mJ//cBvbCp+kyWybMw/e+e222
|
||||||
|
xq/98vnCOIqcy4u+9ENPLJQe7hXZ3Sqh38kf0GuOh8ECgYEA+VFvuuBP0OQHTxeE
|
||||||
|
dUizR3Iz/xkeGDUZ/8Ix4TCUmRRuhEXrV7ShwUmuanO3pNhChW6hXZ6qj/yuhfOC
|
||||||
|
D4V4upEnJDccz/cbH1PdBsfALhC8/C0WSGvnEWZMw/SggmY4KwReqWwN9aA8qjdq
|
||||||
|
kFTOJc2Js+dCHP9kn9J3U16A+oMCgYEA53+2lhckAI8bsrbCayWRZAVx7hUNPijt
|
||||||
|
MQvH+PCJ3QeZ0z801zk+4ny5WQ1BT0vRzwj8an4Byi2ZuTQU//N4oawDK0JVYi7q
|
||||||
|
rjKX/AhAx/puoGAgqiS1nDmuiUiplW06HqayCFbpJ1CoXz8+MwdRXiJ8dgioafVJ
|
||||||
|
+7wHZDVmMjECgYEAoULxd/ia58x2hcv6Wzo469+MjlYaxyGhvXJIfRXFJ/a1PU1U
|
||||||
|
Whh1/+W+sRBEGpXfARt7uGhmfle8Mtw8pfl5C4PTw3L6afG1U2AVOMt/HMyq0JoB
|
||||||
|
LbrNbM20nZLfNzkS35AmAoPny5ZnZtoNTWntJTp69SiB9OuklFO35u7bki0CgYAL
|
||||||
|
qQYkVzQMBylI/iWaygChvhh3+n15RQx1bPd8lXkMNgbMeiGKOaruM4QOdTl16ga+
|
||||||
|
W+CC6KfkbBmTF4l7PuMzmXtrYWL1mBFgBtJa8nt41yddUpoyl7jCDrG43n0UNrU3
|
||||||
|
uAO9ocsKnOhuK7xRS6wQhsIoG9WHyMAaOuVQadQk8QKBgQDVibcvOPXNcF1aRMG7
|
||||||
|
V24nBb+YYz+00g/cLRkDnBX9/HORle0HSfeT70ctRhuFCoHHbHF4fnp/iAwDgxdB
|
||||||
|
dNufthftTZTtFGITUsJDN36fSXNjEvKzmKEAlEYkGAYijLlDwknPB+bf4NQ6T0R+
|
||||||
|
AtnKQY6G4kFSfw9AKgWGy7ZKfg==
|
||||||
|
-----END PRIVATE KEY-----
|
18
examples/certs/server.crt
Normal file
18
examples/certs/server.crt
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIC6zCCAdOgAwIBAgIJAMgY8gND5lFnMA0GCSqGSIb3DQEBCwUAMAwxCjAIBgNV
|
||||||
|
BAMMASowHhcNMTcxMjA4MjA1MTQyWhcNMjcxMjA2MjA1MTQyWjAMMQowCAYDVQQD
|
||||||
|
DAEqMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwKcyyXg90wen25yh
|
||||||
|
QA17MDyzjzBsIL9+kznzRD1azoNqA3RShAAWXn5a81HeWvncpVL+TKPMU3UC02XT
|
||||||
|
I6GtX1U7xmdKstBLKiHQxGWX04DshSrVgcVzLUI6OHBG6feoL1mGAa8jB2UEE6ER
|
||||||
|
uXdgYgKbLUrvduSn4fBvPIhhXg9YL2n2TVujkaY9bPZ9M5tQ5K+g4wRwCAYgjTUN
|
||||||
|
55J82uzAsLCs+AQi9D4bLJmw0z2H7enRLkd9sRE2pArhXm4LLg/QlL8I5ZHv7vfl
|
||||||
|
RYdOoC3bjgKk+OVOmb2Fb/dWVlOMcnO8qeo9WyQbhAcjNK2W9Tqk5E5orGZ/bkw/
|
||||||
|
iZc0MwIDAQABo1AwTjAdBgNVHQ4EFgQUA0xmNKQqxUQgaM9ceCzFyocn9jswHwYD
|
||||||
|
VR0jBBgwFoAUA0xmNKQqxUQgaM9ceCzFyocn9jswDAYDVR0TBAUwAwEB/zANBgkq
|
||||||
|
hkiG9w0BAQsFAAOCAQEAnL7VvBTcFyLeNeuTAWmM0bjlwWsuL9Va2LZitnATgzE7
|
||||||
|
ACS+ZNURnpK/o3UHGc2ePDCFgPsF2mnh4Jmye2tl5uPxQS2zR96hp16ZGVi9N1gx
|
||||||
|
4aQyknKt6UFRP/cvWwgDN5N3pnRZQ7J0kaAWCPtAIldeGK7UDjOJ1DLDVLeByr7x
|
||||||
|
27TCt69ysisTtz6Tzr5vvVDEtu2yNIf/uGk3od+pe/0E1UXVCTItvwM30wvfcTPU
|
||||||
|
aMZXBYNmSrjnJ4k/9FSjZYNKPtK1c/JR+zUng1h+I3b7itY5VBGdzdq9fEk20PHm
|
||||||
|
Xdg1Ptbebtl6PJqWX+rydXuen6SUt8vFJE89MkbWSw==
|
||||||
|
-----END CERTIFICATE-----
|
|
@ -17,67 +17,108 @@ package main
|
||||||
import (
|
import (
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"gen-go/apache/aurora"
|
|
||||||
"github.com/rdelval/gorealis"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"log"
|
||||||
"github.com/rdelval/gorealis/response"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
realis "github.com/paypal/gorealis"
|
||||||
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
|
"github.com/paypal/gorealis/response"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var cmd, executor, url, clustersConfig, clusterName, updateId, username, password, zkUrl, hostList, role string
|
||||||
|
var caCertsPath string
|
||||||
|
var clientKey, clientCert string
|
||||||
|
|
||||||
|
var ConnectionTimeout = 20000
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
flag.StringVar(&cmd, "cmd", "", "Job request type to send to Aurora Scheduler")
|
||||||
|
flag.StringVar(&executor, "executor", "thermos", "Executor to use")
|
||||||
|
flag.StringVar(&url, "url", "", "URL at which the Aurora Scheduler exists as [url]:[port]")
|
||||||
|
flag.StringVar(&clustersConfig, "clusters", "", "Location of the clusters.json file used by aurora.")
|
||||||
|
flag.StringVar(&clusterName, "cluster", "devcluster", "Name of cluster to run job on (only necessary if clusters is set)")
|
||||||
|
flag.StringVar(&updateId, "updateId", "", "Update ID to operate on")
|
||||||
|
flag.StringVar(&username, "username", "aurora", "Username to use for authorization")
|
||||||
|
flag.StringVar(&password, "password", "secret", "Password to use for authorization")
|
||||||
|
flag.StringVar(&zkUrl, "zkurl", "", "zookeeper url")
|
||||||
|
flag.StringVar(&hostList, "hostList", "", "Comma separated list of hosts to operate on")
|
||||||
|
flag.StringVar(&role, "role", "", "owner role to use")
|
||||||
|
flag.StringVar(&caCertsPath, "caCertsPath", "", "Path to CA certs on local machine.")
|
||||||
|
flag.StringVar(&clientCert, "clientCert", "", "Client certificate to use to connect to Aurora.")
|
||||||
|
flag.StringVar(&clientKey, "clientKey", "", "Client private key to use to connect to Aurora.")
|
||||||
|
|
||||||
func main() {
|
|
||||||
cmd := flag.String("cmd", "", "Job request type to send to Aurora Scheduler")
|
|
||||||
executor := flag.String("executor", "thermos", "Executor to use")
|
|
||||||
url := flag.String("url", "", "URL at which the Aurora Scheduler exists as [url]:[port]")
|
|
||||||
clustersConfig := flag.String("clusters", "", "Location of the clusters.json file used by aurora.")
|
|
||||||
clusterName := flag.String("cluster", "devcluster", "Name of cluster to run job on")
|
|
||||||
updateId := flag.String("updateId", "", "Update ID to operate on")
|
|
||||||
username := flag.String("username", "aurora", "Username to use for authorization")
|
|
||||||
password := flag.String("password", "secret", "Password to use for authorization")
|
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
// Attempt to load leader from zookeeper
|
// Attempt to load leader from zookeeper using a
|
||||||
if *clustersConfig != "" {
|
// cluster.json file used for the default aurora client if provided.
|
||||||
clusters, err := realis.LoadClusters(*clustersConfig)
|
// This will override the provided url in the arguments
|
||||||
|
if clustersConfig != "" {
|
||||||
|
clusters, err := realis.LoadClusters(clustersConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatalln(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cluster, ok := clusters[*clusterName]
|
cluster, ok := clusters[clusterName]
|
||||||
if !ok {
|
if !ok {
|
||||||
fmt.Printf("Cluster %s chosen doesn't exist\n", *clusterName)
|
log.Fatalf("Cluster %s doesn't exist in the file provided\n", clusterName)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
*url, err = realis.LeaderFromZK(cluster)
|
url, err = realis.LeaderFromZK(cluster)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatalln(err)
|
||||||
os.Exit(1)
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
//Create new configuration with default transport layer
|
func main() {
|
||||||
config, err := realis.NewDefaultConfig(*url)
|
|
||||||
|
var job realis.Job
|
||||||
|
var err error
|
||||||
|
var monitor *realis.Monitor
|
||||||
|
var r realis.Realis
|
||||||
|
|
||||||
|
clientOptions := []realis.ClientOption{
|
||||||
|
realis.BasicAuth(username, password),
|
||||||
|
realis.ThriftJSON(),
|
||||||
|
realis.TimeoutMS(ConnectionTimeout),
|
||||||
|
realis.BackOff(realis.Backoff{
|
||||||
|
Steps: 2,
|
||||||
|
Duration: 10 * time.Second,
|
||||||
|
Factor: 2.0,
|
||||||
|
Jitter: 0.1,
|
||||||
|
}),
|
||||||
|
realis.Debug(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if zkUrl != "" {
|
||||||
|
fmt.Println("zkUrl: ", zkUrl)
|
||||||
|
clientOptions = append(clientOptions, realis.ZKUrl(zkUrl))
|
||||||
|
} else {
|
||||||
|
clientOptions = append(clientOptions, realis.SchedulerUrl(url))
|
||||||
|
}
|
||||||
|
|
||||||
|
if caCertsPath != "" {
|
||||||
|
clientOptions = append(clientOptions, realis.Certspath(caCertsPath))
|
||||||
|
}
|
||||||
|
|
||||||
|
if clientKey != "" && clientCert != "" {
|
||||||
|
clientOptions = append(clientOptions, realis.ClientCerts(clientKey, clientCert))
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err = realis.NewRealisClient(clientOptions...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatalln(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
monitor = &realis.Monitor{Client: r}
|
||||||
// Configured for vagrant
|
|
||||||
realis.AddBasicAuth(&config, *username, *password)
|
|
||||||
r := realis.NewClient(config)
|
|
||||||
defer r.Close()
|
defer r.Close()
|
||||||
|
|
||||||
monitor := &realis.Monitor{r}
|
switch executor {
|
||||||
var job realis.Job
|
|
||||||
|
|
||||||
switch *executor {
|
|
||||||
case "thermos":
|
case "thermos":
|
||||||
payload, err := ioutil.ReadFile("examples/thermos_payload.json")
|
payload, err := ioutil.ReadFile("examples/thermos_payload.json")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println("Error reading json config file: ", err)
|
log.Fatalln("Error reading json config file: ", err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
job = realis.NewJob().
|
job = realis.NewJob().
|
||||||
|
@ -92,162 +133,525 @@ func main() {
|
||||||
IsService(true).
|
IsService(true).
|
||||||
InstanceCount(1).
|
InstanceCount(1).
|
||||||
AddPorts(1)
|
AddPorts(1)
|
||||||
break
|
|
||||||
case "compose":
|
case "compose":
|
||||||
job = realis.NewJob().
|
job = realis.NewJob().
|
||||||
Environment("prod").
|
Environment("prod").
|
||||||
Role("vagrant").
|
Role("vagrant").
|
||||||
Name("docker-compose").
|
Name("docker-compose-test").
|
||||||
ExecutorName("docker-compose-executor").
|
ExecutorName("docker-compose-executor").
|
||||||
ExecutorData("{}").
|
ExecutorData("{}").
|
||||||
|
CPU(0.25).
|
||||||
|
RAM(512).
|
||||||
|
Disk(100).
|
||||||
|
IsService(true).
|
||||||
|
InstanceCount(1).
|
||||||
|
AddPorts(4).
|
||||||
|
AddLabel("fileName", "sample-app/docker-compose.yml").
|
||||||
|
AddURIs(true, true, "https://github.com/mesos/docker-compose-executor/releases/download/0.1.0/sample-app.tar.gz")
|
||||||
|
case "none":
|
||||||
|
job = realis.NewJob().
|
||||||
|
Environment("prod").
|
||||||
|
Role("vagrant").
|
||||||
|
Name("docker_as_task").
|
||||||
CPU(1).
|
CPU(1).
|
||||||
RAM(64).
|
RAM(64).
|
||||||
Disk(100).
|
Disk(100).
|
||||||
IsService(false).
|
IsService(true).
|
||||||
InstanceCount(1).
|
InstanceCount(1).
|
||||||
AddPorts(1).
|
AddPorts(1)
|
||||||
AddLabel("fileName", "sample-app/docker-compose.yml").
|
|
||||||
AddURIs(true, true, "https://github.com/mesos/docker-compose-executor/releases/download/0.1.0/sample-app.tar.gz")
|
|
||||||
break
|
|
||||||
default:
|
default:
|
||||||
fmt.Println("Only thermos and compose are supported for now")
|
log.Fatalln("Only thermos, compose, and none are supported for now")
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
switch *cmd {
|
switch cmd {
|
||||||
case "create":
|
case "create":
|
||||||
fmt.Println("Creating job")
|
fmt.Println("Creating job")
|
||||||
resp, err := r.CreateJob(job)
|
resp, err := r.CreateJob(job)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatalln(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
fmt.Println(resp.String())
|
fmt.Println(resp.String())
|
||||||
|
|
||||||
if(resp.ResponseCode == aurora.ResponseCode_OK) {
|
if ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50); !ok || mErr != nil {
|
||||||
if(!monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50)) {
|
|
||||||
_, err := r.KillJob(job.JobKey())
|
_, err := r.KillJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatalln(err)
|
||||||
os.Exit(1)
|
}
|
||||||
|
log.Fatalf("ok: %v\n err: %v", ok, mErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "createService":
|
||||||
|
// Create a service with three instances using the update API instead of the createJob API
|
||||||
|
fmt.Println("Creating service")
|
||||||
|
settings := realis.NewUpdateSettings()
|
||||||
|
job.InstanceCount(3)
|
||||||
|
resp, result, err := r.CreateService(job, settings)
|
||||||
|
if err != nil {
|
||||||
|
log.Println("error: ", err)
|
||||||
|
log.Fatal("response: ", resp.String())
|
||||||
|
}
|
||||||
|
fmt.Println(result.String())
|
||||||
|
|
||||||
|
if ok, mErr := monitor.JobUpdate(*result.GetKey(), 5, 180); !ok || mErr != nil {
|
||||||
|
_, err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out")
|
||||||
|
_, err = r.KillJob(job.JobKey())
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
log.Fatalf("ok: %v\n err: %v", ok, mErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
case "createDocker":
|
||||||
|
fmt.Println("Creating a docker based job")
|
||||||
|
container := realis.NewDockerContainer().Image("python:2.7").AddParameter("network", "host")
|
||||||
|
job.Container(container)
|
||||||
|
resp, err := r.CreateJob(job)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Println(resp.String())
|
||||||
|
|
||||||
|
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
|
||||||
|
_, err := r.KillJob(job.JobKey())
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
case "createMesosContainer":
|
||||||
|
fmt.Println("Creating a docker based job")
|
||||||
|
container := realis.NewMesosContainer().DockerImage("python", "2.7")
|
||||||
|
job.Container(container)
|
||||||
|
resp, err := r.CreateJob(job)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
break
|
fmt.Println(resp.String())
|
||||||
|
|
||||||
|
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
|
||||||
|
_, err := r.KillJob(job.JobKey())
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case "scheduleCron":
|
||||||
|
fmt.Println("Scheduling a Cron job")
|
||||||
|
// Cron config
|
||||||
|
job.CronSchedule("* * * * *")
|
||||||
|
job.IsService(false)
|
||||||
|
resp, err := r.ScheduleCronJob(job)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Println(resp.String())
|
||||||
|
|
||||||
|
case "startCron":
|
||||||
|
fmt.Println("Starting a Cron job")
|
||||||
|
resp, err := r.StartCronJob(job.JobKey())
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Println(resp.String())
|
||||||
|
|
||||||
|
case "descheduleCron":
|
||||||
|
fmt.Println("Descheduling a Cron job")
|
||||||
|
resp, err := r.DescheduleCronJob(job.JobKey())
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Println(resp.String())
|
||||||
|
|
||||||
case "kill":
|
case "kill":
|
||||||
fmt.Println("Killing job")
|
fmt.Println("Killing job")
|
||||||
|
|
||||||
resp, err := r.KillJob(job.JobKey())
|
resp, err := r.KillJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatal(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if(resp.ResponseCode == aurora.ResponseCode_OK) {
|
if ok, err := monitor.Instances(job.JobKey(), 0, 5, 50); !ok || err != nil {
|
||||||
if(!monitor.Instances(job.JobKey(), 0, 5, 50)) {
|
log.Fatal("Unable to kill all instances of job")
|
||||||
fmt.Println("Unable to kill all instances of job")
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
fmt.Println(resp.String())
|
fmt.Println(resp.String())
|
||||||
break
|
|
||||||
case "restart":
|
case "restart":
|
||||||
fmt.Println("Restarting job")
|
fmt.Println("Restarting job")
|
||||||
resp, err := r.RestartJob(job.JobKey())
|
resp, err := r.RestartJob(job.JobKey())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatal(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(resp.String())
|
fmt.Println(resp.String())
|
||||||
break
|
|
||||||
case "liveCount":
|
case "liveCount":
|
||||||
fmt.Println("Getting instance count")
|
fmt.Println("Getting instance count")
|
||||||
|
|
||||||
live, err := r.GetInstanceIds(job.JobKey(), aurora.LIVE_STATES)
|
live, err := r.GetInstanceIds(job.JobKey(), aurora.LIVE_STATES)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatal(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("Number of live instances: ", len(live))
|
fmt.Printf("Live instances: %+v\n", live)
|
||||||
break
|
|
||||||
case "activeCount":
|
case "activeCount":
|
||||||
fmt.Println("Getting instance count")
|
fmt.Println("Getting instance count")
|
||||||
|
|
||||||
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
|
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatal(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("Number of live instances: ", len(live))
|
fmt.Println("Number of live instances: ", len(live))
|
||||||
break
|
|
||||||
case "flexUp":
|
case "flexUp":
|
||||||
fmt.Println("Flexing up job")
|
fmt.Println("Flexing up job")
|
||||||
|
|
||||||
numOfInstances := int32(5)
|
numOfInstances := int32(4)
|
||||||
resp, err := r.AddInstances(aurora.InstanceKey{job.JobKey(), 0}, numOfInstances)
|
|
||||||
|
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatal(err)
|
||||||
os.Exit(1)
|
}
|
||||||
|
currInstances := int32(len(live))
|
||||||
|
fmt.Println("Current num of instances: ", currInstances)
|
||||||
|
resp, err := r.AddInstances(aurora.InstanceKey{
|
||||||
|
JobKey: job.JobKey(),
|
||||||
|
InstanceId: live[0],
|
||||||
|
},
|
||||||
|
numOfInstances)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if(resp.ResponseCode == aurora.ResponseCode_OK) {
|
if ok, err := monitor.Instances(job.JobKey(), currInstances+numOfInstances, 5, 50); !ok || err != nil {
|
||||||
if(!monitor.Instances(job.JobKey(), job.GetInstanceCount()+numOfInstances, 5, 50)) {
|
|
||||||
fmt.Println("Flexing up failed")
|
fmt.Println("Flexing up failed")
|
||||||
}
|
}
|
||||||
}
|
|
||||||
fmt.Println(resp.String())
|
fmt.Println(resp.String())
|
||||||
break
|
|
||||||
case "update":
|
case "flexDown":
|
||||||
fmt.Println("Updating a job with with more RAM and to 3 instances")
|
fmt.Println("Flexing down job")
|
||||||
taskConfig, err := r.FetchTaskConfig(aurora.InstanceKey{job.JobKey(), 0})
|
|
||||||
|
numOfInstances := int32(2)
|
||||||
|
|
||||||
|
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatal(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
updateJob := realis.NewUpdateJob(taskConfig)
|
currInstances := int32(len(live))
|
||||||
|
fmt.Println("Current num of instances: ", currInstances)
|
||||||
|
|
||||||
|
resp, err := r.RemoveInstances(job.JobKey(), numOfInstances)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok, err := monitor.Instances(job.JobKey(), currInstances-numOfInstances, 5, 100); !ok || err != nil {
|
||||||
|
fmt.Println("flexDown failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println(resp.String())
|
||||||
|
|
||||||
|
case "update":
|
||||||
|
fmt.Println("Updating a job with with more RAM and to 5 instances")
|
||||||
|
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
taskConfig, err := r.FetchTaskConfig(aurora.InstanceKey{
|
||||||
|
JobKey: job.JobKey(),
|
||||||
|
InstanceId: live[0],
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
updateJob := realis.NewDefaultUpdateJob(taskConfig)
|
||||||
updateJob.InstanceCount(5).RAM(128)
|
updateJob.InstanceCount(5).RAM(128)
|
||||||
|
|
||||||
resp, err := r.StartJobUpdate(updateJob, "")
|
resp, err := r.StartJobUpdate(updateJob, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatal(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
jobUpdateKey := response.JobUpdateKey(resp)
|
jobUpdateKey := response.JobUpdateKey(resp)
|
||||||
monitor.JobUpdate(*jobUpdateKey, 5, 100)
|
monitor.JobUpdate(*jobUpdateKey, 5, 500)
|
||||||
|
|
||||||
|
case "pauseJobUpdate":
|
||||||
|
resp, err := r.PauseJobUpdate(&aurora.JobUpdateKey{
|
||||||
|
Job: job.JobKey(),
|
||||||
|
ID: updateId,
|
||||||
|
}, "")
|
||||||
|
|
||||||
break
|
|
||||||
case "updateDetails":
|
|
||||||
resp, err := r.JobUpdateDetails(aurora.JobUpdateKey{job.JobKey(), *updateId})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatal(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
response.JobUpdateDetails(resp)
|
fmt.Println("PauseJobUpdate response: ", resp.String())
|
||||||
break
|
|
||||||
|
case "resumeJobUpdate":
|
||||||
|
resp, err := r.ResumeJobUpdate(&aurora.JobUpdateKey{
|
||||||
|
Job: job.JobKey(),
|
||||||
|
ID: updateId,
|
||||||
|
}, "")
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Println("ResumeJobUpdate response: ", resp.String())
|
||||||
|
|
||||||
|
case "pulseJobUpdate":
|
||||||
|
resp, err := r.PulseJobUpdate(&aurora.JobUpdateKey{
|
||||||
|
Job: job.JobKey(),
|
||||||
|
ID: updateId,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("PulseJobUpdate response: ", resp.String())
|
||||||
|
|
||||||
|
case "updateDetails":
|
||||||
|
resp, err := r.JobUpdateDetails(aurora.JobUpdateQuery{
|
||||||
|
Key: &aurora.JobUpdateKey{
|
||||||
|
Job: job.JobKey(),
|
||||||
|
ID: updateId,
|
||||||
|
},
|
||||||
|
Limit: 1,
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println(response.JobUpdateDetails(resp))
|
||||||
|
|
||||||
case "abortUpdate":
|
case "abortUpdate":
|
||||||
fmt.Println("Abort update")
|
fmt.Println("Abort update")
|
||||||
resp, err := r.AbortJobUpdate(aurora.JobUpdateKey{job.JobKey(), *updateId}, "")
|
resp, err := r.AbortJobUpdate(aurora.JobUpdateKey{
|
||||||
|
Job: job.JobKey(),
|
||||||
|
ID: updateId,
|
||||||
|
},
|
||||||
|
"")
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatal(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
fmt.Println(resp.String())
|
fmt.Println(resp.String())
|
||||||
break
|
|
||||||
|
case "rollbackUpdate":
|
||||||
|
fmt.Println("Abort update")
|
||||||
|
resp, err := r.RollbackJobUpdate(aurora.JobUpdateKey{
|
||||||
|
Job: job.JobKey(),
|
||||||
|
ID: updateId,
|
||||||
|
},
|
||||||
|
"")
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
fmt.Println(resp.String())
|
||||||
|
|
||||||
case "taskConfig":
|
case "taskConfig":
|
||||||
fmt.Println("Getting job info")
|
fmt.Println("Getting job info")
|
||||||
config, err := r.FetchTaskConfig(aurora.InstanceKey{job.JobKey(), 0})
|
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
log.Fatal(err)
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
print(config.String())
|
config, err := r.FetchTaskConfig(aurora.InstanceKey{
|
||||||
break
|
JobKey: job.JobKey(),
|
||||||
|
InstanceId: live[0],
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Println(config.String())
|
||||||
|
|
||||||
|
case "updatesummary":
|
||||||
|
fmt.Println("Getting job update summary")
|
||||||
|
jobquery := &aurora.JobUpdateQuery{
|
||||||
|
Role: &job.JobKey().Role,
|
||||||
|
JobKey: job.JobKey(),
|
||||||
|
}
|
||||||
|
updatesummary, err := r.GetJobUpdateSummaries(jobquery)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error while getting update summary: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println(updatesummary)
|
||||||
|
|
||||||
|
case "taskStatus":
|
||||||
|
fmt.Println("Getting task status")
|
||||||
|
taskQ := &aurora.TaskQuery{
|
||||||
|
Role: &job.JobKey().Role,
|
||||||
|
Environment: &job.JobKey().Environment,
|
||||||
|
JobName: &job.JobKey().Name,
|
||||||
|
}
|
||||||
|
tasks, err := r.GetTaskStatus(taskQ)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("length: %d\n ", len(tasks))
|
||||||
|
fmt.Printf("tasks: %+v\n", tasks)
|
||||||
|
|
||||||
|
case "tasksWithoutConfig":
|
||||||
|
fmt.Println("Getting task status")
|
||||||
|
taskQ := &aurora.TaskQuery{
|
||||||
|
Role: &job.JobKey().Role,
|
||||||
|
Environment: &job.JobKey().Environment,
|
||||||
|
JobName: &job.JobKey().Name,
|
||||||
|
}
|
||||||
|
tasks, err := r.GetTasksWithoutConfigs(taskQ)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("length: %d\n ", len(tasks))
|
||||||
|
fmt.Printf("tasks: %+v\n", tasks)
|
||||||
|
|
||||||
|
case "drainHosts":
|
||||||
|
fmt.Println("Setting hosts to DRAINING")
|
||||||
|
if hostList == "" {
|
||||||
|
log.Fatal("No hosts specified to drain")
|
||||||
|
}
|
||||||
|
hosts := strings.Split(hostList, ",")
|
||||||
|
_, result, err := r.DrainHosts(hosts...)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Monitor change to DRAINING and DRAINED mode
|
||||||
|
hostResult, err := monitor.HostMaintenance(
|
||||||
|
hosts,
|
||||||
|
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
|
||||||
|
5,
|
||||||
|
10)
|
||||||
|
if err != nil {
|
||||||
|
for host, ok := range hostResult {
|
||||||
|
if !ok {
|
||||||
|
fmt.Printf("Host %s did not transtion into desired mode(s)\n", host)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Print(result.String())
|
||||||
|
|
||||||
|
case "SLADrainHosts":
|
||||||
|
fmt.Println("Setting hosts to DRAINING using SLA aware draining")
|
||||||
|
if hostList == "" {
|
||||||
|
log.Fatal("No hosts specified to drain")
|
||||||
|
}
|
||||||
|
hosts := strings.Split(hostList, ",")
|
||||||
|
|
||||||
|
policy := aurora.SlaPolicy{PercentageSlaPolicy: &aurora.PercentageSlaPolicy{Percentage: 50.0}}
|
||||||
|
|
||||||
|
result, err := r.SLADrainHosts(&policy, 30, hosts...)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Monitor change to DRAINING and DRAINED mode
|
||||||
|
hostResult, err := monitor.HostMaintenance(
|
||||||
|
hosts,
|
||||||
|
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
|
||||||
|
5,
|
||||||
|
10)
|
||||||
|
if err != nil {
|
||||||
|
for host, ok := range hostResult {
|
||||||
|
if !ok {
|
||||||
|
fmt.Printf("Host %s did not transtion into desired mode(s)\n", host)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Print(result.String())
|
||||||
|
|
||||||
|
case "endMaintenance":
|
||||||
|
fmt.Println("Setting hosts to ACTIVE")
|
||||||
|
if hostList == "" {
|
||||||
|
log.Fatal("No hosts specified to drain")
|
||||||
|
}
|
||||||
|
hosts := strings.Split(hostList, ",")
|
||||||
|
_, result, err := r.EndMaintenance(hosts...)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Monitor change to DRAINING and DRAINED mode
|
||||||
|
hostResult, err := monitor.HostMaintenance(
|
||||||
|
hosts,
|
||||||
|
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
|
||||||
|
5,
|
||||||
|
10)
|
||||||
|
if err != nil {
|
||||||
|
for host, ok := range hostResult {
|
||||||
|
if !ok {
|
||||||
|
fmt.Printf("Host %s did not transtion into desired mode(s)\n", host)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Print(result.String())
|
||||||
|
|
||||||
|
case "getPendingReasons":
|
||||||
|
fmt.Println("Getting pending reasons")
|
||||||
|
taskQ := &aurora.TaskQuery{
|
||||||
|
Role: &job.JobKey().Role,
|
||||||
|
Environment: &job.JobKey().Environment,
|
||||||
|
JobName: &job.JobKey().Name,
|
||||||
|
}
|
||||||
|
reasons, err := r.GetPendingReason(taskQ)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n ", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("length: %d\n ", len(reasons))
|
||||||
|
fmt.Printf("tasks: %+v\n", reasons)
|
||||||
|
|
||||||
|
case "getJobs":
|
||||||
|
fmt.Println("GetJobs...role: ", role)
|
||||||
|
_, result, err := r.GetJobs(role)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
fmt.Println("map size: ", len(result.Configs))
|
||||||
|
fmt.Println(result.String())
|
||||||
|
|
||||||
|
case "snapshot":
|
||||||
|
fmt.Println("Forcing scheduler to write snapshot to mesos replicated log")
|
||||||
|
err := r.Snapshot()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
case "performBackup":
|
||||||
|
fmt.Println("Writing Backup of Snapshot to file system")
|
||||||
|
err := r.PerformBackup()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
case "forceExplicitRecon":
|
||||||
|
fmt.Println("Force an explicit recon")
|
||||||
|
err := r.ForceExplicitTaskReconciliation(nil)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
case "forceImplicitRecon":
|
||||||
|
fmt.Println("Force an implicit recon")
|
||||||
|
err := r.ForceImplicitTaskReconciliation()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("error: %+v\n", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
fmt.Println("Command not supported")
|
log.Fatal("Command not supported")
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
13
examples/config.json
Normal file
13
examples/config.json
Normal file
|
@ -0,0 +1,13 @@
|
||||||
|
{
|
||||||
|
"username": "aurora",
|
||||||
|
"password": "secret",
|
||||||
|
"sched_url": "http://192.168.33.7:8081",
|
||||||
|
"cluster" : {
|
||||||
|
"name": "devcluster",
|
||||||
|
"zk": "192.168.33.7",
|
||||||
|
"scheduler_zk_path": "/aurora/scheduler",
|
||||||
|
"auth_mechanism": "UNAUTHENTICATED",
|
||||||
|
"slave_run_directory": "latest",
|
||||||
|
"slave_root": "/var/lib/mesos"
|
||||||
|
}
|
||||||
|
}
|
21
examples/job_dce.json
Normal file
21
examples/job_dce.json
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
{
|
||||||
|
"name": "sampleapp",
|
||||||
|
"cpu": 0.25,
|
||||||
|
"ram_mb": 256,
|
||||||
|
"disk_mb": 100,
|
||||||
|
"executor": "docker-compose-executor",
|
||||||
|
"service": true,
|
||||||
|
"ports": 4,
|
||||||
|
"instances": 1,
|
||||||
|
"uris": [
|
||||||
|
{
|
||||||
|
"uri": "http://192.168.33.8/app.tar.gz",
|
||||||
|
"extract": true,
|
||||||
|
"cache": false
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"labels":{
|
||||||
|
"fileName":"sampleapp/docker-compose.yml,sampleapp/docker-compose-healthcheck.yml"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
11
examples/job_thermos.json
Normal file
11
examples/job_thermos.json
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
{
|
||||||
|
"name": "hello_world_from_gorealis",
|
||||||
|
"cpu": 1.0,
|
||||||
|
"ram_mb": 64,
|
||||||
|
"disk_mb": 100,
|
||||||
|
"executor": "thermos",
|
||||||
|
"exec_data_file": "examples/thermos_payload.json",
|
||||||
|
"service": true,
|
||||||
|
"ports": 1,
|
||||||
|
"instances": 1
|
||||||
|
}
|
|
@ -18,8 +18,14 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/rdelval/gorealis"
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
"os"
|
"os"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
realis "github.com/paypal/gorealis"
|
||||||
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
type URIJson struct {
|
type URIJson struct {
|
||||||
|
@ -34,6 +40,7 @@ type JobJson struct {
|
||||||
RAM int64 `json:"ram_mb"`
|
RAM int64 `json:"ram_mb"`
|
||||||
Disk int64 `json:"disk_mb"`
|
Disk int64 `json:"disk_mb"`
|
||||||
Executor string `json:"executor"`
|
Executor string `json:"executor"`
|
||||||
|
ExecutorDataFile string `json:"exec_data_file,omitempty"`
|
||||||
Instances int32 `json:"instances"`
|
Instances int32 `json:"instances"`
|
||||||
URIs []URIJson `json:"uris"`
|
URIs []URIJson `json:"uris"`
|
||||||
Labels map[string]string `json:"labels"`
|
Labels map[string]string `json:"labels"`
|
||||||
|
@ -62,67 +69,158 @@ func (j *JobJson) Validate() bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
type Config struct {
|
||||||
|
realis.Cluster `json:"cluster"`
|
||||||
|
Username string `json:"username"`
|
||||||
|
Password string `json:"password"`
|
||||||
|
SchedUrl string `json:"sched_url"`
|
||||||
|
Transport string `json:"transport,omitempty"`
|
||||||
|
Debug bool `json:"debug,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Command-line arguments for config and job JSON files.
|
||||||
|
var configJSONFile, jobJSONFile string
|
||||||
|
|
||||||
|
var job *JobJson
|
||||||
|
var config *Config
|
||||||
|
|
||||||
|
// Reading command line arguments and validating.
|
||||||
|
// If Aurora scheduler URL not provided, then using zookeeper to locate the leader.
|
||||||
|
func init() {
|
||||||
|
flag.StringVar(&configJSONFile, "config", "./config.json", "The config file that contains username, password, and the cluster configuration information.")
|
||||||
|
flag.StringVar(&jobJSONFile, "job", "./job.json", "JSON file containing job definitions.")
|
||||||
|
|
||||||
jsonFile := flag.String("file", "", "JSON file containing job definition")
|
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
if *jsonFile == "" {
|
job = new(JobJson)
|
||||||
|
config = new(Config)
|
||||||
|
|
||||||
|
if jobsFile, jobJSONReadErr := os.Open(jobJSONFile); jobJSONReadErr != nil {
|
||||||
flag.Usage()
|
flag.Usage()
|
||||||
|
fmt.Println("Error reading the job JSON file: ", jobJSONReadErr)
|
||||||
|
os.Exit(1)
|
||||||
|
} else {
|
||||||
|
if unmarshallErr := json.NewDecoder(jobsFile).Decode(job); unmarshallErr != nil {
|
||||||
|
flag.Usage()
|
||||||
|
fmt.Println("Error parsing job json file: ", unmarshallErr)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
file, err := os.Open(*jsonFile)
|
// Need to validate the job JSON file.
|
||||||
if err != nil {
|
if !job.Validate() {
|
||||||
fmt.Println("Error opening file ", err)
|
fmt.Println("Invalid Job.")
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if configFile, configJSONErr := os.Open(configJSONFile); configJSONErr != nil {
|
||||||
|
flag.Usage()
|
||||||
|
fmt.Println("Error reading the config JSON file: ", configJSONErr)
|
||||||
|
os.Exit(1)
|
||||||
|
} else {
|
||||||
|
if unmarshallErr := json.NewDecoder(configFile).Decode(config); unmarshallErr != nil {
|
||||||
|
fmt.Println("Error parsing config JSON file: ", unmarshallErr)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateRealisClient(config *Config) (realis.Realis, error) {
|
||||||
|
var transportOption realis.ClientOption
|
||||||
|
// Configuring transport protocol. If not transport is provided, then using JSON as the
|
||||||
|
// default transport protocol.
|
||||||
|
switch config.Transport {
|
||||||
|
case "binary":
|
||||||
|
transportOption = realis.ThriftBinary()
|
||||||
|
case "json", "":
|
||||||
|
transportOption = realis.ThriftJSON()
|
||||||
|
default:
|
||||||
|
fmt.Println("Invalid transport option provided!")
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
jsonJob := new(JobJson)
|
clientOptions := []realis.ClientOption{
|
||||||
|
realis.BasicAuth(config.Username, config.Password),
|
||||||
err = json.NewDecoder(file).Decode(jsonJob)
|
transportOption,
|
||||||
if err != nil {
|
realis.ZKCluster(&config.Cluster),
|
||||||
fmt.Println("Error parsing file ", err)
|
// realis.SchedulerUrl(config.SchedUrl),
|
||||||
os.Exit(1)
|
realis.SetLogger(log.New(os.Stdout, "realis-debug: ", log.Ldate)),
|
||||||
|
realis.BackOff(realis.Backoff{
|
||||||
|
Steps: 2,
|
||||||
|
Duration: 10 * time.Second,
|
||||||
|
Factor: 2.0,
|
||||||
|
Jitter: 0.1,
|
||||||
|
}),
|
||||||
}
|
}
|
||||||
|
|
||||||
jsonJob.Validate()
|
if config.Debug {
|
||||||
|
clientOptions = append(clientOptions, realis.Debug())
|
||||||
//Create new configuration with default transport layer
|
|
||||||
config, err := realis.NewDefaultConfig("http://192.168.33.7:8081")
|
|
||||||
if err != nil {
|
|
||||||
fmt.Print(err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
realis.AddBasicAuth(&config, "aurora", "secret")
|
return realis.NewRealisClient(clientOptions...)
|
||||||
r := realis.NewClient(config)
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if r, clientCreationErr := CreateRealisClient(config); clientCreationErr != nil {
|
||||||
|
fmt.Println(clientCreationErr)
|
||||||
|
os.Exit(1)
|
||||||
|
} else {
|
||||||
|
monitor := &realis.Monitor{Client: r}
|
||||||
|
defer r.Close()
|
||||||
|
uris := job.URIs
|
||||||
|
labels := job.Labels
|
||||||
|
|
||||||
auroraJob := realis.NewJob().
|
auroraJob := realis.NewJob().
|
||||||
Environment("prod").
|
Environment("prod").
|
||||||
Role("vagrant").
|
Role("vagrant").
|
||||||
Name(jsonJob.Name).
|
Name(job.Name).
|
||||||
CPU(jsonJob.CPU).
|
CPU(job.CPU).
|
||||||
RAM(jsonJob.RAM).
|
RAM(job.RAM).
|
||||||
Disk(jsonJob.Disk).
|
Disk(job.Disk).
|
||||||
ExecutorName(jsonJob.Executor).
|
IsService(job.Service).
|
||||||
InstanceCount(jsonJob.Instances).
|
InstanceCount(job.Instances).
|
||||||
IsService(jsonJob.Service).
|
AddPorts(job.Ports)
|
||||||
AddPorts(jsonJob.Ports)
|
|
||||||
|
|
||||||
for _, uri := range jsonJob.URIs {
|
// If thermos executor, then reading in the thermos payload.
|
||||||
|
if (job.Executor == aurora.AURORA_EXECUTOR_NAME) || (job.Executor == "thermos") {
|
||||||
|
payload, err := ioutil.ReadFile(job.ExecutorDataFile)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(errors.Wrap(err, "Invalid thermos payload file!"))
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
auroraJob.ExecutorName(aurora.AURORA_EXECUTOR_NAME).
|
||||||
|
ExecutorData(string(payload))
|
||||||
|
} else {
|
||||||
|
auroraJob.ExecutorName(job.Executor)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Adding URIs.
|
||||||
|
for _, uri := range uris {
|
||||||
auroraJob.AddURIs(uri.Extract, uri.Cache, uri.URI)
|
auroraJob.AddURIs(uri.Extract, uri.Cache, uri.URI)
|
||||||
}
|
}
|
||||||
|
|
||||||
for k, v := range jsonJob.Labels {
|
// Adding Labels.
|
||||||
auroraJob.AddLabel(k, v)
|
for key, value := range labels {
|
||||||
|
auroraJob.AddLabel(key, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := r.CreateJob(auroraJob)
|
fmt.Println("Creating Job...")
|
||||||
if err != nil {
|
if resp, jobCreationErr := r.CreateJob(auroraJob); jobCreationErr != nil {
|
||||||
fmt.Println(err)
|
fmt.Println("Error creating Aurora job: ", jobCreationErr)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
|
} else {
|
||||||
|
if resp.ResponseCode == aurora.ResponseCode_OK {
|
||||||
|
if ok, monitorErr := monitor.Instances(auroraJob.JobKey(), auroraJob.GetInstanceCount(), 5, 50); !ok || monitorErr != nil {
|
||||||
|
if _, jobErr := r.KillJob(auroraJob.JobKey()); jobErr !=
|
||||||
|
nil {
|
||||||
|
fmt.Println(jobErr)
|
||||||
|
os.Exit(1)
|
||||||
|
} else {
|
||||||
|
fmt.Println("ok: ", ok)
|
||||||
|
fmt.Println("jobErr: ", jobErr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println(resp)
|
|
||||||
}
|
}
|
||||||
|
|
62
examples/thermos_cron_payload.json
Normal file
62
examples/thermos_cron_payload.json
Normal file
|
@ -0,0 +1,62 @@
|
||||||
|
{
|
||||||
|
"environment": "prod",
|
||||||
|
"health_check_config": {
|
||||||
|
"initial_interval_secs": 15.0,
|
||||||
|
"health_checker": {
|
||||||
|
"http": {
|
||||||
|
"expected_response_code": 0,
|
||||||
|
"endpoint": "/health",
|
||||||
|
"expected_response": "ok"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"interval_secs": 10.0,
|
||||||
|
"timeout_secs": 1.0,
|
||||||
|
"max_consecutive_failures": 0
|
||||||
|
},
|
||||||
|
"name": "hello_world_from_gorealis",
|
||||||
|
"service": false,
|
||||||
|
"max_task_failures": 1,
|
||||||
|
"cron_collision_policy": "KILL_EXISTING",
|
||||||
|
"enable_hooks": false,
|
||||||
|
"cluster": "devcluster",
|
||||||
|
"task": {
|
||||||
|
"processes": [
|
||||||
|
{
|
||||||
|
"daemon": false,
|
||||||
|
"name": "hello",
|
||||||
|
"ephemeral": false,
|
||||||
|
"max_failures": 1,
|
||||||
|
"min_duration": 5,
|
||||||
|
"cmdline": "echo hello world from gorealis; sleep 10",
|
||||||
|
"final": false
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"name": "hello",
|
||||||
|
"finalization_wait": 30,
|
||||||
|
"max_failures": 1,
|
||||||
|
"max_concurrency": 0,
|
||||||
|
"resources": {
|
||||||
|
"gpu": 0,
|
||||||
|
"disk": 134217728,
|
||||||
|
"ram": 134217728,
|
||||||
|
"cpu": 1.0
|
||||||
|
},
|
||||||
|
"constraints": [
|
||||||
|
{
|
||||||
|
"order": [
|
||||||
|
"hello"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"production": false,
|
||||||
|
"role": "vagrant",
|
||||||
|
"lifecycle": {
|
||||||
|
"http": {
|
||||||
|
"graceful_shutdown_endpoint": "/quitquitquit",
|
||||||
|
"port": "health",
|
||||||
|
"shutdown_endpoint": "/abortabortabort"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"priority": 0
|
||||||
|
}
|
6
gen-go/apache/aurora/GoUnusedProtection__.go
Normal file
6
gen-go/apache/aurora/GoUnusedProtection__.go
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
|
||||||
|
|
||||||
|
package aurora
|
||||||
|
|
||||||
|
var GoUnusedProtection__ int;
|
||||||
|
|
53
gen-go/apache/aurora/auroraAPI-consts.go
Normal file
53
gen-go/apache/aurora/auroraAPI-consts.go
Normal file
|
@ -0,0 +1,53 @@
|
||||||
|
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
|
||||||
|
|
||||||
|
package aurora
|
||||||
|
|
||||||
|
import(
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
"github.com/apache/thrift/lib/go/thrift"
|
||||||
|
)
|
||||||
|
|
||||||
|
// (needed to ensure safety because of naive import list construction.)
|
||||||
|
var _ = thrift.ZERO
|
||||||
|
var _ = fmt.Printf
|
||||||
|
var _ = context.Background
|
||||||
|
var _ = time.Now
|
||||||
|
var _ = bytes.Equal
|
||||||
|
|
||||||
|
const AURORA_EXECUTOR_NAME = "AuroraExecutor"
|
||||||
|
var ACTIVE_STATES []ScheduleStatus
|
||||||
|
var SLAVE_ASSIGNED_STATES []ScheduleStatus
|
||||||
|
var LIVE_STATES []ScheduleStatus
|
||||||
|
var TERMINAL_STATES []ScheduleStatus
|
||||||
|
const GOOD_IDENTIFIER_PATTERN = "^[\\w\\-\\.]+$"
|
||||||
|
const GOOD_IDENTIFIER_PATTERN_JVM = "^[\\w\\-\\.]+$"
|
||||||
|
const GOOD_IDENTIFIER_PATTERN_PYTHON = "^[\\w\\-\\.]+$"
|
||||||
|
var ACTIVE_JOB_UPDATE_STATES []JobUpdateStatus
|
||||||
|
var AWAITNG_PULSE_JOB_UPDATE_STATES []JobUpdateStatus
|
||||||
|
const BYPASS_LEADER_REDIRECT_HEADER_NAME = "Bypass-Leader-Redirect"
|
||||||
|
const TASK_FILESYSTEM_MOUNT_POINT = "taskfs"
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
ACTIVE_STATES = []ScheduleStatus{
|
||||||
|
9, 17, 6, 0, 13, 12, 2, 1, 18, 16, }
|
||||||
|
|
||||||
|
SLAVE_ASSIGNED_STATES = []ScheduleStatus{
|
||||||
|
9, 17, 6, 13, 12, 2, 18, 1, }
|
||||||
|
|
||||||
|
LIVE_STATES = []ScheduleStatus{
|
||||||
|
6, 13, 12, 17, 18, 2, }
|
||||||
|
|
||||||
|
TERMINAL_STATES = []ScheduleStatus{
|
||||||
|
4, 3, 5, 7, }
|
||||||
|
|
||||||
|
ACTIVE_JOB_UPDATE_STATES = []JobUpdateStatus{
|
||||||
|
0, 1, 2, 3, 9, 10, }
|
||||||
|
|
||||||
|
AWAITNG_PULSE_JOB_UPDATE_STATES = []JobUpdateStatus{
|
||||||
|
9, 10, }
|
||||||
|
|
||||||
|
}
|
||||||
|
|
32711
gen-go/apache/aurora/auroraAPI.go
Normal file
32711
gen-go/apache/aurora/auroraAPI.go
Normal file
File diff suppressed because it is too large
Load diff
1198
gen-go/apache/aurora/aurora_admin-remote/aurora_admin-remote.go
Executable file
1198
gen-go/apache/aurora/aurora_admin-remote/aurora_admin-remote.go
Executable file
File diff suppressed because it is too large
Load diff
|
@ -0,0 +1,830 @@
|
||||||
|
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"github.com/apache/thrift/lib/go/thrift"
|
||||||
|
"apache/aurora"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = aurora.GoUnusedProtection__
|
||||||
|
|
||||||
|
func Usage() {
|
||||||
|
fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
|
||||||
|
flag.PrintDefaults()
|
||||||
|
fmt.Fprintln(os.Stderr, "\nFunctions:")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response createJob(JobConfiguration description)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response scheduleCronJob(JobConfiguration description)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response descheduleCronJob(JobKey job)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response startCronJob(JobKey job)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response restartShards(JobKey job, shardIds)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response killTasks(JobKey job, instances, string message)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response addInstances(InstanceKey key, i32 count)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response replaceCronTemplate(JobConfiguration config)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response startJobUpdate(JobUpdateRequest request, string message)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response pauseJobUpdate(JobUpdateKey key, string message)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response resumeJobUpdate(JobUpdateKey key, string message)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response abortJobUpdate(JobUpdateKey key, string message)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response rollbackJobUpdate(JobUpdateKey key, string message)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response pulseJobUpdate(JobUpdateKey key)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getRoleSummary()")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getJobSummary(string role)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getTasksStatus(TaskQuery query)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getTasksWithoutConfigs(TaskQuery query)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getPendingReason(TaskQuery query)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getConfigSummary(JobKey job)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getJobs(string ownerRole)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getQuota(string ownerRole)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response populateJobConfig(JobConfiguration description)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getJobUpdateSummaries(JobUpdateQuery jobUpdateQuery)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getJobUpdateDetails(JobUpdateQuery query)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getJobUpdateDiff(JobUpdateRequest request)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getTierConfigs()")
|
||||||
|
fmt.Fprintln(os.Stderr)
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpHeaders map[string]string
|
||||||
|
|
||||||
|
func (h httpHeaders) String() string {
|
||||||
|
var m map[string]string = h
|
||||||
|
return fmt.Sprintf("%s", m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h httpHeaders) Set(value string) error {
|
||||||
|
parts := strings.Split(value, ": ")
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return fmt.Errorf("header should be of format 'Key: Value'")
|
||||||
|
}
|
||||||
|
h[parts[0]] = parts[1]
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Usage = Usage
|
||||||
|
var host string
|
||||||
|
var port int
|
||||||
|
var protocol string
|
||||||
|
var urlString string
|
||||||
|
var framed bool
|
||||||
|
var useHttp bool
|
||||||
|
headers := make(httpHeaders)
|
||||||
|
var parsedUrl *url.URL
|
||||||
|
var trans thrift.TTransport
|
||||||
|
_ = strconv.Atoi
|
||||||
|
_ = math.Abs
|
||||||
|
flag.Usage = Usage
|
||||||
|
flag.StringVar(&host, "h", "localhost", "Specify host and port")
|
||||||
|
flag.IntVar(&port, "p", 9090, "Specify port")
|
||||||
|
flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
|
||||||
|
flag.StringVar(&urlString, "u", "", "Specify the url")
|
||||||
|
flag.BoolVar(&framed, "framed", false, "Use framed transport")
|
||||||
|
flag.BoolVar(&useHttp, "http", false, "Use http")
|
||||||
|
flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if len(urlString) > 0 {
|
||||||
|
var err error
|
||||||
|
parsedUrl, err = url.Parse(urlString)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
host = parsedUrl.Host
|
||||||
|
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https"
|
||||||
|
} else if useHttp {
|
||||||
|
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := flag.Arg(0)
|
||||||
|
var err error
|
||||||
|
if useHttp {
|
||||||
|
trans, err = thrift.NewTHttpClient(parsedUrl.String())
|
||||||
|
if len(headers) > 0 {
|
||||||
|
httptrans := trans.(*thrift.THttpClient)
|
||||||
|
for key, value := range headers {
|
||||||
|
httptrans.SetHeader(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
portStr := fmt.Sprint(port)
|
||||||
|
if strings.Contains(host, ":") {
|
||||||
|
host, portStr, err = net.SplitHostPort(host)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "error with host:", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "error resolving address:", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if framed {
|
||||||
|
trans = thrift.NewTFramedTransport(trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "Error creating transport", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
defer trans.Close()
|
||||||
|
var protocolFactory thrift.TProtocolFactory
|
||||||
|
switch protocol {
|
||||||
|
case "compact":
|
||||||
|
protocolFactory = thrift.NewTCompactProtocolFactory()
|
||||||
|
break
|
||||||
|
case "simplejson":
|
||||||
|
protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
|
||||||
|
break
|
||||||
|
case "json":
|
||||||
|
protocolFactory = thrift.NewTJSONProtocolFactory()
|
||||||
|
break
|
||||||
|
case "binary", "":
|
||||||
|
protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
|
||||||
|
Usage()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
iprot := protocolFactory.GetProtocol(trans)
|
||||||
|
oprot := protocolFactory.GetProtocol(trans)
|
||||||
|
client := aurora.NewAuroraSchedulerManagerClient(thrift.NewTStandardClient(iprot, oprot))
|
||||||
|
if err := trans.Open(); err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch cmd {
|
||||||
|
case "createJob":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "CreateJob requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg213 := flag.Arg(1)
|
||||||
|
mbTrans214 := thrift.NewTMemoryBufferLen(len(arg213))
|
||||||
|
defer mbTrans214.Close()
|
||||||
|
_, err215 := mbTrans214.WriteString(arg213)
|
||||||
|
if err215 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory216 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt217 := factory216.GetProtocol(mbTrans214)
|
||||||
|
argvalue0 := aurora.NewJobConfiguration()
|
||||||
|
err218 := argvalue0.Read(context.Background(), jsProt217)
|
||||||
|
if err218 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.CreateJob(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "scheduleCronJob":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "ScheduleCronJob requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg219 := flag.Arg(1)
|
||||||
|
mbTrans220 := thrift.NewTMemoryBufferLen(len(arg219))
|
||||||
|
defer mbTrans220.Close()
|
||||||
|
_, err221 := mbTrans220.WriteString(arg219)
|
||||||
|
if err221 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory222 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt223 := factory222.GetProtocol(mbTrans220)
|
||||||
|
argvalue0 := aurora.NewJobConfiguration()
|
||||||
|
err224 := argvalue0.Read(context.Background(), jsProt223)
|
||||||
|
if err224 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.ScheduleCronJob(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "descheduleCronJob":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "DescheduleCronJob requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg225 := flag.Arg(1)
|
||||||
|
mbTrans226 := thrift.NewTMemoryBufferLen(len(arg225))
|
||||||
|
defer mbTrans226.Close()
|
||||||
|
_, err227 := mbTrans226.WriteString(arg225)
|
||||||
|
if err227 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory228 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt229 := factory228.GetProtocol(mbTrans226)
|
||||||
|
argvalue0 := aurora.NewJobKey()
|
||||||
|
err230 := argvalue0.Read(context.Background(), jsProt229)
|
||||||
|
if err230 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.DescheduleCronJob(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "startCronJob":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "StartCronJob requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg231 := flag.Arg(1)
|
||||||
|
mbTrans232 := thrift.NewTMemoryBufferLen(len(arg231))
|
||||||
|
defer mbTrans232.Close()
|
||||||
|
_, err233 := mbTrans232.WriteString(arg231)
|
||||||
|
if err233 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory234 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt235 := factory234.GetProtocol(mbTrans232)
|
||||||
|
argvalue0 := aurora.NewJobKey()
|
||||||
|
err236 := argvalue0.Read(context.Background(), jsProt235)
|
||||||
|
if err236 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.StartCronJob(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "restartShards":
|
||||||
|
if flag.NArg() - 1 != 2 {
|
||||||
|
fmt.Fprintln(os.Stderr, "RestartShards requires 2 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg237 := flag.Arg(1)
|
||||||
|
mbTrans238 := thrift.NewTMemoryBufferLen(len(arg237))
|
||||||
|
defer mbTrans238.Close()
|
||||||
|
_, err239 := mbTrans238.WriteString(arg237)
|
||||||
|
if err239 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory240 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt241 := factory240.GetProtocol(mbTrans238)
|
||||||
|
argvalue0 := aurora.NewJobKey()
|
||||||
|
err242 := argvalue0.Read(context.Background(), jsProt241)
|
||||||
|
if err242 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
arg243 := flag.Arg(2)
|
||||||
|
mbTrans244 := thrift.NewTMemoryBufferLen(len(arg243))
|
||||||
|
defer mbTrans244.Close()
|
||||||
|
_, err245 := mbTrans244.WriteString(arg243)
|
||||||
|
if err245 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory246 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt247 := factory246.GetProtocol(mbTrans244)
|
||||||
|
containerStruct1 := aurora.NewAuroraSchedulerManagerRestartShardsArgs()
|
||||||
|
err248 := containerStruct1.ReadField2(context.Background(), jsProt247)
|
||||||
|
if err248 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
argvalue1 := containerStruct1.ShardIds
|
||||||
|
value1 := argvalue1
|
||||||
|
fmt.Print(client.RestartShards(context.Background(), value0, value1))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "killTasks":
|
||||||
|
if flag.NArg() - 1 != 3 {
|
||||||
|
fmt.Fprintln(os.Stderr, "KillTasks requires 3 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg249 := flag.Arg(1)
|
||||||
|
mbTrans250 := thrift.NewTMemoryBufferLen(len(arg249))
|
||||||
|
defer mbTrans250.Close()
|
||||||
|
_, err251 := mbTrans250.WriteString(arg249)
|
||||||
|
if err251 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory252 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt253 := factory252.GetProtocol(mbTrans250)
|
||||||
|
argvalue0 := aurora.NewJobKey()
|
||||||
|
err254 := argvalue0.Read(context.Background(), jsProt253)
|
||||||
|
if err254 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
arg255 := flag.Arg(2)
|
||||||
|
mbTrans256 := thrift.NewTMemoryBufferLen(len(arg255))
|
||||||
|
defer mbTrans256.Close()
|
||||||
|
_, err257 := mbTrans256.WriteString(arg255)
|
||||||
|
if err257 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory258 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt259 := factory258.GetProtocol(mbTrans256)
|
||||||
|
containerStruct1 := aurora.NewAuroraSchedulerManagerKillTasksArgs()
|
||||||
|
err260 := containerStruct1.ReadField2(context.Background(), jsProt259)
|
||||||
|
if err260 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
argvalue1 := containerStruct1.Instances
|
||||||
|
value1 := argvalue1
|
||||||
|
argvalue2 := flag.Arg(3)
|
||||||
|
value2 := argvalue2
|
||||||
|
fmt.Print(client.KillTasks(context.Background(), value0, value1, value2))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "addInstances":
|
||||||
|
if flag.NArg() - 1 != 2 {
|
||||||
|
fmt.Fprintln(os.Stderr, "AddInstances requires 2 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg262 := flag.Arg(1)
|
||||||
|
mbTrans263 := thrift.NewTMemoryBufferLen(len(arg262))
|
||||||
|
defer mbTrans263.Close()
|
||||||
|
_, err264 := mbTrans263.WriteString(arg262)
|
||||||
|
if err264 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory265 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt266 := factory265.GetProtocol(mbTrans263)
|
||||||
|
argvalue0 := aurora.NewInstanceKey()
|
||||||
|
err267 := argvalue0.Read(context.Background(), jsProt266)
|
||||||
|
if err267 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
tmp1, err268 := (strconv.Atoi(flag.Arg(2)))
|
||||||
|
if err268 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
argvalue1 := int32(tmp1)
|
||||||
|
value1 := argvalue1
|
||||||
|
fmt.Print(client.AddInstances(context.Background(), value0, value1))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "replaceCronTemplate":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "ReplaceCronTemplate requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg269 := flag.Arg(1)
|
||||||
|
mbTrans270 := thrift.NewTMemoryBufferLen(len(arg269))
|
||||||
|
defer mbTrans270.Close()
|
||||||
|
_, err271 := mbTrans270.WriteString(arg269)
|
||||||
|
if err271 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory272 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt273 := factory272.GetProtocol(mbTrans270)
|
||||||
|
argvalue0 := aurora.NewJobConfiguration()
|
||||||
|
err274 := argvalue0.Read(context.Background(), jsProt273)
|
||||||
|
if err274 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.ReplaceCronTemplate(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "startJobUpdate":
|
||||||
|
if flag.NArg() - 1 != 2 {
|
||||||
|
fmt.Fprintln(os.Stderr, "StartJobUpdate requires 2 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg275 := flag.Arg(1)
|
||||||
|
mbTrans276 := thrift.NewTMemoryBufferLen(len(arg275))
|
||||||
|
defer mbTrans276.Close()
|
||||||
|
_, err277 := mbTrans276.WriteString(arg275)
|
||||||
|
if err277 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory278 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt279 := factory278.GetProtocol(mbTrans276)
|
||||||
|
argvalue0 := aurora.NewJobUpdateRequest()
|
||||||
|
err280 := argvalue0.Read(context.Background(), jsProt279)
|
||||||
|
if err280 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
argvalue1 := flag.Arg(2)
|
||||||
|
value1 := argvalue1
|
||||||
|
fmt.Print(client.StartJobUpdate(context.Background(), value0, value1))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "pauseJobUpdate":
|
||||||
|
if flag.NArg() - 1 != 2 {
|
||||||
|
fmt.Fprintln(os.Stderr, "PauseJobUpdate requires 2 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg282 := flag.Arg(1)
|
||||||
|
mbTrans283 := thrift.NewTMemoryBufferLen(len(arg282))
|
||||||
|
defer mbTrans283.Close()
|
||||||
|
_, err284 := mbTrans283.WriteString(arg282)
|
||||||
|
if err284 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory285 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt286 := factory285.GetProtocol(mbTrans283)
|
||||||
|
argvalue0 := aurora.NewJobUpdateKey()
|
||||||
|
err287 := argvalue0.Read(context.Background(), jsProt286)
|
||||||
|
if err287 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
argvalue1 := flag.Arg(2)
|
||||||
|
value1 := argvalue1
|
||||||
|
fmt.Print(client.PauseJobUpdate(context.Background(), value0, value1))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "resumeJobUpdate":
|
||||||
|
if flag.NArg() - 1 != 2 {
|
||||||
|
fmt.Fprintln(os.Stderr, "ResumeJobUpdate requires 2 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg289 := flag.Arg(1)
|
||||||
|
mbTrans290 := thrift.NewTMemoryBufferLen(len(arg289))
|
||||||
|
defer mbTrans290.Close()
|
||||||
|
_, err291 := mbTrans290.WriteString(arg289)
|
||||||
|
if err291 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory292 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt293 := factory292.GetProtocol(mbTrans290)
|
||||||
|
argvalue0 := aurora.NewJobUpdateKey()
|
||||||
|
err294 := argvalue0.Read(context.Background(), jsProt293)
|
||||||
|
if err294 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
argvalue1 := flag.Arg(2)
|
||||||
|
value1 := argvalue1
|
||||||
|
fmt.Print(client.ResumeJobUpdate(context.Background(), value0, value1))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "abortJobUpdate":
|
||||||
|
if flag.NArg() - 1 != 2 {
|
||||||
|
fmt.Fprintln(os.Stderr, "AbortJobUpdate requires 2 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg296 := flag.Arg(1)
|
||||||
|
mbTrans297 := thrift.NewTMemoryBufferLen(len(arg296))
|
||||||
|
defer mbTrans297.Close()
|
||||||
|
_, err298 := mbTrans297.WriteString(arg296)
|
||||||
|
if err298 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory299 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt300 := factory299.GetProtocol(mbTrans297)
|
||||||
|
argvalue0 := aurora.NewJobUpdateKey()
|
||||||
|
err301 := argvalue0.Read(context.Background(), jsProt300)
|
||||||
|
if err301 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
argvalue1 := flag.Arg(2)
|
||||||
|
value1 := argvalue1
|
||||||
|
fmt.Print(client.AbortJobUpdate(context.Background(), value0, value1))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "rollbackJobUpdate":
|
||||||
|
if flag.NArg() - 1 != 2 {
|
||||||
|
fmt.Fprintln(os.Stderr, "RollbackJobUpdate requires 2 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg303 := flag.Arg(1)
|
||||||
|
mbTrans304 := thrift.NewTMemoryBufferLen(len(arg303))
|
||||||
|
defer mbTrans304.Close()
|
||||||
|
_, err305 := mbTrans304.WriteString(arg303)
|
||||||
|
if err305 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory306 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt307 := factory306.GetProtocol(mbTrans304)
|
||||||
|
argvalue0 := aurora.NewJobUpdateKey()
|
||||||
|
err308 := argvalue0.Read(context.Background(), jsProt307)
|
||||||
|
if err308 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
argvalue1 := flag.Arg(2)
|
||||||
|
value1 := argvalue1
|
||||||
|
fmt.Print(client.RollbackJobUpdate(context.Background(), value0, value1))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "pulseJobUpdate":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "PulseJobUpdate requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg310 := flag.Arg(1)
|
||||||
|
mbTrans311 := thrift.NewTMemoryBufferLen(len(arg310))
|
||||||
|
defer mbTrans311.Close()
|
||||||
|
_, err312 := mbTrans311.WriteString(arg310)
|
||||||
|
if err312 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory313 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt314 := factory313.GetProtocol(mbTrans311)
|
||||||
|
argvalue0 := aurora.NewJobUpdateKey()
|
||||||
|
err315 := argvalue0.Read(context.Background(), jsProt314)
|
||||||
|
if err315 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.PulseJobUpdate(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getRoleSummary":
|
||||||
|
if flag.NArg() - 1 != 0 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
fmt.Print(client.GetRoleSummary(context.Background()))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getJobSummary":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetJobSummary requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
argvalue0 := flag.Arg(1)
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetJobSummary(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getTasksStatus":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg317 := flag.Arg(1)
|
||||||
|
mbTrans318 := thrift.NewTMemoryBufferLen(len(arg317))
|
||||||
|
defer mbTrans318.Close()
|
||||||
|
_, err319 := mbTrans318.WriteString(arg317)
|
||||||
|
if err319 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory320 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt321 := factory320.GetProtocol(mbTrans318)
|
||||||
|
argvalue0 := aurora.NewTaskQuery()
|
||||||
|
err322 := argvalue0.Read(context.Background(), jsProt321)
|
||||||
|
if err322 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetTasksStatus(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getTasksWithoutConfigs":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg323 := flag.Arg(1)
|
||||||
|
mbTrans324 := thrift.NewTMemoryBufferLen(len(arg323))
|
||||||
|
defer mbTrans324.Close()
|
||||||
|
_, err325 := mbTrans324.WriteString(arg323)
|
||||||
|
if err325 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory326 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt327 := factory326.GetProtocol(mbTrans324)
|
||||||
|
argvalue0 := aurora.NewTaskQuery()
|
||||||
|
err328 := argvalue0.Read(context.Background(), jsProt327)
|
||||||
|
if err328 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetTasksWithoutConfigs(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getPendingReason":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg329 := flag.Arg(1)
|
||||||
|
mbTrans330 := thrift.NewTMemoryBufferLen(len(arg329))
|
||||||
|
defer mbTrans330.Close()
|
||||||
|
_, err331 := mbTrans330.WriteString(arg329)
|
||||||
|
if err331 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory332 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt333 := factory332.GetProtocol(mbTrans330)
|
||||||
|
argvalue0 := aurora.NewTaskQuery()
|
||||||
|
err334 := argvalue0.Read(context.Background(), jsProt333)
|
||||||
|
if err334 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetPendingReason(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getConfigSummary":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg335 := flag.Arg(1)
|
||||||
|
mbTrans336 := thrift.NewTMemoryBufferLen(len(arg335))
|
||||||
|
defer mbTrans336.Close()
|
||||||
|
_, err337 := mbTrans336.WriteString(arg335)
|
||||||
|
if err337 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory338 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt339 := factory338.GetProtocol(mbTrans336)
|
||||||
|
argvalue0 := aurora.NewJobKey()
|
||||||
|
err340 := argvalue0.Read(context.Background(), jsProt339)
|
||||||
|
if err340 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetConfigSummary(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getJobs":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetJobs requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
argvalue0 := flag.Arg(1)
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetJobs(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getQuota":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetQuota requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
argvalue0 := flag.Arg(1)
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetQuota(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "populateJobConfig":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg343 := flag.Arg(1)
|
||||||
|
mbTrans344 := thrift.NewTMemoryBufferLen(len(arg343))
|
||||||
|
defer mbTrans344.Close()
|
||||||
|
_, err345 := mbTrans344.WriteString(arg343)
|
||||||
|
if err345 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory346 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt347 := factory346.GetProtocol(mbTrans344)
|
||||||
|
argvalue0 := aurora.NewJobConfiguration()
|
||||||
|
err348 := argvalue0.Read(context.Background(), jsProt347)
|
||||||
|
if err348 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.PopulateJobConfig(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getJobUpdateSummaries":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg349 := flag.Arg(1)
|
||||||
|
mbTrans350 := thrift.NewTMemoryBufferLen(len(arg349))
|
||||||
|
defer mbTrans350.Close()
|
||||||
|
_, err351 := mbTrans350.WriteString(arg349)
|
||||||
|
if err351 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory352 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt353 := factory352.GetProtocol(mbTrans350)
|
||||||
|
argvalue0 := aurora.NewJobUpdateQuery()
|
||||||
|
err354 := argvalue0.Read(context.Background(), jsProt353)
|
||||||
|
if err354 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetJobUpdateSummaries(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getJobUpdateDetails":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg355 := flag.Arg(1)
|
||||||
|
mbTrans356 := thrift.NewTMemoryBufferLen(len(arg355))
|
||||||
|
defer mbTrans356.Close()
|
||||||
|
_, err357 := mbTrans356.WriteString(arg355)
|
||||||
|
if err357 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory358 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt359 := factory358.GetProtocol(mbTrans356)
|
||||||
|
argvalue0 := aurora.NewJobUpdateQuery()
|
||||||
|
err360 := argvalue0.Read(context.Background(), jsProt359)
|
||||||
|
if err360 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetJobUpdateDetails(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getJobUpdateDiff":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg361 := flag.Arg(1)
|
||||||
|
mbTrans362 := thrift.NewTMemoryBufferLen(len(arg361))
|
||||||
|
defer mbTrans362.Close()
|
||||||
|
_, err363 := mbTrans362.WriteString(arg361)
|
||||||
|
if err363 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory364 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt365 := factory364.GetProtocol(mbTrans362)
|
||||||
|
argvalue0 := aurora.NewJobUpdateRequest()
|
||||||
|
err366 := argvalue0.Read(context.Background(), jsProt365)
|
||||||
|
if err366 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetJobUpdateDiff(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getTierConfigs":
|
||||||
|
if flag.NArg() - 1 != 0 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
fmt.Print(client.GetTierConfigs(context.Background()))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "":
|
||||||
|
Usage()
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
|
||||||
|
}
|
||||||
|
}
|
411
gen-go/apache/aurora/read_only_scheduler-remote/read_only_scheduler-remote.go
Executable file
411
gen-go/apache/aurora/read_only_scheduler-remote/read_only_scheduler-remote.go
Executable file
|
@ -0,0 +1,411 @@
|
||||||
|
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"github.com/apache/thrift/lib/go/thrift"
|
||||||
|
"apache/aurora"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = aurora.GoUnusedProtection__
|
||||||
|
|
||||||
|
func Usage() {
|
||||||
|
fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
|
||||||
|
flag.PrintDefaults()
|
||||||
|
fmt.Fprintln(os.Stderr, "\nFunctions:")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getRoleSummary()")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getJobSummary(string role)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getTasksStatus(TaskQuery query)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getTasksWithoutConfigs(TaskQuery query)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getPendingReason(TaskQuery query)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getConfigSummary(JobKey job)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getJobs(string ownerRole)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getQuota(string ownerRole)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response populateJobConfig(JobConfiguration description)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getJobUpdateSummaries(JobUpdateQuery jobUpdateQuery)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getJobUpdateDetails(JobUpdateQuery query)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getJobUpdateDiff(JobUpdateRequest request)")
|
||||||
|
fmt.Fprintln(os.Stderr, " Response getTierConfigs()")
|
||||||
|
fmt.Fprintln(os.Stderr)
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
type httpHeaders map[string]string
|
||||||
|
|
||||||
|
func (h httpHeaders) String() string {
|
||||||
|
var m map[string]string = h
|
||||||
|
return fmt.Sprintf("%s", m)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h httpHeaders) Set(value string) error {
|
||||||
|
parts := strings.Split(value, ": ")
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return fmt.Errorf("header should be of format 'Key: Value'")
|
||||||
|
}
|
||||||
|
h[parts[0]] = parts[1]
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
flag.Usage = Usage
|
||||||
|
var host string
|
||||||
|
var port int
|
||||||
|
var protocol string
|
||||||
|
var urlString string
|
||||||
|
var framed bool
|
||||||
|
var useHttp bool
|
||||||
|
headers := make(httpHeaders)
|
||||||
|
var parsedUrl *url.URL
|
||||||
|
var trans thrift.TTransport
|
||||||
|
_ = strconv.Atoi
|
||||||
|
_ = math.Abs
|
||||||
|
flag.Usage = Usage
|
||||||
|
flag.StringVar(&host, "h", "localhost", "Specify host and port")
|
||||||
|
flag.IntVar(&port, "p", 9090, "Specify port")
|
||||||
|
flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
|
||||||
|
flag.StringVar(&urlString, "u", "", "Specify the url")
|
||||||
|
flag.BoolVar(&framed, "framed", false, "Use framed transport")
|
||||||
|
flag.BoolVar(&useHttp, "http", false, "Use http")
|
||||||
|
flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if len(urlString) > 0 {
|
||||||
|
var err error
|
||||||
|
parsedUrl, err = url.Parse(urlString)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
host = parsedUrl.Host
|
||||||
|
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https"
|
||||||
|
} else if useHttp {
|
||||||
|
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := flag.Arg(0)
|
||||||
|
var err error
|
||||||
|
if useHttp {
|
||||||
|
trans, err = thrift.NewTHttpClient(parsedUrl.String())
|
||||||
|
if len(headers) > 0 {
|
||||||
|
httptrans := trans.(*thrift.THttpClient)
|
||||||
|
for key, value := range headers {
|
||||||
|
httptrans.SetHeader(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
portStr := fmt.Sprint(port)
|
||||||
|
if strings.Contains(host, ":") {
|
||||||
|
host, portStr, err = net.SplitHostPort(host)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "error with host:", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "error resolving address:", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if framed {
|
||||||
|
trans = thrift.NewTFramedTransport(trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "Error creating transport", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
defer trans.Close()
|
||||||
|
var protocolFactory thrift.TProtocolFactory
|
||||||
|
switch protocol {
|
||||||
|
case "compact":
|
||||||
|
protocolFactory = thrift.NewTCompactProtocolFactory()
|
||||||
|
break
|
||||||
|
case "simplejson":
|
||||||
|
protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
|
||||||
|
break
|
||||||
|
case "json":
|
||||||
|
protocolFactory = thrift.NewTJSONProtocolFactory()
|
||||||
|
break
|
||||||
|
case "binary", "":
|
||||||
|
protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
|
||||||
|
Usage()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
iprot := protocolFactory.GetProtocol(trans)
|
||||||
|
oprot := protocolFactory.GetProtocol(trans)
|
||||||
|
client := aurora.NewReadOnlySchedulerClient(thrift.NewTStandardClient(iprot, oprot))
|
||||||
|
if err := trans.Open(); err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch cmd {
|
||||||
|
case "getRoleSummary":
|
||||||
|
if flag.NArg() - 1 != 0 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
fmt.Print(client.GetRoleSummary(context.Background()))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getJobSummary":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetJobSummary requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
argvalue0 := flag.Arg(1)
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetJobSummary(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getTasksStatus":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg132 := flag.Arg(1)
|
||||||
|
mbTrans133 := thrift.NewTMemoryBufferLen(len(arg132))
|
||||||
|
defer mbTrans133.Close()
|
||||||
|
_, err134 := mbTrans133.WriteString(arg132)
|
||||||
|
if err134 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory135 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt136 := factory135.GetProtocol(mbTrans133)
|
||||||
|
argvalue0 := aurora.NewTaskQuery()
|
||||||
|
err137 := argvalue0.Read(context.Background(), jsProt136)
|
||||||
|
if err137 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetTasksStatus(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getTasksWithoutConfigs":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg138 := flag.Arg(1)
|
||||||
|
mbTrans139 := thrift.NewTMemoryBufferLen(len(arg138))
|
||||||
|
defer mbTrans139.Close()
|
||||||
|
_, err140 := mbTrans139.WriteString(arg138)
|
||||||
|
if err140 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory141 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt142 := factory141.GetProtocol(mbTrans139)
|
||||||
|
argvalue0 := aurora.NewTaskQuery()
|
||||||
|
err143 := argvalue0.Read(context.Background(), jsProt142)
|
||||||
|
if err143 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetTasksWithoutConfigs(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getPendingReason":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg144 := flag.Arg(1)
|
||||||
|
mbTrans145 := thrift.NewTMemoryBufferLen(len(arg144))
|
||||||
|
defer mbTrans145.Close()
|
||||||
|
_, err146 := mbTrans145.WriteString(arg144)
|
||||||
|
if err146 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory147 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt148 := factory147.GetProtocol(mbTrans145)
|
||||||
|
argvalue0 := aurora.NewTaskQuery()
|
||||||
|
err149 := argvalue0.Read(context.Background(), jsProt148)
|
||||||
|
if err149 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetPendingReason(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getConfigSummary":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg150 := flag.Arg(1)
|
||||||
|
mbTrans151 := thrift.NewTMemoryBufferLen(len(arg150))
|
||||||
|
defer mbTrans151.Close()
|
||||||
|
_, err152 := mbTrans151.WriteString(arg150)
|
||||||
|
if err152 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory153 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt154 := factory153.GetProtocol(mbTrans151)
|
||||||
|
argvalue0 := aurora.NewJobKey()
|
||||||
|
err155 := argvalue0.Read(context.Background(), jsProt154)
|
||||||
|
if err155 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetConfigSummary(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getJobs":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetJobs requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
argvalue0 := flag.Arg(1)
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetJobs(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getQuota":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetQuota requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
argvalue0 := flag.Arg(1)
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetQuota(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "populateJobConfig":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg158 := flag.Arg(1)
|
||||||
|
mbTrans159 := thrift.NewTMemoryBufferLen(len(arg158))
|
||||||
|
defer mbTrans159.Close()
|
||||||
|
_, err160 := mbTrans159.WriteString(arg158)
|
||||||
|
if err160 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory161 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt162 := factory161.GetProtocol(mbTrans159)
|
||||||
|
argvalue0 := aurora.NewJobConfiguration()
|
||||||
|
err163 := argvalue0.Read(context.Background(), jsProt162)
|
||||||
|
if err163 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.PopulateJobConfig(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getJobUpdateSummaries":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg164 := flag.Arg(1)
|
||||||
|
mbTrans165 := thrift.NewTMemoryBufferLen(len(arg164))
|
||||||
|
defer mbTrans165.Close()
|
||||||
|
_, err166 := mbTrans165.WriteString(arg164)
|
||||||
|
if err166 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory167 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt168 := factory167.GetProtocol(mbTrans165)
|
||||||
|
argvalue0 := aurora.NewJobUpdateQuery()
|
||||||
|
err169 := argvalue0.Read(context.Background(), jsProt168)
|
||||||
|
if err169 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetJobUpdateSummaries(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getJobUpdateDetails":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg170 := flag.Arg(1)
|
||||||
|
mbTrans171 := thrift.NewTMemoryBufferLen(len(arg170))
|
||||||
|
defer mbTrans171.Close()
|
||||||
|
_, err172 := mbTrans171.WriteString(arg170)
|
||||||
|
if err172 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory173 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt174 := factory173.GetProtocol(mbTrans171)
|
||||||
|
argvalue0 := aurora.NewJobUpdateQuery()
|
||||||
|
err175 := argvalue0.Read(context.Background(), jsProt174)
|
||||||
|
if err175 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetJobUpdateDetails(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getJobUpdateDiff":
|
||||||
|
if flag.NArg() - 1 != 1 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
arg176 := flag.Arg(1)
|
||||||
|
mbTrans177 := thrift.NewTMemoryBufferLen(len(arg176))
|
||||||
|
defer mbTrans177.Close()
|
||||||
|
_, err178 := mbTrans177.WriteString(arg176)
|
||||||
|
if err178 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
factory179 := thrift.NewTJSONProtocolFactory()
|
||||||
|
jsProt180 := factory179.GetProtocol(mbTrans177)
|
||||||
|
argvalue0 := aurora.NewJobUpdateRequest()
|
||||||
|
err181 := argvalue0.Read(context.Background(), jsProt180)
|
||||||
|
if err181 != nil {
|
||||||
|
Usage()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
value0 := argvalue0
|
||||||
|
fmt.Print(client.GetJobUpdateDiff(context.Background(), value0))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "getTierConfigs":
|
||||||
|
if flag.NArg() - 1 != 0 {
|
||||||
|
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
|
||||||
|
flag.Usage()
|
||||||
|
}
|
||||||
|
fmt.Print(client.GetTierConfigs(context.Background()))
|
||||||
|
fmt.Print("\n")
|
||||||
|
break
|
||||||
|
case "":
|
||||||
|
Usage()
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
|
||||||
|
}
|
||||||
|
}
|
|
@ -1,11 +1,11 @@
|
||||||
#! /bin/bash
|
#! /bin/bash
|
||||||
|
|
||||||
THRIFT_VER=0.9.3
|
THRIFT_VER=0.14.0
|
||||||
|
|
||||||
if [[ $(thrift -version | grep -e $THRIFT_VER -c) -ne 1 ]]; then
|
if [[ $(thrift -version | grep -e $THRIFT_VER -c) -ne 1 ]]; then
|
||||||
echo "Warning: This wrapper has only been tested with version" $THRIFT_VER;
|
echo "Warning: This wrapper has only been tested with version" $THRIFT_VER;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo "Generating bindings and placing them in the vendor folder...";
|
echo "Generating bindings...";
|
||||||
thrift -o vendor/ -r -gen go:package=apache.aurora auroraAPI.thrift;
|
thrift -o ./ -r -gen go:package=apache.aurora auroraAPI.thrift;
|
||||||
echo "Done";
|
echo "Done";
|
||||||
|
|
12
go.mod
Normal file
12
go.mod
Normal file
|
@ -0,0 +1,12 @@
|
||||||
|
module github.com/paypal/gorealis
|
||||||
|
|
||||||
|
go 1.13
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/apache/thrift v0.14.0
|
||||||
|
github.com/davecgh/go-spew v1.1.0 // indirect
|
||||||
|
github.com/pkg/errors v0.9.1
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
|
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a
|
||||||
|
github.com/stretchr/testify v1.7.0
|
||||||
|
)
|
30
go.sum
Normal file
30
go.sum
Normal file
|
@ -0,0 +1,30 @@
|
||||||
|
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
|
||||||
|
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||||
|
github.com/apache/thrift v0.14.0 h1:vqZ2DP42i8th2OsgCcYZkirtbzvpZEFx53LiWDJXIAs=
|
||||||
|
github.com/apache/thrift v0.14.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||||
|
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e h1:+RHxT/gm0O3UF7nLJbdNzAmULvCFt4XfXHWzh3XI/zs=
|
||||||
|
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/ridv/thrift v0.12.1 h1:b80V1Oa2Mbd++jrlJZbJsIybO5/MCfbXKzd1A5v4aSo=
|
||||||
|
github.com/ridv/thrift v0.12.1/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
|
||||||
|
github.com/ridv/thrift v0.13.1 h1:/8XnTRUqJJeiuqoL7mfnJQmXQa4GJn9tUCiP7+i6Y9o=
|
||||||
|
github.com/ridv/thrift v0.13.1/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
|
||||||
|
github.com/ridv/thrift v0.13.2 h1:Q3Smr8poXd7VkWZPHvdJZzlQCJO+b5W37ECfoUL4qHc=
|
||||||
|
github.com/ridv/thrift v0.13.2/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
|
||||||
|
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a h1:EYL2xz/Zdo0hyqdZMXR4lmT2O11jDLTPCEqIe/FR6W4=
|
||||||
|
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
|
||||||
|
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/testify v1.2.0 h1:LThGCOvhuJic9Gyd1VBCkhyUXmO8vKaBFvBsJ2k03rg=
|
||||||
|
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||||
|
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||||
|
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
21
helpers.go
Normal file
21
helpers.go
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
package realis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (r *realisClient) jobExists(key aurora.JobKey) (bool, error) {
|
||||||
|
resp, err := r.client.GetConfigSummary(context.TODO(), &key)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp == nil ||
|
||||||
|
resp.GetResult_() == nil ||
|
||||||
|
resp.GetResult_().GetConfigSummaryResult_() == nil ||
|
||||||
|
resp.GetResult_().GetConfigSummaryResult_().GetSummary() == nil ||
|
||||||
|
resp.GetResponseCode() != aurora.ResponseCode_OK,
|
||||||
|
nil
|
||||||
|
}
|
293
job.go
293
job.go
|
@ -15,18 +15,25 @@
|
||||||
package realis
|
package realis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"gen-go/apache/aurora"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Job inteface is used to define a set of functions an Aurora Job object
|
||||||
|
// must implemement.
|
||||||
|
// TODO(rdelvalle): Consider getting rid of the Job interface
|
||||||
type Job interface {
|
type Job interface {
|
||||||
// Set Job Key environment.
|
// Set Job Key environment.
|
||||||
Environment(env string) Job
|
Environment(env string) Job
|
||||||
Role(role string) Job
|
Role(role string) Job
|
||||||
Name(name string) Job
|
Name(name string) Job
|
||||||
|
CronSchedule(cron string) Job
|
||||||
|
CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job
|
||||||
CPU(cpus float64) Job
|
CPU(cpus float64) Job
|
||||||
Disk(disk int64) Job
|
Disk(disk int64) Job
|
||||||
RAM(ram int64) Job
|
RAM(ram int64) Job
|
||||||
|
GPU(gpu int64) Job
|
||||||
ExecutorName(name string) Job
|
ExecutorName(name string) Job
|
||||||
ExecutorData(data string) Job
|
ExecutorData(data string) Job
|
||||||
AddPorts(num int) Job
|
AddPorts(num int) Job
|
||||||
|
@ -34,6 +41,15 @@ type Job interface {
|
||||||
AddNamedPorts(names ...string) Job
|
AddNamedPorts(names ...string) Job
|
||||||
AddLimitConstraint(name string, limit int32) Job
|
AddLimitConstraint(name string, limit int32) Job
|
||||||
AddValueConstraint(name string, negated bool, values ...string) Job
|
AddValueConstraint(name string, negated bool, values ...string) Job
|
||||||
|
|
||||||
|
// From Aurora Docs:
|
||||||
|
// dedicated attribute. Aurora treats this specially, and only allows matching jobs
|
||||||
|
// to run on these machines, and will only schedule matching jobs on these machines.
|
||||||
|
// When a job is created, the scheduler requires that the $role component matches
|
||||||
|
// the role field in the job configuration, and will reject the job creation otherwise.
|
||||||
|
// A wildcard (*) may be used for the role portion of the dedicated attribute, which
|
||||||
|
// will allow any owner to elect for a job to run on the host(s)
|
||||||
|
AddDedicatedConstraint(role, name string) Job
|
||||||
AddURIs(extract bool, cache bool, values ...string) Job
|
AddURIs(extract bool, cache bool, values ...string) Job
|
||||||
JobKey() *aurora.JobKey
|
JobKey() *aurora.JobKey
|
||||||
JobConfig() *aurora.JobConfiguration
|
JobConfig() *aurora.JobConfiguration
|
||||||
|
@ -42,16 +58,34 @@ type Job interface {
|
||||||
InstanceCount(instCount int32) Job
|
InstanceCount(instCount int32) Job
|
||||||
GetInstanceCount() int32
|
GetInstanceCount() int32
|
||||||
MaxFailure(maxFail int32) Job
|
MaxFailure(maxFail int32) Job
|
||||||
|
Container(container Container) Job
|
||||||
|
PartitionPolicy(policy *aurora.PartitionPolicy) Job
|
||||||
|
Tier(tier string) Job
|
||||||
|
SlaPolicy(policy *aurora.SlaPolicy) Job
|
||||||
|
Priority(priority int32) Job
|
||||||
}
|
}
|
||||||
|
|
||||||
// Structure to collect all information pertaining to an Aurora job.
|
type resourceType int
|
||||||
|
|
||||||
|
const (
|
||||||
|
CPU resourceType = iota
|
||||||
|
RAM
|
||||||
|
DISK
|
||||||
|
GPU
|
||||||
|
)
|
||||||
|
|
||||||
|
const portNamePrefix = "org.apache.aurora.port."
|
||||||
|
|
||||||
|
// AuroraJob is a structure to collect all information pertaining to an Aurora job.
|
||||||
type AuroraJob struct {
|
type AuroraJob struct {
|
||||||
jobConfig *aurora.JobConfiguration
|
jobConfig *aurora.JobConfiguration
|
||||||
resources map[string]*aurora.Resource
|
resources map[resourceType]*aurora.Resource
|
||||||
|
metadata map[string]*aurora.Metadata
|
||||||
|
constraints map[string]*aurora.Constraint
|
||||||
portCount int
|
portCount int
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a Job object with everything initialized.
|
// NewJob is used to create a Job object with everything initialized.
|
||||||
func NewJob() Job {
|
func NewJob() Job {
|
||||||
jobConfig := aurora.NewJobConfiguration()
|
jobConfig := aurora.NewJobConfiguration()
|
||||||
taskConfig := aurora.NewTaskConfig()
|
taskConfig := aurora.NewTaskConfig()
|
||||||
|
@ -65,188 +99,293 @@ func NewJob() Job {
|
||||||
taskConfig.Job = jobKey
|
taskConfig.Job = jobKey
|
||||||
taskConfig.Container = aurora.NewContainer()
|
taskConfig.Container = aurora.NewContainer()
|
||||||
taskConfig.Container.Mesos = aurora.NewMesosContainer()
|
taskConfig.Container.Mesos = aurora.NewMesosContainer()
|
||||||
taskConfig.ExecutorConfig = aurora.NewExecutorConfig()
|
|
||||||
taskConfig.MesosFetcherUris = make(map[*aurora.MesosFetcherURI]bool)
|
|
||||||
taskConfig.Metadata = make(map[*aurora.Metadata]bool)
|
|
||||||
taskConfig.Constraints = make(map[*aurora.Constraint]bool)
|
|
||||||
|
|
||||||
// Resources
|
// Resources
|
||||||
numCpus := aurora.NewResource()
|
numCpus := aurora.NewResource()
|
||||||
ramMb := aurora.NewResource()
|
ramMb := aurora.NewResource()
|
||||||
diskMb := aurora.NewResource()
|
diskMb := aurora.NewResource()
|
||||||
|
|
||||||
resources := make(map[string]*aurora.Resource)
|
resources := map[resourceType]*aurora.Resource{CPU: numCpus, RAM: ramMb, DISK: diskMb}
|
||||||
resources["cpu"] = numCpus
|
taskConfig.Resources = []*aurora.Resource{numCpus, ramMb, diskMb}
|
||||||
resources["ram"] = ramMb
|
|
||||||
resources["disk"] = diskMb
|
|
||||||
|
|
||||||
taskConfig.Resources = make(map[*aurora.Resource]bool)
|
numCpus.NumCpus = new(float64)
|
||||||
taskConfig.Resources[numCpus] = true
|
ramMb.RamMb = new(int64)
|
||||||
taskConfig.Resources[ramMb] = true
|
diskMb.DiskMb = new(int64)
|
||||||
taskConfig.Resources[diskMb] = true
|
|
||||||
|
|
||||||
return AuroraJob{jobConfig, resources, 0}
|
return &AuroraJob{
|
||||||
|
jobConfig: jobConfig,
|
||||||
|
resources: resources,
|
||||||
|
metadata: make(map[string]*aurora.Metadata),
|
||||||
|
constraints: make(map[string]*aurora.Constraint),
|
||||||
|
portCount: 0,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set Job Key environment.
|
// Environment sets the Job Key environment.
|
||||||
func (j AuroraJob) Environment(env string) Job {
|
func (j *AuroraJob) Environment(env string) Job {
|
||||||
j.jobConfig.Key.Environment = env
|
j.jobConfig.Key.Environment = env
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set Job Key Role.
|
// Role sets the Job Key role.
|
||||||
func (j AuroraJob) Role(role string) Job {
|
func (j *AuroraJob) Role(role string) Job {
|
||||||
j.jobConfig.Key.Role = role
|
j.jobConfig.Key.Role = role
|
||||||
|
|
||||||
// Will be deprecated
|
// Will be deprecated
|
||||||
identity := &aurora.Identity{role}
|
identity := &aurora.Identity{User: role}
|
||||||
j.jobConfig.Owner = identity
|
j.jobConfig.Owner = identity
|
||||||
j.jobConfig.TaskConfig.Owner = identity
|
j.jobConfig.TaskConfig.Owner = identity
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set Job Key Name.
|
// Name sets the Job Key Name.
|
||||||
func (j AuroraJob) Name(name string) Job {
|
func (j *AuroraJob) Name(name string) Job {
|
||||||
j.jobConfig.Key.Name = name
|
j.jobConfig.Key.Name = name
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set name of the executor that will the task will be configured to.
|
// ExecutorName sets the name of the executor that will the task will be configured to.
|
||||||
func (j AuroraJob) ExecutorName(name string) Job {
|
func (j *AuroraJob) ExecutorName(name string) Job {
|
||||||
|
|
||||||
|
if j.jobConfig.TaskConfig.ExecutorConfig == nil {
|
||||||
|
j.jobConfig.TaskConfig.ExecutorConfig = aurora.NewExecutorConfig()
|
||||||
|
}
|
||||||
|
|
||||||
j.jobConfig.TaskConfig.ExecutorConfig.Name = name
|
j.jobConfig.TaskConfig.ExecutorConfig.Name = name
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// Will be included as part of entire task inside the scheduler that will be serialized.
|
// ExecutorData sets the data blob that will be passed to the Mesos executor.
|
||||||
func (j AuroraJob) ExecutorData(data string) Job {
|
func (j *AuroraJob) ExecutorData(data string) Job {
|
||||||
|
|
||||||
|
if j.jobConfig.TaskConfig.ExecutorConfig == nil {
|
||||||
|
j.jobConfig.TaskConfig.ExecutorConfig = aurora.NewExecutorConfig()
|
||||||
|
}
|
||||||
|
|
||||||
j.jobConfig.TaskConfig.ExecutorConfig.Data = data
|
j.jobConfig.TaskConfig.ExecutorConfig.Data = data
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
func (j AuroraJob) CPU(cpus float64) Job {
|
// CPU sets the amount of CPU each task will use in an Aurora Job.
|
||||||
j.resources["cpu"].NumCpus = &cpus
|
func (j *AuroraJob) CPU(cpus float64) Job {
|
||||||
j.jobConfig.TaskConfig.NumCpus = cpus //Will be deprecated soon
|
*j.resources[CPU].NumCpus = cpus
|
||||||
|
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
func (j AuroraJob) RAM(ram int64) Job {
|
// RAM sets the amount of RAM each task will use in an Aurora Job.
|
||||||
j.resources["ram"].RamMb = &ram
|
func (j *AuroraJob) RAM(ram int64) Job {
|
||||||
j.jobConfig.TaskConfig.RamMb = ram //Will be deprecated soon
|
*j.resources[RAM].RamMb = ram
|
||||||
|
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
func (j AuroraJob) Disk(disk int64) Job {
|
// Disk sets the amount of Disk each task will use in an Aurora Job.
|
||||||
j.resources["disk"].DiskMb = &disk
|
func (j *AuroraJob) Disk(disk int64) Job {
|
||||||
j.jobConfig.TaskConfig.DiskMb = disk //Will be deprecated
|
*j.resources[DISK].DiskMb = disk
|
||||||
|
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// How many failures to tolerate before giving up.
|
// GPU sets the amount of GPU each task will use in an Aurora Job.
|
||||||
func (j AuroraJob) MaxFailure(maxFail int32) Job {
|
func (j *AuroraJob) GPU(gpu int64) Job {
|
||||||
|
// GPU resource must be set explicitly since the scheduler by default
|
||||||
|
// rejects jobs with GPU resources attached to it.
|
||||||
|
if _, ok := j.resources[GPU]; !ok {
|
||||||
|
j.resources[GPU] = &aurora.Resource{}
|
||||||
|
j.JobConfig().GetTaskConfig().Resources = append(
|
||||||
|
j.JobConfig().GetTaskConfig().Resources,
|
||||||
|
j.resources[GPU])
|
||||||
|
}
|
||||||
|
|
||||||
|
j.resources[GPU].NumGpus = &gpu
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxFailure sets how many failures to tolerate before giving up per Job.
|
||||||
|
func (j *AuroraJob) MaxFailure(maxFail int32) Job {
|
||||||
j.jobConfig.TaskConfig.MaxTaskFailures = maxFail
|
j.jobConfig.TaskConfig.MaxTaskFailures = maxFail
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// How many instances of the job to run
|
// InstanceCount sets how many instances of the task to run for this Job.
|
||||||
func (j AuroraJob) InstanceCount(instCount int32) Job {
|
func (j *AuroraJob) InstanceCount(instCount int32) Job {
|
||||||
j.jobConfig.InstanceCount = instCount
|
j.jobConfig.InstanceCount = instCount
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// How many instances of the job to run
|
// CronSchedule allows the user to configure a cron schedule for this job to run in.
|
||||||
func (j AuroraJob) GetInstanceCount() int32 {
|
func (j *AuroraJob) CronSchedule(cron string) Job {
|
||||||
|
j.jobConfig.CronSchedule = &cron
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
||||||
|
// CronCollisionPolicy allows the user to decide what happens if two or more instances
|
||||||
|
// of the same Cron job need to run.
|
||||||
|
func (j *AuroraJob) CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job {
|
||||||
|
j.jobConfig.CronCollisionPolicy = policy
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetInstanceCount returns how many tasks this Job contains.
|
||||||
|
func (j *AuroraJob) GetInstanceCount() int32 {
|
||||||
return j.jobConfig.InstanceCount
|
return j.jobConfig.InstanceCount
|
||||||
}
|
}
|
||||||
|
|
||||||
// Restart the job's tasks if they fail
|
// IsService returns true if the job is a long term running job or false if it is an ad-hoc job.
|
||||||
func (j AuroraJob) IsService(isService bool) Job {
|
func (j *AuroraJob) IsService(isService bool) Job {
|
||||||
j.jobConfig.TaskConfig.IsService = isService
|
j.jobConfig.TaskConfig.IsService = isService
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the current job configurations key to use for some realis calls.
|
// JobKey returns the job's configuration key.
|
||||||
func (j AuroraJob) JobKey() *aurora.JobKey {
|
func (j *AuroraJob) JobKey() *aurora.JobKey {
|
||||||
return j.jobConfig.Key
|
return j.jobConfig.Key
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the current job configurations key to use for some realis calls.
|
// JobConfig returns the job's configuration.
|
||||||
func (j AuroraJob) JobConfig() *aurora.JobConfiguration {
|
func (j *AuroraJob) JobConfig() *aurora.JobConfiguration {
|
||||||
return j.jobConfig
|
return j.jobConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
func (j AuroraJob) TaskConfig() *aurora.TaskConfig {
|
// TaskConfig returns the job's task(shard) configuration.
|
||||||
|
func (j *AuroraJob) TaskConfig() *aurora.TaskConfig {
|
||||||
return j.jobConfig.TaskConfig
|
return j.jobConfig.TaskConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add a list of URIs with the same extract and cache configuration. Scheduler must have
|
// AddURIs adds a list of URIs with the same extract and cache configuration. Scheduler must have
|
||||||
// --enable_mesos_fetcher flag enabled. Currently there is no duplicate detection.
|
// --enable_mesos_fetcher flag enabled. Currently there is no duplicate detection.
|
||||||
func (j AuroraJob) AddURIs(extract bool, cache bool, values ...string) Job {
|
func (j *AuroraJob) AddURIs(extract bool, cache bool, values ...string) Job {
|
||||||
for _, value := range values {
|
for _, value := range values {
|
||||||
j.jobConfig.
|
j.jobConfig.TaskConfig.MesosFetcherUris = append(j.jobConfig.TaskConfig.MesosFetcherUris,
|
||||||
TaskConfig.
|
&aurora.MesosFetcherURI{Value: value, Extract: &extract, Cache: &cache})
|
||||||
MesosFetcherUris[&aurora.MesosFetcherURI{value, &extract, &cache}] = true
|
|
||||||
}
|
}
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adds a Mesos label to the job. Note that Aurora will add the
|
// AddLabel adds a Mesos label to the job. Note that Aurora will add the
|
||||||
// prefix "org.apache.aurora.metadata." to the beginning of each key.
|
// prefix "org.apache.aurora.metadata." to the beginning of each key.
|
||||||
func (j AuroraJob) AddLabel(key string, value string) Job {
|
func (j *AuroraJob) AddLabel(key string, value string) Job {
|
||||||
j.jobConfig.TaskConfig.Metadata[&aurora.Metadata{key, value}] = true
|
if _, ok := j.metadata[key]; !ok {
|
||||||
|
j.metadata[key] = &aurora.Metadata{Key: key}
|
||||||
|
j.jobConfig.TaskConfig.Metadata = append(j.jobConfig.TaskConfig.Metadata, j.metadata[key])
|
||||||
|
}
|
||||||
|
|
||||||
|
j.metadata[key].Value = value
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add a named port to the job configuration These are random ports as it's
|
// AddNamedPorts adds a named port to the job configuration These are random ports as it's
|
||||||
// not currently possible to request specific ports using Aurora.
|
// not currently possible to request specific ports using Aurora.
|
||||||
func (j AuroraJob) AddNamedPorts(names ...string) Job {
|
func (j *AuroraJob) AddNamedPorts(names ...string) Job {
|
||||||
j.portCount += len(names)
|
j.portCount += len(names)
|
||||||
for _, name := range names {
|
for _, name := range names {
|
||||||
j.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &name}] = true
|
j.jobConfig.TaskConfig.Resources = append(
|
||||||
|
j.jobConfig.TaskConfig.Resources,
|
||||||
|
&aurora.Resource{NamedPort: &name})
|
||||||
}
|
}
|
||||||
|
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adds a request for a number of ports to the job configuration. The names chosen for these ports
|
// AddPorts adds a request for a number of ports to the job configuration. The names chosen for these ports
|
||||||
// will be org.apache.aurora.port.X, where X is the current port count for the job configuration
|
// will be org.apache.aurora.port.X, where X is the current port count for the job configuration
|
||||||
// starting at 0. These are random ports as it's not currently possible to request
|
// starting at 0. These are random ports as it's not currently possible to request
|
||||||
// specific ports using Aurora.
|
// specific ports using Aurora.
|
||||||
func (j AuroraJob) AddPorts(num int) Job {
|
func (j *AuroraJob) AddPorts(num int) Job {
|
||||||
start := j.portCount
|
start := j.portCount
|
||||||
j.portCount += num
|
j.portCount += num
|
||||||
for i := start; i < j.portCount; i++ {
|
for i := start; i < j.portCount; i++ {
|
||||||
portName := "org.apache.aurora.port." + strconv.Itoa(i)
|
portName := portNamePrefix + strconv.Itoa(i)
|
||||||
j.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &portName}] = true
|
j.jobConfig.TaskConfig.Resources = append(
|
||||||
|
j.jobConfig.TaskConfig.Resources,
|
||||||
|
&aurora.Resource{NamedPort: &portName})
|
||||||
}
|
}
|
||||||
|
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddValueConstraint allows the user to add a value constrain to the job to limit which agents the job's
|
||||||
|
// tasks can be run on. If the name matches a constraint that was previously set, the previous value will be
|
||||||
|
// overwritten. In case the previous constraint attached to the name was of type limit, the constraint will be clobbered
|
||||||
|
// by this new Value constraint.
|
||||||
// From Aurora Docs:
|
// From Aurora Docs:
|
||||||
// Add a Value constraint
|
// Add a Value constraint
|
||||||
// name - Mesos slave attribute that the constraint is matched against.
|
// name - Mesos slave attribute that the constraint is matched against.
|
||||||
// If negated = true , treat this as a 'not' - to avoid specific values.
|
// If negated = true , treat this as a 'not' - to avoid specific values.
|
||||||
// Values - list of values we look for in attribute name
|
// Values - list of values we look for in attribute name
|
||||||
func (j AuroraJob) AddValueConstraint(name string, negated bool, values ...string) Job {
|
func (j *AuroraJob) AddValueConstraint(name string, negated bool, values ...string) Job {
|
||||||
constraintValues := make(map[string]bool)
|
if _, ok := j.constraints[name]; !ok {
|
||||||
for _, value := range values {
|
j.constraints[name] = &aurora.Constraint{Name: name}
|
||||||
constraintValues[value] = true
|
j.jobConfig.TaskConfig.Constraints = append(j.jobConfig.TaskConfig.Constraints, j.constraints[name])
|
||||||
|
}
|
||||||
|
|
||||||
|
j.constraints[name].Constraint = &aurora.TaskConstraint{
|
||||||
|
Value: &aurora.ValueConstraint{
|
||||||
|
Negated: negated,
|
||||||
|
Values: values,
|
||||||
|
},
|
||||||
|
Limit: nil,
|
||||||
}
|
}
|
||||||
j.jobConfig.TaskConfig.Constraints[&aurora.Constraint{name,
|
|
||||||
&aurora.TaskConstraint{&aurora.ValueConstraint{negated, constraintValues}, nil}}] = true
|
|
||||||
|
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddLimitConstraint allows the user to limit how many tasks form the same Job are run on a single host.
|
||||||
|
// If the name matches a constraint that was previously set, the previous value will be
|
||||||
|
// overwritten. In case the previous constraint attached to the name was of type Value, the constraint will be clobbered
|
||||||
|
// by this new Limit constraint.
|
||||||
// From Aurora Docs:
|
// From Aurora Docs:
|
||||||
// A constraint that specifies the maximum number of active tasks on a host with
|
// A constraint that specifies the maximum number of active tasks on a host with
|
||||||
// a matching attribute that may be scheduled simultaneously.
|
// a matching attribute that may be scheduled simultaneously.
|
||||||
func (j AuroraJob) AddLimitConstraint(name string, limit int32) Job {
|
func (j *AuroraJob) AddLimitConstraint(name string, limit int32) Job {
|
||||||
j.jobConfig.TaskConfig.Constraints[&aurora.Constraint{name,
|
if _, ok := j.constraints[name]; !ok {
|
||||||
&aurora.TaskConstraint{nil, &aurora.LimitConstraint{limit}}}] = true
|
j.constraints[name] = &aurora.Constraint{Name: name}
|
||||||
|
j.jobConfig.TaskConfig.Constraints = append(j.jobConfig.TaskConfig.Constraints, j.constraints[name])
|
||||||
|
}
|
||||||
|
|
||||||
|
j.constraints[name].Constraint = &aurora.TaskConstraint{
|
||||||
|
Value: nil,
|
||||||
|
Limit: &aurora.LimitConstraint{Limit: limit},
|
||||||
|
}
|
||||||
|
|
||||||
return j
|
return j
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AddDedicatedConstraint is a convenience function that allows the user to
|
||||||
|
// add a dedicated constraint to a Job configuration.
|
||||||
|
// In case a previous dedicated constraint was set, it will be clobbered by this new value.
|
||||||
|
func (j *AuroraJob) AddDedicatedConstraint(role, name string) Job {
|
||||||
|
j.AddValueConstraint("dedicated", false, role+"/"+name)
|
||||||
|
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
||||||
|
// Container sets a container to run for the job configuration to run.
|
||||||
|
func (j *AuroraJob) Container(container Container) Job {
|
||||||
|
j.jobConfig.TaskConfig.Container = container.Build()
|
||||||
|
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
||||||
|
// PartitionPolicy sets a partition policy for the job configuration to implement.
|
||||||
|
func (j *AuroraJob) PartitionPolicy(policy *aurora.PartitionPolicy) Job {
|
||||||
|
j.jobConfig.TaskConfig.PartitionPolicy = policy
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tier sets the Tier for the Job.
|
||||||
|
func (j *AuroraJob) Tier(tier string) Job {
|
||||||
|
j.jobConfig.TaskConfig.Tier = &tier
|
||||||
|
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
||||||
|
// SlaPolicy sets an SlaPolicy for the Job.
|
||||||
|
func (j *AuroraJob) SlaPolicy(policy *aurora.SlaPolicy) Job {
|
||||||
|
j.jobConfig.TaskConfig.SlaPolicy = policy
|
||||||
|
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *AuroraJob) Priority(priority int32) Job {
|
||||||
|
j.jobConfig.TaskConfig.Priority = priority
|
||||||
|
return j
|
||||||
|
}
|
||||||
|
|
87
logger.go
Normal file
87
logger.go
Normal file
|
@ -0,0 +1,87 @@
|
||||||
|
/**
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package realis
|
||||||
|
|
||||||
|
type logger interface {
|
||||||
|
Println(v ...interface{})
|
||||||
|
Printf(format string, v ...interface{})
|
||||||
|
Print(v ...interface{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoopLogger is a logger that can be attached to the client which will not print anything.
|
||||||
|
type NoopLogger struct{}
|
||||||
|
|
||||||
|
// Printf is a NOOP function here.
|
||||||
|
func (NoopLogger) Printf(format string, a ...interface{}) {}
|
||||||
|
|
||||||
|
// Print is a NOOP function here.
|
||||||
|
func (NoopLogger) Print(a ...interface{}) {}
|
||||||
|
|
||||||
|
// Println is a NOOP function here.
|
||||||
|
func (NoopLogger) Println(a ...interface{}) {}
|
||||||
|
|
||||||
|
// LevelLogger is a logger that can be configured to output different levels of information: Debug and Trace.
|
||||||
|
// Trace should only be enabled when very in depth information about the sequence of events a function took is needed.
|
||||||
|
type LevelLogger struct {
|
||||||
|
logger
|
||||||
|
debug bool
|
||||||
|
trace bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnableDebug enables debug level logging for the LevelLogger
|
||||||
|
func (l *LevelLogger) EnableDebug(enable bool) {
|
||||||
|
l.debug = enable
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnableTrace enables trace level logging for the LevelLogger
|
||||||
|
func (l *LevelLogger) EnableTrace(enable bool) {
|
||||||
|
l.trace = enable
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l LevelLogger) debugPrintf(format string, a ...interface{}) {
|
||||||
|
if l.debug {
|
||||||
|
l.Printf("[DEBUG] "+format, a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l LevelLogger) debugPrint(a ...interface{}) {
|
||||||
|
if l.debug {
|
||||||
|
l.Print(append([]interface{}{"[DEBUG] "}, a...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l LevelLogger) debugPrintln(a ...interface{}) {
|
||||||
|
if l.debug {
|
||||||
|
l.Println(append([]interface{}{"[DEBUG] "}, a...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l LevelLogger) tracePrintf(format string, a ...interface{}) {
|
||||||
|
if l.trace {
|
||||||
|
l.Printf("[TRACE] "+format, a...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l LevelLogger) tracePrint(a ...interface{}) {
|
||||||
|
if l.trace {
|
||||||
|
l.Print(append([]interface{}{"[TRACE] "}, a...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l LevelLogger) tracePrintln(a ...interface{}) {
|
||||||
|
if l.trace {
|
||||||
|
l.Println(append([]interface{}{"[TRACE] "}, a...)...)
|
||||||
|
}
|
||||||
|
}
|
297
monitors.go
297
monitors.go
|
@ -12,76 +12,285 @@
|
||||||
* limitations under the License.
|
* limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Collection of monitors to create synchronicity
|
|
||||||
package realis
|
package realis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"gen-go/apache/aurora"
|
|
||||||
"github.com/rdelval/gorealis/response"
|
|
||||||
"os"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Monitor is a wrapper for the Realis client which allows us to have functions
|
||||||
|
// with the same name for Monitoring purposes.
|
||||||
|
// TODO(rdelvalle): Deprecate monitors and instead add prefix Monitor to
|
||||||
|
// all functions in this file like it is done in V2.
|
||||||
type Monitor struct {
|
type Monitor struct {
|
||||||
Client Realis
|
Client Realis
|
||||||
}
|
}
|
||||||
|
|
||||||
// Polls the scheduler every certain amount of time to see if the update has succeeded
|
// JobUpdate polls the scheduler every certain amount of time to see if the update has entered a terminal state.
|
||||||
func (m *Monitor) JobUpdate(updateKey aurora.JobUpdateKey, interval int, timeout int) bool {
|
func (m *Monitor) JobUpdate(
|
||||||
|
updateKey aurora.JobUpdateKey,
|
||||||
|
interval int,
|
||||||
|
timeout int) (bool, error) {
|
||||||
|
|
||||||
|
updateQ := aurora.JobUpdateQuery{
|
||||||
|
Key: &updateKey,
|
||||||
|
Limit: 1,
|
||||||
|
UpdateStatuses: TerminalUpdateStates(),
|
||||||
|
}
|
||||||
|
updateSummaries, err := m.JobUpdateQuery(
|
||||||
|
updateQ,
|
||||||
|
time.Duration(interval)*time.Second,
|
||||||
|
time.Duration(timeout)*time.Second)
|
||||||
|
|
||||||
|
status := updateSummaries[0].State.Status
|
||||||
|
|
||||||
for i := 0; i*interval <= timeout; i++ {
|
|
||||||
respDetail, err := m.Client.JobUpdateDetails(updateKey)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
return false, err
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
updateDetail := response.JobUpdateDetails(respDetail)
|
m.Client.RealisConfig().logger.Printf("job update status: %v\n", status)
|
||||||
|
|
||||||
status := updateDetail.Update.Summary.State.Status
|
|
||||||
|
|
||||||
if _, ok := aurora.ACTIVE_JOB_UPDATE_STATES[status]; !ok {
|
|
||||||
|
|
||||||
// Rolled forward is the only state in which an update has been successfully updated
|
// Rolled forward is the only state in which an update has been successfully updated
|
||||||
// if we encounter an inactive state and it is not at rolled forward, update failed
|
// if we encounter an inactive state and it is not at rolled forward, update failed
|
||||||
if status == aurora.JobUpdateStatus_ROLLED_FORWARD {
|
switch status {
|
||||||
fmt.Println("Update succeded")
|
case aurora.JobUpdateStatus_ROLLED_FORWARD:
|
||||||
return true
|
return true, nil
|
||||||
} else {
|
case aurora.JobUpdateStatus_ROLLED_BACK,
|
||||||
fmt.Println("Update failed")
|
aurora.JobUpdateStatus_ABORTED,
|
||||||
return false
|
aurora.JobUpdateStatus_ERROR,
|
||||||
|
aurora.JobUpdateStatus_FAILED:
|
||||||
|
return false, errors.Errorf("bad terminal state for update: %v", status)
|
||||||
|
default:
|
||||||
|
return false, errors.Errorf("unexpected update state: %v", status)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("Polling, update still active...")
|
// JobUpdateStatus polls the scheduler every certain amount of time to see if the update has entered a specified state.
|
||||||
time.Sleep(time.Duration(interval) * time.Second)
|
func (m *Monitor) JobUpdateStatus(updateKey aurora.JobUpdateKey,
|
||||||
|
desiredStatuses []aurora.JobUpdateStatus,
|
||||||
|
interval, timeout time.Duration) (aurora.JobUpdateStatus, error) {
|
||||||
|
updateQ := aurora.JobUpdateQuery{
|
||||||
|
Key: &updateKey,
|
||||||
|
Limit: 1,
|
||||||
|
UpdateStatuses: desiredStatuses,
|
||||||
}
|
}
|
||||||
|
summary, err := m.JobUpdateQuery(updateQ, interval, timeout)
|
||||||
fmt.Println("Timed out")
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval int, timeout int) bool {
|
|
||||||
|
|
||||||
for i := 0; i*interval < timeout; i++ {
|
|
||||||
|
|
||||||
live, err := m.Client.GetInstanceIds(key, aurora.LIVE_STATES)
|
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
return 0, err
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if(len(live) == int(instances)){
|
return summary[0].State.Status, nil
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("Polling, instances running: ", len(live))
|
// JobUpdateQuery polls the scheduler every certain amount of time to see if the query call returns any results.
|
||||||
time.Sleep(time.Duration(interval) * time.Second)
|
func (m *Monitor) JobUpdateQuery(
|
||||||
|
updateQuery aurora.JobUpdateQuery,
|
||||||
|
interval time.Duration,
|
||||||
|
timeout time.Duration) ([]*aurora.JobUpdateSummary, error) {
|
||||||
|
|
||||||
|
ticker := time.NewTicker(interval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
timer := time.NewTimer(timeout)
|
||||||
|
defer timer.Stop()
|
||||||
|
|
||||||
|
var cliErr error
|
||||||
|
var respDetail *aurora.Response
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
respDetail, cliErr = m.Client.GetJobUpdateSummaries(&updateQuery)
|
||||||
|
if cliErr != nil {
|
||||||
|
return nil, cliErr
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Println("Timed out")
|
updateSummaries := respDetail.Result_.GetJobUpdateSummariesResult_.UpdateSummaries
|
||||||
return false
|
if len(updateSummaries) >= 1 {
|
||||||
|
return updateSummaries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
case <-timer.C:
|
||||||
|
return nil, newTimedoutError(errors.New("job update monitor timed out"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AutoPausedUpdateMonitor is a special monitor for auto pause enabled batch updates. This monitor ensures that the update
|
||||||
|
// being monitored is capable of auto pausing and has auto pausing enabled. After verifying this information,
|
||||||
|
// the monitor watches for the job to enter the ROLL_FORWARD_PAUSED state and calculates the current batch
|
||||||
|
// the update is in using information from the update configuration.
|
||||||
|
func (m *Monitor) AutoPausedUpdateMonitor(key aurora.JobUpdateKey, interval, timeout time.Duration) (int, error) {
|
||||||
|
key.Job = &aurora.JobKey{
|
||||||
|
Role: key.Job.Role,
|
||||||
|
Environment: key.Job.Environment,
|
||||||
|
Name: key.Job.Name,
|
||||||
|
}
|
||||||
|
query := aurora.JobUpdateQuery{
|
||||||
|
UpdateStatuses: aurora.ACTIVE_JOB_UPDATE_STATES,
|
||||||
|
Limit: 1,
|
||||||
|
Key: &key,
|
||||||
|
}
|
||||||
|
|
||||||
|
response, err := m.Client.JobUpdateDetails(query)
|
||||||
|
if err != nil {
|
||||||
|
return -1, errors.Wrap(err, "unable to get information about update")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO (rdelvalle): check for possible nil values when going down the list of structs
|
||||||
|
updateDetails := response.Result_.GetJobUpdateDetailsResult_.DetailsList
|
||||||
|
if len(updateDetails) == 0 {
|
||||||
|
return -1, errors.Errorf("details for update could not be found")
|
||||||
|
}
|
||||||
|
|
||||||
|
updateStrategy := updateDetails[0].Update.Instructions.Settings.UpdateStrategy
|
||||||
|
|
||||||
|
var batchSizes []int32
|
||||||
|
switch {
|
||||||
|
case updateStrategy.IsSetVarBatchStrategy():
|
||||||
|
batchSizes = updateStrategy.VarBatchStrategy.GroupSizes
|
||||||
|
if !updateStrategy.VarBatchStrategy.AutopauseAfterBatch {
|
||||||
|
return -1, errors.Errorf("update does not have auto pause enabled")
|
||||||
|
}
|
||||||
|
case updateStrategy.IsSetBatchStrategy():
|
||||||
|
batchSizes = []int32{updateStrategy.BatchStrategy.GroupSize}
|
||||||
|
if !updateStrategy.BatchStrategy.AutopauseAfterBatch {
|
||||||
|
return -1, errors.Errorf("update does not have auto pause enabled")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return -1, errors.Errorf("update is not using a batch update strategy")
|
||||||
|
}
|
||||||
|
|
||||||
|
query.UpdateStatuses = append(TerminalUpdateStates(), aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED)
|
||||||
|
summary, err := m.JobUpdateQuery(query, interval, timeout)
|
||||||
|
if err != nil {
|
||||||
|
return -1, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !(summary[0].State.Status == aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED ||
|
||||||
|
summary[0].State.Status == aurora.JobUpdateStatus_ROLLED_FORWARD) {
|
||||||
|
return -1, errors.Errorf("update is in a terminal state %v", summary[0].State.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
updatingInstances := make(map[int32]struct{})
|
||||||
|
for _, e := range updateDetails[0].InstanceEvents {
|
||||||
|
// We only care about INSTANCE_UPDATING actions because we only care that they've been attempted
|
||||||
|
if e != nil && e.GetAction() == aurora.JobUpdateAction_INSTANCE_UPDATING {
|
||||||
|
updatingInstances[e.GetInstanceId()] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return calculateCurrentBatch(int32(len(updatingInstances)), batchSizes), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Instances will monitor a Job until all instances enter one of the LIVE_STATES
|
||||||
|
func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval, timeout int) (bool, error) {
|
||||||
|
return m.ScheduleStatus(key, instances, LiveStates, interval, timeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScheduleStatus will monitor a Job until all instances enter a desired status.
|
||||||
|
// Defaults sets of desired statuses provided by the thrift API include:
|
||||||
|
// ACTIVE_STATES, SLAVE_ASSIGNED_STATES, LIVE_STATES, and TERMINAL_STATES
|
||||||
|
func (m *Monitor) ScheduleStatus(
|
||||||
|
key *aurora.JobKey,
|
||||||
|
instanceCount int32,
|
||||||
|
desiredStatuses map[aurora.ScheduleStatus]bool,
|
||||||
|
interval int,
|
||||||
|
timeout int) (bool, error) {
|
||||||
|
|
||||||
|
ticker := time.NewTicker(time.Second * time.Duration(interval))
|
||||||
|
defer ticker.Stop()
|
||||||
|
timer := time.NewTimer(time.Second * time.Duration(timeout))
|
||||||
|
defer timer.Stop()
|
||||||
|
|
||||||
|
wantedStatuses := make([]aurora.ScheduleStatus, 0)
|
||||||
|
|
||||||
|
for status := range desiredStatuses {
|
||||||
|
wantedStatuses = append(wantedStatuses, status)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
|
||||||
|
// Query Aurora for the state of the job key ever interval
|
||||||
|
instCount, cliErr := m.Client.GetInstanceIds(key, wantedStatuses)
|
||||||
|
if cliErr != nil {
|
||||||
|
return false, errors.Wrap(cliErr, "Unable to communicate with Aurora")
|
||||||
|
}
|
||||||
|
if len(instCount) == int(instanceCount) {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
case <-timer.C:
|
||||||
|
|
||||||
|
// If the timer runs out, return a timeout error to user
|
||||||
|
return false, newTimedoutError(errors.New("schedule status monitor timed out"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostMaintenance will monitor host status until all hosts match the status provided.
|
||||||
|
// Returns a map where the value is true if the host
|
||||||
|
// is in one of the desired mode(s) or false if it is not as of the time when the monitor exited.
|
||||||
|
func (m *Monitor) HostMaintenance(
|
||||||
|
hosts []string,
|
||||||
|
modes []aurora.MaintenanceMode,
|
||||||
|
interval, timeout int) (map[string]bool, error) {
|
||||||
|
|
||||||
|
// Transform modes to monitor for into a set for easy lookup
|
||||||
|
desiredMode := make(map[aurora.MaintenanceMode]struct{})
|
||||||
|
for _, mode := range modes {
|
||||||
|
desiredMode[mode] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Turn slice into a host set to eliminate duplicates.
|
||||||
|
// We also can't use a simple count because multiple modes means
|
||||||
|
// we can have multiple matches for a single host.
|
||||||
|
// I.e. host A transitions from ACTIVE to DRAINING to DRAINED while monitored
|
||||||
|
remainingHosts := make(map[string]struct{})
|
||||||
|
for _, host := range hosts {
|
||||||
|
remainingHosts[host] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
hostResult := make(map[string]bool)
|
||||||
|
|
||||||
|
ticker := time.NewTicker(time.Second * time.Duration(interval))
|
||||||
|
defer ticker.Stop()
|
||||||
|
timer := time.NewTimer(time.Second * time.Duration(timeout))
|
||||||
|
defer timer.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
// Client call has multiple retries internally
|
||||||
|
_, result, err := m.Client.MaintenanceStatus(hosts...)
|
||||||
|
if err != nil {
|
||||||
|
// Error is either a payload error or a severe connection error
|
||||||
|
for host := range remainingHosts {
|
||||||
|
hostResult[host] = false
|
||||||
|
}
|
||||||
|
return hostResult, errors.Wrap(err, "client error in monitor")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, status := range result.GetStatuses() {
|
||||||
|
|
||||||
|
if _, ok := desiredMode[status.GetMode()]; ok {
|
||||||
|
hostResult[status.GetHost()] = true
|
||||||
|
delete(remainingHosts, status.GetHost())
|
||||||
|
|
||||||
|
if len(remainingHosts) == 0 {
|
||||||
|
return hostResult, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case <-timer.C:
|
||||||
|
for host := range remainingHosts {
|
||||||
|
hostResult[host] = false
|
||||||
|
}
|
||||||
|
|
||||||
|
return hostResult, newTimedoutError(errors.New("host maintenance monitor timed out"))
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
309
realis_admin.go
Normal file
309
realis_admin.go
Normal file
|
@ -0,0 +1,309 @@
|
||||||
|
package realis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO(rdelvalle): Consider moving these functions to another interface. It would be a backwards incompatible change,
|
||||||
|
// but would add safety.
|
||||||
|
|
||||||
|
// Set a list of nodes to DRAINING. This means nothing will be able to be scheduled on them and any existing
|
||||||
|
// tasks will be killed and re-scheduled elsewhere in the cluster. Tasks from DRAINING nodes are not guaranteed
|
||||||
|
// to return to running unless there is enough capacity in the cluster to run them.
|
||||||
|
func (r *realisClient) DrainHosts(hosts ...string) (*aurora.Response, *aurora.DrainHostsResult_, error) {
|
||||||
|
|
||||||
|
var result *aurora.DrainHostsResult_
|
||||||
|
|
||||||
|
if len(hosts) == 0 {
|
||||||
|
return nil, nil, errors.New("no hosts provided to drain")
|
||||||
|
}
|
||||||
|
|
||||||
|
drainList := aurora.NewHosts()
|
||||||
|
drainList.HostNames = hosts
|
||||||
|
|
||||||
|
r.logger.debugPrintf("DrainHosts Thrift Payload: %v\n", drainList)
|
||||||
|
|
||||||
|
resp, retryErr := r.thriftCallWithRetries(
|
||||||
|
false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.DrainHosts(context.TODO(), drainList)
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.GetResult_() != nil {
|
||||||
|
result = resp.GetResult_().GetDrainHostsResult_()
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start SLA Aware Drain.
|
||||||
|
// defaultSlaPolicy is the fallback SlaPolicy to use if a task does not have an SlaPolicy.
|
||||||
|
// After timeoutSecs, tasks will be forcefully drained without checking SLA.
|
||||||
|
func (r *realisClient) SLADrainHosts(
|
||||||
|
policy *aurora.SlaPolicy,
|
||||||
|
timeout int64,
|
||||||
|
hosts ...string) (*aurora.DrainHostsResult_, error) {
|
||||||
|
var result *aurora.DrainHostsResult_
|
||||||
|
|
||||||
|
if len(hosts) == 0 {
|
||||||
|
return nil, errors.New("no hosts provided to drain")
|
||||||
|
}
|
||||||
|
|
||||||
|
if policy == nil || policy.CountSetFieldsSlaPolicy() == 0 {
|
||||||
|
policy = &defaultSlaPolicy
|
||||||
|
r.logger.Printf("Warning: start draining with default sla policy %v", policy)
|
||||||
|
}
|
||||||
|
|
||||||
|
if timeout < 0 {
|
||||||
|
r.logger.Printf("Warning: timeout %d secs is invalid, draining with default timeout %d secs",
|
||||||
|
timeout,
|
||||||
|
defaultSlaDrainTimeoutSecs)
|
||||||
|
timeout = defaultSlaDrainTimeoutSecs
|
||||||
|
}
|
||||||
|
|
||||||
|
drainList := aurora.NewHosts()
|
||||||
|
drainList.HostNames = hosts
|
||||||
|
|
||||||
|
r.logger.debugPrintf("SLADrainHosts Thrift Payload: %v\n", drainList)
|
||||||
|
|
||||||
|
resp, retryErr := r.thriftCallWithRetries(
|
||||||
|
false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.SlaDrainHosts(context.TODO(), drainList, policy, timeout)
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return result, errors.Wrap(retryErr, "Unable to recover connection")
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.GetResult_() != nil {
|
||||||
|
result = resp.GetResult_().GetDrainHostsResult_()
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *realisClient) StartMaintenance(hosts ...string) (*aurora.Response, *aurora.StartMaintenanceResult_, error) {
|
||||||
|
|
||||||
|
var result *aurora.StartMaintenanceResult_
|
||||||
|
|
||||||
|
if len(hosts) == 0 {
|
||||||
|
return nil, nil, errors.New("no hosts provided to start maintenance on")
|
||||||
|
}
|
||||||
|
|
||||||
|
hostList := aurora.NewHosts()
|
||||||
|
hostList.HostNames = hosts
|
||||||
|
|
||||||
|
r.logger.debugPrintf("StartMaintenance Thrift Payload: %v\n", hostList)
|
||||||
|
|
||||||
|
resp, retryErr := r.thriftCallWithRetries(
|
||||||
|
false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.StartMaintenance(context.TODO(), hostList)
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.GetResult_() != nil {
|
||||||
|
result = resp.GetResult_().GetStartMaintenanceResult_()
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *realisClient) EndMaintenance(hosts ...string) (*aurora.Response, *aurora.EndMaintenanceResult_, error) {
|
||||||
|
|
||||||
|
var result *aurora.EndMaintenanceResult_
|
||||||
|
|
||||||
|
if len(hosts) == 0 {
|
||||||
|
return nil, nil, errors.New("no hosts provided to end maintenance on")
|
||||||
|
}
|
||||||
|
|
||||||
|
hostList := aurora.NewHosts()
|
||||||
|
hostList.HostNames = hosts
|
||||||
|
|
||||||
|
r.logger.debugPrintf("EndMaintenance Thrift Payload: %v\n", hostList)
|
||||||
|
|
||||||
|
resp, retryErr := r.thriftCallWithRetries(
|
||||||
|
false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.EndMaintenance(context.TODO(), hostList)
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.GetResult_() != nil {
|
||||||
|
result = resp.GetResult_().GetEndMaintenanceResult_()
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *realisClient) MaintenanceStatus(hosts ...string) (*aurora.Response, *aurora.MaintenanceStatusResult_, error) {
|
||||||
|
|
||||||
|
var result *aurora.MaintenanceStatusResult_
|
||||||
|
|
||||||
|
if len(hosts) == 0 {
|
||||||
|
return nil, nil, errors.New("no hosts provided to get maintenance status from")
|
||||||
|
}
|
||||||
|
|
||||||
|
hostList := aurora.NewHosts()
|
||||||
|
hostList.HostNames = hosts
|
||||||
|
|
||||||
|
r.logger.debugPrintf("MaintenanceStatus Thrift Payload: %v\n", hostList)
|
||||||
|
|
||||||
|
// Make thrift call. If we encounter an error sending the call, attempt to reconnect
|
||||||
|
// and continue trying to resend command until we run out of retries.
|
||||||
|
resp, retryErr := r.thriftCallWithRetries(
|
||||||
|
false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.MaintenanceStatus(context.TODO(), hostList)
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
|
||||||
|
}
|
||||||
|
|
||||||
|
if resp.GetResult_() != nil {
|
||||||
|
result = resp.GetResult_().GetMaintenanceStatusResult_()
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetQuota sets a quota aggregate for the given role
|
||||||
|
// TODO(zircote) Currently investigating an error that is returned
|
||||||
|
// from thrift calls that include resources for `NamedPort` and `NumGpu`
|
||||||
|
func (r *realisClient) SetQuota(role string, cpu *float64, ramMb *int64, diskMb *int64) (*aurora.Response, error) {
|
||||||
|
quota := &aurora.ResourceAggregate{
|
||||||
|
Resources: []*aurora.Resource{{NumCpus: cpu}, {RamMb: ramMb}, {DiskMb: diskMb}},
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, retryErr := r.thriftCallWithRetries(
|
||||||
|
false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.SetQuota(context.TODO(), role, quota)
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return resp, errors.Wrap(retryErr, "Unable to set role quota")
|
||||||
|
}
|
||||||
|
return resp, retryErr
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetQuota returns the resource aggregate for the given role
|
||||||
|
func (r *realisClient) GetQuota(role string) (*aurora.Response, error) {
|
||||||
|
|
||||||
|
resp, retryErr := r.thriftCallWithRetries(
|
||||||
|
false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.GetQuota(context.TODO(), role)
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return resp, errors.Wrap(retryErr, "Unable to get role quota")
|
||||||
|
}
|
||||||
|
return resp, retryErr
|
||||||
|
}
|
||||||
|
|
||||||
|
// Force Aurora Scheduler to perform a snapshot and write to Mesos log
|
||||||
|
func (r *realisClient) Snapshot() error {
|
||||||
|
|
||||||
|
_, retryErr := r.thriftCallWithRetries(
|
||||||
|
false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.Snapshot(context.TODO())
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return errors.Wrap(retryErr, "Unable to recover connection")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Force Aurora Scheduler to write backup file to a file in the backup directory
|
||||||
|
func (r *realisClient) PerformBackup() error {
|
||||||
|
|
||||||
|
_, retryErr := r.thriftCallWithRetries(
|
||||||
|
false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.PerformBackup(context.TODO())
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return errors.Wrap(retryErr, "Unable to recover connection")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *realisClient) ForceImplicitTaskReconciliation() error {
|
||||||
|
|
||||||
|
_, retryErr := r.thriftCallWithRetries(
|
||||||
|
false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.TriggerImplicitTaskReconciliation(context.TODO())
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return errors.Wrap(retryErr, "Unable to recover connection")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *realisClient) ForceExplicitTaskReconciliation(batchSize *int32) error {
|
||||||
|
|
||||||
|
if batchSize != nil && *batchSize < 1 {
|
||||||
|
return errors.New("invalid batch size")
|
||||||
|
}
|
||||||
|
settings := aurora.NewExplicitReconciliationSettings()
|
||||||
|
|
||||||
|
settings.BatchSize = batchSize
|
||||||
|
|
||||||
|
_, retryErr := r.thriftCallWithRetries(false,
|
||||||
|
func() (*aurora.Response, error) {
|
||||||
|
return r.adminClient.TriggerExplicitTaskReconciliation(context.TODO(), settings)
|
||||||
|
},
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
if retryErr != nil {
|
||||||
|
return errors.Wrap(retryErr, "Unable to recover connection")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
1117
realis_e2e_test.go
Normal file
1117
realis_e2e_test.go
Normal file
File diff suppressed because it is too large
Load diff
|
@ -16,14 +16,54 @@
|
||||||
package response
|
package response
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"gen-go/apache/aurora"
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Get key from a response created by a StartJobUpdate call
|
// Get key from a response created by a StartJobUpdate call
|
||||||
func JobUpdateKey(resp *aurora.Response) *aurora.JobUpdateKey {
|
func JobUpdateKey(resp *aurora.Response) *aurora.JobUpdateKey {
|
||||||
return resp.Result_.StartJobUpdateResult_.GetKey()
|
return resp.GetResult_().GetStartJobUpdateResult_().GetKey()
|
||||||
}
|
}
|
||||||
|
|
||||||
func JobUpdateDetails(resp *aurora.Response) *aurora.JobUpdateDetails {
|
func JobUpdateDetails(resp *aurora.Response) []*aurora.JobUpdateDetails {
|
||||||
return resp.Result_.GetJobUpdateDetailsResult_.Details
|
return resp.GetResult_().GetGetJobUpdateDetailsResult_().GetDetailsList()
|
||||||
|
}
|
||||||
|
|
||||||
|
func ScheduleStatusResult(resp *aurora.Response) *aurora.ScheduleStatusResult_ {
|
||||||
|
return resp.GetResult_().GetScheduleStatusResult_()
|
||||||
|
}
|
||||||
|
|
||||||
|
func JobUpdateSummaries(resp *aurora.Response) []*aurora.JobUpdateSummary {
|
||||||
|
if resp.GetResult_() == nil || resp.GetResult_().GetGetJobUpdateSummariesResult_() == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp.GetResult_().GetGetJobUpdateSummariesResult_().GetUpdateSummaries()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Replaced by checks done inside of thriftCallHelper
|
||||||
|
func ResponseCodeCheck(resp *aurora.Response) (*aurora.Response, error) {
|
||||||
|
if resp == nil {
|
||||||
|
return resp, errors.New("Response is nil")
|
||||||
|
}
|
||||||
|
if resp.GetResponseCode() != aurora.ResponseCode_OK {
|
||||||
|
return resp, errors.New(CombineMessage(resp))
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Based on aurora client: src/main/python/apache/aurora/client/base.py
|
||||||
|
func CombineMessage(resp *aurora.Response) string {
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
for _, detail := range resp.GetDetails() {
|
||||||
|
buffer.WriteString(detail.GetMessage() + ", ")
|
||||||
|
}
|
||||||
|
|
||||||
|
if buffer.Len() > 0 {
|
||||||
|
buffer.Truncate(buffer.Len() - 2) // Get rid of trailing comma + space
|
||||||
|
}
|
||||||
|
return buffer.String()
|
||||||
}
|
}
|
||||||
|
|
294
retry.go
Normal file
294
retry.go
Normal file
|
@ -0,0 +1,294 @@
|
||||||
|
/**
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package realis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/apache/thrift/lib/go/thrift"
|
||||||
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
|
"github.com/paypal/gorealis/response"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Backoff determines how the retry mechanism should react after each failure and how many failures it should
// tolerate.
type Backoff struct {
	Duration time.Duration // the base duration
	Factor   float64       // Duration is multiplied by a factor each iteration
	Jitter   float64       // The amount of jitter applied each iteration
	Steps    int           // Exit with error after this many steps
}
|
||||||
|
|
||||||
|
// Jitter returns a time.Duration between duration and duration + maxFactor *
|
||||||
|
// duration.
|
||||||
|
//
|
||||||
|
// This allows clients to avoid converging on periodic behavior. If maxFactor
|
||||||
|
// is 0.0, a suggested default value will be chosen.
|
||||||
|
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
|
||||||
|
if maxFactor <= 0.0 {
|
||||||
|
maxFactor = 1.0
|
||||||
|
}
|
||||||
|
wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
|
||||||
|
return wait
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConditionFunc returns true if the condition is satisfied, or an error
|
||||||
|
// if the loop should be aborted.
|
||||||
|
type ConditionFunc func() (done bool, err error)
|
||||||
|
|
||||||
|
// ExponentialBackoff is a modified version of the Kubernetes exponential-backoff code.
|
||||||
|
// It repeats a condition check with exponential backoff and checks the condition up to
|
||||||
|
// Steps times, increasing the wait by multiplying the previous duration by Factor.
|
||||||
|
//
|
||||||
|
// If Jitter is greater than zero, a random amount of each duration is added
|
||||||
|
// (between duration and duration*(1+jitter)).
|
||||||
|
//
|
||||||
|
// If the condition never returns true, ErrWaitTimeout is returned. Errors
|
||||||
|
// do not cause the function to return.
|
||||||
|
func ExponentialBackoff(backoff Backoff, logger logger, condition ConditionFunc) error {
|
||||||
|
var err error
|
||||||
|
var ok bool
|
||||||
|
var curStep int
|
||||||
|
duration := backoff.Duration
|
||||||
|
|
||||||
|
for curStep = 0; curStep < backoff.Steps; curStep++ {
|
||||||
|
|
||||||
|
// Only sleep if it's not the first iteration.
|
||||||
|
if curStep != 0 {
|
||||||
|
adjusted := duration
|
||||||
|
if backoff.Jitter > 0.0 {
|
||||||
|
adjusted = Jitter(duration, backoff.Jitter)
|
||||||
|
}
|
||||||
|
|
||||||
|
logger.Printf(
|
||||||
|
"A retryable error occurred during function call, backing off for %v before retrying\n", adjusted)
|
||||||
|
time.Sleep(adjusted)
|
||||||
|
duration = time.Duration(float64(duration) * backoff.Factor)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Execute function passed in.
|
||||||
|
ok, err = condition()
|
||||||
|
|
||||||
|
// If the function executed says it succeeded, stop retrying
|
||||||
|
if ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
// If the error is temporary, continue retrying.
|
||||||
|
if !IsTemporary(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
// Print out the temporary error we experienced.
|
||||||
|
logger.Println(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if curStep > 1 {
|
||||||
|
logger.Printf("retried this function call %d time(s)", curStep)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Provide more information to the user wherever possible
|
||||||
|
if err != nil {
|
||||||
|
return newRetryError(errors.Wrap(err, "ran out of retries"), curStep)
|
||||||
|
}
|
||||||
|
|
||||||
|
return newRetryError(errors.New("ran out of retries"), curStep)
|
||||||
|
}
|
||||||
|
|
||||||
|
type auroraThriftCall func() (resp *aurora.Response, err error)
|
||||||
|
|
||||||
|
// verifyOntimeout defines the type of function that will be used to verify whether a Thirft call to the Scheduler
|
||||||
|
// made it to the scheduler or not. In general, these types of functions will have to interact with the scheduler
|
||||||
|
// through the very same Thrift API which previously encountered a time out from the client.
|
||||||
|
// This means that the functions themselves should be kept to a minimum number of Thrift calls.
|
||||||
|
// It should also be noted that this is a best effort mechanism and
|
||||||
|
// is likely to fail for the same reasons that the original call failed.
|
||||||
|
type verifyOnTimeout func() (*aurora.Response, bool)
|
||||||
|
|
||||||
|
// Duplicates the functionality of ExponentialBackoff but is specifically targeted towards ThriftCalls.
|
||||||
|
func (r *realisClient) thriftCallWithRetries(
|
||||||
|
returnOnTimeout bool,
|
||||||
|
thriftCall auroraThriftCall,
|
||||||
|
verifyOnTimeout verifyOnTimeout) (*aurora.Response, error) {
|
||||||
|
|
||||||
|
var resp *aurora.Response
|
||||||
|
var clientErr error
|
||||||
|
var curStep int
|
||||||
|
timeouts := 0
|
||||||
|
|
||||||
|
backoff := r.config.backoff
|
||||||
|
duration := backoff.Duration
|
||||||
|
|
||||||
|
for curStep = 0; curStep < backoff.Steps; curStep++ {
|
||||||
|
|
||||||
|
// If this isn't our first try, backoff before the next try.
|
||||||
|
if curStep != 0 {
|
||||||
|
adjusted := duration
|
||||||
|
if backoff.Jitter > 0.0 {
|
||||||
|
adjusted = Jitter(duration, backoff.Jitter)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.logger.Printf(
|
||||||
|
"A retryable error occurred during thrift call, backing off for %v before retry %v",
|
||||||
|
adjusted,
|
||||||
|
curStep)
|
||||||
|
|
||||||
|
time.Sleep(adjusted)
|
||||||
|
duration = time.Duration(float64(duration) * backoff.Factor)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only allow one go-routine make use or modify the thrift client connection.
|
||||||
|
// Placing this in an anonymous function in order to create a new, short-lived stack allowing unlock
|
||||||
|
// to be run in case of a panic inside of thriftCall.
|
||||||
|
func() {
|
||||||
|
r.lock.Lock()
|
||||||
|
defer r.lock.Unlock()
|
||||||
|
|
||||||
|
resp, clientErr = thriftCall()
|
||||||
|
|
||||||
|
r.logger.tracePrintf("Aurora Thrift Call ended resp: %v clientErr: %v", resp, clientErr)
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Check if our thrift call is returning an error.
|
||||||
|
if clientErr != nil {
|
||||||
|
// Print out the error to the user
|
||||||
|
r.logger.Printf("Client Error: %v", clientErr)
|
||||||
|
|
||||||
|
temporary, timedout := isConnectionError(clientErr)
|
||||||
|
if !temporary && r.RealisConfig().failOnPermanentErrors {
|
||||||
|
return nil, errors.Wrap(clientErr, "permanent connection error")
|
||||||
|
}
|
||||||
|
|
||||||
|
// There exists a corner case where thrift payload was received by Aurora but
|
||||||
|
// connection timed out before Aurora was able to reply.
|
||||||
|
// Users can take special action on a timeout by using IsTimedout and reacting accordingly
|
||||||
|
// if they have configured the client to return on a timeout.
|
||||||
|
if timedout && returnOnTimeout {
|
||||||
|
return resp, newTimedoutError(errors.New("client connection closed before server answer"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// In the future, reestablish connection should be able to check if it is actually possible
|
||||||
|
// to make a thrift call to Aurora. For now, a reconnect should always lead to a retry.
|
||||||
|
// Ignoring error due to the fact that an error should be retried regardless
|
||||||
|
reestablishErr := r.ReestablishConn()
|
||||||
|
if reestablishErr != nil {
|
||||||
|
r.logger.debugPrintf("error re-establishing connection ", reestablishErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If users did not opt for a return on timeout in order to react to a timedout error,
|
||||||
|
// attempt to verify that the call made it to the scheduler after the connection was re-established.
|
||||||
|
if timedout {
|
||||||
|
timeouts++
|
||||||
|
r.logger.debugPrintf(
|
||||||
|
"Client closed connection %d times before server responded, "+
|
||||||
|
"consider increasing connection timeout",
|
||||||
|
timeouts)
|
||||||
|
|
||||||
|
// Allow caller to provide a function which checks if the original call was successful before
|
||||||
|
// it timed out.
|
||||||
|
if verifyOnTimeout != nil {
|
||||||
|
if verifyResp, ok := verifyOnTimeout(); ok {
|
||||||
|
r.logger.Print("verified that the call went through successfully after a client timeout")
|
||||||
|
// Response here might be different than the original as it is no longer constructed
|
||||||
|
// by the scheduler but mimicked.
|
||||||
|
// This is OK since the scheduler is very unlikely to change responses at this point in its
|
||||||
|
// development cycle but we must be careful to not return an incorrectly constructed response.
|
||||||
|
return verifyResp, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retry the thrift payload
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// If there was no client error, but the response is nil, something went wrong.
|
||||||
|
// Ideally, we'll never encounter this but we're placing a safeguard here.
|
||||||
|
if resp == nil {
|
||||||
|
return nil, errors.New("response from aurora is nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check Response Code from thrift and make a decision to continue retrying or not.
|
||||||
|
switch responseCode := resp.GetResponseCode(); responseCode {
|
||||||
|
|
||||||
|
// If the thrift call succeeded, stop retrying
|
||||||
|
case aurora.ResponseCode_OK:
|
||||||
|
return resp, nil
|
||||||
|
|
||||||
|
// If the response code is transient, continue retrying
|
||||||
|
case aurora.ResponseCode_ERROR_TRANSIENT:
|
||||||
|
r.logger.Println("Aurora replied with Transient error code, retrying")
|
||||||
|
continue
|
||||||
|
|
||||||
|
// Failure scenarios, these indicate a bad payload or a bad config. Stop retrying.
|
||||||
|
case aurora.ResponseCode_INVALID_REQUEST,
|
||||||
|
aurora.ResponseCode_ERROR,
|
||||||
|
aurora.ResponseCode_AUTH_FAILED,
|
||||||
|
aurora.ResponseCode_JOB_UPDATING_ERROR:
|
||||||
|
r.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
|
||||||
|
return resp, errors.New(response.CombineMessage(resp))
|
||||||
|
|
||||||
|
// The only case that should fall down to here is a WARNING response code.
|
||||||
|
// It is currently not used as a response in the scheduler so it is unknown how to handle it.
|
||||||
|
default:
|
||||||
|
r.logger.debugPrintf("unhandled response code %v received from Aurora\n", responseCode)
|
||||||
|
return nil, errors.Errorf("unhandled response code from Aurora %v", responseCode.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if curStep > 1 {
|
||||||
|
r.config.logger.Printf("this thrift call was retried %d time(s)", curStep)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Provide more information to the user wherever possible.
|
||||||
|
if clientErr != nil {
|
||||||
|
return nil, newRetryError(errors.Wrap(clientErr, "ran out of retries, including latest error"), curStep)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, newRetryError(errors.New("ran out of retries"), curStep)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isConnectionError processes the error received by the client.
|
||||||
|
// The return values indicate weather this was determined to be a temporary error
|
||||||
|
// and weather it was determined to be a timeout error
|
||||||
|
func isConnectionError(err error) (bool, bool) {
|
||||||
|
|
||||||
|
// Determine if error is a temporary URL error by going up the stack
|
||||||
|
transportException, ok := err.(thrift.TTransportException)
|
||||||
|
if !ok {
|
||||||
|
return false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
urlError, ok := transportException.Err().(*url.Error)
|
||||||
|
if !ok {
|
||||||
|
return false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// EOF error occurs when the server closes the read buffer of the client. This is common
|
||||||
|
// when the server is overloaded and we consider it temporary.
|
||||||
|
// All other which are not temporary as per the member function Temporary(),
|
||||||
|
// are considered not temporary (permanent).
|
||||||
|
if urlError.Err != io.EOF && !urlError.Temporary() {
|
||||||
|
return false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, urlError.Timeout()
|
||||||
|
}
|
13
runTests.sh
Executable file
13
runTests.sh
Executable file
|
@ -0,0 +1,13 @@
|
||||||
|
#!/bin/bash

# Bring up the Aurora test cluster; if docker-compose fails, bail out immediately
# (exit propagates docker-compose's status).
docker-compose up -d || exit

# Since we run our docker compose setup in bridge mode to be able to run on MacOS, we have to launch a Docker container within the bridge network in order to avoid any routing issues.
docker run --rm -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10-stretch go test -v github.com/paypal/gorealis $@

docker-compose down
|
4
runTestsMac.sh
Normal file
4
runTestsMac.sh
Normal file
|
@ -0,0 +1,4 @@
|
||||||
|
#!/bin/bash

# Our docker compose setup runs in bridge mode (so it works on MacOS), therefore the
# test runner must itself be a container on the bridge network to avoid routing issues.
docker run --rm -t -w /gorealis -v $GOPATH/pkg:/go/pkg -v $(pwd):/gorealis --network gorealis_aurora_cluster golang:1.16-buster go test -v github.com/paypal/gorealis $@
|
125
updatejob.go
125
updatejob.go
|
@ -15,95 +15,174 @@
|
||||||
package realis
|
package realis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"gen-go/apache/aurora"
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Structure to collect all information required to create job update
|
// UpdateJob is a structure to collect all information required to create job update.
|
||||||
type UpdateJob struct {
|
type UpdateJob struct {
|
||||||
Job // SetInstanceCount for job is hidden, access via full qualifier
|
Job // SetInstanceCount for job is hidden, access via full qualifier
|
||||||
req *aurora.JobUpdateRequest
|
req *aurora.JobUpdateRequest
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a default UpdateJob object.
|
// NewDefaultUpdateJob creates an UpdateJob object with opinionated default settings.
|
||||||
func NewUpdateJob(config *aurora.TaskConfig) *UpdateJob {
|
func NewDefaultUpdateJob(config *aurora.TaskConfig) *UpdateJob {
|
||||||
|
|
||||||
req := aurora.NewJobUpdateRequest()
|
req := aurora.NewJobUpdateRequest()
|
||||||
req.TaskConfig = config
|
req.TaskConfig = config
|
||||||
req.Settings = aurora.NewJobUpdateSettings()
|
req.Settings = NewUpdateSettings()
|
||||||
|
|
||||||
|
job, ok := NewJob().(*AuroraJob)
|
||||||
|
if !ok {
|
||||||
|
// This should never happen but it is here as a safeguard
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
job := NewJob().(AuroraJob)
|
|
||||||
job.jobConfig.TaskConfig = config
|
job.jobConfig.TaskConfig = config
|
||||||
|
|
||||||
// Rebuild resource map from TaskConfig
|
// Rebuild resource map from TaskConfig
|
||||||
for ptr := range config.Resources {
|
for _, ptr := range config.Resources {
|
||||||
if ptr.NumCpus != nil {
|
if ptr.NumCpus != nil {
|
||||||
job.resources["cpu"].NumCpus = ptr.NumCpus
|
job.resources[CPU].NumCpus = ptr.NumCpus
|
||||||
continue // Guard against Union violations that Go won't enforce
|
continue // Guard against Union violations that Go won't enforce
|
||||||
}
|
}
|
||||||
|
|
||||||
if ptr.RamMb != nil {
|
if ptr.RamMb != nil {
|
||||||
job.resources["ram"].RamMb = ptr.RamMb
|
job.resources[RAM].RamMb = ptr.RamMb
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if ptr.DiskMb != nil {
|
if ptr.DiskMb != nil {
|
||||||
job.resources["disk"].DiskMb = ptr.DiskMb
|
job.resources[DISK].DiskMb = ptr.DiskMb
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if ptr.NumGpus != nil {
|
||||||
|
job.resources[GPU] = &aurora.Resource{NumGpus: ptr.NumGpus}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Mirrors defaults set by Pystachio
|
// Mirrors defaults set by Pystachio
|
||||||
req.Settings.UpdateOnlyTheseInstances = make(map[*aurora.Range]bool)
|
|
||||||
req.Settings.UpdateGroupSize = 1
|
req.Settings.UpdateGroupSize = 1
|
||||||
req.Settings.WaitForBatchCompletion = false
|
req.Settings.WaitForBatchCompletion = false
|
||||||
req.Settings.MinWaitInInstanceRunningMs = 45000 // Deprecated
|
req.Settings.MinWaitInInstanceRunningMs = 45000
|
||||||
req.Settings.MaxPerInstanceFailures = 0
|
req.Settings.MaxPerInstanceFailures = 0
|
||||||
req.Settings.MaxFailedInstances = 0
|
req.Settings.MaxFailedInstances = 0
|
||||||
req.Settings.RollbackOnFailure = true
|
req.Settings.RollbackOnFailure = true
|
||||||
req.Settings.WaitForBatchCompletion = false
|
|
||||||
|
|
||||||
//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
|
//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
|
||||||
return &UpdateJob{job, req}
|
return &UpdateJob{Job: job, req: req}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set instance count the job will have after the update.
|
// NewUpdateJob creates an UpdateJob object wihtout default settings.
|
||||||
|
func NewUpdateJob(config *aurora.TaskConfig, settings *aurora.JobUpdateSettings) *UpdateJob {
|
||||||
|
|
||||||
|
req := aurora.NewJobUpdateRequest()
|
||||||
|
req.TaskConfig = config
|
||||||
|
req.Settings = settings
|
||||||
|
|
||||||
|
job, ok := NewJob().(*AuroraJob)
|
||||||
|
if !ok {
|
||||||
|
// This should never happen but it is here as a safeguard
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
job.jobConfig.TaskConfig = config
|
||||||
|
|
||||||
|
// Rebuild resource map from TaskConfig
|
||||||
|
for _, ptr := range config.Resources {
|
||||||
|
if ptr.NumCpus != nil {
|
||||||
|
job.resources[CPU].NumCpus = ptr.NumCpus
|
||||||
|
continue // Guard against Union violations that Go won't enforce
|
||||||
|
}
|
||||||
|
|
||||||
|
if ptr.RamMb != nil {
|
||||||
|
job.resources[RAM].RamMb = ptr.RamMb
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if ptr.DiskMb != nil {
|
||||||
|
job.resources[DISK].DiskMb = ptr.DiskMb
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if ptr.NumGpus != nil {
|
||||||
|
job.resources[GPU] = &aurora.Resource{}
|
||||||
|
job.resources[GPU].NumGpus = ptr.NumGpus
|
||||||
|
continue // Guard against Union violations that Go won't enforce
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
|
||||||
|
return &UpdateJob{Job: job, req: req}
|
||||||
|
}
|
||||||
|
|
||||||
|
// InstanceCount sets instance count the job will have after the update.
|
||||||
func (u *UpdateJob) InstanceCount(inst int32) *UpdateJob {
|
func (u *UpdateJob) InstanceCount(inst int32) *UpdateJob {
|
||||||
u.req.InstanceCount = inst
|
u.req.InstanceCount = inst
|
||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
// Max number of instances being updated at any given moment.
|
// BatchSize sets the max number of instances being updated at any given moment.
|
||||||
func (u *UpdateJob) BatchSize(size int32) *UpdateJob {
|
func (u *UpdateJob) BatchSize(size int32) *UpdateJob {
|
||||||
u.req.Settings.UpdateGroupSize = size
|
u.req.Settings.UpdateGroupSize = size
|
||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
// Minimum number of seconds a shard must remain in RUNNING state before considered a success.
|
// WatchTime sets the minimum number of seconds a shard must remain in RUNNING state before considered a success.
|
||||||
func (u *UpdateJob) WatchTime(milliseconds int32) *UpdateJob {
|
func (u *UpdateJob) WatchTime(ms int32) *UpdateJob {
|
||||||
u.req.Settings.MaxPerInstanceFailures = milliseconds
|
u.req.Settings.MinWaitInInstanceRunningMs = ms
|
||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for all instances in a group to be done before moving on.
|
// WaitForBatchCompletion configures the job update to wait for all instances in a group to be done before moving on.
|
||||||
func (u *UpdateJob) WaitForBatchCompletion(batchWait bool) *UpdateJob {
|
func (u *UpdateJob) WaitForBatchCompletion(batchWait bool) *UpdateJob {
|
||||||
u.req.Settings.WaitForBatchCompletion = batchWait
|
u.req.Settings.WaitForBatchCompletion = batchWait
|
||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
// Max number of instance failures to tolerate before marking instance as FAILED.
|
// MaxPerInstanceFailures sets the max number of instance failures to tolerate before marking instance as FAILED.
|
||||||
func (u *UpdateJob) MaxPerInstanceFailures(inst int32) *UpdateJob {
|
func (u *UpdateJob) MaxPerInstanceFailures(inst int32) *UpdateJob {
|
||||||
u.req.Settings.MaxPerInstanceFailures = inst
|
u.req.Settings.MaxPerInstanceFailures = inst
|
||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
// Max number of FAILED instances to tolerate before terminating the update.
|
// MaxFailedInstances sets the max number of FAILED instances to tolerate before terminating the update.
|
||||||
func (u *UpdateJob) MaxFailedInstances(inst int32) *UpdateJob {
|
func (u *UpdateJob) MaxFailedInstances(inst int32) *UpdateJob {
|
||||||
u.req.Settings.MaxFailedInstances = inst
|
u.req.Settings.MaxFailedInstances = inst
|
||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
// When False, prevents auto rollback of a failed update.
|
// RollbackOnFail configure the job to rollback automatically after a job update fails.
|
||||||
func (u *UpdateJob) RollbackOnFail(rollback bool) *UpdateJob {
|
func (u *UpdateJob) RollbackOnFail(rollback bool) *UpdateJob {
|
||||||
u.req.Settings.RollbackOnFailure = rollback
|
u.req.Settings.RollbackOnFailure = rollback
|
||||||
return u
|
return u
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewUpdateSettings return an opinionated set of job update settings.
|
||||||
|
func (u *UpdateJob) BatchUpdateStrategy(strategy aurora.BatchJobUpdateStrategy) *UpdateJob {
|
||||||
|
u.req.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{BatchStrategy: &strategy}
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *UpdateJob) QueueUpdateStrategy(strategy aurora.QueueJobUpdateStrategy) *UpdateJob {
|
||||||
|
u.req.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{QueueStrategy: &strategy}
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *UpdateJob) VariableBatchStrategy(strategy aurora.VariableBatchJobUpdateStrategy) *UpdateJob {
|
||||||
|
u.req.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{VarBatchStrategy: &strategy}
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewUpdateSettings() *aurora.JobUpdateSettings {
|
||||||
|
us := new(aurora.JobUpdateSettings)
|
||||||
|
// Mirrors defaults set by Pystachio
|
||||||
|
us.UpdateGroupSize = 1
|
||||||
|
us.WaitForBatchCompletion = false
|
||||||
|
us.MinWaitInInstanceRunningMs = 45000
|
||||||
|
us.MaxPerInstanceFailures = 0
|
||||||
|
us.MaxFailedInstances = 0
|
||||||
|
us.RollbackOnFailure = true
|
||||||
|
|
||||||
|
return us
|
||||||
|
}
|
||||||
|
|
167
util.go
Normal file
167
util.go
Normal file
|
@ -0,0 +1,167 @@
|
||||||
|
package realis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/x509"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/paypal/gorealis/gen-go/apache/aurora"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
const apiPath = "/api"
|
||||||
|
|
||||||
|
// ActiveStates - States a task may be in when active.
|
||||||
|
var ActiveStates = make(map[aurora.ScheduleStatus]bool)
|
||||||
|
|
||||||
|
// SlaveAssignedStates - States a task may be in when it has already been assigned to a Mesos agent.
|
||||||
|
var SlaveAssignedStates = make(map[aurora.ScheduleStatus]bool)
|
||||||
|
|
||||||
|
// LiveStates - States a task may be in when it is live (e.g. able to take traffic)
|
||||||
|
var LiveStates = make(map[aurora.ScheduleStatus]bool)
|
||||||
|
|
||||||
|
// TerminalStates - Set of states a task may not transition away from.
|
||||||
|
var TerminalStates = make(map[aurora.ScheduleStatus]bool)
|
||||||
|
|
||||||
|
// ActiveJobUpdateStates - States a Job Update may be in where it is considered active.
|
||||||
|
var ActiveJobUpdateStates = make(map[aurora.JobUpdateStatus]bool)
|
||||||
|
|
||||||
|
// TerminalUpdateStates returns a slice containing all the terminal states an update may be in.
|
||||||
|
// This is a function in order to avoid having a slice that can be accidentally mutated.
|
||||||
|
func TerminalUpdateStates() []aurora.JobUpdateStatus {
|
||||||
|
return []aurora.JobUpdateStatus{
|
||||||
|
aurora.JobUpdateStatus_ROLLED_FORWARD,
|
||||||
|
aurora.JobUpdateStatus_ROLLED_BACK,
|
||||||
|
aurora.JobUpdateStatus_ABORTED,
|
||||||
|
aurora.JobUpdateStatus_ERROR,
|
||||||
|
aurora.JobUpdateStatus_FAILED,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AwaitingPulseJobUpdateStates - States a job update may be in where it is waiting for a pulse.
|
||||||
|
var AwaitingPulseJobUpdateStates = make(map[aurora.JobUpdateStatus]bool)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
for _, status := range aurora.ACTIVE_STATES {
|
||||||
|
ActiveStates[status] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, status := range aurora.SLAVE_ASSIGNED_STATES {
|
||||||
|
SlaveAssignedStates[status] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, status := range aurora.LIVE_STATES {
|
||||||
|
LiveStates[status] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, status := range aurora.TERMINAL_STATES {
|
||||||
|
TerminalStates[status] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, status := range aurora.ACTIVE_JOB_UPDATE_STATES {
|
||||||
|
ActiveJobUpdateStates[status] = true
|
||||||
|
}
|
||||||
|
for _, status := range aurora.AWAITNG_PULSE_JOB_UPDATE_STATES {
|
||||||
|
AwaitingPulseJobUpdateStates[status] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// createCertPool will attempt to load certificates into a certificate pool from a given directory.
|
||||||
|
// Only files with an extension contained in the extension map are considered.
|
||||||
|
// This function ignores any files that cannot be read successfully or cannot be added to the certPool
|
||||||
|
// successfully.
|
||||||
|
func createCertPool(path string, extensions map[string]struct{}) (*x509.CertPool, error) {
|
||||||
|
_, err := os.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrap(err, "unable to load certificates")
|
||||||
|
}
|
||||||
|
|
||||||
|
caFiles, err := ioutil.ReadDir(path)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
certPool := x509.NewCertPool()
|
||||||
|
loadedCerts := 0
|
||||||
|
for _, cert := range caFiles {
|
||||||
|
// Skip directories
|
||||||
|
if cert.IsDir() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip any files that do not contain the right extension
|
||||||
|
if _, ok := extensions[filepath.Ext(cert.Name())]; !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
pem, err := ioutil.ReadFile(filepath.Join(path, cert.Name()))
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if certPool.AppendCertsFromPEM(pem) {
|
||||||
|
loadedCerts++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if loadedCerts == 0 {
|
||||||
|
return nil, errors.New("no certificates were able to be successfully loaded")
|
||||||
|
}
|
||||||
|
return certPool, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func validateAuroraURL(location string) (string, error) {
|
||||||
|
|
||||||
|
// If no protocol defined, assume http
|
||||||
|
if !strings.Contains(location, "://") {
|
||||||
|
location = "http://" + location
|
||||||
|
}
|
||||||
|
|
||||||
|
u, err := url.Parse(location)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrap(err, "error parsing url")
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no path provided assume /api
|
||||||
|
if u.Path == "" {
|
||||||
|
u.Path = "/api"
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no port provided, assume default 8081
|
||||||
|
if u.Port() == "" {
|
||||||
|
u.Host = u.Host + ":8081"
|
||||||
|
}
|
||||||
|
|
||||||
|
if !(u.Scheme == "http" || u.Scheme == "https") {
|
||||||
|
return "", errors.Errorf("only protocols http and https are supported %v\n", u.Scheme)
|
||||||
|
}
|
||||||
|
|
||||||
|
// This could theoretically be elsewhere but we'll be strict for the sake of simplicity
|
||||||
|
if u.Path != apiPath {
|
||||||
|
return "", errors.Errorf("expected /api path %v\n", u.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
return u.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculateCurrentBatch returns the zero-based index of the batch an update
// with updatingInstances instances still in flight is currently in, given the
// configured batchSizes. When more instances remain than the configured
// batches account for, the final batch size is assumed to repeat indefinitely
// (overflow batches).
func calculateCurrentBatch(updatingInstances int32, batchSizes []int32) int {
	remaining := updatingInstances
	for i, size := range batchSizes {
		remaining -= size
		if remaining <= 0 {
			return i
		}
	}

	// Overflow: the last configured batch size repeats for whatever is left.
	last := batchSizes[len(batchSizes)-1]
	batch := len(batchSizes) - 1 + int(remaining/last)
	if remaining%last != 0 {
		batch++
	}
	return batch
}
|
114
util_test.go
Normal file
114
util_test.go
Normal file
|
@ -0,0 +1,114 @@
|
||||||
|
/**
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package realis
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAuroraURLValidator(t *testing.T) {
|
||||||
|
t.Run("badURL", func(t *testing.T) {
|
||||||
|
url, err := validateAuroraURL("http://badurl.com/badpath")
|
||||||
|
assert.Empty(t, url)
|
||||||
|
assert.Error(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("URLHttp", func(t *testing.T) {
|
||||||
|
url, err := validateAuroraURL("http://goodurl.com:8081/api")
|
||||||
|
assert.Equal(t, "http://goodurl.com:8081/api", url)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("URLHttps", func(t *testing.T) {
|
||||||
|
url, err := validateAuroraURL("https://goodurl.com:8081/api")
|
||||||
|
assert.Equal(t, "https://goodurl.com:8081/api", url)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("URLNoPath", func(t *testing.T) {
|
||||||
|
url, err := validateAuroraURL("http://goodurl.com:8081")
|
||||||
|
assert.Equal(t, "http://goodurl.com:8081/api", url)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ipAddrNoPath", func(t *testing.T) {
|
||||||
|
url, err := validateAuroraURL("http://192.168.1.33:8081")
|
||||||
|
assert.Equal(t, "http://192.168.1.33:8081/api", url)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("URLNoProtocol", func(t *testing.T) {
|
||||||
|
url, err := validateAuroraURL("goodurl.com:8081/api")
|
||||||
|
assert.Equal(t, "http://goodurl.com:8081/api", url)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("URLNoProtocolNoPathNoPort", func(t *testing.T) {
|
||||||
|
url, err := validateAuroraURL("goodurl.com")
|
||||||
|
assert.Equal(t, "http://goodurl.com:8081/api", url)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCurrentBatchCalculator(t *testing.T) {
|
||||||
|
t.Run("singleBatchOverflow", func(t *testing.T) {
|
||||||
|
curBatch := calculateCurrentBatch(10, []int32{2})
|
||||||
|
assert.Equal(t, 4, curBatch)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("noInstancesUpdating", func(t *testing.T) {
|
||||||
|
curBatch := calculateCurrentBatch(0, []int32{2})
|
||||||
|
assert.Equal(t, 0, curBatch)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("evenMatchSingleBatch", func(t *testing.T) {
|
||||||
|
curBatch := calculateCurrentBatch(2, []int32{2})
|
||||||
|
assert.Equal(t, 0, curBatch)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("moreInstancesThanBatches", func(t *testing.T) {
|
||||||
|
curBatch := calculateCurrentBatch(5, []int32{1, 2})
|
||||||
|
assert.Equal(t, 2, curBatch)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("moreInstancesThanBatchesDecreasing", func(t *testing.T) {
|
||||||
|
curBatch := calculateCurrentBatch(5, []int32{2, 1})
|
||||||
|
assert.Equal(t, 3, curBatch)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("unevenFit", func(t *testing.T) {
|
||||||
|
curBatch := calculateCurrentBatch(2, []int32{1, 2})
|
||||||
|
assert.Equal(t, 1, curBatch)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("halfWay", func(t *testing.T) {
|
||||||
|
curBatch := calculateCurrentBatch(1, []int32{1, 2})
|
||||||
|
assert.Equal(t, 0, curBatch)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCertPoolCreator(t *testing.T) {
|
||||||
|
extensions := map[string]struct{}{".crt": {}}
|
||||||
|
|
||||||
|
_, err := createCertPool("examples/certs", extensions)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
t.Run("badDir", func(t *testing.T) {
|
||||||
|
_, err := createCertPool("idontexist", extensions)
|
||||||
|
assert.Error(t, err)
|
||||||
|
})
|
||||||
|
}
|
File diff suppressed because it is too large
Load diff
|
@ -1,799 +0,0 @@
|
||||||
// Autogenerated by Thrift Compiler (0.9.3)
|
|
||||||
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"apache/aurora"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"git.apache.org/thrift.git/lib/go/thrift"
|
|
||||||
"math"
|
|
||||||
"net"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Usage() {
|
|
||||||
fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
|
|
||||||
flag.PrintDefaults()
|
|
||||||
fmt.Fprintln(os.Stderr, "\nFunctions:")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response createJob(JobConfiguration description)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response scheduleCronJob(JobConfiguration description)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response descheduleCronJob(JobKey job)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response startCronJob(JobKey job)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response restartShards(JobKey job, shardIds)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response killTasks(JobKey job, instances)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response addInstances(InstanceKey key, i32 count)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response replaceCronTemplate(JobConfiguration config)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response startJobUpdate(JobUpdateRequest request, string message)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response pauseJobUpdate(JobUpdateKey key, string message)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response resumeJobUpdate(JobUpdateKey key, string message)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response abortJobUpdate(JobUpdateKey key, string message)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response rollbackJobUpdate(JobUpdateKey key, string message)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response pulseJobUpdate(JobUpdateKey key)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getRoleSummary()")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getJobSummary(string role)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getTasksStatus(TaskQuery query)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getTasksWithoutConfigs(TaskQuery query)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getPendingReason(TaskQuery query)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getConfigSummary(JobKey job)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getJobs(string ownerRole)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getQuota(string ownerRole)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response populateJobConfig(JobConfiguration description)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getJobUpdateSummaries(JobUpdateQuery jobUpdateQuery)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getJobUpdateDetails(JobUpdateKey key)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getJobUpdateDiff(JobUpdateRequest request)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getTierConfigs()")
|
|
||||||
fmt.Fprintln(os.Stderr)
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Usage = Usage
|
|
||||||
var host string
|
|
||||||
var port int
|
|
||||||
var protocol string
|
|
||||||
var urlString string
|
|
||||||
var framed bool
|
|
||||||
var useHttp bool
|
|
||||||
var parsedUrl url.URL
|
|
||||||
var trans thrift.TTransport
|
|
||||||
_ = strconv.Atoi
|
|
||||||
_ = math.Abs
|
|
||||||
flag.Usage = Usage
|
|
||||||
flag.StringVar(&host, "h", "localhost", "Specify host and port")
|
|
||||||
flag.IntVar(&port, "p", 9090, "Specify port")
|
|
||||||
flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
|
|
||||||
flag.StringVar(&urlString, "u", "", "Specify the url")
|
|
||||||
flag.BoolVar(&framed, "framed", false, "Use framed transport")
|
|
||||||
flag.BoolVar(&useHttp, "http", false, "Use http")
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
if len(urlString) > 0 {
|
|
||||||
parsedUrl, err := url.Parse(urlString)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
host = parsedUrl.Host
|
|
||||||
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http"
|
|
||||||
} else if useHttp {
|
|
||||||
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := flag.Arg(0)
|
|
||||||
var err error
|
|
||||||
if useHttp {
|
|
||||||
trans, err = thrift.NewTHttpClient(parsedUrl.String())
|
|
||||||
} else {
|
|
||||||
portStr := fmt.Sprint(port)
|
|
||||||
if strings.Contains(host, ":") {
|
|
||||||
host, portStr, err = net.SplitHostPort(host)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "error with host:", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "error resolving address:", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if framed {
|
|
||||||
trans = thrift.NewTFramedTransport(trans)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "Error creating transport", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
defer trans.Close()
|
|
||||||
var protocolFactory thrift.TProtocolFactory
|
|
||||||
switch protocol {
|
|
||||||
case "compact":
|
|
||||||
protocolFactory = thrift.NewTCompactProtocolFactory()
|
|
||||||
break
|
|
||||||
case "simplejson":
|
|
||||||
protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
break
|
|
||||||
case "json":
|
|
||||||
protocolFactory = thrift.NewTJSONProtocolFactory()
|
|
||||||
break
|
|
||||||
case "binary", "":
|
|
||||||
protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
|
|
||||||
break
|
|
||||||
default:
|
|
||||||
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
|
|
||||||
Usage()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
client := aurora.NewAuroraSchedulerManagerClientFactory(trans, protocolFactory)
|
|
||||||
if err := trans.Open(); err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch cmd {
|
|
||||||
case "createJob":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "CreateJob requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg160 := flag.Arg(1)
|
|
||||||
mbTrans161 := thrift.NewTMemoryBufferLen(len(arg160))
|
|
||||||
defer mbTrans161.Close()
|
|
||||||
_, err162 := mbTrans161.WriteString(arg160)
|
|
||||||
if err162 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory163 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt164 := factory163.GetProtocol(mbTrans161)
|
|
||||||
argvalue0 := aurora.NewJobConfiguration()
|
|
||||||
err165 := argvalue0.Read(jsProt164)
|
|
||||||
if err165 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.CreateJob(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "scheduleCronJob":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "ScheduleCronJob requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg166 := flag.Arg(1)
|
|
||||||
mbTrans167 := thrift.NewTMemoryBufferLen(len(arg166))
|
|
||||||
defer mbTrans167.Close()
|
|
||||||
_, err168 := mbTrans167.WriteString(arg166)
|
|
||||||
if err168 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory169 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt170 := factory169.GetProtocol(mbTrans167)
|
|
||||||
argvalue0 := aurora.NewJobConfiguration()
|
|
||||||
err171 := argvalue0.Read(jsProt170)
|
|
||||||
if err171 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.ScheduleCronJob(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "descheduleCronJob":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "DescheduleCronJob requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg172 := flag.Arg(1)
|
|
||||||
mbTrans173 := thrift.NewTMemoryBufferLen(len(arg172))
|
|
||||||
defer mbTrans173.Close()
|
|
||||||
_, err174 := mbTrans173.WriteString(arg172)
|
|
||||||
if err174 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory175 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt176 := factory175.GetProtocol(mbTrans173)
|
|
||||||
argvalue0 := aurora.NewJobKey()
|
|
||||||
err177 := argvalue0.Read(jsProt176)
|
|
||||||
if err177 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.DescheduleCronJob(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "startCronJob":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "StartCronJob requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg178 := flag.Arg(1)
|
|
||||||
mbTrans179 := thrift.NewTMemoryBufferLen(len(arg178))
|
|
||||||
defer mbTrans179.Close()
|
|
||||||
_, err180 := mbTrans179.WriteString(arg178)
|
|
||||||
if err180 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory181 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt182 := factory181.GetProtocol(mbTrans179)
|
|
||||||
argvalue0 := aurora.NewJobKey()
|
|
||||||
err183 := argvalue0.Read(jsProt182)
|
|
||||||
if err183 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.StartCronJob(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "restartShards":
|
|
||||||
if flag.NArg()-1 != 2 {
|
|
||||||
fmt.Fprintln(os.Stderr, "RestartShards requires 2 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg184 := flag.Arg(1)
|
|
||||||
mbTrans185 := thrift.NewTMemoryBufferLen(len(arg184))
|
|
||||||
defer mbTrans185.Close()
|
|
||||||
_, err186 := mbTrans185.WriteString(arg184)
|
|
||||||
if err186 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory187 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt188 := factory187.GetProtocol(mbTrans185)
|
|
||||||
argvalue0 := aurora.NewJobKey()
|
|
||||||
err189 := argvalue0.Read(jsProt188)
|
|
||||||
if err189 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
arg190 := flag.Arg(2)
|
|
||||||
mbTrans191 := thrift.NewTMemoryBufferLen(len(arg190))
|
|
||||||
defer mbTrans191.Close()
|
|
||||||
_, err192 := mbTrans191.WriteString(arg190)
|
|
||||||
if err192 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory193 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt194 := factory193.GetProtocol(mbTrans191)
|
|
||||||
containerStruct1 := aurora.NewAuroraSchedulerManagerRestartShardsArgs()
|
|
||||||
err195 := containerStruct1.ReadField2(jsProt194)
|
|
||||||
if err195 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
argvalue1 := containerStruct1.ShardIds
|
|
||||||
value1 := argvalue1
|
|
||||||
fmt.Print(client.RestartShards(value0, value1))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "killTasks":
|
|
||||||
if flag.NArg()-1 != 2 {
|
|
||||||
fmt.Fprintln(os.Stderr, "KillTasks requires 2 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg196 := flag.Arg(1)
|
|
||||||
mbTrans197 := thrift.NewTMemoryBufferLen(len(arg196))
|
|
||||||
defer mbTrans197.Close()
|
|
||||||
_, err198 := mbTrans197.WriteString(arg196)
|
|
||||||
if err198 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory199 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt200 := factory199.GetProtocol(mbTrans197)
|
|
||||||
argvalue0 := aurora.NewJobKey()
|
|
||||||
err201 := argvalue0.Read(jsProt200)
|
|
||||||
if err201 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
arg202 := flag.Arg(2)
|
|
||||||
mbTrans203 := thrift.NewTMemoryBufferLen(len(arg202))
|
|
||||||
defer mbTrans203.Close()
|
|
||||||
_, err204 := mbTrans203.WriteString(arg202)
|
|
||||||
if err204 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory205 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt206 := factory205.GetProtocol(mbTrans203)
|
|
||||||
containerStruct1 := aurora.NewAuroraSchedulerManagerKillTasksArgs()
|
|
||||||
err207 := containerStruct1.ReadField2(jsProt206)
|
|
||||||
if err207 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
argvalue1 := containerStruct1.Instances
|
|
||||||
value1 := argvalue1
|
|
||||||
fmt.Print(client.KillTasks(value0, value1))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "addInstances":
|
|
||||||
if flag.NArg()-1 != 2 {
|
|
||||||
fmt.Fprintln(os.Stderr, "AddInstances requires 2 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg208 := flag.Arg(1)
|
|
||||||
mbTrans209 := thrift.NewTMemoryBufferLen(len(arg208))
|
|
||||||
defer mbTrans209.Close()
|
|
||||||
_, err210 := mbTrans209.WriteString(arg208)
|
|
||||||
if err210 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory211 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt212 := factory211.GetProtocol(mbTrans209)
|
|
||||||
argvalue0 := aurora.NewInstanceKey()
|
|
||||||
err213 := argvalue0.Read(jsProt212)
|
|
||||||
if err213 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
tmp1, err214 := (strconv.Atoi(flag.Arg(2)))
|
|
||||||
if err214 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
argvalue1 := int32(tmp1)
|
|
||||||
value1 := argvalue1
|
|
||||||
fmt.Print(client.AddInstances(value0, value1))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "replaceCronTemplate":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "ReplaceCronTemplate requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg215 := flag.Arg(1)
|
|
||||||
mbTrans216 := thrift.NewTMemoryBufferLen(len(arg215))
|
|
||||||
defer mbTrans216.Close()
|
|
||||||
_, err217 := mbTrans216.WriteString(arg215)
|
|
||||||
if err217 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory218 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt219 := factory218.GetProtocol(mbTrans216)
|
|
||||||
argvalue0 := aurora.NewJobConfiguration()
|
|
||||||
err220 := argvalue0.Read(jsProt219)
|
|
||||||
if err220 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.ReplaceCronTemplate(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "startJobUpdate":
|
|
||||||
if flag.NArg()-1 != 2 {
|
|
||||||
fmt.Fprintln(os.Stderr, "StartJobUpdate requires 2 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg221 := flag.Arg(1)
|
|
||||||
mbTrans222 := thrift.NewTMemoryBufferLen(len(arg221))
|
|
||||||
defer mbTrans222.Close()
|
|
||||||
_, err223 := mbTrans222.WriteString(arg221)
|
|
||||||
if err223 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory224 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt225 := factory224.GetProtocol(mbTrans222)
|
|
||||||
argvalue0 := aurora.NewJobUpdateRequest()
|
|
||||||
err226 := argvalue0.Read(jsProt225)
|
|
||||||
if err226 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
argvalue1 := flag.Arg(2)
|
|
||||||
value1 := argvalue1
|
|
||||||
fmt.Print(client.StartJobUpdate(value0, value1))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "pauseJobUpdate":
|
|
||||||
if flag.NArg()-1 != 2 {
|
|
||||||
fmt.Fprintln(os.Stderr, "PauseJobUpdate requires 2 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg228 := flag.Arg(1)
|
|
||||||
mbTrans229 := thrift.NewTMemoryBufferLen(len(arg228))
|
|
||||||
defer mbTrans229.Close()
|
|
||||||
_, err230 := mbTrans229.WriteString(arg228)
|
|
||||||
if err230 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory231 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt232 := factory231.GetProtocol(mbTrans229)
|
|
||||||
argvalue0 := aurora.NewJobUpdateKey()
|
|
||||||
err233 := argvalue0.Read(jsProt232)
|
|
||||||
if err233 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
argvalue1 := flag.Arg(2)
|
|
||||||
value1 := argvalue1
|
|
||||||
fmt.Print(client.PauseJobUpdate(value0, value1))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "resumeJobUpdate":
|
|
||||||
if flag.NArg()-1 != 2 {
|
|
||||||
fmt.Fprintln(os.Stderr, "ResumeJobUpdate requires 2 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg235 := flag.Arg(1)
|
|
||||||
mbTrans236 := thrift.NewTMemoryBufferLen(len(arg235))
|
|
||||||
defer mbTrans236.Close()
|
|
||||||
_, err237 := mbTrans236.WriteString(arg235)
|
|
||||||
if err237 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory238 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt239 := factory238.GetProtocol(mbTrans236)
|
|
||||||
argvalue0 := aurora.NewJobUpdateKey()
|
|
||||||
err240 := argvalue0.Read(jsProt239)
|
|
||||||
if err240 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
argvalue1 := flag.Arg(2)
|
|
||||||
value1 := argvalue1
|
|
||||||
fmt.Print(client.ResumeJobUpdate(value0, value1))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "abortJobUpdate":
|
|
||||||
if flag.NArg()-1 != 2 {
|
|
||||||
fmt.Fprintln(os.Stderr, "AbortJobUpdate requires 2 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg242 := flag.Arg(1)
|
|
||||||
mbTrans243 := thrift.NewTMemoryBufferLen(len(arg242))
|
|
||||||
defer mbTrans243.Close()
|
|
||||||
_, err244 := mbTrans243.WriteString(arg242)
|
|
||||||
if err244 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory245 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt246 := factory245.GetProtocol(mbTrans243)
|
|
||||||
argvalue0 := aurora.NewJobUpdateKey()
|
|
||||||
err247 := argvalue0.Read(jsProt246)
|
|
||||||
if err247 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
argvalue1 := flag.Arg(2)
|
|
||||||
value1 := argvalue1
|
|
||||||
fmt.Print(client.AbortJobUpdate(value0, value1))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "rollbackJobUpdate":
|
|
||||||
if flag.NArg()-1 != 2 {
|
|
||||||
fmt.Fprintln(os.Stderr, "RollbackJobUpdate requires 2 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg249 := flag.Arg(1)
|
|
||||||
mbTrans250 := thrift.NewTMemoryBufferLen(len(arg249))
|
|
||||||
defer mbTrans250.Close()
|
|
||||||
_, err251 := mbTrans250.WriteString(arg249)
|
|
||||||
if err251 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory252 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt253 := factory252.GetProtocol(mbTrans250)
|
|
||||||
argvalue0 := aurora.NewJobUpdateKey()
|
|
||||||
err254 := argvalue0.Read(jsProt253)
|
|
||||||
if err254 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
argvalue1 := flag.Arg(2)
|
|
||||||
value1 := argvalue1
|
|
||||||
fmt.Print(client.RollbackJobUpdate(value0, value1))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "pulseJobUpdate":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "PulseJobUpdate requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg256 := flag.Arg(1)
|
|
||||||
mbTrans257 := thrift.NewTMemoryBufferLen(len(arg256))
|
|
||||||
defer mbTrans257.Close()
|
|
||||||
_, err258 := mbTrans257.WriteString(arg256)
|
|
||||||
if err258 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory259 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt260 := factory259.GetProtocol(mbTrans257)
|
|
||||||
argvalue0 := aurora.NewJobUpdateKey()
|
|
||||||
err261 := argvalue0.Read(jsProt260)
|
|
||||||
if err261 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.PulseJobUpdate(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getRoleSummary":
|
|
||||||
if flag.NArg()-1 != 0 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
fmt.Print(client.GetRoleSummary())
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getJobSummary":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetJobSummary requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
argvalue0 := flag.Arg(1)
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetJobSummary(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getTasksStatus":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg263 := flag.Arg(1)
|
|
||||||
mbTrans264 := thrift.NewTMemoryBufferLen(len(arg263))
|
|
||||||
defer mbTrans264.Close()
|
|
||||||
_, err265 := mbTrans264.WriteString(arg263)
|
|
||||||
if err265 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory266 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt267 := factory266.GetProtocol(mbTrans264)
|
|
||||||
argvalue0 := aurora.NewTaskQuery()
|
|
||||||
err268 := argvalue0.Read(jsProt267)
|
|
||||||
if err268 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetTasksStatus(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getTasksWithoutConfigs":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg269 := flag.Arg(1)
|
|
||||||
mbTrans270 := thrift.NewTMemoryBufferLen(len(arg269))
|
|
||||||
defer mbTrans270.Close()
|
|
||||||
_, err271 := mbTrans270.WriteString(arg269)
|
|
||||||
if err271 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory272 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt273 := factory272.GetProtocol(mbTrans270)
|
|
||||||
argvalue0 := aurora.NewTaskQuery()
|
|
||||||
err274 := argvalue0.Read(jsProt273)
|
|
||||||
if err274 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetTasksWithoutConfigs(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getPendingReason":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg275 := flag.Arg(1)
|
|
||||||
mbTrans276 := thrift.NewTMemoryBufferLen(len(arg275))
|
|
||||||
defer mbTrans276.Close()
|
|
||||||
_, err277 := mbTrans276.WriteString(arg275)
|
|
||||||
if err277 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory278 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt279 := factory278.GetProtocol(mbTrans276)
|
|
||||||
argvalue0 := aurora.NewTaskQuery()
|
|
||||||
err280 := argvalue0.Read(jsProt279)
|
|
||||||
if err280 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetPendingReason(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getConfigSummary":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg281 := flag.Arg(1)
|
|
||||||
mbTrans282 := thrift.NewTMemoryBufferLen(len(arg281))
|
|
||||||
defer mbTrans282.Close()
|
|
||||||
_, err283 := mbTrans282.WriteString(arg281)
|
|
||||||
if err283 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory284 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt285 := factory284.GetProtocol(mbTrans282)
|
|
||||||
argvalue0 := aurora.NewJobKey()
|
|
||||||
err286 := argvalue0.Read(jsProt285)
|
|
||||||
if err286 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetConfigSummary(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getJobs":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetJobs requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
argvalue0 := flag.Arg(1)
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetJobs(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getQuota":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetQuota requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
argvalue0 := flag.Arg(1)
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetQuota(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "populateJobConfig":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg289 := flag.Arg(1)
|
|
||||||
mbTrans290 := thrift.NewTMemoryBufferLen(len(arg289))
|
|
||||||
defer mbTrans290.Close()
|
|
||||||
_, err291 := mbTrans290.WriteString(arg289)
|
|
||||||
if err291 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory292 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt293 := factory292.GetProtocol(mbTrans290)
|
|
||||||
argvalue0 := aurora.NewJobConfiguration()
|
|
||||||
err294 := argvalue0.Read(jsProt293)
|
|
||||||
if err294 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.PopulateJobConfig(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getJobUpdateSummaries":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg295 := flag.Arg(1)
|
|
||||||
mbTrans296 := thrift.NewTMemoryBufferLen(len(arg295))
|
|
||||||
defer mbTrans296.Close()
|
|
||||||
_, err297 := mbTrans296.WriteString(arg295)
|
|
||||||
if err297 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory298 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt299 := factory298.GetProtocol(mbTrans296)
|
|
||||||
argvalue0 := aurora.NewJobUpdateQuery()
|
|
||||||
err300 := argvalue0.Read(jsProt299)
|
|
||||||
if err300 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetJobUpdateSummaries(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getJobUpdateDetails":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg301 := flag.Arg(1)
|
|
||||||
mbTrans302 := thrift.NewTMemoryBufferLen(len(arg301))
|
|
||||||
defer mbTrans302.Close()
|
|
||||||
_, err303 := mbTrans302.WriteString(arg301)
|
|
||||||
if err303 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory304 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt305 := factory304.GetProtocol(mbTrans302)
|
|
||||||
argvalue0 := aurora.NewJobUpdateKey()
|
|
||||||
err306 := argvalue0.Read(jsProt305)
|
|
||||||
if err306 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetJobUpdateDetails(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getJobUpdateDiff":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg307 := flag.Arg(1)
|
|
||||||
mbTrans308 := thrift.NewTMemoryBufferLen(len(arg307))
|
|
||||||
defer mbTrans308.Close()
|
|
||||||
_, err309 := mbTrans308.WriteString(arg307)
|
|
||||||
if err309 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory310 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt311 := factory310.GetProtocol(mbTrans308)
|
|
||||||
argvalue0 := aurora.NewJobUpdateRequest()
|
|
||||||
err312 := argvalue0.Read(jsProt311)
|
|
||||||
if err312 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetJobUpdateDiff(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getTierConfigs":
|
|
||||||
if flag.NArg()-1 != 0 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
fmt.Print(client.GetTierConfigs())
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "":
|
|
||||||
Usage()
|
|
||||||
break
|
|
||||||
default:
|
|
||||||
fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
|
|
||||||
}
|
|
||||||
}
|
|
4841
vendor/gen-go/apache/aurora/auroraadmin.go
vendored
4841
vendor/gen-go/apache/aurora/auroraadmin.go
vendored
File diff suppressed because it is too large
Load diff
5077
vendor/gen-go/apache/aurora/auroraschedulermanager.go
vendored
5077
vendor/gen-go/apache/aurora/auroraschedulermanager.go
vendored
File diff suppressed because it is too large
Load diff
80
vendor/gen-go/apache/aurora/constants.go
vendored
80
vendor/gen-go/apache/aurora/constants.go
vendored
|
@ -1,80 +0,0 @@
|
||||||
// Autogenerated by Thrift Compiler (0.9.3)
|
|
||||||
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
|
||||||
|
|
||||||
package aurora
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"git.apache.org/thrift.git/lib/go/thrift"
|
|
||||||
)
|
|
||||||
|
|
||||||
// (needed to ensure safety because of naive import list construction.)
|
|
||||||
var _ = thrift.ZERO
|
|
||||||
var _ = fmt.Printf
|
|
||||||
var _ = bytes.Equal
|
|
||||||
|
|
||||||
const AURORA_EXECUTOR_NAME = "AuroraExecutor"
|
|
||||||
|
|
||||||
var ACTIVE_STATES map[ScheduleStatus]bool
|
|
||||||
var SLAVE_ASSIGNED_STATES map[ScheduleStatus]bool
|
|
||||||
var LIVE_STATES map[ScheduleStatus]bool
|
|
||||||
var TERMINAL_STATES map[ScheduleStatus]bool
|
|
||||||
|
|
||||||
const GOOD_IDENTIFIER_PATTERN = "^[\\w\\-\\.]+$"
|
|
||||||
const GOOD_IDENTIFIER_PATTERN_JVM = "^[\\w\\-\\.]+$"
|
|
||||||
const GOOD_IDENTIFIER_PATTERN_PYTHON = "^[\\w\\-\\.]+$"
|
|
||||||
|
|
||||||
var ACTIVE_JOB_UPDATE_STATES map[JobUpdateStatus]bool
|
|
||||||
|
|
||||||
const BYPASS_LEADER_REDIRECT_HEADER_NAME = "Bypass-Leader-Redirect"
|
|
||||||
const TASK_FILESYSTEM_MOUNT_POINT = "taskfs"
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
ACTIVE_STATES = map[ScheduleStatus]bool{
|
|
||||||
9: true,
|
|
||||||
17: true,
|
|
||||||
6: true,
|
|
||||||
0: true,
|
|
||||||
13: true,
|
|
||||||
12: true,
|
|
||||||
2: true,
|
|
||||||
1: true,
|
|
||||||
16: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
SLAVE_ASSIGNED_STATES = map[ScheduleStatus]bool{
|
|
||||||
9: true,
|
|
||||||
17: true,
|
|
||||||
6: true,
|
|
||||||
13: true,
|
|
||||||
12: true,
|
|
||||||
2: true,
|
|
||||||
1: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
LIVE_STATES = map[ScheduleStatus]bool{
|
|
||||||
6: true,
|
|
||||||
13: true,
|
|
||||||
12: true,
|
|
||||||
17: true,
|
|
||||||
2: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
TERMINAL_STATES = map[ScheduleStatus]bool{
|
|
||||||
4: true,
|
|
||||||
3: true,
|
|
||||||
5: true,
|
|
||||||
7: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
ACTIVE_JOB_UPDATE_STATES = map[JobUpdateStatus]bool{
|
|
||||||
0: true,
|
|
||||||
1: true,
|
|
||||||
2: true,
|
|
||||||
3: true,
|
|
||||||
9: true,
|
|
||||||
10: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,382 +0,0 @@
|
||||||
// Autogenerated by Thrift Compiler (0.9.3)
|
|
||||||
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"apache/aurora"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"git.apache.org/thrift.git/lib/go/thrift"
|
|
||||||
"math"
|
|
||||||
"net"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
func Usage() {
|
|
||||||
fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
|
|
||||||
flag.PrintDefaults()
|
|
||||||
fmt.Fprintln(os.Stderr, "\nFunctions:")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getRoleSummary()")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getJobSummary(string role)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getTasksStatus(TaskQuery query)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getTasksWithoutConfigs(TaskQuery query)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getPendingReason(TaskQuery query)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getConfigSummary(JobKey job)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getJobs(string ownerRole)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getQuota(string ownerRole)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response populateJobConfig(JobConfiguration description)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getJobUpdateSummaries(JobUpdateQuery jobUpdateQuery)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getJobUpdateDetails(JobUpdateKey key)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getJobUpdateDiff(JobUpdateRequest request)")
|
|
||||||
fmt.Fprintln(os.Stderr, " Response getTierConfigs()")
|
|
||||||
fmt.Fprintln(os.Stderr)
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Usage = Usage
|
|
||||||
var host string
|
|
||||||
var port int
|
|
||||||
var protocol string
|
|
||||||
var urlString string
|
|
||||||
var framed bool
|
|
||||||
var useHttp bool
|
|
||||||
var parsedUrl url.URL
|
|
||||||
var trans thrift.TTransport
|
|
||||||
_ = strconv.Atoi
|
|
||||||
_ = math.Abs
|
|
||||||
flag.Usage = Usage
|
|
||||||
flag.StringVar(&host, "h", "localhost", "Specify host and port")
|
|
||||||
flag.IntVar(&port, "p", 9090, "Specify port")
|
|
||||||
flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
|
|
||||||
flag.StringVar(&urlString, "u", "", "Specify the url")
|
|
||||||
flag.BoolVar(&framed, "framed", false, "Use framed transport")
|
|
||||||
flag.BoolVar(&useHttp, "http", false, "Use http")
|
|
||||||
flag.Parse()
|
|
||||||
|
|
||||||
if len(urlString) > 0 {
|
|
||||||
parsedUrl, err := url.Parse(urlString)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
host = parsedUrl.Host
|
|
||||||
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http"
|
|
||||||
} else if useHttp {
|
|
||||||
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := flag.Arg(0)
|
|
||||||
var err error
|
|
||||||
if useHttp {
|
|
||||||
trans, err = thrift.NewTHttpClient(parsedUrl.String())
|
|
||||||
} else {
|
|
||||||
portStr := fmt.Sprint(port)
|
|
||||||
if strings.Contains(host, ":") {
|
|
||||||
host, portStr, err = net.SplitHostPort(host)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "error with host:", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "error resolving address:", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
if framed {
|
|
||||||
trans = thrift.NewTFramedTransport(trans)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "Error creating transport", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
defer trans.Close()
|
|
||||||
var protocolFactory thrift.TProtocolFactory
|
|
||||||
switch protocol {
|
|
||||||
case "compact":
|
|
||||||
protocolFactory = thrift.NewTCompactProtocolFactory()
|
|
||||||
break
|
|
||||||
case "simplejson":
|
|
||||||
protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
break
|
|
||||||
case "json":
|
|
||||||
protocolFactory = thrift.NewTJSONProtocolFactory()
|
|
||||||
break
|
|
||||||
case "binary", "":
|
|
||||||
protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
|
|
||||||
break
|
|
||||||
default:
|
|
||||||
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
|
|
||||||
Usage()
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
client := aurora.NewReadOnlySchedulerClientFactory(trans, protocolFactory)
|
|
||||||
if err := trans.Open(); err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch cmd {
|
|
||||||
case "getRoleSummary":
|
|
||||||
if flag.NArg()-1 != 0 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
fmt.Print(client.GetRoleSummary())
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getJobSummary":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetJobSummary requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
argvalue0 := flag.Arg(1)
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetJobSummary(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getTasksStatus":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg79 := flag.Arg(1)
|
|
||||||
mbTrans80 := thrift.NewTMemoryBufferLen(len(arg79))
|
|
||||||
defer mbTrans80.Close()
|
|
||||||
_, err81 := mbTrans80.WriteString(arg79)
|
|
||||||
if err81 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory82 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt83 := factory82.GetProtocol(mbTrans80)
|
|
||||||
argvalue0 := aurora.NewTaskQuery()
|
|
||||||
err84 := argvalue0.Read(jsProt83)
|
|
||||||
if err84 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetTasksStatus(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getTasksWithoutConfigs":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg85 := flag.Arg(1)
|
|
||||||
mbTrans86 := thrift.NewTMemoryBufferLen(len(arg85))
|
|
||||||
defer mbTrans86.Close()
|
|
||||||
_, err87 := mbTrans86.WriteString(arg85)
|
|
||||||
if err87 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory88 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt89 := factory88.GetProtocol(mbTrans86)
|
|
||||||
argvalue0 := aurora.NewTaskQuery()
|
|
||||||
err90 := argvalue0.Read(jsProt89)
|
|
||||||
if err90 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetTasksWithoutConfigs(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getPendingReason":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg91 := flag.Arg(1)
|
|
||||||
mbTrans92 := thrift.NewTMemoryBufferLen(len(arg91))
|
|
||||||
defer mbTrans92.Close()
|
|
||||||
_, err93 := mbTrans92.WriteString(arg91)
|
|
||||||
if err93 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory94 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt95 := factory94.GetProtocol(mbTrans92)
|
|
||||||
argvalue0 := aurora.NewTaskQuery()
|
|
||||||
err96 := argvalue0.Read(jsProt95)
|
|
||||||
if err96 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetPendingReason(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getConfigSummary":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg97 := flag.Arg(1)
|
|
||||||
mbTrans98 := thrift.NewTMemoryBufferLen(len(arg97))
|
|
||||||
defer mbTrans98.Close()
|
|
||||||
_, err99 := mbTrans98.WriteString(arg97)
|
|
||||||
if err99 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory100 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt101 := factory100.GetProtocol(mbTrans98)
|
|
||||||
argvalue0 := aurora.NewJobKey()
|
|
||||||
err102 := argvalue0.Read(jsProt101)
|
|
||||||
if err102 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetConfigSummary(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getJobs":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetJobs requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
argvalue0 := flag.Arg(1)
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetJobs(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getQuota":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetQuota requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
argvalue0 := flag.Arg(1)
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetQuota(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "populateJobConfig":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg105 := flag.Arg(1)
|
|
||||||
mbTrans106 := thrift.NewTMemoryBufferLen(len(arg105))
|
|
||||||
defer mbTrans106.Close()
|
|
||||||
_, err107 := mbTrans106.WriteString(arg105)
|
|
||||||
if err107 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory108 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt109 := factory108.GetProtocol(mbTrans106)
|
|
||||||
argvalue0 := aurora.NewJobConfiguration()
|
|
||||||
err110 := argvalue0.Read(jsProt109)
|
|
||||||
if err110 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.PopulateJobConfig(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getJobUpdateSummaries":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg111 := flag.Arg(1)
|
|
||||||
mbTrans112 := thrift.NewTMemoryBufferLen(len(arg111))
|
|
||||||
defer mbTrans112.Close()
|
|
||||||
_, err113 := mbTrans112.WriteString(arg111)
|
|
||||||
if err113 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory114 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt115 := factory114.GetProtocol(mbTrans112)
|
|
||||||
argvalue0 := aurora.NewJobUpdateQuery()
|
|
||||||
err116 := argvalue0.Read(jsProt115)
|
|
||||||
if err116 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetJobUpdateSummaries(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getJobUpdateDetails":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg117 := flag.Arg(1)
|
|
||||||
mbTrans118 := thrift.NewTMemoryBufferLen(len(arg117))
|
|
||||||
defer mbTrans118.Close()
|
|
||||||
_, err119 := mbTrans118.WriteString(arg117)
|
|
||||||
if err119 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory120 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt121 := factory120.GetProtocol(mbTrans118)
|
|
||||||
argvalue0 := aurora.NewJobUpdateKey()
|
|
||||||
err122 := argvalue0.Read(jsProt121)
|
|
||||||
if err122 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetJobUpdateDetails(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getJobUpdateDiff":
|
|
||||||
if flag.NArg()-1 != 1 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
arg123 := flag.Arg(1)
|
|
||||||
mbTrans124 := thrift.NewTMemoryBufferLen(len(arg123))
|
|
||||||
defer mbTrans124.Close()
|
|
||||||
_, err125 := mbTrans124.WriteString(arg123)
|
|
||||||
if err125 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
factory126 := thrift.NewTSimpleJSONProtocolFactory()
|
|
||||||
jsProt127 := factory126.GetProtocol(mbTrans124)
|
|
||||||
argvalue0 := aurora.NewJobUpdateRequest()
|
|
||||||
err128 := argvalue0.Read(jsProt127)
|
|
||||||
if err128 != nil {
|
|
||||||
Usage()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value0 := argvalue0
|
|
||||||
fmt.Print(client.GetJobUpdateDiff(value0))
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "getTierConfigs":
|
|
||||||
if flag.NArg()-1 != 0 {
|
|
||||||
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
|
|
||||||
flag.Usage()
|
|
||||||
}
|
|
||||||
fmt.Print(client.GetTierConfigs())
|
|
||||||
fmt.Print("\n")
|
|
||||||
break
|
|
||||||
case "":
|
|
||||||
Usage()
|
|
||||||
break
|
|
||||||
default:
|
|
||||||
fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
|
|
||||||
}
|
|
||||||
}
|
|
4292
vendor/gen-go/apache/aurora/readonlyscheduler.go
vendored
4292
vendor/gen-go/apache/aurora/readonlyscheduler.go
vendored
File diff suppressed because it is too large
Load diff
15611
vendor/gen-go/apache/aurora/ttypes.go
vendored
15611
vendor/gen-go/apache/aurora/ttypes.go
vendored
File diff suppressed because it is too large
Load diff
239
vendor/git.apache.org/thrift.git/LICENSE
generated
vendored
239
vendor/git.apache.org/thrift.git/LICENSE
generated
vendored
|
@ -1,239 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
|
|
||||||
--------------------------------------------------
|
|
||||||
SOFTWARE DISTRIBUTED WITH THRIFT:
|
|
||||||
|
|
||||||
The Apache Thrift software includes a number of subcomponents with
|
|
||||||
separate copyright notices and license terms. Your use of the source
|
|
||||||
code for the these subcomponents is subject to the terms and
|
|
||||||
conditions of the following licenses.
|
|
||||||
|
|
||||||
--------------------------------------------------
|
|
||||||
Portions of the following files are licensed under the MIT License:
|
|
||||||
|
|
||||||
lib/erl/src/Makefile.am
|
|
||||||
|
|
||||||
Please see doc/otp-base-license.txt for the full terms of this license.
|
|
||||||
|
|
||||||
--------------------------------------------------
|
|
||||||
For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
|
|
||||||
|
|
||||||
# Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
|
|
||||||
#
|
|
||||||
# Copying and distribution of this file, with or without
|
|
||||||
# modification, are permitted in any medium without royalty provided
|
|
||||||
# the copyright notice and this notice are preserved.
|
|
||||||
|
|
||||||
--------------------------------------------------
|
|
||||||
For the lib/nodejs/lib/thrift/json_parse.js:
|
|
||||||
|
|
||||||
/*
|
|
||||||
json_parse.js
|
|
||||||
2015-05-02
|
|
||||||
Public Domain.
|
|
||||||
NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
|
|
||||||
|
|
||||||
*/
|
|
||||||
(By Douglas Crockford <douglas@crockford.com>)
|
|
||||||
--------------------------------------------------
|
|
5
vendor/git.apache.org/thrift.git/NOTICE
generated
vendored
5
vendor/git.apache.org/thrift.git/NOTICE
generated
vendored
|
@ -1,5 +0,0 @@
|
||||||
Apache Thrift
|
|
||||||
Copyright 2006-2010 The Apache Software Foundation.
|
|
||||||
|
|
||||||
This product includes software developed at
|
|
||||||
The Apache Software Foundation (http://www.apache.org/).
|
|
|
@ -30,11 +30,22 @@ const (
|
||||||
PROTOCOL_ERROR = 7
|
PROTOCOL_ERROR = 7
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var defaultApplicationExceptionMessage = map[int32]string{
|
||||||
|
UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception",
|
||||||
|
UNKNOWN_METHOD: "unknown method",
|
||||||
|
INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
|
||||||
|
WRONG_METHOD_NAME: "wrong method name",
|
||||||
|
BAD_SEQUENCE_ID: "bad sequence ID",
|
||||||
|
MISSING_RESULT: "missing result",
|
||||||
|
INTERNAL_ERROR: "unknown internal error",
|
||||||
|
PROTOCOL_ERROR: "unknown protocol error",
|
||||||
|
}
|
||||||
|
|
||||||
// Application level Thrift exception
|
// Application level Thrift exception
|
||||||
type TApplicationException interface {
|
type TApplicationException interface {
|
||||||
TException
|
TException
|
||||||
TypeId() int32
|
TypeId() int32
|
||||||
Read(iprot TProtocol) (TApplicationException, error)
|
Read(iprot TProtocol) error
|
||||||
Write(oprot TProtocol) error
|
Write(oprot TProtocol) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -44,8 +55,11 @@ type tApplicationException struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e tApplicationException) Error() string {
|
func (e tApplicationException) Error() string {
|
||||||
|
if e.message != "" {
|
||||||
return e.message
|
return e.message
|
||||||
}
|
}
|
||||||
|
return defaultApplicationExceptionMessage[e.type_]
|
||||||
|
}
|
||||||
|
|
||||||
func NewTApplicationException(type_ int32, message string) TApplicationException {
|
func NewTApplicationException(type_ int32, message string) TApplicationException {
|
||||||
return &tApplicationException{message, type_}
|
return &tApplicationException{message, type_}
|
||||||
|
@ -55,10 +69,11 @@ func (p *tApplicationException) TypeId() int32 {
|
||||||
return p.type_
|
return p.type_
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) {
|
func (p *tApplicationException) Read(iprot TProtocol) error {
|
||||||
|
// TODO: this should really be generated by the compiler
|
||||||
_, err := iprot.ReadStructBegin()
|
_, err := iprot.ReadStructBegin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
message := ""
|
message := ""
|
||||||
|
@ -67,7 +82,7 @@ func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, er
|
||||||
for {
|
for {
|
||||||
_, ttype, id, err := iprot.ReadFieldBegin()
|
_, ttype, id, err := iprot.ReadFieldBegin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
if ttype == STOP {
|
if ttype == STOP {
|
||||||
break
|
break
|
||||||
|
@ -76,33 +91,40 @@ func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, er
|
||||||
case 1:
|
case 1:
|
||||||
if ttype == STRING {
|
if ttype == STRING {
|
||||||
if message, err = iprot.ReadString(); err != nil {
|
if message, err = iprot.ReadString(); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if err = SkipDefaultDepth(iprot, ttype); err != nil {
|
if err = SkipDefaultDepth(iprot, ttype); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
case 2:
|
case 2:
|
||||||
if ttype == I32 {
|
if ttype == I32 {
|
||||||
if type_, err = iprot.ReadI32(); err != nil {
|
if type_, err = iprot.ReadI32(); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
if err = SkipDefaultDepth(iprot, ttype); err != nil {
|
if err = SkipDefaultDepth(iprot, ttype); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
if err = SkipDefaultDepth(iprot, ttype); err != nil {
|
if err = SkipDefaultDepth(iprot, ttype); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err = iprot.ReadFieldEnd(); err != nil {
|
if err = iprot.ReadFieldEnd(); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return NewTApplicationException(type_, message), iprot.ReadStructEnd()
|
if err := iprot.ReadStructEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
p.message = message
|
||||||
|
p.type_ = type_
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *tApplicationException) Write(oprot TProtocol) (err error) {
|
func (p *tApplicationException) Write(oprot TProtocol) (err error) {
|
41
vendor/github.com/apache/thrift/lib/go/thrift/application_exception_test.go
generated
vendored
Normal file
41
vendor/github.com/apache/thrift/lib/go/thrift/application_exception_test.go
generated
vendored
Normal file
|
@ -0,0 +1,41 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTApplicationException(t *testing.T) {
|
||||||
|
exc := NewTApplicationException(UNKNOWN_APPLICATION_EXCEPTION, "")
|
||||||
|
if exc.Error() != defaultApplicationExceptionMessage[UNKNOWN_APPLICATION_EXCEPTION] {
|
||||||
|
t.Fatalf("Expected empty string for exception but found '%s'", exc.Error())
|
||||||
|
}
|
||||||
|
if exc.TypeId() != UNKNOWN_APPLICATION_EXCEPTION {
|
||||||
|
t.Fatalf("Expected type UNKNOWN for exception but found '%v'", exc.TypeId())
|
||||||
|
}
|
||||||
|
exc = NewTApplicationException(WRONG_METHOD_NAME, "junk_method")
|
||||||
|
if exc.Error() != "junk_method" {
|
||||||
|
t.Fatalf("Expected 'junk_method' for exception but found '%s'", exc.Error())
|
||||||
|
}
|
||||||
|
if exc.TypeId() != WRONG_METHOD_NAME {
|
||||||
|
t.Fatalf("Expected type WRONG_METHOD_NAME for exception but found '%v'", exc.TypeId())
|
||||||
|
}
|
||||||
|
}
|
|
@ -21,6 +21,7 @@ package thrift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
@ -447,9 +448,6 @@ func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
|
||||||
if size < 0 {
|
if size < 0 {
|
||||||
return nil, invalidDataLength
|
return nil, invalidDataLength
|
||||||
}
|
}
|
||||||
if uint64(size) > p.trans.RemainingBytes() {
|
|
||||||
return nil, invalidDataLength
|
|
||||||
}
|
|
||||||
|
|
||||||
isize := int(size)
|
isize := int(size)
|
||||||
buf := make([]byte, isize)
|
buf := make([]byte, isize)
|
||||||
|
@ -457,8 +455,8 @@ func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
|
||||||
return buf, NewTProtocolException(err)
|
return buf, NewTProtocolException(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *TBinaryProtocol) Flush() (err error) {
|
func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
|
||||||
return NewTProtocolException(p.trans.Flush())
|
return NewTProtocolException(p.trans.Flush(ctx))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
|
func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
|
||||||
|
@ -480,9 +478,6 @@ func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
|
||||||
if size < 0 {
|
if size < 0 {
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
if uint64(size) > p.trans.RemainingBytes() {
|
|
||||||
return "", invalidDataLength
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
buf bytes.Buffer
|
buf bytes.Buffer
|
28
vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol_test.go
generated
vendored
Normal file
28
vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol_test.go
generated
vendored
Normal file
|
@ -0,0 +1,28 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestReadWriteBinaryProtocol(t *testing.T) {
|
||||||
|
ReadWriteProtocolTest(t, NewTBinaryProtocolFactoryDefault())
|
||||||
|
}
|
|
@ -21,6 +21,7 @@ package thrift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"context"
|
||||||
)
|
)
|
||||||
|
|
||||||
type TBufferedTransportFactory struct {
|
type TBufferedTransportFactory struct {
|
||||||
|
@ -32,8 +33,8 @@ type TBufferedTransport struct {
|
||||||
tp TTransport
|
tp TTransport
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *TBufferedTransportFactory) GetTransport(trans TTransport) TTransport {
|
func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
|
||||||
return NewTBufferedTransport(trans, p.size)
|
return NewTBufferedTransport(trans, p.size), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
|
func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
|
||||||
|
@ -78,12 +79,12 @@ func (p *TBufferedTransport) Write(b []byte) (int, error) {
|
||||||
return n, err
|
return n, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *TBufferedTransport) Flush() error {
|
func (p *TBufferedTransport) Flush(ctx context.Context) error {
|
||||||
if err := p.ReadWriter.Flush(); err != nil {
|
if err := p.ReadWriter.Flush(); err != nil {
|
||||||
p.ReadWriter.Writer.Reset(p.tp)
|
p.ReadWriter.Writer.Reset(p.tp)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return p.tp.Flush()
|
return p.tp.Flush(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
|
func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
|
29
vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport_test.go
generated
vendored
Normal file
29
vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport_test.go
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestBufferedTransport(t *testing.T) {
|
||||||
|
trans := NewTBufferedTransport(NewTMemoryBuffer(), 10240)
|
||||||
|
TransportTest(t, trans, trans)
|
||||||
|
}
|
85
vendor/github.com/apache/thrift/lib/go/thrift/client.go
generated
vendored
Normal file
85
vendor/github.com/apache/thrift/lib/go/thrift/client.go
generated
vendored
Normal file
|
@ -0,0 +1,85 @@
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TClient interface {
|
||||||
|
Call(ctx context.Context, method string, args, result TStruct) error
|
||||||
|
}
|
||||||
|
|
||||||
|
type TStandardClient struct {
|
||||||
|
seqId int32
|
||||||
|
iprot, oprot TProtocol
|
||||||
|
}
|
||||||
|
|
||||||
|
// TStandardClient implements TClient, and uses the standard message format for Thrift.
|
||||||
|
// It is not safe for concurrent use.
|
||||||
|
func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
|
||||||
|
return &TStandardClient{
|
||||||
|
iprot: inputProtocol,
|
||||||
|
oprot: outputProtocol,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
|
||||||
|
if err := oprot.WriteMessageBegin(method, CALL, seqId); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := args.Write(oprot); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := oprot.WriteMessageEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return oprot.Flush(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, result TStruct) error {
|
||||||
|
rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if method != rMethod {
|
||||||
|
return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
|
||||||
|
} else if seqId != rSeqId {
|
||||||
|
return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
|
||||||
|
} else if rTypeId == EXCEPTION {
|
||||||
|
var exception tApplicationException
|
||||||
|
if err := exception.Read(iprot); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := iprot.ReadMessageEnd(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &exception
|
||||||
|
} else if rTypeId != REPLY {
|
||||||
|
return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := result.Read(iprot); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return iprot.ReadMessageEnd()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error {
|
||||||
|
p.seqId++
|
||||||
|
seqId := p.seqId
|
||||||
|
|
||||||
|
if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// method is oneway
|
||||||
|
if result == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return p.Recv(p.iprot, seqId, method, result)
|
||||||
|
}
|
|
@ -19,12 +19,12 @@
|
||||||
|
|
||||||
package thrift
|
package thrift
|
||||||
|
|
||||||
// A processor is a generic object which operates upon an input stream and
|
import "context"
|
||||||
// writes to some output stream.
|
|
||||||
type TProcessor interface {
|
type mockProcessor struct {
|
||||||
Process(in, out TProtocol) (bool, TException)
|
ProcessFunc func(in, out TProtocol) (bool, TException)
|
||||||
}
|
}
|
||||||
|
|
||||||
type TProcessorFunction interface {
|
func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
|
||||||
Process(seqId int32, in, out TProtocol) (bool, TException)
|
return m.ProcessFunc(in, out)
|
||||||
}
|
}
|
|
@ -20,6 +20,7 @@
|
||||||
package thrift
|
package thrift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
@ -561,9 +562,6 @@ func (p *TCompactProtocol) ReadString() (value string, err error) {
|
||||||
if length < 0 {
|
if length < 0 {
|
||||||
return "", invalidDataLength
|
return "", invalidDataLength
|
||||||
}
|
}
|
||||||
if uint64(length) > p.trans.RemainingBytes() {
|
|
||||||
return "", invalidDataLength
|
|
||||||
}
|
|
||||||
|
|
||||||
if length == 0 {
|
if length == 0 {
|
||||||
return "", nil
|
return "", nil
|
||||||
|
@ -590,17 +588,14 @@ func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
|
||||||
if length < 0 {
|
if length < 0 {
|
||||||
return nil, invalidDataLength
|
return nil, invalidDataLength
|
||||||
}
|
}
|
||||||
if uint64(length) > p.trans.RemainingBytes() {
|
|
||||||
return nil, invalidDataLength
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := make([]byte, length)
|
buf := make([]byte, length)
|
||||||
_, e = io.ReadFull(p.trans, buf)
|
_, e = io.ReadFull(p.trans, buf)
|
||||||
return buf, NewTProtocolException(e)
|
return buf, NewTProtocolException(e)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *TCompactProtocol) Flush() (err error) {
|
func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
|
||||||
return NewTProtocolException(p.trans.Flush())
|
return NewTProtocolException(p.trans.Flush(ctx))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
|
func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
|
||||||
|
@ -806,7 +801,7 @@ func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
|
||||||
case COMPACT_STRUCT:
|
case COMPACT_STRUCT:
|
||||||
return STRUCT, nil
|
return STRUCT, nil
|
||||||
}
|
}
|
||||||
return STOP, TException(fmt.Errorf("don't know what type: %s", t&0x0f))
|
return STOP, TException(fmt.Errorf("don't know what type: %v", t&0x0f))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Given a TType value, find the appropriate TCompactProtocol.Types constant.
|
// Given a TType value, find the appropriate TCompactProtocol.Types constant.
|
60
vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol_test.go
generated
vendored
Normal file
60
vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol_test.go
generated
vendored
Normal file
|
@ -0,0 +1,60 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestReadWriteCompactProtocol(t *testing.T) {
|
||||||
|
ReadWriteProtocolTest(t, NewTCompactProtocolFactory())
|
||||||
|
|
||||||
|
transports := []TTransport{
|
||||||
|
NewTMemoryBuffer(),
|
||||||
|
NewStreamTransportRW(bytes.NewBuffer(make([]byte, 0, 16384))),
|
||||||
|
NewTFramedTransport(NewTMemoryBuffer()),
|
||||||
|
}
|
||||||
|
|
||||||
|
zlib0, _ := NewTZlibTransport(NewTMemoryBuffer(), 0)
|
||||||
|
zlib6, _ := NewTZlibTransport(NewTMemoryBuffer(), 6)
|
||||||
|
zlib9, _ := NewTZlibTransport(NewTFramedTransport(NewTMemoryBuffer()), 9)
|
||||||
|
transports = append(transports, zlib0, zlib6, zlib9)
|
||||||
|
|
||||||
|
for _, trans := range transports {
|
||||||
|
p := NewTCompactProtocol(trans)
|
||||||
|
ReadWriteBool(t, p, trans)
|
||||||
|
p = NewTCompactProtocol(trans)
|
||||||
|
ReadWriteByte(t, p, trans)
|
||||||
|
p = NewTCompactProtocol(trans)
|
||||||
|
ReadWriteI16(t, p, trans)
|
||||||
|
p = NewTCompactProtocol(trans)
|
||||||
|
ReadWriteI32(t, p, trans)
|
||||||
|
p = NewTCompactProtocol(trans)
|
||||||
|
ReadWriteI64(t, p, trans)
|
||||||
|
p = NewTCompactProtocol(trans)
|
||||||
|
ReadWriteDouble(t, p, trans)
|
||||||
|
p = NewTCompactProtocol(trans)
|
||||||
|
ReadWriteString(t, p, trans)
|
||||||
|
p = NewTCompactProtocol(trans)
|
||||||
|
ReadWriteBinary(t, p, trans)
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
}
|
24
vendor/github.com/apache/thrift/lib/go/thrift/context.go
generated
vendored
Normal file
24
vendor/github.com/apache/thrift/lib/go/thrift/context.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import "context"
|
||||||
|
|
||||||
|
var defaultCtx = context.Background()
|
|
@ -20,6 +20,7 @@
|
||||||
package thrift
|
package thrift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"log"
|
"log"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -258,8 +259,8 @@ func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) {
|
||||||
log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
|
log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
func (tdp *TDebugProtocol) Flush() (err error) {
|
func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) {
|
||||||
err = tdp.Delegate.Flush()
|
err = tdp.Delegate.Flush(ctx)
|
||||||
log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
|
log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
69
vendor/github.com/apache/thrift/lib/go/thrift/exception_test.go
generated
vendored
Normal file
69
vendor/github.com/apache/thrift/lib/go/thrift/exception_test.go
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestPrependError(t *testing.T) {
|
||||||
|
err := NewTApplicationException(INTERNAL_ERROR, "original error")
|
||||||
|
err2, ok := PrependError("Prepend: ", err).(TApplicationException)
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("Couldn't cast error TApplicationException")
|
||||||
|
}
|
||||||
|
if err2.Error() != "Prepend: original error" {
|
||||||
|
t.Fatal("Unexpected error string")
|
||||||
|
}
|
||||||
|
if err2.TypeId() != INTERNAL_ERROR {
|
||||||
|
t.Fatal("Unexpected type error")
|
||||||
|
}
|
||||||
|
|
||||||
|
err3 := NewTProtocolExceptionWithType(INVALID_DATA, errors.New("original error"))
|
||||||
|
err4, ok := PrependError("Prepend: ", err3).(TProtocolException)
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("Couldn't cast error TProtocolException")
|
||||||
|
}
|
||||||
|
if err4.Error() != "Prepend: original error" {
|
||||||
|
t.Fatal("Unexpected error string")
|
||||||
|
}
|
||||||
|
if err4.TypeId() != INVALID_DATA {
|
||||||
|
t.Fatal("Unexpected type error")
|
||||||
|
}
|
||||||
|
|
||||||
|
err5 := NewTTransportException(TIMED_OUT, "original error")
|
||||||
|
err6, ok := PrependError("Prepend: ", err5).(TTransportException)
|
||||||
|
if !ok {
|
||||||
|
t.Fatal("Couldn't cast error TTransportException")
|
||||||
|
}
|
||||||
|
if err6.Error() != "Prepend: original error" {
|
||||||
|
t.Fatal("Unexpected error string")
|
||||||
|
}
|
||||||
|
if err6.TypeId() != TIMED_OUT {
|
||||||
|
t.Fatal("Unexpected type error")
|
||||||
|
}
|
||||||
|
|
||||||
|
err7 := errors.New("original error")
|
||||||
|
err8 := PrependError("Prepend: ", err7)
|
||||||
|
if err8.Error() != "Prepend: original error" {
|
||||||
|
t.Fatal("Unexpected error string")
|
||||||
|
}
|
||||||
|
}
|
|
@ -22,6 +22,7 @@ package thrift
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
@ -51,8 +52,12 @@ func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength ui
|
||||||
return &tFramedTransportFactory{factory: factory, maxLength: maxLength}
|
return &tFramedTransportFactory{factory: factory, maxLength: maxLength}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *tFramedTransportFactory) GetTransport(base TTransport) TTransport {
|
func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) {
|
||||||
return NewTFramedTransportMaxLength(p.factory.GetTransport(base), p.maxLength)
|
tt, err := p.factory.GetTransport(base)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return NewTFramedTransportMaxLength(tt, p.maxLength), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewTFramedTransport(transport TTransport) *TFramedTransport {
|
func NewTFramedTransport(transport TTransport) *TFramedTransport {
|
||||||
|
@ -131,21 +136,23 @@ func (p *TFramedTransport) WriteString(s string) (n int, err error) {
|
||||||
return p.buf.WriteString(s)
|
return p.buf.WriteString(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *TFramedTransport) Flush() error {
|
func (p *TFramedTransport) Flush(ctx context.Context) error {
|
||||||
size := p.buf.Len()
|
size := p.buf.Len()
|
||||||
buf := p.buffer[:4]
|
buf := p.buffer[:4]
|
||||||
binary.BigEndian.PutUint32(buf, uint32(size))
|
binary.BigEndian.PutUint32(buf, uint32(size))
|
||||||
_, err := p.transport.Write(buf)
|
_, err := p.transport.Write(buf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
p.buf.Truncate(0)
|
||||||
return NewTTransportExceptionFromError(err)
|
return NewTTransportExceptionFromError(err)
|
||||||
}
|
}
|
||||||
if size > 0 {
|
if size > 0 {
|
||||||
if n, err := p.buf.WriteTo(p.transport); err != nil {
|
if n, err := p.buf.WriteTo(p.transport); err != nil {
|
||||||
print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
|
print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
|
||||||
|
p.buf.Truncate(0)
|
||||||
return NewTTransportExceptionFromError(err)
|
return NewTTransportExceptionFromError(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err = p.transport.Flush()
|
err = p.transport.Flush(ctx)
|
||||||
return NewTTransportExceptionFromError(err)
|
return NewTTransportExceptionFromError(err)
|
||||||
}
|
}
|
||||||
|
|
29
vendor/github.com/apache/thrift/lib/go/thrift/framed_transport_test.go
generated
vendored
Normal file
29
vendor/github.com/apache/thrift/lib/go/thrift/framed_transport_test.go
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFramedTransport(t *testing.T) {
|
||||||
|
trans := NewTFramedTransport(NewTMemoryBuffer())
|
||||||
|
TransportTest(t, trans, trans)
|
||||||
|
}
|
|
@ -21,6 +21,7 @@ package thrift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -46,27 +47,16 @@ type THttpClient struct {
|
||||||
type THttpClientTransportFactory struct {
|
type THttpClientTransportFactory struct {
|
||||||
options THttpClientOptions
|
options THttpClientOptions
|
||||||
url string
|
url string
|
||||||
isPost bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *THttpClientTransportFactory) GetTransport(trans TTransport) TTransport {
|
func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
|
||||||
if trans != nil {
|
if trans != nil {
|
||||||
t, ok := trans.(*THttpClient)
|
t, ok := trans.(*THttpClient)
|
||||||
if ok && t.url != nil {
|
if ok && t.url != nil {
|
||||||
if t.requestBuffer != nil {
|
return NewTHttpClientWithOptions(t.url.String(), p.options)
|
||||||
t2, _ := NewTHttpPostClientWithOptions(t.url.String(), p.options)
|
|
||||||
return t2
|
|
||||||
}
|
|
||||||
t2, _ := NewTHttpClientWithOptions(t.url.String(), p.options)
|
|
||||||
return t2
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if p.isPost {
|
return NewTHttpClientWithOptions(p.url, p.options)
|
||||||
s, _ := NewTHttpPostClientWithOptions(p.url, p.options)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
s, _ := NewTHttpClientWithOptions(p.url, p.options)
|
|
||||||
return s
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type THttpClientOptions struct {
|
type THttpClientOptions struct {
|
||||||
|
@ -79,39 +69,10 @@ func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
|
func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
|
||||||
return &THttpClientTransportFactory{url: url, isPost: false, options: options}
|
return &THttpClientTransportFactory{url: url, options: options}
|
||||||
}
|
|
||||||
|
|
||||||
func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
|
|
||||||
return NewTHttpPostClientTransportFactoryWithOptions(url, THttpClientOptions{})
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
|
|
||||||
return &THttpClientTransportFactory{url: url, isPost: true, options: options}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
|
func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
|
||||||
parsedURL, err := url.Parse(urlstr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
response, err := http.Get(urlstr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
client := options.Client
|
|
||||||
if client == nil {
|
|
||||||
client = DefaultHttpClient
|
|
||||||
}
|
|
||||||
httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}}
|
|
||||||
return &THttpClient{client: client, response: response, url: parsedURL, header: httpHeader}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewTHttpClient(urlstr string) (TTransport, error) {
|
|
||||||
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
|
|
||||||
parsedURL, err := url.Parse(urlstr)
|
parsedURL, err := url.Parse(urlstr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -121,12 +82,12 @@ func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (T
|
||||||
if client == nil {
|
if client == nil {
|
||||||
client = DefaultHttpClient
|
client = DefaultHttpClient
|
||||||
}
|
}
|
||||||
httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}}
|
httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}}
|
||||||
return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
|
return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewTHttpPostClient(urlstr string) (TTransport, error) {
|
func NewTHttpClient(urlstr string) (TTransport, error) {
|
||||||
return NewTHttpPostClientWithOptions(urlstr, THttpClientOptions{})
|
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set the HTTP Header for this specific Thrift Transport
|
// Set the HTTP Header for this specific Thrift Transport
|
||||||
|
@ -221,7 +182,7 @@ func (p *THttpClient) WriteString(s string) (n int, err error) {
|
||||||
return p.requestBuffer.WriteString(s)
|
return p.requestBuffer.WriteString(s)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *THttpClient) Flush() error {
|
func (p *THttpClient) Flush(ctx context.Context) error {
|
||||||
// Close any previous response body to avoid leaking connections.
|
// Close any previous response body to avoid leaking connections.
|
||||||
p.closeResponse()
|
p.closeResponse()
|
||||||
|
|
||||||
|
@ -230,6 +191,9 @@ func (p *THttpClient) Flush() error {
|
||||||
return NewTTransportExceptionFromError(err)
|
return NewTTransportExceptionFromError(err)
|
||||||
}
|
}
|
||||||
req.Header = p.header
|
req.Header = p.header
|
||||||
|
if ctx != nil {
|
||||||
|
req = req.WithContext(ctx)
|
||||||
|
}
|
||||||
response, err := p.client.Do(req)
|
response, err := p.client.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return NewTTransportExceptionFromError(err)
|
return NewTTransportExceptionFromError(err)
|
||||||
|
@ -256,3 +220,23 @@ func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
|
||||||
const maxSize = ^uint64(0)
|
const maxSize = ^uint64(0)
|
||||||
return maxSize // the thruth is, we just don't know unless framed is used
|
return maxSize // the thruth is, we just don't know unless framed is used
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use NewTHttpClientTransportFactory instead.
|
||||||
|
func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
|
||||||
|
return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead.
|
||||||
|
func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
|
||||||
|
return NewTHttpClientTransportFactoryWithOptions(url, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use NewTHttpClientWithOptions instead.
|
||||||
|
func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
|
||||||
|
return NewTHttpClientWithOptions(urlstr, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use NewTHttpClient instead.
|
||||||
|
func NewTHttpPostClient(urlstr string) (TTransport, error) {
|
||||||
|
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
|
||||||
|
}
|
106
vendor/github.com/apache/thrift/lib/go/thrift/http_client_test.go
generated
vendored
Normal file
106
vendor/github.com/apache/thrift/lib/go/thrift/http_client_test.go
generated
vendored
Normal file
|
@ -0,0 +1,106 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHttpClient(t *testing.T) {
|
||||||
|
l, addr := HttpClientSetupForTest(t)
|
||||||
|
if l != nil {
|
||||||
|
defer l.Close()
|
||||||
|
}
|
||||||
|
trans, err := NewTHttpPostClient("http://" + addr.String())
|
||||||
|
if err != nil {
|
||||||
|
l.Close()
|
||||||
|
t.Fatalf("Unable to connect to %s: %s", addr.String(), err)
|
||||||
|
}
|
||||||
|
TransportTest(t, trans, trans)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHttpClientHeaders(t *testing.T) {
|
||||||
|
l, addr := HttpClientSetupForTest(t)
|
||||||
|
if l != nil {
|
||||||
|
defer l.Close()
|
||||||
|
}
|
||||||
|
trans, err := NewTHttpPostClient("http://" + addr.String())
|
||||||
|
if err != nil {
|
||||||
|
l.Close()
|
||||||
|
t.Fatalf("Unable to connect to %s: %s", addr.String(), err)
|
||||||
|
}
|
||||||
|
TransportHeaderTest(t, trans, trans)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHttpCustomClient(t *testing.T) {
|
||||||
|
l, addr := HttpClientSetupForTest(t)
|
||||||
|
if l != nil {
|
||||||
|
defer l.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
httpTransport := &customHttpTransport{}
|
||||||
|
|
||||||
|
trans, err := NewTHttpPostClientWithOptions("http://"+addr.String(), THttpClientOptions{
|
||||||
|
Client: &http.Client{
|
||||||
|
Transport: httpTransport,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
l.Close()
|
||||||
|
t.Fatalf("Unable to connect to %s: %s", addr.String(), err)
|
||||||
|
}
|
||||||
|
TransportHeaderTest(t, trans, trans)
|
||||||
|
|
||||||
|
if !httpTransport.hit {
|
||||||
|
t.Fatalf("Custom client was not used")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHttpCustomClientPackageScope(t *testing.T) {
|
||||||
|
l, addr := HttpClientSetupForTest(t)
|
||||||
|
if l != nil {
|
||||||
|
defer l.Close()
|
||||||
|
}
|
||||||
|
httpTransport := &customHttpTransport{}
|
||||||
|
DefaultHttpClient = &http.Client{
|
||||||
|
Transport: httpTransport,
|
||||||
|
}
|
||||||
|
|
||||||
|
trans, err := NewTHttpPostClient("http://" + addr.String())
|
||||||
|
if err != nil {
|
||||||
|
l.Close()
|
||||||
|
t.Fatalf("Unable to connect to %s: %s", addr.String(), err)
|
||||||
|
}
|
||||||
|
TransportHeaderTest(t, trans, trans)
|
||||||
|
|
||||||
|
if !httpTransport.hit {
|
||||||
|
t.Fatalf("Custom client was not used")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type customHttpTransport struct {
|
||||||
|
hit bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *customHttpTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
|
c.hit = true
|
||||||
|
return http.DefaultTransport.RoundTrip(req)
|
||||||
|
}
|
63
vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go
generated
vendored
Normal file
63
vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go
generated
vendored
Normal file
|
@ -0,0 +1,63 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"compress/gzip"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function
|
||||||
|
func NewThriftHandlerFunc(processor TProcessor,
|
||||||
|
inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
|
return gz(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.Header().Add("Content-Type", "application/x-thrift")
|
||||||
|
|
||||||
|
transport := NewStreamTransport(r.Body, w)
|
||||||
|
processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// gz transparently compresses the HTTP response if the client supports it.
|
||||||
|
func gz(handler http.HandlerFunc) http.HandlerFunc {
|
||||||
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
|
||||||
|
handler(w, r)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.Header().Set("Content-Encoding", "gzip")
|
||||||
|
gz := gzip.NewWriter(w)
|
||||||
|
defer gz.Close()
|
||||||
|
gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
|
||||||
|
handler(gzw, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type gzipResponseWriter struct {
|
||||||
|
io.Writer
|
||||||
|
http.ResponseWriter
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w gzipResponseWriter) Write(b []byte) (int, error) {
|
||||||
|
return w.Writer.Write(b)
|
||||||
|
}
|
|
@ -21,6 +21,7 @@ package thrift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"context"
|
||||||
"io"
|
"io"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -38,38 +39,38 @@ type StreamTransportFactory struct {
|
||||||
isReadWriter bool
|
isReadWriter bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *StreamTransportFactory) GetTransport(trans TTransport) TTransport {
|
func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
|
||||||
if trans != nil {
|
if trans != nil {
|
||||||
t, ok := trans.(*StreamTransport)
|
t, ok := trans.(*StreamTransport)
|
||||||
if ok {
|
if ok {
|
||||||
if t.isReadWriter {
|
if t.isReadWriter {
|
||||||
return NewStreamTransportRW(t.Reader.(io.ReadWriter))
|
return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil
|
||||||
}
|
}
|
||||||
if t.Reader != nil && t.Writer != nil {
|
if t.Reader != nil && t.Writer != nil {
|
||||||
return NewStreamTransport(t.Reader, t.Writer)
|
return NewStreamTransport(t.Reader, t.Writer), nil
|
||||||
}
|
}
|
||||||
if t.Reader != nil && t.Writer == nil {
|
if t.Reader != nil && t.Writer == nil {
|
||||||
return NewStreamTransportR(t.Reader)
|
return NewStreamTransportR(t.Reader), nil
|
||||||
}
|
}
|
||||||
if t.Reader == nil && t.Writer != nil {
|
if t.Reader == nil && t.Writer != nil {
|
||||||
return NewStreamTransportW(t.Writer)
|
return NewStreamTransportW(t.Writer), nil
|
||||||
}
|
}
|
||||||
return &StreamTransport{}
|
return &StreamTransport{}, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if p.isReadWriter {
|
if p.isReadWriter {
|
||||||
return NewStreamTransportRW(p.Reader.(io.ReadWriter))
|
return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil
|
||||||
}
|
}
|
||||||
if p.Reader != nil && p.Writer != nil {
|
if p.Reader != nil && p.Writer != nil {
|
||||||
return NewStreamTransport(p.Reader, p.Writer)
|
return NewStreamTransport(p.Reader, p.Writer), nil
|
||||||
}
|
}
|
||||||
if p.Reader != nil && p.Writer == nil {
|
if p.Reader != nil && p.Writer == nil {
|
||||||
return NewStreamTransportR(p.Reader)
|
return NewStreamTransportR(p.Reader), nil
|
||||||
}
|
}
|
||||||
if p.Reader == nil && p.Writer != nil {
|
if p.Reader == nil && p.Writer != nil {
|
||||||
return NewStreamTransportW(p.Writer)
|
return NewStreamTransportW(p.Writer), nil
|
||||||
}
|
}
|
||||||
return &StreamTransport{}
|
return &StreamTransport{}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
|
func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
|
||||||
|
@ -138,7 +139,7 @@ func (p *StreamTransport) Close() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Flushes the underlying output stream if not null.
|
// Flushes the underlying output stream if not null.
|
||||||
func (p *StreamTransport) Flush() error {
|
func (p *StreamTransport) Flush(ctx context.Context) error {
|
||||||
if p.Writer == nil {
|
if p.Writer == nil {
|
||||||
return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
|
return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
|
||||||
}
|
}
|
52
vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport_test.go
generated
vendored
Normal file
52
vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport_test.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestStreamTransport(t *testing.T) {
|
||||||
|
trans := NewStreamTransportRW(bytes.NewBuffer(make([]byte, 0, 1024)))
|
||||||
|
TransportTest(t, trans, trans)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStreamTransportOpenClose(t *testing.T) {
|
||||||
|
trans := NewStreamTransportRW(bytes.NewBuffer(make([]byte, 0, 1024)))
|
||||||
|
if !trans.IsOpen() {
|
||||||
|
t.Fatal("StreamTransport should be already open")
|
||||||
|
}
|
||||||
|
if trans.Open() == nil {
|
||||||
|
t.Fatal("StreamTransport should return error when open twice")
|
||||||
|
}
|
||||||
|
if trans.Close() != nil {
|
||||||
|
t.Fatal("StreamTransport should not return error when closing open transport")
|
||||||
|
}
|
||||||
|
if trans.IsOpen() {
|
||||||
|
t.Fatal("StreamTransport should not be open after close")
|
||||||
|
}
|
||||||
|
if trans.Close() == nil {
|
||||||
|
t.Fatal("StreamTransport should return error when closing a non open transport")
|
||||||
|
}
|
||||||
|
if trans.Open() == nil {
|
||||||
|
t.Fatal("StreamTransport should not be able to reopen")
|
||||||
|
}
|
||||||
|
}
|
|
@ -20,6 +20,7 @@
|
||||||
package thrift
|
package thrift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"fmt"
|
"fmt"
|
||||||
)
|
)
|
||||||
|
@ -438,10 +439,10 @@ func (p *TJSONProtocol) ReadBinary() ([]byte, error) {
|
||||||
return v, p.ParsePostValue()
|
return v, p.ParsePostValue()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *TJSONProtocol) Flush() (err error) {
|
func (p *TJSONProtocol) Flush(ctx context.Context) (err error) {
|
||||||
err = p.writer.Flush()
|
err = p.writer.Flush()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = p.trans.Flush()
|
err = p.trans.Flush(ctx)
|
||||||
}
|
}
|
||||||
return NewTProtocolException(err)
|
return NewTProtocolException(err)
|
||||||
}
|
}
|
650
vendor/github.com/apache/thrift/lib/go/thrift/json_protocol_test.go
generated
vendored
Normal file
650
vendor/github.com/apache/thrift/lib/go/thrift/json_protocol_test.go
generated
vendored
Normal file
|
@ -0,0 +1,650 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolBool(t *testing.T) {
|
||||||
|
thetype := "boolean"
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
for _, value := range BOOL_VALUES {
|
||||||
|
if e := p.WriteBool(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
s := trans.String()
|
||||||
|
expected := ""
|
||||||
|
if value {
|
||||||
|
expected = "1"
|
||||||
|
} else {
|
||||||
|
expected = "0"
|
||||||
|
}
|
||||||
|
if s != expected {
|
||||||
|
t.Fatalf("Bad value for %s %v: %s expected", thetype, value, s)
|
||||||
|
}
|
||||||
|
v := -1
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || (v != 0) != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadJSONProtocolBool(t *testing.T) {
|
||||||
|
thetype := "boolean"
|
||||||
|
for _, value := range BOOL_VALUES {
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
if value {
|
||||||
|
trans.Write([]byte{'1'}) // not JSON_TRUE
|
||||||
|
} else {
|
||||||
|
trans.Write([]byte{'0'}) // not JSON_FALSE
|
||||||
|
}
|
||||||
|
trans.Flush(context.Background())
|
||||||
|
s := trans.String()
|
||||||
|
v, e := p.ReadBool()
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
vv := -1
|
||||||
|
if err := json.Unmarshal([]byte(s), &vv); err != nil || (vv != 0) != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, vv)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolByte(t *testing.T) {
|
||||||
|
thetype := "byte"
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
for _, value := range BYTE_VALUES {
|
||||||
|
if e := p.WriteByte(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
s := trans.String()
|
||||||
|
if s != fmt.Sprint(value) {
|
||||||
|
t.Fatalf("Bad value for %s %v: %s", thetype, value, s)
|
||||||
|
}
|
||||||
|
v := int8(0)
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadJSONProtocolByte(t *testing.T) {
|
||||||
|
thetype := "byte"
|
||||||
|
for _, value := range BYTE_VALUES {
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
trans.WriteString(strconv.Itoa(int(value)))
|
||||||
|
trans.Flush(context.Background())
|
||||||
|
s := trans.String()
|
||||||
|
v, e := p.ReadByte()
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolI16(t *testing.T) {
|
||||||
|
thetype := "int16"
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
for _, value := range INT16_VALUES {
|
||||||
|
if e := p.WriteI16(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
s := trans.String()
|
||||||
|
if s != fmt.Sprint(value) {
|
||||||
|
t.Fatalf("Bad value for %s %v: %s", thetype, value, s)
|
||||||
|
}
|
||||||
|
v := int16(0)
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadJSONProtocolI16(t *testing.T) {
|
||||||
|
thetype := "int16"
|
||||||
|
for _, value := range INT16_VALUES {
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
trans.WriteString(strconv.Itoa(int(value)))
|
||||||
|
trans.Flush(context.Background())
|
||||||
|
s := trans.String()
|
||||||
|
v, e := p.ReadI16()
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolI32(t *testing.T) {
|
||||||
|
thetype := "int32"
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
for _, value := range INT32_VALUES {
|
||||||
|
if e := p.WriteI32(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
s := trans.String()
|
||||||
|
if s != fmt.Sprint(value) {
|
||||||
|
t.Fatalf("Bad value for %s %v: %s", thetype, value, s)
|
||||||
|
}
|
||||||
|
v := int32(0)
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadJSONProtocolI32(t *testing.T) {
|
||||||
|
thetype := "int32"
|
||||||
|
for _, value := range INT32_VALUES {
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
trans.WriteString(strconv.Itoa(int(value)))
|
||||||
|
trans.Flush(context.Background())
|
||||||
|
s := trans.String()
|
||||||
|
v, e := p.ReadI32()
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolI64(t *testing.T) {
|
||||||
|
thetype := "int64"
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
for _, value := range INT64_VALUES {
|
||||||
|
if e := p.WriteI64(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
s := trans.String()
|
||||||
|
if s != fmt.Sprint(value) {
|
||||||
|
t.Fatalf("Bad value for %s %v: %s", thetype, value, s)
|
||||||
|
}
|
||||||
|
v := int64(0)
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadJSONProtocolI64(t *testing.T) {
|
||||||
|
thetype := "int64"
|
||||||
|
for _, value := range INT64_VALUES {
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
trans.WriteString(strconv.FormatInt(value, 10))
|
||||||
|
trans.Flush(context.Background())
|
||||||
|
s := trans.String()
|
||||||
|
v, e := p.ReadI64()
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolDouble(t *testing.T) {
|
||||||
|
thetype := "double"
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
for _, value := range DOUBLE_VALUES {
|
||||||
|
if e := p.WriteDouble(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
s := trans.String()
|
||||||
|
if math.IsInf(value, 1) {
|
||||||
|
if s != jsonQuote(JSON_INFINITY) {
|
||||||
|
t.Fatalf("Bad value for %s %v, wrote: %v, expected: %v", thetype, value, s, jsonQuote(JSON_INFINITY))
|
||||||
|
}
|
||||||
|
} else if math.IsInf(value, -1) {
|
||||||
|
if s != jsonQuote(JSON_NEGATIVE_INFINITY) {
|
||||||
|
t.Fatalf("Bad value for %s %v, wrote: %v, expected: %v", thetype, value, s, jsonQuote(JSON_NEGATIVE_INFINITY))
|
||||||
|
}
|
||||||
|
} else if math.IsNaN(value) {
|
||||||
|
if s != jsonQuote(JSON_NAN) {
|
||||||
|
t.Fatalf("Bad value for %s %v, wrote: %v, expected: %v", thetype, value, s, jsonQuote(JSON_NAN))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if s != fmt.Sprint(value) {
|
||||||
|
t.Fatalf("Bad value for %s %v: %s", thetype, value, s)
|
||||||
|
}
|
||||||
|
v := float64(0)
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadJSONProtocolDouble(t *testing.T) {
|
||||||
|
thetype := "double"
|
||||||
|
for _, value := range DOUBLE_VALUES {
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
n := NewNumericFromDouble(value)
|
||||||
|
trans.WriteString(n.String())
|
||||||
|
trans.Flush(context.Background())
|
||||||
|
s := trans.String()
|
||||||
|
v, e := p.ReadDouble()
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if math.IsInf(value, 1) {
|
||||||
|
if !math.IsInf(v, 1) {
|
||||||
|
t.Fatalf("Bad value for %s %v, wrote: %v, received: %v", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
} else if math.IsInf(value, -1) {
|
||||||
|
if !math.IsInf(v, -1) {
|
||||||
|
t.Fatalf("Bad value for %s %v, wrote: %v, received: %v", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
} else if math.IsNaN(value) {
|
||||||
|
if !math.IsNaN(v) {
|
||||||
|
t.Fatalf("Bad value for %s %v, wrote: %v, received: %v", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if v != value {
|
||||||
|
t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolString(t *testing.T) {
|
||||||
|
thetype := "string"
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
for _, value := range STRING_VALUES {
|
||||||
|
if e := p.WriteString(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
s := trans.String()
|
||||||
|
if s[0] != '"' || s[len(s)-1] != '"' {
|
||||||
|
t.Fatalf("Bad value for %s '%v', wrote '%v', expected: %v", thetype, value, s, fmt.Sprint("\"", value, "\""))
|
||||||
|
}
|
||||||
|
v := new(string)
|
||||||
|
if err := json.Unmarshal([]byte(s), v); err != nil || *v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, *v)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadJSONProtocolString(t *testing.T) {
|
||||||
|
thetype := "string"
|
||||||
|
for _, value := range STRING_VALUES {
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
trans.WriteString(jsonQuote(value))
|
||||||
|
trans.Flush(context.Background())
|
||||||
|
s := trans.String()
|
||||||
|
v, e := p.ReadString()
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
v1 := new(string)
|
||||||
|
if err := json.Unmarshal([]byte(s), v1); err != nil || *v1 != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, *v1)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolBinary(t *testing.T) {
|
||||||
|
thetype := "binary"
|
||||||
|
value := protocol_bdata
|
||||||
|
b64value := make([]byte, base64.StdEncoding.EncodedLen(len(protocol_bdata)))
|
||||||
|
base64.StdEncoding.Encode(b64value, value)
|
||||||
|
b64String := string(b64value)
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
if e := p.WriteBinary(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
s := trans.String()
|
||||||
|
expectedString := fmt.Sprint("\"", b64String, "\"")
|
||||||
|
if s != expectedString {
|
||||||
|
t.Fatalf("Bad value for %s %v\n wrote: \"%v\"\nexpected: \"%v\"", thetype, value, s, expectedString)
|
||||||
|
}
|
||||||
|
v1, err := p.ReadBinary()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to read binary: %s", err.Error())
|
||||||
|
}
|
||||||
|
if len(v1) != len(value) {
|
||||||
|
t.Fatalf("Invalid value for binary\nexpected: \"%v\"\n read: \"%v\"", value, v1)
|
||||||
|
}
|
||||||
|
for k, v := range value {
|
||||||
|
if v1[k] != v {
|
||||||
|
t.Fatalf("Invalid value for binary at %v\nexpected: \"%v\"\n read: \"%v\"", k, v, v1[k])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadJSONProtocolBinary(t *testing.T) {
|
||||||
|
thetype := "binary"
|
||||||
|
value := protocol_bdata
|
||||||
|
b64value := make([]byte, base64.StdEncoding.EncodedLen(len(protocol_bdata)))
|
||||||
|
base64.StdEncoding.Encode(b64value, value)
|
||||||
|
b64String := string(b64value)
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
trans.WriteString(jsonQuote(b64String))
|
||||||
|
trans.Flush(context.Background())
|
||||||
|
s := trans.String()
|
||||||
|
v, e := p.ReadBinary()
|
||||||
|
if e != nil {
|
||||||
|
t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
if len(v) != len(value) {
|
||||||
|
t.Fatalf("Bad value for %s value length %v, wrote: %v, received length: %v", thetype, len(value), s, len(v))
|
||||||
|
}
|
||||||
|
for i := 0; i < len(v); i++ {
|
||||||
|
if v[i] != value[i] {
|
||||||
|
t.Fatalf("Bad value for %s at index %d value %v, wrote: %v, received: %v", thetype, i, value[i], s, v[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v1 := new(string)
|
||||||
|
if err := json.Unmarshal([]byte(s), v1); err != nil || *v1 != b64String {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, *v1)
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolList(t *testing.T) {
|
||||||
|
thetype := "list"
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
p.WriteListBegin(TType(DOUBLE), len(DOUBLE_VALUES))
|
||||||
|
for _, value := range DOUBLE_VALUES {
|
||||||
|
if e := p.WriteDouble(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.WriteListEnd()
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error())
|
||||||
|
}
|
||||||
|
str := trans.String()
|
||||||
|
str1 := new([]interface{})
|
||||||
|
err := json.Unmarshal([]byte(str), str1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to decode %s, wrote: %s", thetype, str)
|
||||||
|
}
|
||||||
|
l := *str1
|
||||||
|
if len(l) < 2 {
|
||||||
|
t.Fatalf("List must be at least of length two to include metadata")
|
||||||
|
}
|
||||||
|
if l[0] != "dbl" {
|
||||||
|
t.Fatal("Invalid type for list, expected: ", STRING, ", but was: ", l[0])
|
||||||
|
}
|
||||||
|
if int(l[1].(float64)) != len(DOUBLE_VALUES) {
|
||||||
|
t.Fatal("Invalid length for list, expected: ", len(DOUBLE_VALUES), ", but was: ", l[1])
|
||||||
|
}
|
||||||
|
for k, value := range DOUBLE_VALUES {
|
||||||
|
s := l[k+2]
|
||||||
|
if math.IsInf(value, 1) {
|
||||||
|
if s.(string) != JSON_INFINITY {
|
||||||
|
t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_INFINITY), str)
|
||||||
|
}
|
||||||
|
} else if math.IsInf(value, 0) {
|
||||||
|
if s.(string) != JSON_NEGATIVE_INFINITY {
|
||||||
|
t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NEGATIVE_INFINITY), str)
|
||||||
|
}
|
||||||
|
} else if math.IsNaN(value) {
|
||||||
|
if s.(string) != JSON_NAN {
|
||||||
|
t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NAN), str)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if s.(float64) != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s'", thetype, value, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolSet(t *testing.T) {
|
||||||
|
thetype := "set"
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
p.WriteSetBegin(TType(DOUBLE), len(DOUBLE_VALUES))
|
||||||
|
for _, value := range DOUBLE_VALUES {
|
||||||
|
if e := p.WriteDouble(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.WriteSetEnd()
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error())
|
||||||
|
}
|
||||||
|
str := trans.String()
|
||||||
|
str1 := new([]interface{})
|
||||||
|
err := json.Unmarshal([]byte(str), str1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to decode %s, wrote: %s", thetype, str)
|
||||||
|
}
|
||||||
|
l := *str1
|
||||||
|
if len(l) < 2 {
|
||||||
|
t.Fatalf("Set must be at least of length two to include metadata")
|
||||||
|
}
|
||||||
|
if l[0] != "dbl" {
|
||||||
|
t.Fatal("Invalid type for set, expected: ", DOUBLE, ", but was: ", l[0])
|
||||||
|
}
|
||||||
|
if int(l[1].(float64)) != len(DOUBLE_VALUES) {
|
||||||
|
t.Fatal("Invalid length for set, expected: ", len(DOUBLE_VALUES), ", but was: ", l[1])
|
||||||
|
}
|
||||||
|
for k, value := range DOUBLE_VALUES {
|
||||||
|
s := l[k+2]
|
||||||
|
if math.IsInf(value, 1) {
|
||||||
|
if s.(string) != JSON_INFINITY {
|
||||||
|
t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_INFINITY), str)
|
||||||
|
}
|
||||||
|
} else if math.IsInf(value, 0) {
|
||||||
|
if s.(string) != JSON_NEGATIVE_INFINITY {
|
||||||
|
t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NEGATIVE_INFINITY), str)
|
||||||
|
}
|
||||||
|
} else if math.IsNaN(value) {
|
||||||
|
if s.(string) != JSON_NAN {
|
||||||
|
t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NAN), str)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if s.(float64) != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s'", thetype, value, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
trans.Reset()
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestWriteJSONProtocolMap(t *testing.T) {
|
||||||
|
thetype := "map"
|
||||||
|
trans := NewTMemoryBuffer()
|
||||||
|
p := NewTJSONProtocol(trans)
|
||||||
|
p.WriteMapBegin(TType(I32), TType(DOUBLE), len(DOUBLE_VALUES))
|
||||||
|
for k, value := range DOUBLE_VALUES {
|
||||||
|
if e := p.WriteI32(int32(k)); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s key int32 value %v due to error: %s", thetype, k, e.Error())
|
||||||
|
}
|
||||||
|
if e := p.WriteDouble(value); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s value float64 value %v due to error: %s", thetype, value, e.Error())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.WriteMapEnd()
|
||||||
|
if e := p.Flush(context.Background()); e != nil {
|
||||||
|
t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error())
|
||||||
|
}
|
||||||
|
str := trans.String()
|
||||||
|
if str[0] != '[' || str[len(str)-1] != ']' {
|
||||||
|
t.Fatalf("Bad value for %s, wrote: %v, in go: %v", thetype, str, DOUBLE_VALUES)
|
||||||
|
}
|
||||||
|
expectedKeyType, expectedValueType, expectedSize, err := p.ReadMapBegin()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error while reading map begin: %s", err.Error())
|
||||||
|
}
|
||||||
|
if expectedKeyType != I32 {
|
||||||
|
t.Fatal("Expected map key type ", I32, ", but was ", expectedKeyType)
|
||||||
|
}
|
||||||
|
if expectedValueType != DOUBLE {
|
||||||
|
t.Fatal("Expected map value type ", DOUBLE, ", but was ", expectedValueType)
|
||||||
|
}
|
||||||
|
if expectedSize != len(DOUBLE_VALUES) {
|
||||||
|
t.Fatal("Expected map size of ", len(DOUBLE_VALUES), ", but was ", expectedSize)
|
||||||
|
}
|
||||||
|
for k, value := range DOUBLE_VALUES {
|
||||||
|
ik, err := p.ReadI32()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Bad key for %s index %v, wrote: %v, expected: %v, error: %s", thetype, k, ik, string(k), err.Error())
|
||||||
|
}
|
||||||
|
if int(ik) != k {
|
||||||
|
t.Fatalf("Bad key for %s index %v, wrote: %v, expected: %v", thetype, k, ik, k)
|
||||||
|
}
|
||||||
|
dv, err := p.ReadDouble()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Bad value for %s index %v, wrote: %v, expected: %v, error: %s", thetype, k, dv, value, err.Error())
|
||||||
|
}
|
||||||
|
s := strconv.FormatFloat(dv, 'g', 10, 64)
|
||||||
|
if math.IsInf(value, 1) {
|
||||||
|
if !math.IsInf(dv, 1) {
|
||||||
|
t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected: %v", thetype, k, value, s, jsonQuote(JSON_INFINITY))
|
||||||
|
}
|
||||||
|
} else if math.IsInf(value, 0) {
|
||||||
|
if !math.IsInf(dv, 0) {
|
||||||
|
t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected: %v", thetype, k, value, s, jsonQuote(JSON_NEGATIVE_INFINITY))
|
||||||
|
}
|
||||||
|
} else if math.IsNaN(value) {
|
||||||
|
if !math.IsNaN(dv) {
|
||||||
|
t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected: %v", thetype, k, value, s, jsonQuote(JSON_NAN))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
expected := strconv.FormatFloat(value, 'g', 10, 64)
|
||||||
|
if s != expected {
|
||||||
|
t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected %v", thetype, k, value, s, expected)
|
||||||
|
}
|
||||||
|
v := float64(0)
|
||||||
|
if err := json.Unmarshal([]byte(s), &v); err != nil || v != value {
|
||||||
|
t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = p.ReadMapEnd()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Error while reading map end: %s", err.Error())
|
||||||
|
}
|
||||||
|
trans.Close()
|
||||||
|
}
|
540
vendor/github.com/apache/thrift/lib/go/thrift/lowlevel_benchmarks_test.go
generated
vendored
Normal file
540
vendor/github.com/apache/thrift/lib/go/thrift/lowlevel_benchmarks_test.go
generated
vendored
Normal file
|
@ -0,0 +1,540 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
var binaryProtoF = NewTBinaryProtocolFactoryDefault()
|
||||||
|
var compactProtoF = NewTCompactProtocolFactory()
|
||||||
|
|
||||||
|
var buf = bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
|
|
||||||
|
var tfv = []TTransportFactory{
|
||||||
|
NewTMemoryBufferTransportFactory(1024),
|
||||||
|
NewStreamTransportFactory(buf, buf, true),
|
||||||
|
NewTFramedTransportFactory(NewTMemoryBufferTransportFactory(1024)),
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryBool_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBool(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryByte_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteByte(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryI16_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI16(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryI32_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI32(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryI64_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI64(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryDouble_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteDouble(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryString_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteString(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryBinary_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBinary(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryBool_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBool(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryByte_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteByte(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryI16_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI16(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryI32_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI32(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryI64_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI64(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryDouble_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteDouble(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryString_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteString(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryBinary_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBinary(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryBool_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBool(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryByte_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteByte(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryI16_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI16(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkBinaryI32_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI32(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryI64_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI64(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryDouble_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteDouble(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryString_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteString(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkBinaryBinary_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := binaryProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBinary(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactBool_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBool(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactByte_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteByte(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactI16_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI16(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactI32_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI32(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactI64_0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI64(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactDouble0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteDouble(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactString0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteString(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactBinary0(b *testing.B) {
|
||||||
|
trans, err := tfv[0].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBinary(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactBool_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBool(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactByte_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteByte(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactI16_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI16(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactI32_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI32(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactI64_1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI64(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactDouble1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteDouble(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactString1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteString(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactBinary1(b *testing.B) {
|
||||||
|
trans, err := tfv[1].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBinary(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactBool_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBool(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactByte_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteByte(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactI16_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI16(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkCompactI32_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI32(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactI64_2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteI64(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactDouble2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteDouble(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactString2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteString(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func BenchmarkCompactBinary2(b *testing.B) {
|
||||||
|
trans, err := tfv[2].GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatal(err)
|
||||||
|
}
|
||||||
|
p := compactProtoF.GetProtocol(trans)
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
ReadWriteBinary(b, p, trans)
|
||||||
|
}
|
||||||
|
}
|
|
@ -21,6 +21,7 @@ package thrift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Memory buffer-based implementation of the TTransport interface.
|
// Memory buffer-based implementation of the TTransport interface.
|
||||||
|
@ -33,14 +34,14 @@ type TMemoryBufferTransportFactory struct {
|
||||||
size int
|
size int
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport {
|
func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
|
||||||
if trans != nil {
|
if trans != nil {
|
||||||
t, ok := trans.(*TMemoryBuffer)
|
t, ok := trans.(*TMemoryBuffer)
|
||||||
if ok && t.size > 0 {
|
if ok && t.size > 0 {
|
||||||
return NewTMemoryBufferLen(t.size)
|
return NewTMemoryBufferLen(t.size), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return NewTMemoryBufferLen(p.size)
|
return NewTMemoryBufferLen(p.size), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
|
func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
|
||||||
|
@ -70,7 +71,7 @@ func (p *TMemoryBuffer) Close() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Flushing a memory buffer is a no-op
|
// Flushing a memory buffer is a no-op
|
||||||
func (p *TMemoryBuffer) Flush() error {
|
func (p *TMemoryBuffer) Flush(ctx context.Context) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
29
vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer_test.go
generated
vendored
Normal file
29
vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer_test.go
generated
vendored
Normal file
|
@ -0,0 +1,29 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMemoryBuffer(t *testing.T) {
|
||||||
|
trans := NewTMemoryBufferLen(1024)
|
||||||
|
TransportTest(t, trans, trans)
|
||||||
|
}
|
|
@ -20,6 +20,7 @@
|
||||||
package thrift
|
package thrift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
@ -127,7 +128,7 @@ func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProces
|
||||||
t.serviceProcessorMap[name] = processor
|
t.serviceProcessorMap[name] = processor
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *TMultiplexedProcessor) Process(in, out TProtocol) (bool, TException) {
|
func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
|
||||||
name, typeId, seqid, err := in.ReadMessageBegin()
|
name, typeId, seqid, err := in.ReadMessageBegin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
|
@ -140,7 +141,7 @@ func (t *TMultiplexedProcessor) Process(in, out TProtocol) (bool, TException) {
|
||||||
if len(v) != 2 {
|
if len(v) != 2 {
|
||||||
if t.DefaultProcessor != nil {
|
if t.DefaultProcessor != nil {
|
||||||
smb := NewStoredMessageProtocol(in, name, typeId, seqid)
|
smb := NewStoredMessageProtocol(in, name, typeId, seqid)
|
||||||
return t.DefaultProcessor.Process(smb, out)
|
return t.DefaultProcessor.Process(ctx, smb, out)
|
||||||
}
|
}
|
||||||
return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name)
|
return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name)
|
||||||
}
|
}
|
||||||
|
@ -149,7 +150,7 @@ func (t *TMultiplexedProcessor) Process(in, out TProtocol) (bool, TException) {
|
||||||
return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0])
|
return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0])
|
||||||
}
|
}
|
||||||
smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
|
smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
|
||||||
return actualProcessor.Process(smb, out)
|
return actualProcessor.Process(ctx, smb, out)
|
||||||
}
|
}
|
||||||
|
|
||||||
//Protocol that use stored message for ReadMessageBegin
|
//Protocol that use stored message for ReadMessageBegin
|
|
@ -19,6 +19,18 @@
|
||||||
|
|
||||||
package thrift
|
package thrift
|
||||||
|
|
||||||
|
import "context"
|
||||||
|
|
||||||
|
// A processor is a generic object which operates upon an input stream and
|
||||||
|
// writes to some output stream.
|
||||||
|
type TProcessor interface {
|
||||||
|
Process(ctx context.Context, in, out TProtocol) (bool, TException)
|
||||||
|
}
|
||||||
|
|
||||||
|
type TProcessorFunction interface {
|
||||||
|
Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
|
||||||
|
}
|
||||||
|
|
||||||
// The default processor factory just returns a singleton
|
// The default processor factory just returns a singleton
|
||||||
// instance.
|
// instance.
|
||||||
type TProcessorFactory interface {
|
type TProcessorFactory interface {
|
|
@ -20,7 +20,9 @@
|
||||||
package thrift
|
package thrift
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -73,7 +75,7 @@ type TProtocol interface {
|
||||||
ReadBinary() (value []byte, err error)
|
ReadBinary() (value []byte, err error)
|
||||||
|
|
||||||
Skip(fieldType TType) (err error)
|
Skip(fieldType TType) (err error)
|
||||||
Flush() (err error)
|
Flush(ctx context.Context) (err error)
|
||||||
|
|
||||||
Transport() TTransport
|
Transport() TTransport
|
||||||
}
|
}
|
||||||
|
@ -170,6 +172,8 @@ func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return self.ReadListEnd()
|
return self.ReadListEnd()
|
||||||
|
default:
|
||||||
|
return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType)))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
517
vendor/github.com/apache/thrift/lib/go/thrift/protocol_test.go
generated
vendored
Normal file
517
vendor/github.com/apache/thrift/lib/go/thrift/protocol_test.go
generated
vendored
Normal file
|
@ -0,0 +1,517 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"io/ioutil"
|
||||||
|
"math"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
const PROTOCOL_BINARY_DATA_SIZE = 155
|
||||||
|
|
||||||
|
var (
|
||||||
|
protocol_bdata []byte // test data for writing; same as data
|
||||||
|
BOOL_VALUES []bool
|
||||||
|
BYTE_VALUES []int8
|
||||||
|
INT16_VALUES []int16
|
||||||
|
INT32_VALUES []int32
|
||||||
|
INT64_VALUES []int64
|
||||||
|
DOUBLE_VALUES []float64
|
||||||
|
STRING_VALUES []string
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
protocol_bdata = make([]byte, PROTOCOL_BINARY_DATA_SIZE)
|
||||||
|
for i := 0; i < PROTOCOL_BINARY_DATA_SIZE; i++ {
|
||||||
|
protocol_bdata[i] = byte((i + 'a') % 255)
|
||||||
|
}
|
||||||
|
BOOL_VALUES = []bool{false, true, false, false, true}
|
||||||
|
BYTE_VALUES = []int8{117, 0, 1, 32, 127, -128, -1}
|
||||||
|
INT16_VALUES = []int16{459, 0, 1, -1, -128, 127, 32767, -32768}
|
||||||
|
INT32_VALUES = []int32{459, 0, 1, -1, -128, 127, 32767, 2147483647, -2147483535}
|
||||||
|
INT64_VALUES = []int64{459, 0, 1, -1, -128, 127, 32767, 2147483647, -2147483535, 34359738481, -35184372088719, -9223372036854775808, 9223372036854775807}
|
||||||
|
DOUBLE_VALUES = []float64{459.3, 0.0, -1.0, 1.0, 0.5, 0.3333, 3.14159, 1.537e-38, 1.673e25, 6.02214179e23, -6.02214179e23, INFINITY.Float64(), NEGATIVE_INFINITY.Float64(), NAN.Float64()}
|
||||||
|
STRING_VALUES = []string{"", "a", "st[uf]f", "st,u:ff with spaces", "stuff\twith\nescape\\characters'...\"lots{of}fun</xml>"}
|
||||||
|
}
|
||||||
|
|
||||||
|
type HTTPEchoServer struct{}
|
||||||
|
type HTTPHeaderEchoServer struct{}
|
||||||
|
|
||||||
|
func (p *HTTPEchoServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||||
|
buf, err := ioutil.ReadAll(req.Body)
|
||||||
|
if err != nil {
|
||||||
|
w.WriteHeader(http.StatusBadRequest)
|
||||||
|
w.Write(buf)
|
||||||
|
} else {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
w.Write(buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *HTTPHeaderEchoServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||||
|
buf, err := ioutil.ReadAll(req.Body)
|
||||||
|
if err != nil {
|
||||||
|
w.WriteHeader(http.StatusBadRequest)
|
||||||
|
w.Write(buf)
|
||||||
|
} else {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
w.Write(buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func HttpClientSetupForTest(t *testing.T) (net.Listener, net.Addr) {
|
||||||
|
addr, err := FindAvailableTCPServerPort(40000)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to find available tcp port addr: %s", err)
|
||||||
|
return nil, addr
|
||||||
|
}
|
||||||
|
l, err := net.Listen(addr.Network(), addr.String())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to setup tcp listener on %s: %s", addr.String(), err)
|
||||||
|
return l, addr
|
||||||
|
}
|
||||||
|
go http.Serve(l, &HTTPEchoServer{})
|
||||||
|
return l, addr
|
||||||
|
}
|
||||||
|
|
||||||
|
func HttpClientSetupForHeaderTest(t *testing.T) (net.Listener, net.Addr) {
|
||||||
|
addr, err := FindAvailableTCPServerPort(40000)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to find available tcp port addr: %s", err)
|
||||||
|
return nil, addr
|
||||||
|
}
|
||||||
|
l, err := net.Listen(addr.Network(), addr.String())
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Unable to setup tcp listener on %s: %s", addr.String(), err)
|
||||||
|
return l, addr
|
||||||
|
}
|
||||||
|
go http.Serve(l, &HTTPHeaderEchoServer{})
|
||||||
|
return l, addr
|
||||||
|
}
|
||||||
|
|
||||||
|
func ReadWriteProtocolTest(t *testing.T, protocolFactory TProtocolFactory) {
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
|
l, addr := HttpClientSetupForTest(t)
|
||||||
|
defer l.Close()
|
||||||
|
transports := []TTransportFactory{
|
||||||
|
NewTMemoryBufferTransportFactory(1024),
|
||||||
|
NewStreamTransportFactory(buf, buf, true),
|
||||||
|
NewTFramedTransportFactory(NewTMemoryBufferTransportFactory(1024)),
|
||||||
|
NewTZlibTransportFactoryWithFactory(0, NewTMemoryBufferTransportFactory(1024)),
|
||||||
|
NewTZlibTransportFactoryWithFactory(6, NewTMemoryBufferTransportFactory(1024)),
|
||||||
|
NewTZlibTransportFactoryWithFactory(9, NewTFramedTransportFactory(NewTMemoryBufferTransportFactory(1024))),
|
||||||
|
NewTHttpPostClientTransportFactory("http://" + addr.String()),
|
||||||
|
}
|
||||||
|
for _, tf := range transports {
|
||||||
|
trans, err := tf.GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := protocolFactory.GetProtocol(trans)
|
||||||
|
ReadWriteBool(t, p, trans)
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
for _, tf := range transports {
|
||||||
|
trans, err := tf.GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := protocolFactory.GetProtocol(trans)
|
||||||
|
ReadWriteByte(t, p, trans)
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
for _, tf := range transports {
|
||||||
|
trans, err := tf.GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := protocolFactory.GetProtocol(trans)
|
||||||
|
ReadWriteI16(t, p, trans)
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
for _, tf := range transports {
|
||||||
|
trans, err := tf.GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := protocolFactory.GetProtocol(trans)
|
||||||
|
ReadWriteI32(t, p, trans)
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
for _, tf := range transports {
|
||||||
|
trans, err := tf.GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := protocolFactory.GetProtocol(trans)
|
||||||
|
ReadWriteI64(t, p, trans)
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
for _, tf := range transports {
|
||||||
|
trans, err := tf.GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := protocolFactory.GetProtocol(trans)
|
||||||
|
ReadWriteDouble(t, p, trans)
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
for _, tf := range transports {
|
||||||
|
trans, err := tf.GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := protocolFactory.GetProtocol(trans)
|
||||||
|
ReadWriteString(t, p, trans)
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
for _, tf := range transports {
|
||||||
|
trans, err := tf.GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := protocolFactory.GetProtocol(trans)
|
||||||
|
ReadWriteBinary(t, p, trans)
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
for _, tf := range transports {
|
||||||
|
trans, err := tf.GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p := protocolFactory.GetProtocol(trans)
|
||||||
|
ReadWriteI64(t, p, trans)
|
||||||
|
ReadWriteDouble(t, p, trans)
|
||||||
|
ReadWriteBinary(t, p, trans)
|
||||||
|
ReadWriteByte(t, p, trans)
|
||||||
|
trans.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ReadWriteBool(t testing.TB, p TProtocol, trans TTransport) {
|
||||||
|
thetype := TType(BOOL)
|
||||||
|
thelen := len(BOOL_VALUES)
|
||||||
|
err := p.WriteListBegin(thetype, thelen)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error writing list begin: %q", "ReadWriteBool", p, trans, err, thetype)
|
||||||
|
}
|
||||||
|
for k, v := range BOOL_VALUES {
|
||||||
|
err = p.WriteBool(v)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %v Error writing bool in list at index %v: %v", "ReadWriteBool", p, trans, err, k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.WriteListEnd()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %v Error writing list end: %v", "ReadWriteBool", p, trans, err, BOOL_VALUES)
|
||||||
|
}
|
||||||
|
p.Flush(context.Background())
|
||||||
|
thetype2, thelen2, err := p.ReadListBegin()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %v Error reading list: %v", "ReadWriteBool", p, trans, err, BOOL_VALUES)
|
||||||
|
}
|
||||||
|
_, ok := p.(*TSimpleJSONProtocol)
|
||||||
|
if !ok {
|
||||||
|
if thetype != thetype2 {
|
||||||
|
t.Errorf("%s: %T %T type %s != type %s", "ReadWriteBool", p, trans, thetype, thetype2)
|
||||||
|
}
|
||||||
|
if thelen != thelen2 {
|
||||||
|
t.Errorf("%s: %T %T len %v != len %v", "ReadWriteBool", p, trans, thelen, thelen2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for k, v := range BOOL_VALUES {
|
||||||
|
value, err := p.ReadBool()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %v Error reading bool at index %v: %v", "ReadWriteBool", p, trans, err, k, v)
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Errorf("%s: index %v %v %v %v != %v", "ReadWriteBool", k, p, trans, v, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = p.ReadListEnd()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteBool", p, trans, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ReadWriteByte(t testing.TB, p TProtocol, trans TTransport) {
|
||||||
|
thetype := TType(BYTE)
|
||||||
|
thelen := len(BYTE_VALUES)
|
||||||
|
err := p.WriteListBegin(thetype, thelen)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error writing list begin: %q", "ReadWriteByte", p, trans, err, thetype)
|
||||||
|
}
|
||||||
|
for k, v := range BYTE_VALUES {
|
||||||
|
err = p.WriteByte(v)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error writing byte in list at index %d: %q", "ReadWriteByte", p, trans, err, k, v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = p.WriteListEnd()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error writing list end: %q", "ReadWriteByte", p, trans, err, BYTE_VALUES)
|
||||||
|
}
|
||||||
|
err = p.Flush(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error flushing list of bytes: %q", "ReadWriteByte", p, trans, err, BYTE_VALUES)
|
||||||
|
}
|
||||||
|
thetype2, thelen2, err := p.ReadListBegin()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteByte", p, trans, err, BYTE_VALUES)
|
||||||
|
}
|
||||||
|
_, ok := p.(*TSimpleJSONProtocol)
|
||||||
|
if !ok {
|
||||||
|
if thetype != thetype2 {
|
||||||
|
t.Errorf("%s: %T %T type %s != type %s", "ReadWriteByte", p, trans, thetype, thetype2)
|
||||||
|
}
|
||||||
|
if thelen != thelen2 {
|
||||||
|
t.Errorf("%s: %T %T len %v != len %v", "ReadWriteByte", p, trans, thelen, thelen2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for k, v := range BYTE_VALUES {
|
||||||
|
value, err := p.ReadByte()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error reading byte at index %d: %q", "ReadWriteByte", p, trans, err, k, v)
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Errorf("%s: %T %T %d != %d", "ReadWriteByte", p, trans, v, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = p.ReadListEnd()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteByte", p, trans, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ReadWriteI16(t testing.TB, p TProtocol, trans TTransport) {
|
||||||
|
thetype := TType(I16)
|
||||||
|
thelen := len(INT16_VALUES)
|
||||||
|
p.WriteListBegin(thetype, thelen)
|
||||||
|
for _, v := range INT16_VALUES {
|
||||||
|
p.WriteI16(v)
|
||||||
|
}
|
||||||
|
p.WriteListEnd()
|
||||||
|
p.Flush(context.Background())
|
||||||
|
thetype2, thelen2, err := p.ReadListBegin()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteI16", p, trans, err, INT16_VALUES)
|
||||||
|
}
|
||||||
|
_, ok := p.(*TSimpleJSONProtocol)
|
||||||
|
if !ok {
|
||||||
|
if thetype != thetype2 {
|
||||||
|
t.Errorf("%s: %T %T type %s != type %s", "ReadWriteI16", p, trans, thetype, thetype2)
|
||||||
|
}
|
||||||
|
if thelen != thelen2 {
|
||||||
|
t.Errorf("%s: %T %T len %v != len %v", "ReadWriteI16", p, trans, thelen, thelen2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for k, v := range INT16_VALUES {
|
||||||
|
value, err := p.ReadI16()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error reading int16 at index %d: %q", "ReadWriteI16", p, trans, err, k, v)
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Errorf("%s: %T %T %d != %d", "ReadWriteI16", p, trans, v, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = p.ReadListEnd()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteI16", p, trans, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ReadWriteI32(t testing.TB, p TProtocol, trans TTransport) {
|
||||||
|
thetype := TType(I32)
|
||||||
|
thelen := len(INT32_VALUES)
|
||||||
|
p.WriteListBegin(thetype, thelen)
|
||||||
|
for _, v := range INT32_VALUES {
|
||||||
|
p.WriteI32(v)
|
||||||
|
}
|
||||||
|
p.WriteListEnd()
|
||||||
|
p.Flush(context.Background())
|
||||||
|
thetype2, thelen2, err := p.ReadListBegin()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteI32", p, trans, err, INT32_VALUES)
|
||||||
|
}
|
||||||
|
_, ok := p.(*TSimpleJSONProtocol)
|
||||||
|
if !ok {
|
||||||
|
if thetype != thetype2 {
|
||||||
|
t.Errorf("%s: %T %T type %s != type %s", "ReadWriteI32", p, trans, thetype, thetype2)
|
||||||
|
}
|
||||||
|
if thelen != thelen2 {
|
||||||
|
t.Errorf("%s: %T %T len %v != len %v", "ReadWriteI32", p, trans, thelen, thelen2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for k, v := range INT32_VALUES {
|
||||||
|
value, err := p.ReadI32()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error reading int32 at index %d: %q", "ReadWriteI32", p, trans, err, k, v)
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Errorf("%s: %T %T %d != %d", "ReadWriteI32", p, trans, v, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteI32", p, trans, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ReadWriteI64(t testing.TB, p TProtocol, trans TTransport) {
|
||||||
|
thetype := TType(I64)
|
||||||
|
thelen := len(INT64_VALUES)
|
||||||
|
p.WriteListBegin(thetype, thelen)
|
||||||
|
for _, v := range INT64_VALUES {
|
||||||
|
p.WriteI64(v)
|
||||||
|
}
|
||||||
|
p.WriteListEnd()
|
||||||
|
p.Flush(context.Background())
|
||||||
|
thetype2, thelen2, err := p.ReadListBegin()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteI64", p, trans, err, INT64_VALUES)
|
||||||
|
}
|
||||||
|
_, ok := p.(*TSimpleJSONProtocol)
|
||||||
|
if !ok {
|
||||||
|
if thetype != thetype2 {
|
||||||
|
t.Errorf("%s: %T %T type %s != type %s", "ReadWriteI64", p, trans, thetype, thetype2)
|
||||||
|
}
|
||||||
|
if thelen != thelen2 {
|
||||||
|
t.Errorf("%s: %T %T len %v != len %v", "ReadWriteI64", p, trans, thelen, thelen2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for k, v := range INT64_VALUES {
|
||||||
|
value, err := p.ReadI64()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error reading int64 at index %d: %q", "ReadWriteI64", p, trans, err, k, v)
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Errorf("%s: %T %T %q != %q", "ReadWriteI64", p, trans, v, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteI64", p, trans, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadWriteDouble writes DOUBLE_VALUES as a list through p, reads the list
// back, and verifies type, length, and element round-trip. NaN elements are
// compared via math.IsNaN because NaN never compares equal to itself.
func ReadWriteDouble(t testing.TB, p TProtocol, trans TTransport) {
	thetype := TType(DOUBLE)
	thelen := len(DOUBLE_VALUES)
	// NOTE(review): write/flush errors are ignored here, and unlike the other
	// ReadWrite* helpers there is no *TSimpleJSONProtocol exemption around the
	// type/length checks below — confirm whether both are intentional.
	p.WriteListBegin(thetype, thelen)
	for _, v := range DOUBLE_VALUES {
		p.WriteDouble(v)
	}
	p.WriteListEnd()
	p.Flush(context.Background())
	thetype2, thelen2, err := p.ReadListBegin()
	if err != nil {
		t.Errorf("%s: %T %T %v Error reading list: %v", "ReadWriteDouble", p, trans, err, DOUBLE_VALUES)
	}
	if thetype != thetype2 {
		t.Errorf("%s: %T %T type %s != type %s", "ReadWriteDouble", p, trans, thetype, thetype2)
	}
	if thelen != thelen2 {
		t.Errorf("%s: %T %T len %v != len %v", "ReadWriteDouble", p, trans, thelen, thelen2)
	}
	for k, v := range DOUBLE_VALUES {
		value, err := p.ReadDouble()
		if err != nil {
			t.Errorf("%s: %T %T %q Error reading double at index %d: %v", "ReadWriteDouble", p, trans, err, k, v)
		}
		if math.IsNaN(v) {
			// NaN != NaN by IEEE-754 rules, so check NaN-ness instead of
			// equality.
			if !math.IsNaN(value) {
				t.Errorf("%s: %T %T math.IsNaN(%v) != math.IsNaN(%v)", "ReadWriteDouble", p, trans, v, value)
			}
		} else if v != value {
			t.Errorf("%s: %T %T %v != %v", "ReadWriteDouble", p, trans, v, value)
		}
	}
	err = p.ReadListEnd()
	if err != nil {
		t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteDouble", p, trans, err)
	}
}
|
||||||
|
|
||||||
|
func ReadWriteString(t testing.TB, p TProtocol, trans TTransport) {
|
||||||
|
thetype := TType(STRING)
|
||||||
|
thelen := len(STRING_VALUES)
|
||||||
|
p.WriteListBegin(thetype, thelen)
|
||||||
|
for _, v := range STRING_VALUES {
|
||||||
|
p.WriteString(v)
|
||||||
|
}
|
||||||
|
p.WriteListEnd()
|
||||||
|
p.Flush(context.Background())
|
||||||
|
thetype2, thelen2, err := p.ReadListBegin()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteString", p, trans, err, STRING_VALUES)
|
||||||
|
}
|
||||||
|
_, ok := p.(*TSimpleJSONProtocol)
|
||||||
|
if !ok {
|
||||||
|
if thetype != thetype2 {
|
||||||
|
t.Errorf("%s: %T %T type %s != type %s", "ReadWriteString", p, trans, thetype, thetype2)
|
||||||
|
}
|
||||||
|
if thelen != thelen2 {
|
||||||
|
t.Errorf("%s: %T %T len %v != len %v", "ReadWriteString", p, trans, thelen, thelen2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for k, v := range STRING_VALUES {
|
||||||
|
value, err := p.ReadString()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T %q Error reading string at index %d: %q", "ReadWriteString", p, trans, err, k, v)
|
||||||
|
}
|
||||||
|
if v != value {
|
||||||
|
t.Errorf("%s: %T %T %v != %v", "ReadWriteString", p, trans, v, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteString", p, trans, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ReadWriteBinary(t testing.TB, p TProtocol, trans TTransport) {
|
||||||
|
v := protocol_bdata
|
||||||
|
p.WriteBinary(v)
|
||||||
|
p.Flush(context.Background())
|
||||||
|
value, err := p.ReadBinary()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s: %T %T Unable to read binary: %s", "ReadWriteBinary", p, trans, err.Error())
|
||||||
|
}
|
||||||
|
if len(v) != len(value) {
|
||||||
|
t.Errorf("%s: %T %T len(v) != len(value)... %d != %d", "ReadWriteBinary", p, trans, len(v), len(value))
|
||||||
|
} else {
|
||||||
|
for i := 0; i < len(v); i++ {
|
||||||
|
if v[i] != value[i] {
|
||||||
|
t.Errorf("%s: %T %T %s != %s", "ReadWriteBinary", p, trans, v, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
89
vendor/github.com/apache/thrift/lib/go/thrift/rich_transport_test.go
generated
vendored
Normal file
89
vendor/github.com/apache/thrift/lib/go/thrift/rich_transport_test.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing,
|
||||||
|
* software distributed under the License is distributed on an
|
||||||
|
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
* KIND, either express or implied. See the License for the
|
||||||
|
* specific language governing permissions and limitations
|
||||||
|
* under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package thrift
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestEnsureTransportsAreRich(t *testing.T) {
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
|
|
||||||
|
transports := []TTransportFactory{
|
||||||
|
NewTMemoryBufferTransportFactory(1024),
|
||||||
|
NewStreamTransportFactory(buf, buf, true),
|
||||||
|
NewTFramedTransportFactory(NewTMemoryBufferTransportFactory(1024)),
|
||||||
|
NewTHttpPostClientTransportFactory("http://127.0.0.1"),
|
||||||
|
}
|
||||||
|
for _, tf := range transports {
|
||||||
|
trans, err := tf.GetTransport(nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
_, ok := trans.(TRichTransport)
|
||||||
|
if !ok {
|
||||||
|
t.Errorf("Transport %s does not implement TRichTransport interface", reflect.ValueOf(trans))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestReadByte tests whether readByte handles error cases correctly.
|
||||||
|
func TestReadByte(t *testing.T) {
|
||||||
|
for i, test := range readByteTests {
|
||||||
|
v, err := readByte(test.r)
|
||||||
|
if v != test.v {
|
||||||
|
t.Fatalf("TestReadByte %d: value differs. Expected %d, got %d", i, test.v, test.r.v)
|
||||||
|
}
|
||||||
|
if err != test.err {
|
||||||
|
t.Fatalf("TestReadByte %d: error differs. Expected %s, got %s", i, test.err, test.r.err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// someError is a sentinel used to verify that readByte propagates arbitrary
// (non-EOF) reader errors unchanged.
var someError = errors.New("Some error")

// readByteTests drives TestReadByte: each case pairs a scripted mockReader
// with the byte value and error readByte is expected to return.
var readByteTests = []struct {
	r   *mockReader
	v   byte
	err error
}{
	{&mockReader{0, 55, io.EOF}, 0, io.EOF},       // reader sends EOF w/o data
	{&mockReader{0, 55, someError}, 0, someError}, // reader sends some other error
	{&mockReader{1, 55, nil}, 55, nil},            // reader sends data w/o error
	{&mockReader{1, 55, io.EOF}, 55, nil},         // reader sends data with EOF
	{&mockReader{1, 55, someError}, 55, someError}, // reader sends data with some error
}
|
||||||
|
|
||||||
|
// mockReader is a scripted io.Reader used by the readByte tests: every call
// to Read reports n bytes read, stores v into the destination when n > 0,
// and returns err.
type mockReader struct {
	n   int
	v   byte
	err error
}

// Read fills p[0] with the scripted byte when the scripted count is positive,
// then reports the scripted count and error.
func (r *mockReader) Read(p []byte) (n int, err error) {
	if r.n <= 0 {
		return r.n, r.err
	}
	p[0] = r.v
	return r.n, r.err
}
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue