Compare commits

No commits in common. "main" and "v1.0.5" have entirely different histories.
main ... v1.0.5

221 changed files with 34987 additions and 68713 deletions

@@ -1 +1 @@
0.23.0
0.16.0

.gitattributes vendored (3 changes)

@@ -1,3 +0,0 @@
gen-go/ linguist-generated=true
vendor/ linguist-generated=true
Gopkg.lock linguist-generated=true

.github/main.yml vendored (25 changes)

@@ -1,25 +0,0 @@
name: CI
on: [push]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup Go for use with actions
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Install goimports
run: go get golang.org/x/tools/cmd/goimports
- name: Set env with list of directories in repo containing Go code
run: echo GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/") >> $GITHUB_ENV
- name: Run goimports check
run: test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
- name: Create aurora/mesos docker cluster
run: docker-compose up -d
- name: Run tests
run: go test -timeout 35m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis

@@ -1,57 +0,0 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ main ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
schedule:
- cron: '34 4 * * 3'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
- run: go build examples/client.go
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

@@ -1,30 +0,0 @@
name: CI
on:
push:
branches:
- main
pull_request:
branches:
- main
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup Go for use with actions
uses: actions/setup-go@v2
with:
go-version: 1.16
- name: Install goimports
run: go get golang.org/x/tools/cmd/goimports
- name: Set env with list of directories in repo containing Go code
run: echo GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/") >> $GITHUB_ENV
- name: Run goimports check
run: test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
- name: Create aurora/mesos docker cluster
run: docker-compose up -d
- name: Run tests
run: go test -timeout 35m -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis

.gitignore vendored (12 changes)

@@ -6,18 +6,6 @@
# Folders
_obj
_test
.idea
# Thrift library comes with a lot of other files we don't need.
# Ignore everything but the files we do need
vendor/github.com/apache/thrift/*
!vendor/github.com/apache/thrift/lib/
vendor/github.com/apache/thrift/lib/*
!vendor/github.com/apache/thrift/lib/go/
vendor/github.com/apache/thrift/lib/go/*
!vendor/github.com/apache/thrift/lib/go/thrift/
# Architecture specific extensions/prefixes
*.[568vq]

@@ -1,71 +0,0 @@
# This file contains all available configuration options
# with their default values.
# options for analysis running
run:
# default concurrency is a available CPU number
concurrency: 4
# timeout for analysis, e.g. 30s, 5m, default is 1m
deadline: 1m
# exit code when at least one issue was found, default is 1
issues-exit-code: 1
# include test files or not, default is true
tests: true
skip-dirs:
- gen-go/
# output configuration options
output:
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
format: colored-line-number
# print lines of code with issue, default is true
print-issued-lines: true
# print linter name in the end of issue text, default is true
print-linter-name: true
# all available settings of specific linters
linters-settings:
errcheck:
# report about not checking of errors in type assertions: `a := b.(MyStruct)`;
# default is false: such cases aren't reported by default.
check-type-assertions: true
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
# default is false: such cases aren't reported by default.
check-blank: true
govet:
# report about shadowed variables
check-shadowing: true
goconst:
# minimal length of string constant, 3 by default
min-len: 3
# minimal occurrences count to trigger, 3 by default
min-occurrences: 2
misspell:
# Correct spellings using locale preferences for US or UK.
# Default is to use a neutral variety of English.
# Setting locale to US will correct the British spelling of 'colour' to 'color'.
locale: US
lll:
# max line length, lines longer will be reported. Default is 120.
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
line-length: 120
# tab width in spaces. Default to 1.
tab-width: 4
linters:
enable:
- govet
- goimports
- golint
- lll
- goconst
enable-all: false
fast: false

@@ -1,62 +0,0 @@
1.25.1 (unreleased)
1.25.0
* Add priority api
1.24.0
* enable default sla for slaDrain
* Changes Travis CI badge to Github Actions badge
* Bug fix for auto paused update monitor
* Adds support for running CI on github actions
1.23.0
* First release tested against Aurora Scheduler 0.23.0
1.22.5
* Upgrading to thrift 0.14.0
1.22.4
* Updates which result in a no-op now return a response value so that the caller may analyze it to determine what happened
1.22.3
* Contains a monitor timeout fix. Previously an error was left unchecked, which meant a specific monitor timing out was not handled properly.
1.22.2
* Bug fix: Change in retry mechanism created a deadlock. This release reverts that particular change.
1.22.1
* Adding safeguards against setting multiple constraints with the same name for a single task.
1.22.0
* CreateService and StartJobUpdate do not continue retrying if a timeout has been encountered
by the HTTP client. Instead they now return an error that conforms to the Timedout interface.
Users can check for a Timedout error by using `realis.IsTimeout(err)`.
* New API function VariableBatchStep has been added which returns the batch that an update
configured with a Variable Batch strategy is currently in.
* Added new PauseUpdateMonitor which monitors an update until it is in a `ROLL_FORWARD_PAUSED` state.
* Added variableBatchStep command to the sample client to be used for testing the new VariableBatchStep api.
* JobUpdateStatus has changed function signature from:
`JobUpdateStatus(updateKey aurora.JobUpdateKey, desiredStatuses map[aurora.JobUpdateStatus]bool, interval, timeout time.Duration) (aurora.JobUpdateStatus, error)`
to
`JobUpdateStatus(updateKey aurora.JobUpdateKey, desiredStatuses []aurora.JobUpdateStatus, interval, timeout time.Duration) (aurora.JobUpdateStatus, error)`
* Added TerminalUpdateStates function which returns a slice containing all UpdateStates which are considered terminal states.
1.21.0
* Version numbering change. Future versions will be labeled X.Y.Z where X is the major version, Y is the Aurora version the library has been tested against (e.g. 21 -> 0.21.0), and Z is the minor revision.
* Moved to Thrift 0.12.0 code generator and go library.
* `aurora.ACTIVE_STATES`, `aurora.SLAVE_ASSIGNED_STATES`, `aurora.LIVE_STATES`, `aurora.TERMINAL_STATES`, `aurora.ACTIVE_JOB_UPDATE_STATES`, `aurora.AWAITNG_PULSE_JOB_UPDATE_STATES` are all now generated as slices.
* Please use `realis.ActiveStates`, `realis.SlaveAssignedStates`, `realis.LiveStates`, `realis.TerminalStates`, `realis.ActiveJobUpdateStates`, `realis.AwaitingPulseJobUpdateStates` in their places when map representations are needed.
* `GetInstanceIds(key *aurora.JobKey, states map[aurora.ScheduleStatus]bool) (map[int32]bool, error)` has changed signature to `GetInstanceIds(key *aurora.JobKey, states []aurora.ScheduleStatus) ([]int32, error)` (see the sketch after this list).
* Adding support for GPU as resource.
* Changing compose environment to Aurora snapshot in order to support staggered update.
* Adding staggered updates API.
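
For illustration, a minimal sketch of the `GetInstanceIds` signature change noted under 1.21.0 above; it assumes a connected client `r` and a `job` built with the job builder (both hypothetical here):

```
// Old (pre-1.21.0) form took and returned maps:
//   ids, err := r.GetInstanceIds(job.JobKey(), states) // states: map[aurora.ScheduleStatus]bool
// New form takes and returns slices; aurora.ACTIVE_STATES is now a slice:
ids, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
    log.Fatalln(err)
}
fmt.Println("active instance IDs:", ids)
```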

@@ -1,12 +1,11 @@
# gorealis [![GoDoc](https://godoc.org/github.com/paypal/gorealis?status.svg)](https://godoc.org/github.com/paypal/gorealis) ![CI Build Status](https://github.com/paypal/gorealis/actions/workflows/main.yml/badge.svg) [![codecov](https://codecov.io/gh/paypal/gorealis/branch/main/graph/badge.svg)](https://codecov.io/gh/paypal/gorealis)
# gorealis [![GoDoc](https://godoc.org/github.com/rdelval/gorealis?status.svg)](https://godoc.org/github.com/rdelval/gorealis)
Version 1 of the Go library for interacting with [Aurora Scheduler](https://github.com/aurora-scheduler/aurora).
Version 2 of this library can be found [here](https://github.com/aurora-scheduler/gorealis).
Go library for communicating with [Apache Aurora](https://github.com/apache/aurora).
Named after the northern lights (Aurora Borealis).
### Aurora version compatibility
Please see [.auroraversion](./.auroraversion) to see the latest Aurora version against which this
library has been tested.
library has been tested. Vendoring a working version of this library is highly recommended.
## Usage
@@ -14,10 +13,13 @@ library has been tested.
* [Using the sample client](docs/using-the-sample-client.md)
* [Leveraging the library](docs/leveraging-the-library.md)
## Projects using gorealis
## To Do
* Create or import a custom transport that uses https://github.com/jmcvetta/napping to improve efficiency
* End to end testing with Vagrant setup
* [australis](https://github.com/aurora-scheduler/australis)
## Importing
* We suggest using a vendoring tool such as [govendor](https://github.com/kardianos/govendor) and
fetching by version, for example: `govendor fetch github.com/rdelval/gorealis@v1`
## Contributions
Contributions are always welcome. Please raise an issue to discuss a contribution before it is made.
Contributions are very much welcome. Please raise an issue so that the contribution may be discussed before it's made.

@@ -27,8 +27,8 @@ enum ResponseCode {
ERROR = 2,
WARNING = 3,
AUTH_FAILED = 4,
/** Raised when an operation was unable to proceed due to an in-progress job update. */
JOB_UPDATING_ERROR = 5,
/** Raised when a Lock-protected operation failed due to lock validation. */
LOCK_ERROR = 5,
/** Raised when a scheduler is transiently unavailable and later retry is recommended. */
ERROR_TRANSIENT = 6
}
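
The sample client further down in this diff branches on these codes through the generated Go bindings; a minimal sketch of that pattern, assuming a connected client `r` and a `job` built elsewhere (both hypothetical here):

```
resp, err := r.CreateJob(job)
if err != nil {
    log.Fatalln(err)
}
switch resp.ResponseCode {
case aurora.ResponseCode_OK:
    fmt.Println("job created")
case aurora.ResponseCode_ERROR_TRANSIENT:
    // Per the comment above, a later retry is recommended for this code.
    fmt.Println("scheduler transiently unavailable; retry later")
default:
    fmt.Println("request failed:", resp.ResponseCode)
}
```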
@@ -115,13 +115,11 @@ struct JobKey {
3: string name
}
// TODO(jly): Deprecated, remove in 0.21. See AURORA-1959.
/** A unique lock key. */
union LockKey {
1: JobKey job
}
// TODO(jly): Deprecated, remove in 0.21. See AURORA-1959.
/** A generic lock struct to facilitate context specific resource/operation serialization. */
struct Lock {
/** ID of the lock - unique per storage */
@@ -205,8 +203,6 @@ union Image {
struct MesosContainer {
/** the optional filesystem image to use when launching this task. */
1: optional Image image
/** the optional list of volumes to mount into the task. */
2: optional list<Volume> volumes
}
/** Describes a parameter passed to docker cli */
@@ -240,42 +236,6 @@ union Resource {
5: i64 numGpus
}
struct PartitionPolicy {
1: bool reschedule
2: optional i64 delaySecs
}
/** SLA requirements expressed as the percentage of instances to be RUNNING every durationSecs */
struct PercentageSlaPolicy {
/* The percentage of active instances required every `durationSecs`. */
1: double percentage
/** Minimum time duration a task needs to be `RUNNING` to be treated as active */
2: i64 durationSecs
}
/** SLA requirements expressed as the number of instances to be RUNNING every durationSecs */
struct CountSlaPolicy {
/** The number of active instances required every `durationSecs` */
1: i64 count
/** Minimum time duration a task needs to be `RUNNING` to be treated as active */
2: i64 durationSecs
}
/** SLA requirements to be delegated to an external coordinator */
struct CoordinatorSlaPolicy {
/** URL for the coordinator service that needs to be contacted for SLA checks */
1: string coordinatorUrl
/** Field in the Coordinator response json indicating if the action is allowed or not */
2: string statusKey
}
/** SLA requirements expressed in one of the many types */
union SlaPolicy {
1: PercentageSlaPolicy percentageSlaPolicy
2: CountSlaPolicy countSlaPolicy
3: CoordinatorSlaPolicy coordinatorSlaPolicy
}
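
As a rough illustration of how these removed structs surface in Go, a sketch using the thrift-generated bindings from the main branch (field names assume the generator's usual camel-casing; the values are hypothetical):

```
// Require 95% of instances to be RUNNING over any 30-minute window.
policy := &aurora.SlaPolicy{
    PercentageSlaPolicy: &aurora.PercentageSlaPolicy{
        Percentage:   95,
        DurationSecs: 1800,
    },
}
_ = policy // would be set as a TaskConfig's slaPolicy (field 35 below)
```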
/** Description of the tasks contained within a job. */
struct TaskConfig {
/** Job task belongs to. */
@@ -284,6 +244,12 @@ struct TaskConfig {
/** contains the role component of JobKey */
17: Identity owner
7: bool isService
// TODO(maxim): Deprecated. See AURORA-1707.
8: double numCpus
// TODO(maxim): Deprecated. See AURORA-1707.
9: i64 ramMb
// TODO(maxim): Deprecated. See AURORA-1707.
10: i64 diskMb
11: i32 priority
13: i32 maxTaskFailures
// TODO(mnurolahzade): Deprecated. See AURORA-1708.
@@ -295,6 +261,8 @@ struct TaskConfig {
32: set<Resource> resources
20: set<Constraint> constraints
/** a list of named ports this task requests */
21: set<string> requestedPorts
/** Resources to retrieve with Mesos Fetcher */
33: optional set<MesosFetcherURI> mesosFetcherUris
/**
@@ -308,10 +276,6 @@ struct TaskConfig {
25: optional ExecutorConfig executorConfig
/** Used to display additional details in the UI. */
27: optional set<Metadata> metadata
/** Policy for how to deal with task partitions */
34: optional PartitionPolicy partitionPolicy
/** SLA requirements to be met during maintenance */
35: optional SlaPolicy slaPolicy
// This field is deliberately placed at the end to work around a bug in the immutable wrapper
// code generator. See AURORA-1185 for details.
@@ -320,6 +284,15 @@
}
struct ResourceAggregate {
// TODO(maxim): Deprecated. See AURORA-1707.
/** Number of CPU cores allotted. */
1: double numCpus
// TODO(maxim): Deprecated. See AURORA-1707.
/** Megabytes of RAM allotted. */
2: i64 ramMb
// TODO(maxim): Deprecated. See AURORA-1707.
/** Megabytes of disk space allotted. */
3: i64 diskMb
/** Aggregated resource values. */
4: set<Resource> resources
}
@@ -447,11 +420,7 @@ enum ScheduleStatus {
/** A fault in the task environment has caused the system to believe the task no longer exists.
* This can happen, for example, when a slave process disappears.
*/
LOST = 7,
/**
* The task is currently partitioned and in an unknown state.
**/
PARTITIONED = 18
LOST = 7
}
// States that a task may be in while still considered active.
@@ -463,7 +432,6 @@ const set<ScheduleStatus> ACTIVE_STATES = [ScheduleStatus.ASSIGNED,
ScheduleStatus.RESTARTING
ScheduleStatus.RUNNING,
ScheduleStatus.STARTING,
ScheduleStatus.PARTITIONED,
ScheduleStatus.THROTTLED]
// States that a task may be in while associated with a slave machine and non-terminal.
@@ -473,7 +441,6 @@ const set<ScheduleStatus> SLAVE_ASSIGNED_STATES = [ScheduleStatus.ASSIGNED,
ScheduleStatus.PREEMPTING,
ScheduleStatus.RESTARTING,
ScheduleStatus.RUNNING,
ScheduleStatus.PARTITIONED,
ScheduleStatus.STARTING]
// States that a task may be in while in an active sandbox.
@@ -481,7 +448,6 @@ const set<ScheduleStatus> LIVE_STATES = [ScheduleStatus.KILLING,
ScheduleStatus.PREEMPTING,
ScheduleStatus.RESTARTING,
ScheduleStatus.DRAINING,
ScheduleStatus.PARTITIONED,
ScheduleStatus.RUNNING]
// States a completed task may be in.
@@ -550,11 +516,6 @@ struct ScheduledTask {
* this task.
*/
3: i32 failureCount
/**
* The number of partitions this task has accumulated over its lifetime.
*/
6: i32 timesPartitioned
/** State change history for this task. */
4: list<TaskEvent> taskEvents
/**
@@ -577,16 +538,16 @@ struct GetJobsResult {
* (terms are AND'ed together).
*/
struct TaskQuery {
14: optional string role
9: optional string environment
2: optional string jobName
4: optional set<string> taskIds
5: optional set<ScheduleStatus> statuses
7: optional set<i32> instanceIds
10: optional set<string> slaveHosts
11: optional set<JobKey> jobKeys
12: optional i32 offset
13: optional i32 limit
14: string role
9: string environment
2: string jobName
4: set<string> taskIds
5: set<ScheduleStatus> statuses
7: set<i32> instanceIds
10: set<string> slaveHosts
11: set<JobKey> jobKeys
12: i32 offset
13: i32 limit
}
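
The v1.0.5 side of this hunk drops `optional`, so the generated Go fields are plain values rather than pointers; and, per the 1.21.0 changelog notes earlier in this diff, the pre-0.12 thrift generator renders sets as maps. A sketch with hypothetical values:

```
query := aurora.NewTaskQuery()
query.Role = "vagrant"
query.Environment = "prod"
query.JobName = "hello_world"
// thrift sets become map[...]bool in these older bindings
query.Statuses = map[aurora.ScheduleStatus]bool{aurora.ScheduleStatus_RUNNING: true}
```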
struct HostStatus {
@@ -656,9 +617,6 @@ const set<JobUpdateStatus> ACTIVE_JOB_UPDATE_STATES = [JobUpdateStatus.ROLLING_F
JobUpdateStatus.ROLL_BACK_PAUSED,
JobUpdateStatus.ROLL_FORWARD_AWAITING_PULSE,
JobUpdateStatus.ROLL_BACK_AWAITING_PULSE]
/** States the job update can be in while waiting for a pulse. */
const set<JobUpdateStatus> AWAITNG_PULSE_JOB_UPDATE_STATES = [JobUpdateStatus.ROLL_FORWARD_AWAITING_PULSE,
JobUpdateStatus.ROLL_BACK_AWAITING_PULSE]
/** Job update actions that can be applied to job instances. */
enum JobUpdateAction {
@@ -716,40 +674,9 @@ struct JobUpdateKey {
2: string id
}
/** Limits the amount of active changes being made to instances to groupSize. */
struct QueueJobUpdateStrategy {
1: i32 groupSize
}
/** Similar to Queue strategy but will not start a new group until all instances in an active
* group have finished updating.
*/
struct BatchJobUpdateStrategy {
1: i32 groupSize
/* Update will pause automatically after each batch completes */
2: bool autopauseAfterBatch
}
/** Same as Batch strategy but each time an active group completes, the size of the next active
* group may change.
*/
struct VariableBatchJobUpdateStrategy {
1: list<i32> groupSizes
/* Update will pause automatically after each batch completes */
2: bool autopauseAfterBatch
}
union JobUpdateStrategy {
1: QueueJobUpdateStrategy queueStrategy
2: BatchJobUpdateStrategy batchStrategy
3: VariableBatchJobUpdateStrategy varBatchStrategy
}
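
A sketch of selecting one branch of this union via the main-branch gen-go bindings (names assume the generator's camel-casing; the group sizes are hypothetical):

```
// Update 1 instance, then 2, then 4 per group, pausing after each batch.
strategy := &aurora.JobUpdateStrategy{
    VarBatchStrategy: &aurora.VariableBatchJobUpdateStrategy{
        GroupSizes:          []int32{1, 2, 4},
        AutopauseAfterBatch: true,
    },
}
_ = strategy // would be set on JobUpdateSettings.updateStrategy (field 11 below)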
/** Job update thresholds and limits. */
struct JobUpdateSettings {
/** Deprecated, please set value inside of desired update strategy instead.
* Max number of instances being updated at any given moment.
*/
/** Max number of instances being updated at any given moment. */
1: i32 updateGroupSize
/** Max number of instance failures to tolerate before marking instance as FAILED. */
@@ -767,28 +694,19 @@ struct JobUpdateSettings {
/** Instance IDs to act on. All instances will be affected if this is not set. */
7: set<Range> updateOnlyTheseInstances
/** Deprecated, please set updateStrategy to the Batch strategy instead.
/**
* If true, use updateGroupSize as strict batching boundaries, and avoid proceeding to another
* batch until the preceding batch finishes updating.
*/
8: bool waitForBatchCompletion
/**
* If set, requires external calls to pulseJobUpdate RPC within the specified rate for the
* update to make progress. If no pulses received within specified interval the update will
* block. A blocked update is unable to continue but retains its current status. It may only get
* unblocked by a fresh pulseJobUpdate call.
*/
/**
* If set, requires external calls to pulseJobUpdate RPC within the specified rate for the
* update to make progress. If no pulses received within specified interval the update will
* block. A blocked update is unable to continue but retains its current status. It may only get
* unblocked by a fresh pulseJobUpdate call.
*/
9: optional i32 blockIfNoPulsesAfterMs
/**
* If true, updates will obey the SLA requirements of the tasks being updated. If the SLA policy
* differs between the old and new task configurations, updates will use the newest configuration.
*/
10: optional bool slaAware
/** Update strategy to be used for the update. See JobUpdateStrategy for choices. */
11: optional JobUpdateStrategy updateStrategy
}
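
A sketch of the pulse mechanism described above, again via the main-branch gen-go bindings (optional scalar fields are generated as pointers; the 30-second rate is hypothetical):

```
settings := aurora.NewJobUpdateSettings()
settings.UpdateGroupSize = 1
// Require a pulseJobUpdate call at least every 30s or the update blocks.
pulseRate := int32(30000)
settings.BlockIfNoPulsesAfterMs = &pulseRate
```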
/** Event marking a state transition in job update lifecycle. */
@@ -819,9 +737,6 @@ struct JobInstanceUpdateEvent {
/** Job update action taken on the instance. */
3: JobUpdateAction action
/** Optional message explaining the instance update event. */
4: optional string message
}
/** Maps instance IDs to TaskConfigs. */
@@ -934,13 +849,6 @@ struct JobUpdateQuery {
7: i32 limit
}
struct HostMaintenanceRequest {
1: string host
2: SlaPolicy defaultSlaPolicy
3: i64 timeoutSecs
4: i64 createdTimestampMs
}
struct ListBackupsResult {
1: set<string> backups
}
@@ -1125,6 +1033,7 @@ service ReadOnlyScheduler {
Response getJobUpdateSummaries(1: JobUpdateQuery jobUpdateQuery)
/** Gets job update details. */
// TODO(zmanji): `key` is deprecated, remove this with AURORA-1765
Response getJobUpdateDetails(2: JobUpdateQuery query)
/** Gets the diff between client (desired) and server (current) job states. */
@@ -1164,7 +1073,7 @@ service AuroraSchedulerManager extends ReadOnlyScheduler {
Response restartShards(5: JobKey job, 3: set<i32> shardIds)
/** Initiates a kill on tasks. */
Response killTasks(4: JobKey job, 5: set<i32> instances, 6: string message)
Response killTasks(4: JobKey job, 5: set<i32> instances)
/**
* Adds new instances with the TaskConfig of the existing instance pointed by the key.
@@ -1225,6 +1134,31 @@ service AuroraSchedulerManager extends ReadOnlyScheduler {
Response pulseJobUpdate(1: JobUpdateKey key)
}
struct InstanceConfigRewrite {
/** Key for the task to rewrite. */
1: InstanceKey instanceKey
/** The original configuration. */
2: TaskConfig oldTask
/** The rewritten configuration. */
3: TaskConfig rewrittenTask
}
struct JobConfigRewrite {
/** The original job configuration. */
1: JobConfiguration oldJob
/** The rewritten job configuration. */
2: JobConfiguration rewrittenJob
}
union ConfigRewrite {
1: JobConfigRewrite jobRewrite
2: InstanceConfigRewrite instanceRewrite
}
struct RewriteConfigsRequest {
1: list<ConfigRewrite> rewriteCommands
}
struct ExplicitReconciliationSettings {
1: optional i32 batchSize
}
@@ -1277,27 +1211,23 @@ service AuroraAdmin extends AuroraSchedulerManager {
/** Set the given hosts back into serving mode. */
Response endMaintenance(1: Hosts hosts)
/**
* Ask scheduler to put hosts into DRAINING mode and move scheduled tasks off of the hosts
* such that its SLA requirements are satisfied. Use defaultSlaPolicy if it is not set for a task.
**/
Response slaDrainHosts(1: Hosts hosts, 2: SlaPolicy defaultSlaPolicy, 3: i64 timeoutSecs)
/** Start a storage snapshot and block until it completes. */
Response snapshot()
/**
* Forcibly rewrites the stored definition of user configurations. This is intended to be used
* in a controlled setting, primarily to migrate pieces of configurations that are opaque to the
* scheduler (e.g. executorConfig).
* The scheduler may do some validation of the rewritten configurations, but it is important
* that the caller take care to provide valid input and alter only necessary fields.
*/
Response rewriteConfigs(1: RewriteConfigsRequest request)
/** Tell scheduler to trigger an explicit task reconciliation with the given settings. */
Response triggerExplicitTaskReconciliation(1: ExplicitReconciliationSettings settings)
/** Tell scheduler to trigger an implicit task reconciliation. */
Response triggerImplicitTaskReconciliation()
/**
* Force prune any (terminal) tasks that match the query. If no statuses are supplied with the
* query, it will default to all terminal task states. If statuses are supplied, they must be
* terminal states.
*/
Response pruneTasks(1: TaskQuery query)
}
// The name of the header that should be sent to bypass leader redirection in the Scheduler.
@@ -1305,4 +1235,4 @@ const string BYPASS_LEADER_REDIRECT_HEADER_NAME = 'Bypass-Leader-Redirect'
// The path under which a task's filesystem should be mounted when using images and the Mesos
// unified containerizer.
const string TASK_FILESYSTEM_MOUNT_POINT = 'taskfs'

@@ -16,13 +16,10 @@ package realis
import (
"encoding/json"
"os"
"github.com/pkg/errors"
"os"
)
// Cluster contains the definition of the clusters.json file used by the default Aurora
// client for configuration
type Cluster struct {
Name string `json:"name"`
AgentRoot string `json:"slave_root"`
@@ -35,8 +32,7 @@ type Cluster struct {
AuthMechanism string `json:"auth_mechanism"`
}
// LoadClusters loads clusters.json file traditionally located at /etc/aurora/clusters.json
// for use with a gorealis client
// Loads clusters.json file traditionally located at /etc/aurora/clusters.json
func LoadClusters(config string) (map[string]Cluster, error) {
file, err := os.Open(config)
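
The diff truncates the body of `LoadClusters`; a short usage sketch mirroring how the sample client later in this diff resolves the leading scheduler (the path and cluster name are hypothetical):

```
clusters, err := realis.LoadClusters("/etc/aurora/clusters.json")
if err != nil {
    log.Fatalln(err)
}
cluster, ok := clusters["devcluster"]
if !ok {
    log.Fatalln("cluster not defined in clusters.json")
}
url, err := realis.LeaderFromZK(cluster)
if err != nil {
    log.Fatalln(err)
}
fmt.Println("leading scheduler:", url)
```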

@@ -12,19 +12,17 @@
* limitations under the License.
*/
package realis_test
package realis
import (
"fmt"
"testing"
realis "github.com/paypal/gorealis"
"github.com/stretchr/testify/assert"
"testing"
)
func TestLoadClusters(t *testing.T) {
clusters, err := realis.LoadClusters("examples/clusters.json")
clusters, err := LoadClusters("examples/clusters.json")
if err != nil {
fmt.Print(err)
}

@@ -15,77 +15,36 @@
package realis
import (
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/rdelval/gorealis/gen-go/apache/aurora"
)
// Container is an interface that defines a single function needed to create
// an Aurora container type. It exists because the code must support both Mesos
// and Docker containers.
type Container interface {
Build() *aurora.Container
}
// MesosContainer is a Mesos style container that can be used by Aurora Jobs.
// TODO(rdelvalle): Implement Mesos container builder
type MesosContainer struct {
container *aurora.MesosContainer
}
// DockerContainer is a vanilla Docker style container that can be used by Aurora Jobs.
type DockerContainer struct {
container *aurora.DockerContainer
}
// NewDockerContainer creates a new Aurora compatible Docker container configuration.
func NewDockerContainer() DockerContainer {
return DockerContainer{container: aurora.NewDockerContainer()}
}
// Build creates an Aurora container based upon the configuration provided.
func (c DockerContainer) Build() *aurora.Container {
return &aurora.Container{Docker: c.container}
}
// Image adds the name of a Docker image to be used by the Job when running.
func (c DockerContainer) Image(image string) DockerContainer {
c.container.Image = image
return c
}
// AddParameter adds a parameter to be passed to Docker when the container is run.
func (c DockerContainer) AddParameter(name, value string) DockerContainer {
c.container.Parameters = append(c.container.Parameters, &aurora.DockerParameter{
Name: name,
Value: value,
})
return c
}
// NewMesosContainer creates a Mesos style container to be configured and built for use by an Aurora Job.
func NewMesosContainer() MesosContainer {
return MesosContainer{container: aurora.NewMesosContainer()}
}
// Build creates a Mesos style Aurora container configuration to be passed on to the Aurora Job.
func (c MesosContainer) Build() *aurora.Container {
return &aurora.Container{Mesos: c.container}
}
// DockerImage configures the Mesos container to use a specific Docker image when being run.
func (c MesosContainer) DockerImage(name, tag string) MesosContainer {
if c.container.Image == nil {
c.container.Image = aurora.NewImage()
}
c.container.Image.Docker = &aurora.DockerImage{Name: name, Tag: tag}
return c
}
// AppcImage configures the Mesos container to use an image in the Appc format to run the container.
func (c MesosContainer) AppcImage(name, imageID string) MesosContainer {
if c.container.Image == nil {
c.container.Image = aurora.NewImage()
}
c.container.Image.Appc = &aurora.AppcImage{Name: name, ImageId: imageID}
c.container.Parameters = append(c.container.Parameters, &aurora.DockerParameter{name, value})
return c
}
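
A usage sketch for the builders defined above (image names and parameters are hypothetical):

```
// Docker-style container with a parameter passed to the docker CLI.
docker := realis.NewDockerContainer().
    Image("python:2.7").
    AddParameter("network", "host")

// Mesos-style container backed by a Docker image.
mesos := realis.NewMesosContainer().DockerImage("python", "2.7")

// Build() yields the thrift-level *aurora.Container a job configuration carries.
_, _ = docker.Build(), mesos.Build()
```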

@@ -1,109 +0,0 @@
version: "2"
services:
zk:
image: rdelvalle/zookeeper
restart: on-failure
ports:
- "2181:2181"
environment:
ZK_CONFIG: tickTime=2000,initLimit=10,syncLimit=5,maxClientCnxns=128,forceSync=no,clientPort=2181
ZK_ID: 1
networks:
aurora_cluster:
ipv4_address: 192.168.33.2
master:
image: aurorascheduler/mesos-master:1.7.2
restart: on-failure
ports:
- "5050:5050"
environment:
MESOS_ZK: zk://192.168.33.2:2181/mesos
MESOS_QUORUM: 1
MESOS_HOSTNAME: localhost
MESOS_CLUSTER: test-cluster
MESOS_REGISTRY: replicated_log
MESOS_WORK_DIR: /tmp/mesos
networks:
aurora_cluster:
ipv4_address: 192.168.33.3
depends_on:
- zk
agent-one:
image: aurorascheduler/mesos-agent:1.7.2
pid: host
restart: on-failure
ports:
- "5051:5051"
environment:
MESOS_ATTRIBUTES: 'zone:west'
MESOS_MASTER: zk://192.168.33.2:2181/mesos
MESOS_CONTAINERIZERS: docker,mesos
MESOS_PORT: 5051
MESOS_HOSTNAME: localhost
MESOS_RESOURCES: ports(*):[11000-11999]
MESOS_SYSTEMD_ENABLE_SUPPORT: 'false'
MESOS_WORK_DIR: /tmp/mesos
networks:
aurora_cluster:
ipv4_address: 192.168.33.4
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- zk
agent-two:
image: aurorascheduler/mesos-agent:1.7.2
pid: host
restart: on-failure
ports:
- "5061:5061"
environment:
MESOS_ATTRIBUTES: 'zone:east'
MESOS_MASTER: zk://192.168.33.2:2181/mesos
MESOS_CONTAINERIZERS: docker,mesos
MESOS_HOSTNAME: localhost
MESOS_PORT: 5061
MESOS_RESOURCES: ports(*):[11000-11999]
MESOS_SYSTEMD_ENABLE_SUPPORT: 'false'
MESOS_WORK_DIR: /tmp/mesos
networks:
aurora_cluster:
ipv4_address: 192.168.33.5
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- zk
aurora-one:
image: aurorascheduler/scheduler:0.23.0
pid: host
ports:
- "8081:8081"
restart: on-failure
environment:
CLUSTER_NAME: test-cluster
ZK_ENDPOINTS: "192.168.33.2:2181"
MESOS_MASTER: "zk://192.168.33.2:2181/mesos"
EXTRA_SCHEDULER_ARGS: "-min_required_instances_for_sla_check=1"
networks:
aurora_cluster:
ipv4_address: 192.168.33.7
depends_on:
- zk
- master
- agent-one
networks:
aurora_cluster:
driver: bridge
ipam:
config:
- subnet: 192.168.33.0/16
gateway: 192.168.33.1

@@ -1,90 +0,0 @@
# Developing gorealis
### Installing Docker
For our developer environment, we leverage Docker containers.
First you must have Docker installed. Instructions on how to install Docker
vary from platform to platform and can be found [here](https://docs.docker.com/install/).
### Installing docker-compose
To make the creation of our developer environment as simple as possible, we leverage
docker-compose to bring up all independent components separately.
This also allows us to delete and recreate our development cluster very quickly.
To install docker-compose please follow the instructions for your platform
[here](https://docs.docker.com/compose/install/).
### Getting the source code
As of Go 1.10.x, GOPATH is still relevant. This may change in the future but
for the sake of making development less error-prone, it is suggested that the following
directories be created:
`$ mkdir -p $GOPATH/src/github.com/paypal`
And then clone the master branch into the newly created folder:
`$ cd $GOPATH/src/github.com/paypal; git clone git@github.com:paypal/gorealis.git`
Since we check in our vendor folder, no further setup is needed for gorealis.
### Bringing up the cluster
To develop gorealis, you will need a fully functioning Mesos cluster along with
Apache Aurora.
In order to bring up our docker-compose setup, execute the following command from the root
of the git repository:
`$ docker-compose up -d`
### Testing code
Since Docker does not work well using host mode under MacOS, a workaround has been employed:
docker-compose brings up a bridged network.
* Port 8081 is exposed for Aurora. http://localhost:8081 will load the Aurora Web UI.
* Port 5050 is exposed for Mesos. http://localhost:5050 will load the Mesos Web UI.
#### Note for developers on MacOS:
Running the cluster using a bridged network on MacOS has some side effects.
Since Aurora exposes its internal IP location through Zookeeper, gorealis will determine
the address to be 192.168.33.7. The address 192.168.33.7 is valid when running in a Linux
environment but not when running under MacOS. To run code involving the ZK leader fetcher
(such as the tests), a container connected to the network needs to be launched.
For example, running the tests in a container can be done through the following command from
the root of the git repository:
`$ docker run -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10.3-alpine go test github.com/paypal/gorealis`
Or
`$ ./runTestsMac.sh`
Alternatively, if an interactive shell is necessary, the following command may be used:
`$ docker run -it -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10.3-alpine /bin/sh`
### Cleaning up the cluster
If something went wrong while developing and a clean environment is desired, perform the
following command from the root of the git directory:
`$ docker-compose down && docker-compose up -d`
### Tearing down the cluster
Once development is done, the environment may be torn down by executing (from the root of the
git directory):
`$ docker-compose down`

@@ -1,11 +1,27 @@
# Running custom executors on Aurora
In this document, we will be using the docker-compose executor to demonstrate
how Aurora can use multiple executors on a single Scheduler. For this guide,
we will be using a vagrant instance to demonstrate the setup process. Many of the same
steps also apply to an Aurora installation made via a package manager. Differences in how to configure
the cluster between the vagrant image and the package manager will be clarified when necessary.
## Pre-configured Aurora Vagrant box
Alternatively, if Vagrant and VirtualBox are already configured on your machine,
you may use a pre-configured vagrant image and skip to the [Creating Aurora Jobs](#creating-aurora-jobs) section.
To take this path, start by cloning the following repository and checking out the DockerComposeExecutor branch:
```
$ git clone -b 0.16.0-DCE git@github.com:rdelval/aurora.git
```
And bringing up the vagrant box
```
$ cd aurora
$ vagrant up
```
**The pre-configured Vagrant box will most likely run on a stale version of Aurora (compared to the master)**
## Configuring Aurora manually
### Spinning up an Aurora instance with Vagrant
Follow the guide at http://aurora.apache.org/documentation/latest/getting-started/vagrant/
@@ -15,7 +31,7 @@ until the end of step 4 (Start the local cluster) and skip to configuring Docker
Follow the guide at http://aurora.apache.org/documentation/latest/operations/installation/
### Configuring Scheduler to use Docker-Compose executor
In order to use the docker compose executor with Aurora, we must first give the scheduler
a configuration file that contains information on how to run the executor.
#### Configuration file
@@ -26,14 +42,14 @@ under the custom executors section.
A sample config file for the docker-compose executor looks like this:
```
[
{
"executor":{
"command":{
"value":"java -jar docker-compose-executor_0.1.0.jar",
"shell":"true",
"uris":[
{
"uris":[
{
"cache":false,
"executable":true,
"extract":false,
@@ -47,8 +63,10 @@ A sample config file for the docker-compose executor looks like this:
"task_prefix":"compose-"
}
]
```
#### Configuring the Scheduler to run a custom executor
##### Setting the proper flags
Some flags need to be set on the Aurora scheduler in order for custom executors to work properly.
@@ -88,37 +106,102 @@ On Ubuntu, restarting the aurora-scheduler can be achieved by running the follow
$ sudo service aurora-scheduler restart
```
## Using [dce-go](https://github.com/paypal/dce-go)
Instead of manually configuring Aurora to run the docker-compose executor, one can follow the instructions provided [here](https://github.com/paypal/dce-go/blob/develop/docs/environment.md) to quickly create a DCE environment that would include mesos, aurora, golang1.7, docker, docker-compose and DCE installed.
Please note that when using dce-go, the endpoints are going to be as shown below,
```
Aurora endpoint --> http://192.168.33.8:8081
Mesos endpoint --> http://192.168.33.8:5050
```
### Using a custom client
Pystachio does not yet support launching tasks using custom executors. Therefore, a custom
client must be used in order to launch tasks using a custom executor. In this case,
we will be using [gorealis](https://github.com/rdelval/gorealis) to launch a task with
the compose executor on Aurora.
## Configuring the system to run a custom client and docker-compose executor
### Installing Go
Follow the instructions at the official golang website: [golang.org/doc/install](https://golang.org/doc/install)
#### Linux
### Installing docker-compose
##### Ubuntu
Agents which will run dce-go will need docker-compose in order to successfully run the executor.
Instructions for installing docker-compose on various platforms may be found on Docker's website: [docs.docker.com/compose/install/](https://docs.docker.com/compose/install/)
###### Adding a PPA and installing via apt-get
```
$ sudo add-apt-repository ppa:ubuntu-lxc/lxd-stable
$ sudo apt-get update
$ sudo apt-get install golang
```
###### Configuring the GOPATH
Configure the environment to be able to compile and run Go code.
```
$ mkdir $HOME/go
$ echo export GOPATH=$HOME/go >> $HOME/.bashrc
$ echo export GOROOT=/usr/lib/go >> $HOME/.bashrc
$ echo export PATH=$PATH:$GOPATH/bin >> $HOME/.bashrc
$ echo export PATH=$PATH:$GOROOT/bin >> $HOME/.bashrc
```
Finally we must reload the .bashrc configuration:
```
$ source $HOME/.bashrc
```
#### OS X
One way to install Go on OS X is by using [Homebrew](http://brew.sh/)
##### Installing Homebrew
Run the following command from the terminal to install Homebrew:
```
$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
```
##### Installing Go using Homebrew
Run the following command from the terminal to install Go:
```
$ brew install go
```
##### Configuring the GOPATH
Configure the environment to be able to compile and run Go code.
```
$ mkdir $HOME/go
$ echo export GOPATH=$HOME/go >> $HOME/.profile
$ echo export GOROOT=/usr/local/opt/go/libexec >> $HOME/.profile
$ echo export PATH=$PATH:$GOPATH/bin >> $HOME/.profile
$ echo export PATH=$PATH:$GOROOT/bin >> $HOME/.profile
```
Finally we must reload the .profile configuration:
```
$ source $HOME/.profile
```
#### Windows
Download and run the msi installer from https://golang.org/dl/
## Installing Docker Compose
To show Aurora's new multi executor feature, we need to use at least one custom executor.
In this case we will be using the [docker-compose-executor](https://github.com/mesos/docker-compose-executor).
In order to run the docker-compose executor, each agent must have docker-compose installed on it.
This can be done using pip:
```
$ sudo pip install docker-compose
```
## Downloading gorealis
Finally, we must get `gorealis` using the `go get` command:
```
go get github.com/paypal/gorealis
go get github.com/rdelval/gorealis
```
# Creating Aurora Jobs
## Creating a thermos job
To demonstrate that we are able to run jobs using different executors on the
To demonstrate that we are able to run jobs using different executors on the
We can use a sample job for this:
@@ -181,12 +264,12 @@ job = realis.NewJob().
Using a vagrant setup as an example, we can run the following command to create a compose job:
```
go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
```
If everything went according to plan, a new job will be shown in the Aurora UI.
We can further investigate inside the Mesos task sandbox. Inside the sandbox, under
the sample-app folder, we can find a docker-compose.yml-generated.yml. If we inspect this file,
we can find the port at which we can find the web server we launched.
Under Web->Ports, we find the port Mesos allocated. We can then navigate to:
@@ -195,10 +278,10 @@ Under Web->Ports, we find the port Mesos allocated. We can then navigate to:
A message from the executor should greet us.
## Creating a Thermos job using gorealis
It is also possible to create a thermos job using gorealis. To do this, however,
a thermos payload is required. A thermos payload consists of a JSON blob that details
the entire task as it exists inside the Aurora Scheduler. *Creating the blob is unfortunately
out of the scope of what gorealis does*, so a thermos payload must be generated beforehand or
retrieved from the structdump of an existing task for testing purposes.
A sample thermos JSON payload may be found [here](../examples/thermos_payload.json) in the examples folder.
@@ -223,36 +306,17 @@ Using a vagrant setup as an example, we can run the following command to create a Thermos job:
Using a vagrant setup as an example, we can run the following command to create a Thermos job:
```
$ cd $GOPATH/src/github.com/paypal/gorealis
$ cd $GOPATH/src/github.com/rdelval/gorealis
$ go run examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=create
```
## Creating jobs using gorealis JSON client
We can also use the [JSON client](../examples/jsonClient.go) to create Aurora jobs using gorealis.
If using _dce-go_, then use `http://192.168.33.8:8081` as the scheduler URL.
```
$ cd $GOPATH/src/github.com/paypal/gorealis/examples
```
To launch a job using the Thermos executor,
```
$ go run jsonClient.go -job=job_thermos.json -config=config.json
```
To launch a job using docker-compose executor,
```
$ go run jsonClient.go -job=job_dce.json -config=config.json
```
# Cleaning up
To stop the jobs we've launched, we need to send a job kill request to Aurora.
It should be noted that although we can't create jobs using a custom executor with the default Aurora client,
we *can* use the default Aurora client to kill them. Additionally, we can use gorealis to perform the clean up as well.
## Using the Default Client (if manually configured Aurora)
## Using the Default Client
```
$ aurora job killall devcluster/www-data/prod/hello
@@ -262,6 +326,6 @@ $ aurora job killall devcluster/vagrant/prod/docker-compose
## Using gorealis
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
$ go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
$ go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
```

@@ -1,6 +1,6 @@
# How to leverage the library (based on the [sample client](../examples/client.go))
For a more complete look at the API, please visit https://godoc.org/github.com/paypal/gorealis
For a more complete look at the API, please visit https://godoc.org/github.com/rdelval/gorealis
* Create a default configuration file (alternatively, manually create your own Config):
```
@@ -57,19 +57,4 @@ updateJob := realis.NewUpdateJob(job)
updateJob.InstanceCount(1)
updateJob.Ram(128)
msg, err := r.UpdateJob(updateJob, "")
```
* Handling a timeout scenario:
When sending an API call to Aurora, the call may timeout at the client side.
This means that the time limit has been reached while waiting for the scheduler
to reply. In such a case it is recommended that the timeout is increased through
the use of the `realis.TimeoutMS()` option.
As these timeouts cannot be totally avoided, there exists a mechanism to mitigate such
scenarios. The `StartJobUpdate` and `CreateService` API will return an error that
implements the Timeout interface.
An error can be checked to see if it is a Timeout error by using the `realis.IsTimeout()`
function.
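
A sketch of the pattern described above, assuming a connected client `r`, a `job`, and update `settings` already exist:

```
_, result, err := r.CreateService(job, settings)
if err != nil {
    if realis.IsTimeout(err) {
        // The scheduler may still have started the update; check for it
        // before retrying, or retry with a larger realis.TimeoutMS() value.
    }
    log.Fatalln(err)
}
fmt.Println(result.String())
```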

@@ -1,6 +1,6 @@
# Using the Sample client
## Usage:
```
Usage of ./client:
-cluster string
@@ -29,21 +29,21 @@ executor examples, the vagrant box must be configured properly to use the docker
#### Creating a Thermos job
```
$ cd $GOPATH/src/github.com/paypal/gorealis/examples
$ cd $GOPATH/src/github.com/rdelval/gorealis/examples
$ go run client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=create
```
#### Kill a Thermos job
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
$ go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
```
### Docker Compose executor (custom executor)
#### Creating Docker Compose executor job
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
$ go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
```
#### Kill a Docker Compose executor job
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
$ go run $GOPATH/src/github.com/rdelval/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
```

errors.go (104 changes)

@@ -1,104 +0,0 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
// Using a pattern described by Dave Cheney to differentiate errors
// https://dave.cheney.net/2016/04/27/dont-just-check-errors-handle-them-gracefully
// Timeout errors are returned when a function is unable to continue executing due
// to a time constraint or meeting a set number of retries.
type timeout interface {
Timedout() bool
}
// IsTimeout returns true if the error being passed as an argument implements the Timeout interface
// and the Timedout function returns true.
func IsTimeout(err error) bool {
temp, ok := err.(timeout)
return ok && temp.Timedout()
}
type timeoutErr struct {
error
timedout bool
}
func (r *timeoutErr) Timedout() bool {
return r.timedout
}
func newTimedoutError(err error) *timeoutErr {
return &timeoutErr{error: err, timedout: true}
}
// retryErr is a superset of timeout which includes extra context
// with regards to our retry mechanism. This is done in order to make sure
// that our retry mechanism works as expected through our tests and should
// never be relied on or used directly. It is not made part of the public API
// on purpose.
type retryErr struct {
error
timedout bool
retryCount int // How many times did the mechanism retry the command
}
// Retry error is a timeout type error with added context.
func (r *retryErr) Timedout() bool {
return r.timedout
}
func (r *retryErr) RetryCount() int {
return r.retryCount
}
// ToRetryCount is a helper function for testing verification to avoid whitebox testing
// as well as keeping retryErr private.
// Should NOT be used under any other context.
func ToRetryCount(err error) *retryErr {
if retryErr, ok := err.(*retryErr); ok {
return retryErr
}
return nil
}
func newRetryError(err error, retryCount int) *retryErr {
return &retryErr{error: err, timedout: true, retryCount: retryCount}
}
// Temporary errors indicate that the action may or should be retried.
type temporary interface {
Temporary() bool
}
// IsTemporary indicates whether the error passed in as an argument implements the temporary interface
// and if the Temporary function returns true.
func IsTemporary(err error) bool {
temp, ok := err.(temporary)
return ok && temp.Temporary()
}
type temporaryErr struct {
error
temporary bool
}
func (t *temporaryErr) Temporary() bool {
return t.temporary
}
// NewTemporaryError creates a new error which satisfies the Temporary interface.
func NewTemporaryError(err error) *temporaryErr {
return &temporaryErr{error: err, temporary: true}
}
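
A short sketch of how a caller might use these helpers, with the standard library's errors package (the wrapped error is hypothetical):

```
err := realis.NewTemporaryError(errors.New("scheduler transiently unavailable"))

if realis.IsTemporary(err) {
    // Safe to retry the request after a backoff.
}
if realis.IsTimeout(err) {
    // False here: the error implements Temporary, not Timedout.
}
```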

@@ -1,22 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDrTCCApWgAwIBAgIJAM+bKx50CY9JMA0GCSqGSIb3DQEBCwUAMG0xCzAJBgNV
BAYTAkdCMQ8wDQYDVQQIDAZMb25kb24xDzANBgNVBAcMBkxvbmRvbjEYMBYGA1UE
CgwPR2xvYmFsIFNlY3VyaXR5MRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MQowCAYD
VQQDDAEqMB4XDTE3MTIwODIwNTMwMVoXDTI3MTIwNjIwNTMwMVowbTELMAkGA1UE
BhMCR0IxDzANBgNVBAgMBkxvbmRvbjEPMA0GA1UEBwwGTG9uZG9uMRgwFgYDVQQK
DA9HbG9iYWwgU2VjdXJpdHkxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxCjAIBgNV
BAMMASowggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhdN0KH80BF3Dk
RQqAARcf7F87uNhQM05HXK8ffpESvKhzrO9BHuDZ0yS3il0BK9XpTyTtHSLIbphk
rO3BOsmPj0zhaM20LsPtwy8GmMCym3hVNSYYyP5XCdjA3uZIYq2R8ruk+vZTe4Zr
F8GHV/xGYU4zKPMGzsQbICjZhj0yiYF9UQ2J+xw79nsqPTmo8+EdVuunLz39dt2o
SbDA01g/kPTIg9K2CAUH0mm4zegiqytwpn2JKVoemmgrDYECWnhLprWlvN9t/fX9
IgprDAHN1BsMrzfmfQXZpVmbIlTriVSdYVeTwG8rT7Tg8soIHqBrnJ1ykTpY4VrO
6tc2z4kTAgMBAAGjUDBOMB0GA1UdDgQWBBSLvwax1Zd6ZiE7TjRklWYNPwgZ2zAf
BgNVHSMEGDAWgBSLvwax1Zd6ZiE7TjRklWYNPwgZ2zAMBgNVHRMEBTADAQH/MA0G
CSqGSIb3DQEBCwUAA4IBAQCJY/EJxlyiSrnO82QcsWm9cT/ciU/G7Y4vX/tGs74C
tNxuBpc0vMfW4a9u6tmi3cW3EXD/KRvPwKZXxzTOhoQY9ZpbZLZ6VvCQ+aWQaXWT
664IS/mrEUZ/p3pgqTNtifdpPAZqVqNdS+Od8/B3/nWUn6JBkDZ4WaFQgfsSulxK
yzYN6UbwhLHfQUupFFhPfvYIVLH9ErGzcv5ZCHX9FornCc0W/8hL4EdjmpTW2ML2
hM5aTKynMiR1GuGSdSpJ+BOeiUI7Go1jGwjV+H9Pw/kfmooq2wuuUGti5dr0Qq7h
CQx1a14BmDBwGoMIOdjFATRwnami5e188fAJozL++i+s
-----END CERTIFICATE-----

@@ -1,28 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDhdN0KH80BF3Dk
RQqAARcf7F87uNhQM05HXK8ffpESvKhzrO9BHuDZ0yS3il0BK9XpTyTtHSLIbphk
rO3BOsmPj0zhaM20LsPtwy8GmMCym3hVNSYYyP5XCdjA3uZIYq2R8ruk+vZTe4Zr
F8GHV/xGYU4zKPMGzsQbICjZhj0yiYF9UQ2J+xw79nsqPTmo8+EdVuunLz39dt2o
SbDA01g/kPTIg9K2CAUH0mm4zegiqytwpn2JKVoemmgrDYECWnhLprWlvN9t/fX9
IgprDAHN1BsMrzfmfQXZpVmbIlTriVSdYVeTwG8rT7Tg8soIHqBrnJ1ykTpY4VrO
6tc2z4kTAgMBAAECggEBAMZL7SY8dikhnu+HMgcH7njrg4+ZsthHZ/AoOvcucRbT
zC2ByyWxrP6pUUAFeGvRTGHadJYA7FjxvSO/XZZ4yFN2LJ6NeW+jOjzjUXcx3zq4
t4vqJUnjbqDLTlPFOTItaJBXuGcRPJqMqNuEl3kdEAwvBYLF34r7TUy2and4NFc/
JziGljkiucoNBk62TCDrffnvxMJXht+ab6PMWO87PzMVs4xUFPe0ezv4O54btUcV
EJsU58013EHeCai8AnxjcIPlMlB+lg4Y3C4VXf0mJ//cBvbCp+kyWybMw/e+e222
xq/98vnCOIqcy4u+9ENPLJQe7hXZ3Sqh38kf0GuOh8ECgYEA+VFvuuBP0OQHTxeE
dUizR3Iz/xkeGDUZ/8Ix4TCUmRRuhEXrV7ShwUmuanO3pNhChW6hXZ6qj/yuhfOC
D4V4upEnJDccz/cbH1PdBsfALhC8/C0WSGvnEWZMw/SggmY4KwReqWwN9aA8qjdq
kFTOJc2Js+dCHP9kn9J3U16A+oMCgYEA53+2lhckAI8bsrbCayWRZAVx7hUNPijt
MQvH+PCJ3QeZ0z801zk+4ny5WQ1BT0vRzwj8an4Byi2ZuTQU//N4oawDK0JVYi7q
rjKX/AhAx/puoGAgqiS1nDmuiUiplW06HqayCFbpJ1CoXz8+MwdRXiJ8dgioafVJ
+7wHZDVmMjECgYEAoULxd/ia58x2hcv6Wzo469+MjlYaxyGhvXJIfRXFJ/a1PU1U
Whh1/+W+sRBEGpXfARt7uGhmfle8Mtw8pfl5C4PTw3L6afG1U2AVOMt/HMyq0JoB
LbrNbM20nZLfNzkS35AmAoPny5ZnZtoNTWntJTp69SiB9OuklFO35u7bki0CgYAL
qQYkVzQMBylI/iWaygChvhh3+n15RQx1bPd8lXkMNgbMeiGKOaruM4QOdTl16ga+
W+CC6KfkbBmTF4l7PuMzmXtrYWL1mBFgBtJa8nt41yddUpoyl7jCDrG43n0UNrU3
uAO9ocsKnOhuK7xRS6wQhsIoG9WHyMAaOuVQadQk8QKBgQDVibcvOPXNcF1aRMG7
V24nBb+YYz+00g/cLRkDnBX9/HORle0HSfeT70ctRhuFCoHHbHF4fnp/iAwDgxdB
dNufthftTZTtFGITUsJDN36fSXNjEvKzmKEAlEYkGAYijLlDwknPB+bf4NQ6T0R+
AtnKQY6G4kFSfw9AKgWGy7ZKfg==
-----END PRIVATE KEY-----

@@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC6zCCAdOgAwIBAgIJAMgY8gND5lFnMA0GCSqGSIb3DQEBCwUAMAwxCjAIBgNV
BAMMASowHhcNMTcxMjA4MjA1MTQyWhcNMjcxMjA2MjA1MTQyWjAMMQowCAYDVQQD
DAEqMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwKcyyXg90wen25yh
QA17MDyzjzBsIL9+kznzRD1azoNqA3RShAAWXn5a81HeWvncpVL+TKPMU3UC02XT
I6GtX1U7xmdKstBLKiHQxGWX04DshSrVgcVzLUI6OHBG6feoL1mGAa8jB2UEE6ER
uXdgYgKbLUrvduSn4fBvPIhhXg9YL2n2TVujkaY9bPZ9M5tQ5K+g4wRwCAYgjTUN
55J82uzAsLCs+AQi9D4bLJmw0z2H7enRLkd9sRE2pArhXm4LLg/QlL8I5ZHv7vfl
RYdOoC3bjgKk+OVOmb2Fb/dWVlOMcnO8qeo9WyQbhAcjNK2W9Tqk5E5orGZ/bkw/
iZc0MwIDAQABo1AwTjAdBgNVHQ4EFgQUA0xmNKQqxUQgaM9ceCzFyocn9jswHwYD
VR0jBBgwFoAUA0xmNKQqxUQgaM9ceCzFyocn9jswDAYDVR0TBAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAQEAnL7VvBTcFyLeNeuTAWmM0bjlwWsuL9Va2LZitnATgzE7
ACS+ZNURnpK/o3UHGc2ePDCFgPsF2mnh4Jmye2tl5uPxQS2zR96hp16ZGVi9N1gx
4aQyknKt6UFRP/cvWwgDN5N3pnRZQ7J0kaAWCPtAIldeGK7UDjOJ1DLDVLeByr7x
27TCt69ysisTtz6Tzr5vvVDEtu2yNIf/uGk3od+pe/0E1UXVCTItvwM30wvfcTPU
aMZXBYNmSrjnJ4k/9FSjZYNKPtK1c/JR+zUng1h+I3b7itY5VBGdzdq9fEk20PHm
Xdg1Ptbebtl6PJqWX+rydXuen6SUt8vFJE89MkbWSw==
-----END CERTIFICATE-----

@@ -17,108 +17,66 @@ package main
import (
"flag"
"fmt"
"github.com/rdelval/gorealis"
"github.com/rdelval/gorealis/gen-go/apache/aurora"
"github.com/rdelval/gorealis/response"
"io/ioutil"
"log"
"strings"
"time"
realis "github.com/paypal/gorealis"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/paypal/gorealis/response"
"os"
)
var cmd, executor, url, clustersConfig, clusterName, updateId, username, password, zkUrl, hostList, role string
var caCertsPath string
var clientKey, clientCert string
var ConnectionTimeout = 20000
func init() {
flag.StringVar(&cmd, "cmd", "", "Job request type to send to Aurora Scheduler")
flag.StringVar(&executor, "executor", "thermos", "Executor to use")
flag.StringVar(&url, "url", "", "URL at which the Aurora Scheduler exists as [url]:[port]")
flag.StringVar(&clustersConfig, "clusters", "", "Location of the clusters.json file used by aurora.")
flag.StringVar(&clusterName, "cluster", "devcluster", "Name of cluster to run job on (only necessary if clusters is set)")
flag.StringVar(&updateId, "updateId", "", "Update ID to operate on")
flag.StringVar(&username, "username", "aurora", "Username to use for authorization")
flag.StringVar(&password, "password", "secret", "Password to use for authorization")
flag.StringVar(&zkUrl, "zkurl", "", "zookeeper url")
flag.StringVar(&hostList, "hostList", "", "Comma separated list of hosts to operate on")
flag.StringVar(&role, "role", "", "owner role to use")
flag.StringVar(&caCertsPath, "caCertsPath", "", "Path to CA certs on local machine.")
flag.StringVar(&clientCert, "clientCert", "", "Client certificate to use to connect to Aurora.")
flag.StringVar(&clientKey, "clientKey", "", "Client private key to use to connect to Aurora.")
func main() {
cmd := flag.String("cmd", "", "Job request type to send to Aurora Scheduler")
executor := flag.String("executor", "thermos", "Executor to use")
url := flag.String("url", "", "URL at which the Aurora Scheduler exists as [url]:[port]")
clustersConfig := flag.String("clusters", "", "Location of the clusters.json file used by aurora.")
clusterName := flag.String("cluster", "devcluster", "Name of cluster to run job on (only necessary if clusters is set)")
updateId := flag.String("updateId", "", "Update ID to operate on")
username := flag.String("username", "aurora", "Username to use for authorization")
password := flag.String("password", "secret", "Password to use for authorization")
flag.Parse()
// Attempt to load leader from zookeeper using a
// cluster.json file used for the default aurora client if provided.
// This will override the provided url in the arguments
if clustersConfig != "" {
clusters, err := realis.LoadClusters(clustersConfig)
// Attempt to load leader from zookeeper
if *clustersConfig != "" {
clusters, err := realis.LoadClusters(*clustersConfig)
if err != nil {
log.Fatalln(err)
fmt.Println(err)
os.Exit(1)
}
cluster, ok := clusters[clusterName]
cluster, ok := clusters[*clusterName]
if !ok {
log.Fatalf("Cluster %s doesn't exist in the file provided\n", clusterName)
fmt.Printf("Cluster %s chosen doesn't exist\n", *clusterName)
os.Exit(1)
}
url, err = realis.LeaderFromZK(cluster)
*url, err = realis.LeaderFromZK(cluster)
if err != nil {
log.Fatalln(err)
fmt.Println(err)
os.Exit(1)
}
}
}
func main() {
var job realis.Job
var err error
var monitor *realis.Monitor
var r realis.Realis
clientOptions := []realis.ClientOption{
realis.BasicAuth(username, password),
realis.ThriftJSON(),
realis.TimeoutMS(ConnectionTimeout),
realis.BackOff(realis.Backoff{
Steps: 2,
Duration: 10 * time.Second,
Factor: 2.0,
Jitter: 0.1,
}),
realis.Debug(),
}
if zkUrl != "" {
fmt.Println("zkUrl: ", zkUrl)
clientOptions = append(clientOptions, realis.ZKUrl(zkUrl))
} else {
clientOptions = append(clientOptions, realis.SchedulerUrl(url))
}
if caCertsPath != "" {
clientOptions = append(clientOptions, realis.Certspath(caCertsPath))
}
if clientKey != "" && clientCert != "" {
clientOptions = append(clientOptions, realis.ClientCerts(clientKey, clientCert))
}
r, err = realis.NewRealisClient(clientOptions...)
//Create new configuration with default transport layer
config, err := realis.NewDefaultConfig(*url)
if err != nil {
log.Fatalln(err)
fmt.Println(err)
os.Exit(1)
}
monitor = &realis.Monitor{Client: r}
// Configured for vagrant
realis.AddBasicAuth(&config, *username, *password)
r := realis.NewClient(config)
defer r.Close()
switch executor {
monitor := &realis.Monitor{r}
var job realis.Job
switch *executor {
case "thermos":
payload, err := ioutil.ReadFile("examples/thermos_payload.json")
payload, err := ioutil.ReadFile("thermos_payload.json")
if err != nil {
log.Fatalln("Error reading json config file: ", err)
fmt.Println("Error reading json config file: ", err)
os.Exit(1)
}
job = realis.NewJob().
@ -133,21 +91,23 @@ func main() {
IsService(true).
InstanceCount(1).
AddPorts(1)
break
case "compose":
job = realis.NewJob().
Environment("prod").
Role("vagrant").
Name("docker-compose-test").
Name("docker-compose").
ExecutorName("docker-compose-executor").
ExecutorData("{}").
CPU(0.25).
RAM(512).
CPU(1).
RAM(64).
Disk(100).
IsService(true).
IsService(false).
InstanceCount(1).
AddPorts(4).
AddPorts(1).
AddLabel("fileName", "sample-app/docker-compose.yml").
AddURIs(true, true, "https://github.com/mesos/docker-compose-executor/releases/download/0.1.0/sample-app.tar.gz")
break
case "none":
job = realis.NewJob().
Environment("prod").
@ -159,82 +119,53 @@ func main() {
IsService(true).
InstanceCount(1).
AddPorts(1)
break
default:
log.Fatalln("Only thermos, compose, and none are supported for now")
fmt.Println("Only thermos, compose, and none are supported for now")
os.Exit(1)
}
switch cmd {
switch *cmd {
case "create":
fmt.Println("Creating job")
resp, err := r.CreateJob(job)
if err != nil {
log.Fatalln(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Println(resp.String())
if ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50); !ok || mErr != nil {
_, err := r.KillJob(job.JobKey())
if err != nil {
log.Fatalln(err)
if resp.ResponseCode == aurora.ResponseCode_OK {
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50); !ok || err != nil {
_, err := r.KillJob(job.JobKey())
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
log.Fatalf("ok: %v\n err: %v", ok, mErr)
}
case "createService":
// Create a service with three instances using the update API instead of the createJob API
fmt.Println("Creating service")
settings := realis.NewUpdateSettings()
job.InstanceCount(3)
resp, result, err := r.CreateService(job, settings)
if err != nil {
log.Println("error: ", err)
log.Fatal("response: ", resp.String())
}
fmt.Println(result.String())
if ok, mErr := monitor.JobUpdate(*result.GetKey(), 5, 180); !ok || mErr != nil {
_, err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out")
_, err = r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
log.Fatalf("ok: %v\n err: %v", ok, mErr)
}
break
case "createDocker":
fmt.Println("Creating a docker based job")
container := realis.NewDockerContainer().Image("python:2.7").AddParameter("network", "host")
job.Container(container)
resp, err := r.CreateJob(job)
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Println(resp.String())
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
_, err := r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
if resp.ResponseCode == aurora.ResponseCode_OK {
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
_, err := r.KillJob(job.JobKey())
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
}
case "createMesosContainer":
fmt.Println("Creating a docker based job")
container := realis.NewMesosContainer().DockerImage("python", "2.7")
job.Container(container)
resp, err := r.CreateJob(job)
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
_, err := r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
}
break
case "scheduleCron":
fmt.Println("Scheduling a Cron job")
// Cron config
@ -242,416 +173,157 @@ func main() {
job.IsService(false)
resp, err := r.ScheduleCronJob(job)
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Println(resp.String())
break
case "startCron":
fmt.Println("Starting a Cron job")
resp, err := r.StartCronJob(job.JobKey())
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Println(resp.String())
break
case "descheduleCron":
fmt.Println("Descheduling a Cron job")
resp, err := r.DescheduleCronJob(job.JobKey())
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Println(resp.String())
break
case "kill":
fmt.Println("Killing job")
resp, err := r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
if ok, err := monitor.Instances(job.JobKey(), 0, 5, 50); !ok || err != nil {
log.Fatal("Unable to kill all instances of job")
if resp.ResponseCode == aurora.ResponseCode_OK {
if ok, err := monitor.Instances(job.JobKey(), 0, 5, 50); !ok || err != nil {
fmt.Println("Unable to kill all instances of job")
os.Exit(1)
}
}
fmt.Println(resp.String())
break
case "restart":
fmt.Println("Restarting job")
resp, err := r.RestartJob(job.JobKey())
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Println(resp.String())
break
case "liveCount":
fmt.Println("Getting instance count")
live, err := r.GetInstanceIds(job.JobKey(), aurora.LIVE_STATES)
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Live instances: %+v\n", live)
fmt.Println("Number of live instances: ", len(live))
break
case "activeCount":
fmt.Println("Getting instance count")
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Println("Number of live instances: ", len(live))
break
case "flexUp":
fmt.Println("Flexing up job")
numOfInstances := int32(4)
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
numOfInstances := int32(5)
resp, err := r.AddInstances(aurora.InstanceKey{job.JobKey(), 0}, numOfInstances)
if err != nil {
log.Fatal(err)
}
currInstances := int32(len(live))
fmt.Println("Current num of instances: ", currInstances)
resp, err := r.AddInstances(aurora.InstanceKey{
JobKey: job.JobKey(),
InstanceId: live[0],
},
numOfInstances)
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
if ok, err := monitor.Instances(job.JobKey(), currInstances+numOfInstances, 5, 50); !ok || err != nil {
fmt.Println("Flexing up failed")
if resp.ResponseCode == aurora.ResponseCode_OK {
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount()+numOfInstances, 5, 50); !ok || err != nil {
fmt.Println("Flexing up failed")
}
}
fmt.Println(resp.String())
case "flexDown":
fmt.Println("Flexing down job")
numOfInstances := int32(2)
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
log.Fatal(err)
}
currInstances := int32(len(live))
fmt.Println("Current num of instances: ", currInstances)
resp, err := r.RemoveInstances(job.JobKey(), numOfInstances)
if err != nil {
log.Fatal(err)
}
if ok, err := monitor.Instances(job.JobKey(), currInstances-numOfInstances, 5, 100); !ok || err != nil {
fmt.Println("flexDown failed")
}
fmt.Println(resp.String())
break
case "update":
fmt.Println("Updating a job with with more RAM and to 5 instances")
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
fmt.Println("Updating a job with with more RAM and to 3 instances")
taskConfig, err := r.FetchTaskConfig(aurora.InstanceKey{job.JobKey(), 0})
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
taskConfig, err := r.FetchTaskConfig(aurora.InstanceKey{
JobKey: job.JobKey(),
InstanceId: live[0],
})
if err != nil {
log.Fatal(err)
}
updateJob := realis.NewDefaultUpdateJob(taskConfig)
updateJob := realis.NewUpdateJob(taskConfig)
updateJob.InstanceCount(5).RAM(128)
resp, err := r.StartJobUpdate(updateJob, "")
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
jobUpdateKey := response.JobUpdateKey(resp)
monitor.JobUpdate(*jobUpdateKey, 5, 500)
case "pauseJobUpdate":
resp, err := r.PauseJobUpdate(&aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
}, "")
if err != nil {
log.Fatal(err)
}
fmt.Println("PauseJobUpdate response: ", resp.String())
case "resumeJobUpdate":
resp, err := r.ResumeJobUpdate(&aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
}, "")
if err != nil {
log.Fatal(err)
}
fmt.Println("ResumeJobUpdate response: ", resp.String())
case "pulseJobUpdate":
resp, err := r.PulseJobUpdate(&aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
})
if err != nil {
log.Fatal(err)
}
fmt.Println("PulseJobUpdate response: ", resp.String())
monitor.JobUpdate(*jobUpdateKey, 5, 100)
break
case "updateDetails":
resp, err := r.JobUpdateDetails(aurora.JobUpdateQuery{
Key: &aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
},
Limit: 1,
})
Key: &aurora.JobUpdateKey{job.JobKey(), *updateId}, Limit: 1})
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Println(response.JobUpdateDetails(resp))
break
case "abortUpdate":
fmt.Println("Abort update")
resp, err := r.AbortJobUpdate(aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
},
"")
resp, err := r.AbortJobUpdate(aurora.JobUpdateKey{job.JobKey(), *updateId}, "")
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Println(resp.String())
break
case "rollbackUpdate":
fmt.Println("Abort update")
resp, err := r.RollbackJobUpdate(aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
},
"")
resp, err := r.RollbackJobUpdate(aurora.JobUpdateKey{job.JobKey(), *updateId}, "")
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
fmt.Println(resp.String())
break
case "taskConfig":
fmt.Println("Getting job info")
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
config, err := r.FetchTaskConfig(aurora.InstanceKey{job.JobKey(), 0})
if err != nil {
log.Fatal(err)
fmt.Println(err)
os.Exit(1)
}
config, err := r.FetchTaskConfig(aurora.InstanceKey{
JobKey: job.JobKey(),
InstanceId: live[0],
})
if err != nil {
log.Fatal(err)
}
log.Println(config.String())
case "updatesummary":
fmt.Println("Getting job update summary")
jobquery := &aurora.JobUpdateQuery{
Role: &job.JobKey().Role,
JobKey: job.JobKey(),
}
updatesummary, err := r.GetJobUpdateSummaries(jobquery)
if err != nil {
log.Fatalf("error while getting update summary: %v", err)
}
fmt.Println(updatesummary)
case "taskStatus":
fmt.Println("Getting task status")
taskQ := &aurora.TaskQuery{
Role: &job.JobKey().Role,
Environment: &job.JobKey().Environment,
JobName: &job.JobKey().Name,
}
tasks, err := r.GetTaskStatus(taskQ)
if err != nil {
log.Fatalf("error: %+v\n ", err)
}
fmt.Printf("length: %d\n ", len(tasks))
fmt.Printf("tasks: %+v\n", tasks)
case "tasksWithoutConfig":
fmt.Println("Getting task status")
taskQ := &aurora.TaskQuery{
Role: &job.JobKey().Role,
Environment: &job.JobKey().Environment,
JobName: &job.JobKey().Name,
}
tasks, err := r.GetTasksWithoutConfigs(taskQ)
if err != nil {
log.Fatalf("error: %+v\n ", err)
}
fmt.Printf("length: %d\n ", len(tasks))
fmt.Printf("tasks: %+v\n", tasks)
case "drainHosts":
fmt.Println("Setting hosts to DRAINING")
if hostList == "" {
log.Fatal("No hosts specified to drain")
}
hosts := strings.Split(hostList, ",")
_, result, err := r.DrainHosts(hosts...)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
// Monitor change to DRAINING and DRAINED mode
hostResult, err := monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
5,
10)
if err != nil {
for host, ok := range hostResult {
if !ok {
fmt.Printf("Host %s did not transtion into desired mode(s)\n", host)
}
}
log.Fatalf("error: %+v\n", err.Error())
}
fmt.Print(result.String())
case "SLADrainHosts":
fmt.Println("Setting hosts to DRAINING using SLA aware draining")
if hostList == "" {
log.Fatal("No hosts specified to drain")
}
hosts := strings.Split(hostList, ",")
policy := aurora.SlaPolicy{PercentageSlaPolicy: &aurora.PercentageSlaPolicy{Percentage: 50.0}}
result, err := r.SLADrainHosts(&policy, 30, hosts...)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
// Monitor change to DRAINING and DRAINED mode
hostResult, err := monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
5,
10)
if err != nil {
for host, ok := range hostResult {
if !ok {
fmt.Printf("Host %s did not transtion into desired mode(s)\n", host)
}
}
log.Fatalf("error: %+v\n", err.Error())
}
fmt.Print(result.String())
case "endMaintenance":
fmt.Println("Setting hosts to ACTIVE")
if hostList == "" {
log.Fatal("No hosts specified to drain")
}
hosts := strings.Split(hostList, ",")
_, result, err := r.EndMaintenance(hosts...)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
// Monitor change to DRAINING and DRAINED mode
hostResult, err := monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
5,
10)
if err != nil {
for host, ok := range hostResult {
if !ok {
fmt.Printf("Host %s did not transtion into desired mode(s)\n", host)
}
}
log.Fatalf("error: %+v\n", err.Error())
}
fmt.Print(result.String())
case "getPendingReasons":
fmt.Println("Getting pending reasons")
taskQ := &aurora.TaskQuery{
Role: &job.JobKey().Role,
Environment: &job.JobKey().Environment,
JobName: &job.JobKey().Name,
}
reasons, err := r.GetPendingReason(taskQ)
if err != nil {
log.Fatalf("error: %+v\n ", err)
}
fmt.Printf("length: %d\n ", len(reasons))
fmt.Printf("tasks: %+v\n", reasons)
case "getJobs":
fmt.Println("GetJobs...role: ", role)
_, result, err := r.GetJobs(role)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
fmt.Println("map size: ", len(result.Configs))
fmt.Println(result.String())
case "snapshot":
fmt.Println("Forcing scheduler to write snapshot to mesos replicated log")
err := r.Snapshot()
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
case "performBackup":
fmt.Println("Writing Backup of Snapshot to file system")
err := r.PerformBackup()
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
case "forceExplicitRecon":
fmt.Println("Force an explicit recon")
err := r.ForceExplicitTaskReconciliation(nil)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
case "forceImplicitRecon":
fmt.Println("Force an implicit recon")
err := r.ForceImplicitTaskReconciliation()
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
print(config.String())
break
default:
log.Fatal("Command not supported")
fmt.Println("Command not supported")
os.Exit(1)
}
}
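
Aside: the create-and-monitor pattern above distills to a few calls. A minimal sketch built only from options and calls shown in this example; the scheduler URL and credentials are placeholders:

package main

import (
	"log"

	realis "github.com/paypal/gorealis"
)

// createAndWatch creates a job, waits for its instances, and kills it on timeout.
func createAndWatch(job realis.Job) {
	r, err := realis.NewRealisClient(
		realis.SchedulerUrl("http://192.168.33.7:8081"), // placeholder URL
		realis.BasicAuth("aurora", "secret"),            // placeholder credentials
		realis.ThriftJSON(),
		realis.TimeoutMS(20000),
	)
	if err != nil {
		log.Fatalln(err)
	}
	defer r.Close()

	monitor := &realis.Monitor{Client: r}
	if _, err := r.CreateJob(job); err != nil {
		log.Fatalln(err)
	}
	// Poll every 5 seconds, for up to 50 seconds, for the expected instance count to come up.
	if ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50); !ok || mErr != nil {
		if _, err := r.KillJob(job.JobKey()); err != nil {
			log.Fatalln(err)
		}
		log.Fatalf("ok: %v\n err: %v", ok, mErr)
	}
}
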

View file

@ -5,4 +5,4 @@
"auth_mechanism": "UNAUTHENTICATED",
"slave_run_directory": "latest",
"slave_root": "/var/lib/mesos"
}]
}]

View file

@ -1,13 +0,0 @@
{
"username": "aurora",
"password": "secret",
"sched_url": "http://192.168.33.7:8081",
"cluster" : {
"name": "devcluster",
"zk": "192.168.33.7",
"scheduler_zk_path": "/aurora/scheduler",
"auth_mechanism": "UNAUTHENTICATED",
"slave_run_directory": "latest",
"slave_root": "/var/lib/mesos"
}
}

View file

@ -1,21 +0,0 @@
{
"name": "sampleapp",
"cpu": 0.25,
"ram_mb": 256,
"disk_mb": 100,
"executor": "docker-compose-executor",
"service": true,
"ports": 4,
"instances": 1,
"uris": [
{
"uri": "http://192.168.33.8/app.tar.gz",
"extract": true,
"cache": false
}
],
"labels":{
"fileName":"sampleapp/docker-compose.yml,sampleapp/docker-compose-healthcheck.yml"
}
}

View file

@ -1,11 +0,0 @@
{
"name": "hello_world_from_gorealis",
"cpu": 1.0,
"ram_mb": 64,
"disk_mb": 100,
"executor": "thermos",
"exec_data_file": "examples/thermos_payload.json",
"service": true,
"ports": 1,
"instances": 1
}

View file

@ -18,14 +18,8 @@ import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"github.com/rdelval/gorealis"
"os"
"time"
realis "github.com/paypal/gorealis"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/pkg/errors"
)
type URIJson struct {
@ -35,17 +29,16 @@ type URIJson struct {
}
type JobJson struct {
Name string `json:"name"`
CPU float64 `json:"cpu"`
RAM int64 `json:"ram_mb"`
Disk int64 `json:"disk_mb"`
Executor string `json:"executor"`
ExecutorDataFile string `json:"exec_data_file,omitempty"`
Instances int32 `json:"instances"`
URIs []URIJson `json:"uris"`
Labels map[string]string `json:"labels"`
Service bool `json:"service"`
Ports int `json:"ports"`
Name string `json:"name"`
CPU float64 `json:"cpu"`
RAM int64 `json:"ram_mb"`
Disk int64 `json:"disk_mb"`
Executor string `json:"executor"`
Instances int32 `json:"instances"`
URIs []URIJson `json:"uris"`
Labels map[string]string `json:"labels"`
Service bool `json:"service"`
Ports int `json:"ports"`
}
func (j *JobJson) Validate() bool {
@ -69,158 +62,67 @@ func (j *JobJson) Validate() bool {
return true
}
type Config struct {
realis.Cluster `json:"cluster"`
Username string `json:"username"`
Password string `json:"password"`
SchedUrl string `json:"sched_url"`
Transport string `json:"transport,omitempty"`
Debug bool `json:"debug,omitempty"`
}
// Command-line arguments for config and job JSON files.
var configJSONFile, jobJSONFile string
var job *JobJson
var config *Config
// Read and validate command line arguments.
// If the Aurora scheduler URL is not provided, use zookeeper to locate the leader.
func init() {
flag.StringVar(&configJSONFile, "config", "./config.json", "The config file that contains username, password, and the cluster configuration information.")
flag.StringVar(&jobJSONFile, "job", "./job.json", "JSON file containing job definitions.")
func main() {
jsonFile := flag.String("file", "", "JSON file containing job definition")
flag.Parse()
job = new(JobJson)
config = new(Config)
if jobsFile, jobJSONReadErr := os.Open(jobJSONFile); jobJSONReadErr != nil {
if *jsonFile == "" {
flag.Usage()
fmt.Println("Error reading the job JSON file: ", jobJSONReadErr)
os.Exit(1)
} else {
if unmarshallErr := json.NewDecoder(jobsFile).Decode(job); unmarshallErr != nil {
flag.Usage()
fmt.Println("Error parsing job json file: ", unmarshallErr)
os.Exit(1)
}
// Need to validate the job JSON file.
if !job.Validate() {
fmt.Println("Invalid Job.")
os.Exit(1)
}
}
if configFile, configJSONErr := os.Open(configJSONFile); configJSONErr != nil {
flag.Usage()
fmt.Println("Error reading the config JSON file: ", configJSONErr)
file, err := os.Open(*jsonFile)
if err != nil {
fmt.Println("Error opening file ", err)
os.Exit(1)
} else {
if unmarshallErr := json.NewDecoder(configFile).Decode(config); unmarshallErr != nil {
fmt.Println("Error parsing config JSON file: ", unmarshallErr)
os.Exit(1)
}
}
}
func CreateRealisClient(config *Config) (realis.Realis, error) {
var transportOption realis.ClientOption
// Configure the transport protocol. If no transport is provided, JSON is used as the
// default transport protocol.
switch config.Transport {
case "binary":
transportOption = realis.ThriftBinary()
case "json", "":
transportOption = realis.ThriftJSON()
default:
fmt.Println("Invalid transport option provided!")
os.Exit(1)
}
clientOptions := []realis.ClientOption{
realis.BasicAuth(config.Username, config.Password),
transportOption,
realis.ZKCluster(&config.Cluster),
// realis.SchedulerUrl(config.SchedUrl),
realis.SetLogger(log.New(os.Stdout, "realis-debug: ", log.Ldate)),
realis.BackOff(realis.Backoff{
Steps: 2,
Duration: 10 * time.Second,
Factor: 2.0,
Jitter: 0.1,
}),
}
if config.Debug {
clientOptions = append(clientOptions, realis.Debug())
}
return realis.NewRealisClient(clientOptions...)
}
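
A usage sketch for CreateRealisClient, assuming it lives alongside main above and that cfg.Cluster has been decoded from config.json (CreateRealisClient hands it to realis.ZKCluster):

cfg := &Config{Username: "aurora", Password: "secret", Transport: "json", Debug: true}
// cfg.Cluster must be populated (e.g. decoded from config.json) before this call.
r, err := CreateRealisClient(cfg)
if err != nil {
	fmt.Println(err)
	os.Exit(1)
}
defer r.Close()
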
func main() {
if r, clientCreationErr := CreateRealisClient(config); clientCreationErr != nil {
fmt.Println(clientCreationErr)
os.Exit(1)
} else {
monitor := &realis.Monitor{Client: r}
defer r.Close()
uris := job.URIs
labels := job.Labels
auroraJob := realis.NewJob().
Environment("prod").
Role("vagrant").
Name(job.Name).
CPU(job.CPU).
RAM(job.RAM).
Disk(job.Disk).
IsService(job.Service).
InstanceCount(job.Instances).
AddPorts(job.Ports)
// If thermos executor, then reading in the thermos payload.
if (job.Executor == aurora.AURORA_EXECUTOR_NAME) || (job.Executor == "thermos") {
payload, err := ioutil.ReadFile(job.ExecutorDataFile)
if err != nil {
fmt.Println(errors.Wrap(err, "Invalid thermos payload file!"))
os.Exit(1)
}
auroraJob.ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(payload))
} else {
auroraJob.ExecutorName(job.Executor)
}
// Adding URIs.
for _, uri := range uris {
auroraJob.AddURIs(uri.Extract, uri.Cache, uri.URI)
}
// Adding Labels.
for key, value := range labels {
auroraJob.AddLabel(key, value)
}
fmt.Println("Creating Job...")
if resp, jobCreationErr := r.CreateJob(auroraJob); jobCreationErr != nil {
fmt.Println("Error creating Aurora job: ", jobCreationErr)
os.Exit(1)
} else {
if resp.ResponseCode == aurora.ResponseCode_OK {
if ok, monitorErr := monitor.Instances(auroraJob.JobKey(), auroraJob.GetInstanceCount(), 5, 50); !ok || monitorErr != nil {
if _, jobErr := r.KillJob(auroraJob.JobKey()); jobErr !=
nil {
fmt.Println(jobErr)
os.Exit(1)
} else {
fmt.Println("ok: ", ok)
fmt.Println("jobErr: ", jobErr)
}
}
}
}
}
jsonJob := new(JobJson)
err = json.NewDecoder(file).Decode(jsonJob)
if err != nil {
fmt.Println("Error parsing file ", err)
os.Exit(1)
}
jsonJob.Validate()
//Create new configuration with default transport layer
config, err := realis.NewDefaultConfig("http://192.168.33.7:8081")
if err != nil {
fmt.Print(err)
os.Exit(1)
}
realis.AddBasicAuth(&config, "aurora", "secret")
r := realis.NewClient(config)
auroraJob := realis.NewJob().
Environment("prod").
Role("vagrant").
Name(jsonJob.Name).
CPU(jsonJob.CPU).
RAM(jsonJob.RAM).
Disk(jsonJob.Disk).
ExecutorName(jsonJob.Executor).
InstanceCount(jsonJob.Instances).
IsService(jsonJob.Service).
AddPorts(jsonJob.Ports)
for _, uri := range jsonJob.URIs {
auroraJob.AddURIs(uri.Extract, uri.Cache, uri.URI)
}
for k, v := range jsonJob.Labels {
auroraJob.AddLabel(k, v)
}
resp, err := r.CreateJob(auroraJob)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Println(resp)
}

View file

@ -1,6 +0,0 @@
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package aurora
var GoUnusedProtection__ int;

View file

@ -1,53 +0,0 @@
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
package aurora
import(
"bytes"
"context"
"fmt"
"time"
"github.com/apache/thrift/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = time.Now
var _ = bytes.Equal
const AURORA_EXECUTOR_NAME = "AuroraExecutor"
var ACTIVE_STATES []ScheduleStatus
var SLAVE_ASSIGNED_STATES []ScheduleStatus
var LIVE_STATES []ScheduleStatus
var TERMINAL_STATES []ScheduleStatus
const GOOD_IDENTIFIER_PATTERN = "^[\\w\\-\\.]+$"
const GOOD_IDENTIFIER_PATTERN_JVM = "^[\\w\\-\\.]+$"
const GOOD_IDENTIFIER_PATTERN_PYTHON = "^[\\w\\-\\.]+$"
var ACTIVE_JOB_UPDATE_STATES []JobUpdateStatus
var AWAITNG_PULSE_JOB_UPDATE_STATES []JobUpdateStatus
const BYPASS_LEADER_REDIRECT_HEADER_NAME = "Bypass-Leader-Redirect"
const TASK_FILESYSTEM_MOUNT_POINT = "taskfs"
func init() {
ACTIVE_STATES = []ScheduleStatus{
9, 17, 6, 0, 13, 12, 2, 1, 18, 16, }
SLAVE_ASSIGNED_STATES = []ScheduleStatus{
9, 17, 6, 13, 12, 2, 18, 1, }
LIVE_STATES = []ScheduleStatus{
6, 13, 12, 17, 18, 2, }
TERMINAL_STATES = []ScheduleStatus{
4, 3, 5, 7, }
ACTIVE_JOB_UPDATE_STATES = []JobUpdateStatus{
0, 1, 2, 3, 9, 10, }
AWAITNG_PULSE_JOB_UPDATE_STATES = []JobUpdateStatus{
9, 10, }
}
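
Aside: this 0.14.0 generation exposes the state sets as slices, so a membership test is a linear scan. A sketch within the same generated package (the helper name isActive is illustrative):

func isActive(status ScheduleStatus) bool {
	for _, s := range ACTIVE_STATES {
		if status == s {
			return true
		}
	}
	return false
}
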

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,80 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package aurora
import (
"bytes"
"fmt"
"git.apache.org/thrift.git/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
const AURORA_EXECUTOR_NAME = "AuroraExecutor"
var ACTIVE_STATES map[ScheduleStatus]bool
var SLAVE_ASSIGNED_STATES map[ScheduleStatus]bool
var LIVE_STATES map[ScheduleStatus]bool
var TERMINAL_STATES map[ScheduleStatus]bool
const GOOD_IDENTIFIER_PATTERN = "^[\\w\\-\\.]+$"
const GOOD_IDENTIFIER_PATTERN_JVM = "^[\\w\\-\\.]+$"
const GOOD_IDENTIFIER_PATTERN_PYTHON = "^[\\w\\-\\.]+$"
var ACTIVE_JOB_UPDATE_STATES map[JobUpdateStatus]bool
const BYPASS_LEADER_REDIRECT_HEADER_NAME = "Bypass-Leader-Redirect"
const TASK_FILESYSTEM_MOUNT_POINT = "taskfs"
func init() {
ACTIVE_STATES = map[ScheduleStatus]bool{
9: true,
17: true,
6: true,
0: true,
13: true,
12: true,
2: true,
1: true,
16: true,
}
SLAVE_ASSIGNED_STATES = map[ScheduleStatus]bool{
9: true,
17: true,
6: true,
13: true,
12: true,
2: true,
1: true,
}
LIVE_STATES = map[ScheduleStatus]bool{
6: true,
13: true,
12: true,
17: true,
2: true,
}
TERMINAL_STATES = map[ScheduleStatus]bool{
4: true,
3: true,
5: true,
7: true,
}
ACTIVE_JOB_UPDATE_STATES = map[JobUpdateStatus]bool{
0: true,
1: true,
2: true,
3: true,
9: true,
10: true,
}
}
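
The 0.9.3 generation above stores the same sets as map[...]bool instead, so membership is a single lookup; a sketch in the same package:

func isLive(status ScheduleStatus) bool {
	return LIVE_STATES[status] // absent keys read as false
}
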

View file

@ -1,411 +1,382 @@
// Code generated by Thrift Compiler (0.14.0). DO NOT EDIT.
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package main
import (
"context"
"apache/aurora"
"flag"
"fmt"
"git.apache.org/thrift.git/lib/go/thrift"
"math"
"net"
"net/url"
"os"
"strconv"
"strings"
"github.com/apache/thrift/lib/go/thrift"
"apache/aurora"
)
var _ = aurora.GoUnusedProtection__
func Usage() {
fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, "\nFunctions:")
fmt.Fprintln(os.Stderr, " Response getRoleSummary()")
fmt.Fprintln(os.Stderr, " Response getJobSummary(string role)")
fmt.Fprintln(os.Stderr, " Response getTasksStatus(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getTasksWithoutConfigs(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getPendingReason(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getConfigSummary(JobKey job)")
fmt.Fprintln(os.Stderr, " Response getJobs(string ownerRole)")
fmt.Fprintln(os.Stderr, " Response getQuota(string ownerRole)")
fmt.Fprintln(os.Stderr, " Response populateJobConfig(JobConfiguration description)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateSummaries(JobUpdateQuery jobUpdateQuery)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateDetails(JobUpdateQuery query)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateDiff(JobUpdateRequest request)")
fmt.Fprintln(os.Stderr, " Response getTierConfigs()")
fmt.Fprintln(os.Stderr)
os.Exit(0)
}
type httpHeaders map[string]string
func (h httpHeaders) String() string {
var m map[string]string = h
return fmt.Sprintf("%s", m)
}
func (h httpHeaders) Set(value string) error {
parts := strings.Split(value, ": ")
if len(parts) != 2 {
return fmt.Errorf("header should be of format 'Key: Value'")
}
h[parts[0]] = parts[1]
return nil
fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, "\nFunctions:")
fmt.Fprintln(os.Stderr, " Response getRoleSummary()")
fmt.Fprintln(os.Stderr, " Response getJobSummary(string role)")
fmt.Fprintln(os.Stderr, " Response getTasksStatus(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getTasksWithoutConfigs(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getPendingReason(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getConfigSummary(JobKey job)")
fmt.Fprintln(os.Stderr, " Response getJobs(string ownerRole)")
fmt.Fprintln(os.Stderr, " Response getQuota(string ownerRole)")
fmt.Fprintln(os.Stderr, " Response populateJobConfig(JobConfiguration description)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateSummaries(JobUpdateQuery jobUpdateQuery)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateDetails(JobUpdateQuery query)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateDiff(JobUpdateRequest request)")
fmt.Fprintln(os.Stderr, " Response getTierConfigs()")
fmt.Fprintln(os.Stderr)
os.Exit(0)
}
func main() {
flag.Usage = Usage
var host string
var port int
var protocol string
var urlString string
var framed bool
var useHttp bool
headers := make(httpHeaders)
var parsedUrl *url.URL
var trans thrift.TTransport
_ = strconv.Atoi
_ = math.Abs
flag.Usage = Usage
flag.StringVar(&host, "h", "localhost", "Specify host and port")
flag.IntVar(&port, "p", 9090, "Specify port")
flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
flag.StringVar(&urlString, "u", "", "Specify the url")
flag.BoolVar(&framed, "framed", false, "Use framed transport")
flag.BoolVar(&useHttp, "http", false, "Use http")
flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")")
flag.Parse()
if len(urlString) > 0 {
var err error
parsedUrl, err = url.Parse(urlString)
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
host = parsedUrl.Host
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https"
} else if useHttp {
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
}
cmd := flag.Arg(0)
var err error
if useHttp {
trans, err = thrift.NewTHttpClient(parsedUrl.String())
if len(headers) > 0 {
httptrans := trans.(*thrift.THttpClient)
for key, value := range headers {
httptrans.SetHeader(key, value)
}
}
} else {
portStr := fmt.Sprint(port)
if strings.Contains(host, ":") {
host, portStr, err = net.SplitHostPort(host)
if err != nil {
fmt.Fprintln(os.Stderr, "error with host:", err)
os.Exit(1)
}
}
trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
if err != nil {
fmt.Fprintln(os.Stderr, "error resolving address:", err)
os.Exit(1)
}
if framed {
trans = thrift.NewTFramedTransport(trans)
}
}
if err != nil {
fmt.Fprintln(os.Stderr, "Error creating transport", err)
os.Exit(1)
}
defer trans.Close()
var protocolFactory thrift.TProtocolFactory
switch protocol {
case "compact":
protocolFactory = thrift.NewTCompactProtocolFactory()
break
case "simplejson":
protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
break
case "json":
protocolFactory = thrift.NewTJSONProtocolFactory()
break
case "binary", "":
protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
break
default:
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
Usage()
os.Exit(1)
}
iprot := protocolFactory.GetProtocol(trans)
oprot := protocolFactory.GetProtocol(trans)
client := aurora.NewReadOnlySchedulerClient(thrift.NewTStandardClient(iprot, oprot))
if err := trans.Open(); err != nil {
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
os.Exit(1)
}
switch cmd {
case "getRoleSummary":
if flag.NArg() - 1 != 0 {
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
flag.Usage()
}
fmt.Print(client.GetRoleSummary(context.Background()))
fmt.Print("\n")
break
case "getJobSummary":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobSummary requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobSummary(context.Background(), value0))
fmt.Print("\n")
break
case "getTasksStatus":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
flag.Usage()
}
arg132 := flag.Arg(1)
mbTrans133 := thrift.NewTMemoryBufferLen(len(arg132))
defer mbTrans133.Close()
_, err134 := mbTrans133.WriteString(arg132)
if err134 != nil {
Usage()
return
}
factory135 := thrift.NewTJSONProtocolFactory()
jsProt136 := factory135.GetProtocol(mbTrans133)
argvalue0 := aurora.NewTaskQuery()
err137 := argvalue0.Read(context.Background(), jsProt136)
if err137 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetTasksStatus(context.Background(), value0))
fmt.Print("\n")
break
case "getTasksWithoutConfigs":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
flag.Usage()
}
arg138 := flag.Arg(1)
mbTrans139 := thrift.NewTMemoryBufferLen(len(arg138))
defer mbTrans139.Close()
_, err140 := mbTrans139.WriteString(arg138)
if err140 != nil {
Usage()
return
}
factory141 := thrift.NewTJSONProtocolFactory()
jsProt142 := factory141.GetProtocol(mbTrans139)
argvalue0 := aurora.NewTaskQuery()
err143 := argvalue0.Read(context.Background(), jsProt142)
if err143 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetTasksWithoutConfigs(context.Background(), value0))
fmt.Print("\n")
break
case "getPendingReason":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
flag.Usage()
}
arg144 := flag.Arg(1)
mbTrans145 := thrift.NewTMemoryBufferLen(len(arg144))
defer mbTrans145.Close()
_, err146 := mbTrans145.WriteString(arg144)
if err146 != nil {
Usage()
return
}
factory147 := thrift.NewTJSONProtocolFactory()
jsProt148 := factory147.GetProtocol(mbTrans145)
argvalue0 := aurora.NewTaskQuery()
err149 := argvalue0.Read(context.Background(), jsProt148)
if err149 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetPendingReason(context.Background(), value0))
fmt.Print("\n")
break
case "getConfigSummary":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
flag.Usage()
}
arg150 := flag.Arg(1)
mbTrans151 := thrift.NewTMemoryBufferLen(len(arg150))
defer mbTrans151.Close()
_, err152 := mbTrans151.WriteString(arg150)
if err152 != nil {
Usage()
return
}
factory153 := thrift.NewTJSONProtocolFactory()
jsProt154 := factory153.GetProtocol(mbTrans151)
argvalue0 := aurora.NewJobKey()
err155 := argvalue0.Read(context.Background(), jsProt154)
if err155 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetConfigSummary(context.Background(), value0))
fmt.Print("\n")
break
case "getJobs":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobs requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobs(context.Background(), value0))
fmt.Print("\n")
break
case "getQuota":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "GetQuota requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetQuota(context.Background(), value0))
fmt.Print("\n")
break
case "populateJobConfig":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
flag.Usage()
}
arg158 := flag.Arg(1)
mbTrans159 := thrift.NewTMemoryBufferLen(len(arg158))
defer mbTrans159.Close()
_, err160 := mbTrans159.WriteString(arg158)
if err160 != nil {
Usage()
return
}
factory161 := thrift.NewTJSONProtocolFactory()
jsProt162 := factory161.GetProtocol(mbTrans159)
argvalue0 := aurora.NewJobConfiguration()
err163 := argvalue0.Read(context.Background(), jsProt162)
if err163 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.PopulateJobConfig(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateSummaries":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
flag.Usage()
}
arg164 := flag.Arg(1)
mbTrans165 := thrift.NewTMemoryBufferLen(len(arg164))
defer mbTrans165.Close()
_, err166 := mbTrans165.WriteString(arg164)
if err166 != nil {
Usage()
return
}
factory167 := thrift.NewTJSONProtocolFactory()
jsProt168 := factory167.GetProtocol(mbTrans165)
argvalue0 := aurora.NewJobUpdateQuery()
err169 := argvalue0.Read(context.Background(), jsProt168)
if err169 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateSummaries(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateDetails":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
flag.Usage()
}
arg170 := flag.Arg(1)
mbTrans171 := thrift.NewTMemoryBufferLen(len(arg170))
defer mbTrans171.Close()
_, err172 := mbTrans171.WriteString(arg170)
if err172 != nil {
Usage()
return
}
factory173 := thrift.NewTJSONProtocolFactory()
jsProt174 := factory173.GetProtocol(mbTrans171)
argvalue0 := aurora.NewJobUpdateQuery()
err175 := argvalue0.Read(context.Background(), jsProt174)
if err175 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDetails(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateDiff":
if flag.NArg() - 1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
flag.Usage()
}
arg176 := flag.Arg(1)
mbTrans177 := thrift.NewTMemoryBufferLen(len(arg176))
defer mbTrans177.Close()
_, err178 := mbTrans177.WriteString(arg176)
if err178 != nil {
Usage()
return
}
factory179 := thrift.NewTJSONProtocolFactory()
jsProt180 := factory179.GetProtocol(mbTrans177)
argvalue0 := aurora.NewJobUpdateRequest()
err181 := argvalue0.Read(context.Background(), jsProt180)
if err181 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDiff(context.Background(), value0))
fmt.Print("\n")
break
case "getTierConfigs":
if flag.NArg() - 1 != 0 {
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
flag.Usage()
}
fmt.Print(client.GetTierConfigs(context.Background()))
fmt.Print("\n")
break
case "":
Usage()
break
default:
fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
}
flag.Usage = Usage
var host string
var port int
var protocol string
var urlString string
var framed bool
var useHttp bool
var parsedUrl url.URL
var trans thrift.TTransport
_ = strconv.Atoi
_ = math.Abs
flag.Usage = Usage
flag.StringVar(&host, "h", "localhost", "Specify host and port")
flag.IntVar(&port, "p", 9090, "Specify port")
flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
flag.StringVar(&urlString, "u", "", "Specify the url")
flag.BoolVar(&framed, "framed", false, "Use framed transport")
flag.BoolVar(&useHttp, "http", false, "Use http")
flag.Parse()
if len(urlString) > 0 {
parsedUrl, err := url.Parse(urlString)
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
host = parsedUrl.Host
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http"
} else if useHttp {
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
}
cmd := flag.Arg(0)
var err error
if useHttp {
trans, err = thrift.NewTHttpClient(parsedUrl.String())
} else {
portStr := fmt.Sprint(port)
if strings.Contains(host, ":") {
host, portStr, err = net.SplitHostPort(host)
if err != nil {
fmt.Fprintln(os.Stderr, "error with host:", err)
os.Exit(1)
}
}
trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
if err != nil {
fmt.Fprintln(os.Stderr, "error resolving address:", err)
os.Exit(1)
}
if framed {
trans = thrift.NewTFramedTransport(trans)
}
}
if err != nil {
fmt.Fprintln(os.Stderr, "Error creating transport", err)
os.Exit(1)
}
defer trans.Close()
var protocolFactory thrift.TProtocolFactory
switch protocol {
case "compact":
protocolFactory = thrift.NewTCompactProtocolFactory()
break
case "simplejson":
protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
break
case "json":
protocolFactory = thrift.NewTJSONProtocolFactory()
break
case "binary", "":
protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
break
default:
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
Usage()
os.Exit(1)
}
client := aurora.NewReadOnlySchedulerClientFactory(trans, protocolFactory)
if err := trans.Open(); err != nil {
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
os.Exit(1)
}
switch cmd {
case "getRoleSummary":
if flag.NArg()-1 != 0 {
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
flag.Usage()
}
fmt.Print(client.GetRoleSummary())
fmt.Print("\n")
break
case "getJobSummary":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobSummary requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobSummary(value0))
fmt.Print("\n")
break
case "getTasksStatus":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
flag.Usage()
}
arg82 := flag.Arg(1)
mbTrans83 := thrift.NewTMemoryBufferLen(len(arg82))
defer mbTrans83.Close()
_, err84 := mbTrans83.WriteString(arg82)
if err84 != nil {
Usage()
return
}
factory85 := thrift.NewTSimpleJSONProtocolFactory()
jsProt86 := factory85.GetProtocol(mbTrans83)
argvalue0 := aurora.NewTaskQuery()
err87 := argvalue0.Read(jsProt86)
if err87 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetTasksStatus(value0))
fmt.Print("\n")
break
case "getTasksWithoutConfigs":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
flag.Usage()
}
arg88 := flag.Arg(1)
mbTrans89 := thrift.NewTMemoryBufferLen(len(arg88))
defer mbTrans89.Close()
_, err90 := mbTrans89.WriteString(arg88)
if err90 != nil {
Usage()
return
}
factory91 := thrift.NewTSimpleJSONProtocolFactory()
jsProt92 := factory91.GetProtocol(mbTrans89)
argvalue0 := aurora.NewTaskQuery()
err93 := argvalue0.Read(jsProt92)
if err93 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetTasksWithoutConfigs(value0))
fmt.Print("\n")
break
case "getPendingReason":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
flag.Usage()
}
arg94 := flag.Arg(1)
mbTrans95 := thrift.NewTMemoryBufferLen(len(arg94))
defer mbTrans95.Close()
_, err96 := mbTrans95.WriteString(arg94)
if err96 != nil {
Usage()
return
}
factory97 := thrift.NewTSimpleJSONProtocolFactory()
jsProt98 := factory97.GetProtocol(mbTrans95)
argvalue0 := aurora.NewTaskQuery()
err99 := argvalue0.Read(jsProt98)
if err99 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetPendingReason(value0))
fmt.Print("\n")
break
case "getConfigSummary":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
flag.Usage()
}
arg100 := flag.Arg(1)
mbTrans101 := thrift.NewTMemoryBufferLen(len(arg100))
defer mbTrans101.Close()
_, err102 := mbTrans101.WriteString(arg100)
if err102 != nil {
Usage()
return
}
factory103 := thrift.NewTSimpleJSONProtocolFactory()
jsProt104 := factory103.GetProtocol(mbTrans101)
argvalue0 := aurora.NewJobKey()
err105 := argvalue0.Read(jsProt104)
if err105 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetConfigSummary(value0))
fmt.Print("\n")
break
case "getJobs":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobs requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobs(value0))
fmt.Print("\n")
break
case "getQuota":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetQuota requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetQuota(value0))
fmt.Print("\n")
break
case "populateJobConfig":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
flag.Usage()
}
arg108 := flag.Arg(1)
mbTrans109 := thrift.NewTMemoryBufferLen(len(arg108))
defer mbTrans109.Close()
_, err110 := mbTrans109.WriteString(arg108)
if err110 != nil {
Usage()
return
}
factory111 := thrift.NewTSimpleJSONProtocolFactory()
jsProt112 := factory111.GetProtocol(mbTrans109)
argvalue0 := aurora.NewJobConfiguration()
err113 := argvalue0.Read(jsProt112)
if err113 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.PopulateJobConfig(value0))
fmt.Print("\n")
break
case "getJobUpdateSummaries":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
flag.Usage()
}
arg114 := flag.Arg(1)
mbTrans115 := thrift.NewTMemoryBufferLen(len(arg114))
defer mbTrans115.Close()
_, err116 := mbTrans115.WriteString(arg114)
if err116 != nil {
Usage()
return
}
factory117 := thrift.NewTSimpleJSONProtocolFactory()
jsProt118 := factory117.GetProtocol(mbTrans115)
argvalue0 := aurora.NewJobUpdateQuery()
err119 := argvalue0.Read(jsProt118)
if err119 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateSummaries(value0))
fmt.Print("\n")
break
case "getJobUpdateDetails":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
flag.Usage()
}
arg120 := flag.Arg(1)
mbTrans121 := thrift.NewTMemoryBufferLen(len(arg120))
defer mbTrans121.Close()
_, err122 := mbTrans121.WriteString(arg120)
if err122 != nil {
Usage()
return
}
factory123 := thrift.NewTSimpleJSONProtocolFactory()
jsProt124 := factory123.GetProtocol(mbTrans121)
argvalue0 := aurora.NewJobUpdateQuery()
err125 := argvalue0.Read(jsProt124)
if err125 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDetails(value0))
fmt.Print("\n")
break
case "getJobUpdateDiff":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
flag.Usage()
}
arg126 := flag.Arg(1)
mbTrans127 := thrift.NewTMemoryBufferLen(len(arg126))
defer mbTrans127.Close()
_, err128 := mbTrans127.WriteString(arg126)
if err128 != nil {
Usage()
return
}
factory129 := thrift.NewTSimpleJSONProtocolFactory()
jsProt130 := factory129.GetProtocol(mbTrans127)
argvalue0 := aurora.NewJobUpdateRequest()
err131 := argvalue0.Read(jsProt130)
if err131 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDiff(value0))
fmt.Print("\n")
break
case "getTierConfigs":
if flag.NArg()-1 != 0 {
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
flag.Usage()
}
fmt.Print(client.GetTierConfigs())
fmt.Print("\n")
break
case "":
Usage()
break
default:
fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
}
}
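
Aside: each struct-taking command above repeats one deserialization idiom: write the JSON argument into an in-memory thrift transport, then read it back through a JSON protocol. Distilled into a helper against the 0.14.0 API (which threads a context); the helper name readTaskQuery is illustrative:

func readTaskQuery(jsonArg string) (*aurora.TaskQuery, error) {
	mbTrans := thrift.NewTMemoryBufferLen(len(jsonArg))
	defer mbTrans.Close()
	if _, err := mbTrans.WriteString(jsonArg); err != nil {
		return nil, err
	}
	jsProt := thrift.NewTJSONProtocolFactory().GetProtocol(mbTrans)
	query := aurora.NewTaskQuery()
	if err := query.Read(context.Background(), jsProt); err != nil {
		return nil, err
	}
	return query, nil
}
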

File diff suppressed because it is too large

gen-go/apache/aurora/ttypes.go (Normal file, 15969 lines)

File diff suppressed because it is too large

View file

@ -1,6 +1,6 @@
#! /bin/bash
THRIFT_VER=0.14.0
THRIFT_VER=0.9.3
if [[ $(thrift -version | grep -e $THRIFT_VER -c) -ne 1 ]]; then
echo "Warning: This wrapper has only been tested with version" $THRIFT_VER;

go.mod (12 lines)
View file

@ -1,12 +0,0 @@
module github.com/paypal/gorealis
go 1.13
require (
github.com/apache/thrift v0.14.0
github.com/davecgh/go-spew v1.1.0 // indirect
github.com/pkg/errors v0.9.1
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a
github.com/stretchr/testify v1.7.0
)

go.sum (30 lines)
View file

@ -1,30 +0,0 @@
github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.14.0 h1:vqZ2DP42i8th2OsgCcYZkirtbzvpZEFx53LiWDJXIAs=
github.com/apache/thrift v0.14.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e h1:+RHxT/gm0O3UF7nLJbdNzAmULvCFt4XfXHWzh3XI/zs=
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/ridv/thrift v0.12.1 h1:b80V1Oa2Mbd++jrlJZbJsIybO5/MCfbXKzd1A5v4aSo=
github.com/ridv/thrift v0.12.1/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
github.com/ridv/thrift v0.13.1 h1:/8XnTRUqJJeiuqoL7mfnJQmXQa4GJn9tUCiP7+i6Y9o=
github.com/ridv/thrift v0.13.1/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
github.com/ridv/thrift v0.13.2 h1:Q3Smr8poXd7VkWZPHvdJZzlQCJO+b5W37ECfoUL4qHc=
github.com/ridv/thrift v0.13.2/go.mod h1:yTMRF94RCZjO1fY1xt69yncvMbQCPdRL8BhbwIrjPx8=
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a h1:EYL2xz/Zdo0hyqdZMXR4lmT2O11jDLTPCEqIe/FR6W4=
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.0 h1:LThGCOvhuJic9Gyd1VBCkhyUXmO8vKaBFvBsJ2k03rg=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View file

@ -1,21 +0,0 @@
package realis
import (
"context"
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
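// jobExists queries the scheduler for a config summary of the given key.
// Note that the returned boolean is true when the response is nil, lacks a
// config summary, or carries a non-OK response code.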
func (r *realisClient) jobExists(key aurora.JobKey) (bool, error) {
resp, err := r.client.GetConfigSummary(context.TODO(), &key)
if err != nil {
return false, err
}
return resp == nil ||
resp.GetResult_() == nil ||
resp.GetResult_().GetConfigSummaryResult_() == nil ||
resp.GetResult_().GetConfigSummaryResult_().GetSummary() == nil ||
resp.GetResponseCode() != aurora.ResponseCode_OK,
nil
}

job.go (285 lines)
View file

@ -15,25 +15,20 @@
package realis
import (
"github.com/rdelval/gorealis/gen-go/apache/aurora"
"strconv"
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
// Job interface is used to define a set of functions an Aurora Job object
// must implement.
// TODO(rdelvalle): Consider getting rid of the Job interface
type Job interface {
// Set Job Key environment.
Environment(env string) Job
Role(role string) Job
Name(name string) Job
CPU(cpus float64) Job
CronSchedule(cron string) Job
CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job
CPU(cpus float64) Job
Disk(disk int64) Job
RAM(ram int64) Job
GPU(gpu int64) Job
ExecutorName(name string) Job
ExecutorData(data string) Job
AddPorts(num int) Job
@ -41,15 +36,6 @@ type Job interface {
AddNamedPorts(names ...string) Job
AddLimitConstraint(name string, limit int32) Job
AddValueConstraint(name string, negated bool, values ...string) Job
// From Aurora Docs:
// dedicated attribute. Aurora treats this specially, and only allows matching jobs
// to run on these machines, and will only schedule matching jobs on these machines.
// When a job is created, the scheduler requires that the $role component matches
// the role field in the job configuration, and will reject the job creation otherwise.
// A wildcard (*) may be used for the role portion of the dedicated attribute, which
// will allow any owner to elect for a job to run on the host(s)
AddDedicatedConstraint(role, name string) Job
AddURIs(extract bool, cache bool, values ...string) Job
JobKey() *aurora.JobKey
JobConfig() *aurora.JobConfiguration
@ -59,93 +45,76 @@ type Job interface {
GetInstanceCount() int32
MaxFailure(maxFail int32) Job
Container(container Container) Job
PartitionPolicy(policy *aurora.PartitionPolicy) Job
Tier(tier string) Job
SlaPolicy(policy *aurora.SlaPolicy) Job
Priority(priority int32) Job
}
type resourceType int
const (
CPU resourceType = iota
RAM
DISK
GPU
)
const portNamePrefix = "org.apache.aurora.port."
// AuroraJob is a structure to collect all information pertaining to an Aurora job.
// Structure to collect all information pertaining to an Aurora job.
type AuroraJob struct {
jobConfig *aurora.JobConfiguration
resources map[resourceType]*aurora.Resource
metadata map[string]*aurora.Metadata
constraints map[string]*aurora.Constraint
portCount int
jobConfig *aurora.JobConfiguration
resources map[string]*aurora.Resource
portCount int
}
// NewJob is used to create a Job object with everything initialized.
// Create a Job object with everything initialized.
func NewJob() Job {
jobConfig := aurora.NewJobConfiguration()
taskConfig := aurora.NewTaskConfig()
jobKey := aurora.NewJobKey()
// Job Config
//Job Config
jobConfig.Key = jobKey
jobConfig.TaskConfig = taskConfig
// Task Config
//Task Config
taskConfig.Job = jobKey
taskConfig.Container = aurora.NewContainer()
taskConfig.Container.Mesos = aurora.NewMesosContainer()
taskConfig.MesosFetcherUris = make(map[*aurora.MesosFetcherURI]bool)
taskConfig.Metadata = make(map[*aurora.Metadata]bool)
taskConfig.Constraints = make(map[*aurora.Constraint]bool)
// Resources
//Resources
numCpus := aurora.NewResource()
ramMb := aurora.NewResource()
diskMb := aurora.NewResource()
resources := map[resourceType]*aurora.Resource{CPU: numCpus, RAM: ramMb, DISK: diskMb}
taskConfig.Resources = []*aurora.Resource{numCpus, ramMb, diskMb}
resources := make(map[string]*aurora.Resource)
resources["cpu"] = numCpus
resources["ram"] = ramMb
resources["disk"] = diskMb
numCpus.NumCpus = new(float64)
ramMb.RamMb = new(int64)
diskMb.DiskMb = new(int64)
taskConfig.Resources = make(map[*aurora.Resource]bool)
taskConfig.Resources[numCpus] = true
taskConfig.Resources[ramMb] = true
taskConfig.Resources[diskMb] = true
return &AuroraJob{
jobConfig: jobConfig,
resources: resources,
metadata: make(map[string]*aurora.Metadata),
constraints: make(map[string]*aurora.Constraint),
portCount: 0,
}
return AuroraJob{jobConfig, resources, 0}
}
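
A usage sketch of the builder above, with values borrowed from the examples earlier in this diff:

job := realis.NewJob().
	Environment("prod").
	Role("vagrant").
	Name("hello_world_from_gorealis").
	CPU(1.0).
	RAM(64).
	Disk(100).
	IsService(true).
	InstanceCount(1).
	AddPorts(1)
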
// Environment sets the Job Key environment.
func (j *AuroraJob) Environment(env string) Job {
// Set Job Key environment.
func (j AuroraJob) Environment(env string) Job {
j.jobConfig.Key.Environment = env
return j
}
// Role sets the Job Key role.
func (j *AuroraJob) Role(role string) Job {
// Set Job Key Role.
func (j AuroraJob) Role(role string) Job {
j.jobConfig.Key.Role = role
// Will be deprecated
identity := &aurora.Identity{User: role}
//Will be deprecated
identity := &aurora.Identity{role}
j.jobConfig.Owner = identity
j.jobConfig.TaskConfig.Owner = identity
return j
}
// Name sets the Job Key Name.
func (j *AuroraJob) Name(name string) Job {
// Set Job Key Name.
func (j AuroraJob) Name(name string) Job {
j.jobConfig.Key.Name = name
return j
}
// ExecutorName sets the name of the executor that the task will be configured to use.
func (j *AuroraJob) ExecutorName(name string) Job {
// Set the name of the executor that the task will be configured to use.
func (j AuroraJob) ExecutorName(name string) Job {
if j.jobConfig.TaskConfig.ExecutorConfig == nil {
j.jobConfig.TaskConfig.ExecutorConfig = aurora.NewExecutorConfig()
@ -155,8 +124,8 @@ func (j *AuroraJob) ExecutorName(name string) Job {
return j
}
// ExecutorData sets the data blob that will be passed to the Mesos executor.
func (j *AuroraJob) ExecutorData(data string) Job {
// Will be included as part of the entire task that the scheduler serializes.
func (j AuroraJob) ExecutorData(data string) Job {
if j.jobConfig.TaskConfig.ExecutorConfig == nil {
j.jobConfig.TaskConfig.ExecutorConfig = aurora.NewExecutorConfig()
@ -166,226 +135,148 @@ func (j *AuroraJob) ExecutorData(data string) Job {
return j
}
// CPU sets the amount of CPU each task will use in an Aurora Job.
func (j *AuroraJob) CPU(cpus float64) Job {
*j.resources[CPU].NumCpus = cpus
func (j AuroraJob) CPU(cpus float64) Job {
j.resources["cpu"].NumCpus = &cpus
j.jobConfig.TaskConfig.NumCpus = cpus //Will be deprecated soon
return j
}
// RAM sets the amount of RAM each task will use in an Aurora Job.
func (j *AuroraJob) RAM(ram int64) Job {
*j.resources[RAM].RamMb = ram
func (j AuroraJob) RAM(ram int64) Job {
j.resources["ram"].RamMb = &ram
j.jobConfig.TaskConfig.RamMb = ram //Will be deprecated soon
return j
}
// Disk sets the amount of Disk each task will use in an Aurora Job.
func (j *AuroraJob) Disk(disk int64) Job {
*j.resources[DISK].DiskMb = disk
func (j AuroraJob) Disk(disk int64) Job {
j.resources["disk"].DiskMb = &disk
j.jobConfig.TaskConfig.DiskMb = disk //Will be deprecated
return j
}
// GPU sets the amount of GPU each task will use in an Aurora Job.
func (j *AuroraJob) GPU(gpu int64) Job {
// GPU resource must be set explicitly since the scheduler by default
// rejects jobs with GPU resources attached to it.
if _, ok := j.resources[GPU]; !ok {
j.resources[GPU] = &aurora.Resource{}
j.JobConfig().GetTaskConfig().Resources = append(
j.JobConfig().GetTaskConfig().Resources,
j.resources[GPU])
}
j.resources[GPU].NumGpus = &gpu
return j
}
// MaxFailure sets how many failures to tolerate before giving up per Job.
func (j *AuroraJob) MaxFailure(maxFail int32) Job {
// How many failures to tolerate before giving up.
func (j AuroraJob) MaxFailure(maxFail int32) Job {
j.jobConfig.TaskConfig.MaxTaskFailures = maxFail
return j
}
// InstanceCount sets how many instances of the task to run for this Job.
func (j *AuroraJob) InstanceCount(instCount int32) Job {
// How many instances of the job to run
func (j AuroraJob) InstanceCount(instCount int32) Job {
j.jobConfig.InstanceCount = instCount
return j
}
// CronSchedule allows the user to configure a cron schedule for this job to run in.
func (j *AuroraJob) CronSchedule(cron string) Job {
func (j AuroraJob) CronSchedule(cron string) Job {
j.jobConfig.CronSchedule = &cron
return j
}
// CronCollisionPolicy allows the user to decide what happens if two or more instances
// of the same Cron job need to run.
func (j *AuroraJob) CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job {
func (j AuroraJob) CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job {
j.jobConfig.CronCollisionPolicy = policy
return j
}
// GetInstanceCount returns how many tasks this Job contains.
func (j *AuroraJob) GetInstanceCount() int32 {
// How many instances of the job to run
func (j AuroraJob) GetInstanceCount() int32 {
return j.jobConfig.InstanceCount
}
// IsService marks the job as a long-term running service (true) or an ad-hoc job (false).
func (j *AuroraJob) IsService(isService bool) Job {
// Restart the job's tasks if they fail
func (j AuroraJob) IsService(isService bool) Job {
j.jobConfig.TaskConfig.IsService = isService
return j
}
// JobKey returns the job's configuration key.
func (j *AuroraJob) JobKey() *aurora.JobKey {
// Get the current job configuration's key to use for some realis calls.
func (j AuroraJob) JobKey() *aurora.JobKey {
return j.jobConfig.Key
}
// JobConfig returns the job's configuration.
func (j *AuroraJob) JobConfig() *aurora.JobConfiguration {
// Get the current job configuration to use for some realis calls.
func (j AuroraJob) JobConfig() *aurora.JobConfiguration {
return j.jobConfig
}
// TaskConfig returns the job's task(shard) configuration.
func (j *AuroraJob) TaskConfig() *aurora.TaskConfig {
func (j AuroraJob) TaskConfig() *aurora.TaskConfig {
return j.jobConfig.TaskConfig
}
// AddURIs adds a list of URIs with the same extract and cache configuration. Scheduler must have
// Add a list of URIs with the same extract and cache configuration. Scheduler must have
// --enable_mesos_fetcher flag enabled. Currently there is no duplicate detection.
func (j *AuroraJob) AddURIs(extract bool, cache bool, values ...string) Job {
func (j AuroraJob) AddURIs(extract bool, cache bool, values ...string) Job {
for _, value := range values {
j.jobConfig.TaskConfig.MesosFetcherUris = append(j.jobConfig.TaskConfig.MesosFetcherUris,
&aurora.MesosFetcherURI{Value: value, Extract: &extract, Cache: &cache})
j.jobConfig.
TaskConfig.
MesosFetcherUris[&aurora.MesosFetcherURI{value, &extract, &cache}] = true
}
return j
}
// AddLabel adds a Mesos label to the job. Note that Aurora will add the
// Adds a Mesos label to the job. Note that Aurora will add the
// prefix "org.apache.aurora.metadata." to the beginning of each key.
func (j *AuroraJob) AddLabel(key string, value string) Job {
if _, ok := j.metadata[key]; !ok {
j.metadata[key] = &aurora.Metadata{Key: key}
j.jobConfig.TaskConfig.Metadata = append(j.jobConfig.TaskConfig.Metadata, j.metadata[key])
}
j.metadata[key].Value = value
func (j AuroraJob) AddLabel(key string, value string) Job {
j.jobConfig.TaskConfig.Metadata[&aurora.Metadata{key, value}] = true
return j
}
// AddNamedPorts adds named ports to the job configuration. These are random ports as it's
// Add a named port to the job configuration. These are random ports as it's
// not currently possible to request specific ports using Aurora.
func (j *AuroraJob) AddNamedPorts(names ...string) Job {
func (j AuroraJob) AddNamedPorts(names ...string) Job {
j.portCount += len(names)
for _, name := range names {
j.jobConfig.TaskConfig.Resources = append(
j.jobConfig.TaskConfig.Resources,
&aurora.Resource{NamedPort: &name})
j.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &name}] = true
}
return j
}
// AddPorts adds a request for a number of ports to the job configuration. The names chosen for these ports
// Adds a request for a number of ports to the job configuration. The names chosen for these ports
// will be org.apache.aurora.port.X, where X is the current port count for the job configuration
// starting at 0. These are random ports as it's not currently possible to request
// specific ports using Aurora.
func (j *AuroraJob) AddPorts(num int) Job {
func (j AuroraJob) AddPorts(num int) Job {
start := j.portCount
j.portCount += num
for i := start; i < j.portCount; i++ {
portName := portNamePrefix + strconv.Itoa(i)
j.jobConfig.TaskConfig.Resources = append(
j.jobConfig.TaskConfig.Resources,
&aurora.Resource{NamedPort: &portName})
portName := "org.apache.aurora.port." + strconv.Itoa(i)
j.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &portName}] = true
}
return j
}
// AddValueConstraint allows the user to add a value constraint to the job to limit which agents the job's
// tasks can be run on. If the name matches a constraint that was previously set, the previous value will be
// overwritten. In case the previous constraint attached to the name was of type limit, the constraint will be clobbered
// by this new Value constraint.
// From Aurora Docs:
// Add a Value constraint
// name - Mesos slave attribute that the constraint is matched against.
// If negated = true, treat this as a 'not', i.e. avoid specific values.
// Values - list of values we look for in attribute name
func (j *AuroraJob) AddValueConstraint(name string, negated bool, values ...string) Job {
if _, ok := j.constraints[name]; !ok {
j.constraints[name] = &aurora.Constraint{Name: name}
j.jobConfig.TaskConfig.Constraints = append(j.jobConfig.TaskConfig.Constraints, j.constraints[name])
}
j.constraints[name].Constraint = &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: negated,
Values: values,
},
Limit: nil,
func (j AuroraJob) AddValueConstraint(name string, negated bool, values ...string) Job {
constraintValues := make(map[string]bool)
for _, value := range values {
constraintValues[value] = true
}
j.jobConfig.TaskConfig.Constraints[&aurora.Constraint{name,
&aurora.TaskConstraint{&aurora.ValueConstraint{negated, constraintValues}, nil}}] = true
return j
}
// AddLimitConstraint allows the user to limit how many tasks from the same Job are run on a single host.
// If the name matches a constraint that was previously set, the previous value will be
// overwritten. In case the previous constraint attached to the name was of type Value, the constraint will be clobbered
// by this new Limit constraint.
// From Aurora Docs:
// A constraint that specifies the maximum number of active tasks on a host with
// a matching attribute that may be scheduled simultaneously.
func (j *AuroraJob) AddLimitConstraint(name string, limit int32) Job {
if _, ok := j.constraints[name]; !ok {
j.constraints[name] = &aurora.Constraint{Name: name}
j.jobConfig.TaskConfig.Constraints = append(j.jobConfig.TaskConfig.Constraints, j.constraints[name])
}
j.constraints[name].Constraint = &aurora.TaskConstraint{
Value: nil,
Limit: &aurora.LimitConstraint{Limit: limit},
}
func (j AuroraJob) AddLimitConstraint(name string, limit int32) Job {
j.jobConfig.TaskConfig.Constraints[&aurora.Constraint{name,
&aurora.TaskConstraint{nil, &aurora.LimitConstraint{limit}}}] = true
return j
}
// AddDedicatedConstraint is a convenience function that allows the user to
// add a dedicated constraint to a Job configuration.
// In case a previous dedicated constraint was set, it will be clobbered by this new value.
func (j *AuroraJob) AddDedicatedConstraint(role, name string) Job {
j.AddValueConstraint("dedicated", false, role+"/"+name)
return j
}
// Container sets a container for the job configuration to run.
func (j *AuroraJob) Container(container Container) Job {
// Set a container for the job configuration to run.
// TODO (rdelvalle): Add no thermos mode where container is launched as a task and not an executor.
func (j AuroraJob) Container(container Container) Job {
j.jobConfig.TaskConfig.Container = container.Build()
return j
}
// PartitionPolicy sets a partition policy for the job configuration to implement.
func (j *AuroraJob) PartitionPolicy(policy *aurora.PartitionPolicy) Job {
j.jobConfig.TaskConfig.PartitionPolicy = policy
return j
}
// Tier sets the Tier for the Job.
func (j *AuroraJob) Tier(tier string) Job {
j.jobConfig.TaskConfig.Tier = &tier
return j
}
// SlaPolicy sets an SlaPolicy for the Job.
func (j *AuroraJob) SlaPolicy(policy *aurora.SlaPolicy) Job {
j.jobConfig.TaskConfig.SlaPolicy = policy
return j
}
func (j *AuroraJob) Priority(priority int32) Job {
j.jobConfig.TaskConfig.Priority = priority
return j
}
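Taken together, these setters form a fluent builder. A minimal usage sketch (role, name, and resource values here are illustrative, not taken from this diff):

	job := realis.NewJob().
		Environment("prod").
		Role("vagrant").
		Name("hello_world").
		ExecutorName(aurora.AURORA_EXECUTOR_NAME).
		CPU(0.5).
		RAM(128).
		Disk(256).
		IsService(true).
		InstanceCount(2).
		AddPorts(1).
		AddLabel("team", "infra").    // stored as org.apache.aurora.metadata.team
		AddLimitConstraint("host", 1) // at most one task per host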

View file

@ -1,87 +0,0 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
type logger interface {
Println(v ...interface{})
Printf(format string, v ...interface{})
Print(v ...interface{})
}
// NoopLogger is a logger that can be attached to the client which will not print anything.
type NoopLogger struct{}
// Printf is a NOOP function here.
func (NoopLogger) Printf(format string, a ...interface{}) {}
// Print is a NOOP function here.
func (NoopLogger) Print(a ...interface{}) {}
// Println is a NOOP function here.
func (NoopLogger) Println(a ...interface{}) {}
// LevelLogger is a logger that can be configured to output different levels of information: Debug and Trace.
// Trace should only be enabled when very in depth information about the sequence of events a function took is needed.
type LevelLogger struct {
logger
debug bool
trace bool
}
// EnableDebug enables debug level logging for the LevelLogger
func (l *LevelLogger) EnableDebug(enable bool) {
l.debug = enable
}
// EnableTrace enables trace level logging for the LevelLogger
func (l *LevelLogger) EnableTrace(enable bool) {
l.trace = enable
}
func (l LevelLogger) debugPrintf(format string, a ...interface{}) {
if l.debug {
l.Printf("[DEBUG] "+format, a...)
}
}
func (l LevelLogger) debugPrint(a ...interface{}) {
if l.debug {
l.Print(append([]interface{}{"[DEBUG] "}, a...)...)
}
}
func (l LevelLogger) debugPrintln(a ...interface{}) {
if l.debug {
l.Println(append([]interface{}{"[DEBUG] "}, a...)...)
}
}
func (l LevelLogger) tracePrintf(format string, a ...interface{}) {
if l.trace {
l.Printf("[TRACE] "+format, a...)
}
}
func (l LevelLogger) tracePrint(a ...interface{}) {
if l.trace {
l.Print(append([]interface{}{"[TRACE] "}, a...)...)
}
}
func (l LevelLogger) tracePrintln(a ...interface{}) {
if l.trace {
l.Println(append([]interface{}{"[TRACE] "}, a...)...)
}
}
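Any type that provides Println, Printf, and Print satisfies the unexported logger interface, so inside the realis package a LevelLogger can be backed by the standard library's log.Logger. A small sketch, with prefix and sink chosen arbitrarily:

	l := LevelLogger{logger: log.New(os.Stderr, "realis: ", log.LstdFlags)}
	l.EnableDebug(true)
	l.debugPrintf("connected to %s\n", "http://192.168.33.7:8081") // printed: debug is on
	l.tracePrintln("verbose details")                              // dropped: trace is off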

View file

@ -12,285 +12,84 @@
* limitations under the License.
*/
// Collection of monitors to create synchronicity
package realis
import (
"time"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"fmt"
"github.com/pkg/errors"
"github.com/rdelval/gorealis/gen-go/apache/aurora"
"github.com/rdelval/gorealis/response"
"os"
"time"
)
// Monitor is a wrapper for the Realis client which allows us to have functions
// with the same name for Monitoring purposes.
// TODO(rdelvalle): Deprecate monitors and instead add prefix Monitor to
// all functions in this file like it is done in V2.
type Monitor struct {
Client Realis
}
// JobUpdate polls the scheduler at the given interval to see if the update has entered a terminal state.
func (m *Monitor) JobUpdate(
updateKey aurora.JobUpdateKey,
interval int,
timeout int) (bool, error) {
// Polls the scheduler at the given interval to see if the update has succeeded
func (m *Monitor) JobUpdate(updateKey aurora.JobUpdateKey, interval int, timeout int) bool {
updateQ := aurora.JobUpdateQuery{
Key: &updateKey,
Limit: 1,
UpdateStatuses: TerminalUpdateStates(),
}
updateSummaries, err := m.JobUpdateQuery(
updateQ,
time.Duration(interval)*time.Second,
time.Duration(timeout)*time.Second)
status := updateSummaries[0].State.Status
if err != nil {
return false, err
Key: &updateKey,
Limit: 1,
}
m.Client.RealisConfig().logger.Printf("job update status: %v\n", status)
// Rolled forward is the only state in which an update has been successfully updated
// if we encounter an inactive state and it is not at rolled forward, update failed
switch status {
case aurora.JobUpdateStatus_ROLLED_FORWARD:
return true, nil
case aurora.JobUpdateStatus_ROLLED_BACK,
aurora.JobUpdateStatus_ABORTED,
aurora.JobUpdateStatus_ERROR,
aurora.JobUpdateStatus_FAILED:
return false, errors.Errorf("bad terminal state for update: %v", status)
default:
return false, errors.Errorf("unexpected update state: %v", status)
}
}
// JobUpdateStatus polls the scheduler at the given interval to see if the update has entered a specified state.
func (m *Monitor) JobUpdateStatus(updateKey aurora.JobUpdateKey,
desiredStatuses []aurora.JobUpdateStatus,
interval, timeout time.Duration) (aurora.JobUpdateStatus, error) {
updateQ := aurora.JobUpdateQuery{
Key: &updateKey,
Limit: 1,
UpdateStatuses: desiredStatuses,
}
summary, err := m.JobUpdateQuery(updateQ, interval, timeout)
if err != nil {
return 0, err
}
return summary[0].State.Status, nil
}
// JobUpdateQuery polls the scheduler at the given interval to see if the query call returns any results.
func (m *Monitor) JobUpdateQuery(
updateQuery aurora.JobUpdateQuery,
interval time.Duration,
timeout time.Duration) ([]*aurora.JobUpdateSummary, error) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
timer := time.NewTimer(timeout)
defer timer.Stop()
var cliErr error
var respDetail *aurora.Response
for {
select {
case <-ticker.C:
respDetail, cliErr = m.Client.GetJobUpdateSummaries(&updateQuery)
if cliErr != nil {
return nil, cliErr
}
updateSummaries := respDetail.Result_.GetJobUpdateSummariesResult_.UpdateSummaries
if len(updateSummaries) >= 1 {
return updateSummaries, nil
}
case <-timer.C:
return nil, newTimedoutError(errors.New("job update monitor timed out"))
for i := 0; i*interval <= timeout; i++ {
respDetail, err := m.Client.JobUpdateDetails(updateQ)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
updateDetail := response.JobUpdateDetails(respDetail)
if len(updateDetail) == 0 {
fmt.Println("No update found")
return false
}
status := updateDetail[0].Update.Summary.State.Status
if _, ok := aurora.ACTIVE_JOB_UPDATE_STATES[status]; !ok {
// Rolled forward is the only state in which an update has been successfully updated
// if we encounter an inactive state and it is not at rolled forward, update failed
if status == aurora.JobUpdateStatus_ROLLED_FORWARD {
fmt.Println("Update succeded")
return true
} else {
fmt.Println("Update failed")
return false
}
}
fmt.Println("Polling, update still active...")
time.Sleep(time.Duration(interval) * time.Second)
}
fmt.Println("Timed out")
return false
}
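A sketch of how the newer monitor is typically driven after kicking off an update; error handling is trimmed, and the update variable plus the client r (whose StartJobUpdate lives in realis.go, suppressed in this diff) are assumed to exist:

	resp, err := r.StartJobUpdate(update, "")
	if err != nil {
		log.Fatal(err)
	}
	monitor := &realis.Monitor{Client: r}
	ok, err := monitor.JobUpdate(*response.JobUpdateKey(resp), 5, 240) // poll every 5s, give up after 240s
	if err != nil || !ok {
		log.Printf("update did not roll forward: %v", err)
	}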
// AutoPausedUpdateMonitor is a special monitor for auto pause enabled batch updates. This monitor ensures that the update
// being monitored is capable of auto pausing and has auto pausing enabled. After verifying this information,
// the monitor watches for the job to enter the ROLL_FORWARD_PAUSED state and calculates the current batch
// the update is in using information from the update configuration.
func (m *Monitor) AutoPausedUpdateMonitor(key aurora.JobUpdateKey, interval, timeout time.Duration) (int, error) {
key.Job = &aurora.JobKey{
Role: key.Job.Role,
Environment: key.Job.Environment,
Name: key.Job.Name,
}
query := aurora.JobUpdateQuery{
UpdateStatuses: aurora.ACTIVE_JOB_UPDATE_STATES,
Limit: 1,
Key: &key,
}
func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval int, timeout int) (bool, error) {
response, err := m.Client.JobUpdateDetails(query)
if err != nil {
return -1, errors.Wrap(err, "unable to get information about update")
}
for i := 0; i*interval < timeout; i++ {
// TODO (rdelvalle): check for possible nil values when going down the list of structs
updateDetails := response.Result_.GetJobUpdateDetailsResult_.DetailsList
if len(updateDetails) == 0 {
return -1, errors.Errorf("details for update could not be found")
}
live, err := m.Client.GetInstanceIds(key, aurora.LIVE_STATES)
updateStrategy := updateDetails[0].Update.Instructions.Settings.UpdateStrategy
var batchSizes []int32
switch {
case updateStrategy.IsSetVarBatchStrategy():
batchSizes = updateStrategy.VarBatchStrategy.GroupSizes
if !updateStrategy.VarBatchStrategy.AutopauseAfterBatch {
return -1, errors.Errorf("update does not have auto pause enabled")
if err != nil {
return false, errors.Wrap(err, "Unable to communicate with Aurora")
}
case updateStrategy.IsSetBatchStrategy():
batchSizes = []int32{updateStrategy.BatchStrategy.GroupSize}
if !updateStrategy.BatchStrategy.AutopauseAfterBatch {
return -1, errors.Errorf("update does not have auto pause enabled")
if len(live) == int(instances) {
return true, nil
}
default:
return -1, errors.Errorf("update is not using a batch update strategy")
fmt.Println("Polling, instances running: ", len(live))
time.Sleep(time.Duration(interval) * time.Second)
}
query.UpdateStatuses = append(TerminalUpdateStates(), aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED)
summary, err := m.JobUpdateQuery(query, interval, timeout)
if err != nil {
return -1, err
}
if !(summary[0].State.Status == aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED ||
summary[0].State.Status == aurora.JobUpdateStatus_ROLLED_FORWARD) {
return -1, errors.Errorf("update is in a terminal state %v", summary[0].State.Status)
}
updatingInstances := make(map[int32]struct{})
for _, e := range updateDetails[0].InstanceEvents {
// We only care about INSTANCE_UPDATING actions because we only care that they've been attempted
if e != nil && e.GetAction() == aurora.JobUpdateAction_INSTANCE_UPDATING {
updatingInstances[e.GetInstanceId()] = struct{}{}
}
}
return calculateCurrentBatch(int32(len(updatingInstances)), batchSizes), nil
}
// Instances will monitor a Job until all instances enter one of the LIVE_STATES
func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval, timeout int) (bool, error) {
return m.ScheduleStatus(key, instances, LiveStates, interval, timeout)
}
// ScheduleStatus will monitor a Job until all instances enter a desired status.
// Defaults sets of desired statuses provided by the thrift API include:
// ACTIVE_STATES, SLAVE_ASSIGNED_STATES, LIVE_STATES, and TERMINAL_STATES
func (m *Monitor) ScheduleStatus(
key *aurora.JobKey,
instanceCount int32,
desiredStatuses map[aurora.ScheduleStatus]bool,
interval int,
timeout int) (bool, error) {
ticker := time.NewTicker(time.Second * time.Duration(interval))
defer ticker.Stop()
timer := time.NewTimer(time.Second * time.Duration(timeout))
defer timer.Stop()
wantedStatuses := make([]aurora.ScheduleStatus, 0)
for status := range desiredStatuses {
wantedStatuses = append(wantedStatuses, status)
}
for {
select {
case <-ticker.C:
// Query Aurora for the state of the job key every interval
instCount, cliErr := m.Client.GetInstanceIds(key, wantedStatuses)
if cliErr != nil {
return false, errors.Wrap(cliErr, "Unable to communicate with Aurora")
}
if len(instCount) == int(instanceCount) {
return true, nil
}
case <-timer.C:
// If the timer runs out, return a timeout error to user
return false, newTimedoutError(errors.New("schedule status monitor timed out"))
}
}
}
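For example, waiting for a freshly created job to come up might look like the following sketch (instance count and polling values are illustrative):

	monitor := &realis.Monitor{Client: r}
	if ok, err := monitor.Instances(job.JobKey(), 2, 5, 50); err != nil || !ok {
		log.Printf("instances did not reach a LIVE state: %v", err)
	}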
// HostMaintenance will monitor host status until all hosts match the status provided.
// Returns a map where the value is true if the host
// is in one of the desired mode(s) or false if it is not as of the time when the monitor exited.
func (m *Monitor) HostMaintenance(
hosts []string,
modes []aurora.MaintenanceMode,
interval, timeout int) (map[string]bool, error) {
// Transform modes to monitor for into a set for easy lookup
desiredMode := make(map[aurora.MaintenanceMode]struct{})
for _, mode := range modes {
desiredMode[mode] = struct{}{}
}
// Turn slice into a host set to eliminate duplicates.
// We also can't use a simple count because multiple modes means
// we can have multiple matches for a single host.
// I.e. host A transitions from ACTIVE to DRAINING to DRAINED while monitored
remainingHosts := make(map[string]struct{})
for _, host := range hosts {
remainingHosts[host] = struct{}{}
}
hostResult := make(map[string]bool)
ticker := time.NewTicker(time.Second * time.Duration(interval))
defer ticker.Stop()
timer := time.NewTimer(time.Second * time.Duration(timeout))
defer timer.Stop()
for {
select {
case <-ticker.C:
// Client call has multiple retries internally
_, result, err := m.Client.MaintenanceStatus(hosts...)
if err != nil {
// Error is either a payload error or a severe connection error
for host := range remainingHosts {
hostResult[host] = false
}
return hostResult, errors.Wrap(err, "client error in monitor")
}
for _, status := range result.GetStatuses() {
if _, ok := desiredMode[status.GetMode()]; ok {
hostResult[status.GetHost()] = true
delete(remainingHosts, status.GetHost())
if len(remainingHosts) == 0 {
return hostResult, nil
}
}
}
case <-timer.C:
for host := range remainingHosts {
hostResult[host] = false
}
return hostResult, newTimedoutError(errors.New("host maintenance monitor timed out"))
}
}
fmt.Println("Timed out")
return false, nil
}

1240
realis.go

File diff suppressed because it is too large

View file

@ -1,309 +0,0 @@
package realis
import (
"context"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/pkg/errors"
)
// TODO(rdelvalle): Consider moving these functions to another interface. It would be a backwards incompatible change,
// but would add safety.
// Set a list of nodes to DRAINING. This means nothing will be able to be scheduled on them and any existing
// tasks will be killed and re-scheduled elsewhere in the cluster. Tasks from DRAINING nodes are not guaranteed
// to return to running unless there is enough capacity in the cluster to run them.
func (r *realisClient) DrainHosts(hosts ...string) (*aurora.Response, *aurora.DrainHostsResult_, error) {
var result *aurora.DrainHostsResult_
if len(hosts) == 0 {
return nil, nil, errors.New("no hosts provided to drain")
}
drainList := aurora.NewHosts()
drainList.HostNames = hosts
r.logger.debugPrintf("DrainHosts Thrift Payload: %v\n", drainList)
resp, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.adminClient.DrainHosts(context.TODO(), drainList)
},
nil,
)
if retryErr != nil {
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
}
if resp.GetResult_() != nil {
result = resp.GetResult_().GetDrainHostsResult_()
}
return resp, result, nil
}
// Start SLA Aware Drain.
// defaultSlaPolicy is the fallback SlaPolicy to use if a task does not have an SlaPolicy.
// After timeoutSecs, tasks will be forcefully drained without checking SLA.
func (r *realisClient) SLADrainHosts(
policy *aurora.SlaPolicy,
timeout int64,
hosts ...string) (*aurora.DrainHostsResult_, error) {
var result *aurora.DrainHostsResult_
if len(hosts) == 0 {
return nil, errors.New("no hosts provided to drain")
}
if policy == nil || policy.CountSetFieldsSlaPolicy() == 0 {
policy = &defaultSlaPolicy
r.logger.Printf("Warning: start draining with default sla policy %v", policy)
}
if timeout < 0 {
r.logger.Printf("Warning: timeout %d secs is invalid, draining with default timeout %d secs",
timeout,
defaultSlaDrainTimeoutSecs)
timeout = defaultSlaDrainTimeoutSecs
}
drainList := aurora.NewHosts()
drainList.HostNames = hosts
r.logger.debugPrintf("SLADrainHosts Thrift Payload: %v\n", drainList)
resp, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.adminClient.SlaDrainHosts(context.TODO(), drainList, policy, timeout)
},
nil,
)
if retryErr != nil {
return result, errors.Wrap(retryErr, "Unable to recover connection")
}
if resp.GetResult_() != nil {
result = resp.GetResult_().GetDrainHostsResult_()
}
return result, nil
}
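A sketch pairing the drain call with the HostMaintenance monitor above; the host name is illustrative, and passing an empty SlaPolicy simply falls back to the client's default policy as the code shows:

	hosts := []string{"agent-1.example.com"}
	if _, err := client.SLADrainHosts(&aurora.SlaPolicy{}, 30*60, hosts...); err != nil {
		log.Fatal(err)
	}
	// Block until the agents report DRAINED, polling every 5s for up to 10 minutes.
	statuses, err := monitor.HostMaintenance(
		hosts, []aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED}, 5, 600)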
func (r *realisClient) StartMaintenance(hosts ...string) (*aurora.Response, *aurora.StartMaintenanceResult_, error) {
var result *aurora.StartMaintenanceResult_
if len(hosts) == 0 {
return nil, nil, errors.New("no hosts provided to start maintenance on")
}
hostList := aurora.NewHosts()
hostList.HostNames = hosts
r.logger.debugPrintf("StartMaintenance Thrift Payload: %v\n", hostList)
resp, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.adminClient.StartMaintenance(context.TODO(), hostList)
},
nil,
)
if retryErr != nil {
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
}
if resp.GetResult_() != nil {
result = resp.GetResult_().GetStartMaintenanceResult_()
}
return resp, result, nil
}
func (r *realisClient) EndMaintenance(hosts ...string) (*aurora.Response, *aurora.EndMaintenanceResult_, error) {
var result *aurora.EndMaintenanceResult_
if len(hosts) == 0 {
return nil, nil, errors.New("no hosts provided to end maintenance on")
}
hostList := aurora.NewHosts()
hostList.HostNames = hosts
r.logger.debugPrintf("EndMaintenance Thrift Payload: %v\n", hostList)
resp, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.adminClient.EndMaintenance(context.TODO(), hostList)
},
nil,
)
if retryErr != nil {
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
}
if resp.GetResult_() != nil {
result = resp.GetResult_().GetEndMaintenanceResult_()
}
return resp, result, nil
}
func (r *realisClient) MaintenanceStatus(hosts ...string) (*aurora.Response, *aurora.MaintenanceStatusResult_, error) {
var result *aurora.MaintenanceStatusResult_
if len(hosts) == 0 {
return nil, nil, errors.New("no hosts provided to get maintenance status from")
}
hostList := aurora.NewHosts()
hostList.HostNames = hosts
r.logger.debugPrintf("MaintenanceStatus Thrift Payload: %v\n", hostList)
// Make thrift call. If we encounter an error sending the call, attempt to reconnect
// and continue trying to resend command until we run out of retries.
resp, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.adminClient.MaintenanceStatus(context.TODO(), hostList)
},
nil,
)
if retryErr != nil {
return resp, result, errors.Wrap(retryErr, "Unable to recover connection")
}
if resp.GetResult_() != nil {
result = resp.GetResult_().GetMaintenanceStatusResult_()
}
return resp, result, nil
}
// SetQuota sets a quota aggregate for the given role
// TODO(zircote) Currently investigating an error that is returned
// from thrift calls that include resources for `NamedPort` and `NumGpu`
func (r *realisClient) SetQuota(role string, cpu *float64, ramMb *int64, diskMb *int64) (*aurora.Response, error) {
quota := &aurora.ResourceAggregate{
Resources: []*aurora.Resource{{NumCpus: cpu}, {RamMb: ramMb}, {DiskMb: diskMb}},
}
resp, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.adminClient.SetQuota(context.TODO(), role, quota)
},
nil,
)
if retryErr != nil {
return resp, errors.Wrap(retryErr, "Unable to set role quota")
}
return resp, retryErr
}
// GetQuota returns the resource aggregate for the given role
func (r *realisClient) GetQuota(role string) (*aurora.Response, error) {
resp, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.adminClient.GetQuota(context.TODO(), role)
},
nil,
)
if retryErr != nil {
return resp, errors.Wrap(retryErr, "Unable to get role quota")
}
return resp, retryErr
}
// Force Aurora Scheduler to perform a snapshot and write to Mesos log
func (r *realisClient) Snapshot() error {
_, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.adminClient.Snapshot(context.TODO())
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "Unable to recover connection")
}
return nil
}
// Force Aurora Scheduler to write backup file to a file in the backup directory
func (r *realisClient) PerformBackup() error {
_, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.adminClient.PerformBackup(context.TODO())
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "Unable to recover connection")
}
return nil
}
func (r *realisClient) ForceImplicitTaskReconciliation() error {
_, retryErr := r.thriftCallWithRetries(
false,
func() (*aurora.Response, error) {
return r.adminClient.TriggerImplicitTaskReconciliation(context.TODO())
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "Unable to recover connection")
}
return nil
}
func (r *realisClient) ForceExplicitTaskReconciliation(batchSize *int32) error {
if batchSize != nil && *batchSize < 1 {
return errors.New("invalid batch size")
}
settings := aurora.NewExplicitReconciliationSettings()
settings.BatchSize = batchSize
_, retryErr := r.thriftCallWithRetries(false,
func() (*aurora.Response, error) {
return r.adminClient.TriggerExplicitTaskReconciliation(context.TODO(), settings)
},
nil,
)
if retryErr != nil {
return errors.Wrap(retryErr, "Unable to recover connection")
}
return nil
}

File diff suppressed because it is too large

135
realis_test.go Normal file
View file

@ -0,0 +1,135 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"fmt"
"github.com/rdelval/gorealis/gen-go/apache/aurora"
"github.com/stretchr/testify/assert"
"io/ioutil"
"os"
"testing"
)
var r Realis
var thermosPayload []byte
func TestMain(m *testing.M) {
// New configuration to connect to Vagrant image
config, err := NewDefaultConfig("http://192.168.33.7:8081")
if err != nil {
fmt.Println("Please run vagrant box before running test suite")
os.Exit(1)
}
thermosPayload, err = ioutil.ReadFile("examples/thermos_payload.json")
if err != nil {
fmt.Println("Error reading thermos payload file: ", err)
os.Exit(1)
}
// Configured for vagrant
AddBasicAuth(&config, "aurora", "secret")
r = NewClient(config)
os.Exit(m.Run())
}
func TestRealisClient_CreateJob_Thermos(t *testing.T) {
job := NewJob().
Environment("prod").
Role("vagrant").
Name("create_job_test").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(thermosPayload)).
CPU(1).
RAM(64).
Disk(100).
IsService(true).
InstanceCount(1).
AddPorts(1)
resp, err := r.CreateJob(job)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
// Tasks must exist for it to be killed
t.Run("TestRealisClient_KillJob_Thermos", func(t *testing.T) {
resp, err := r.KillJob(job.JobKey())
if err != nil {
fmt.Println(err)
os.Exit(1)
}
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
})
}
func TestRealisClient_ScheduleCronJob_Thermos(t *testing.T) {
thermosCronPayload, err := ioutil.ReadFile("examples/thermos_cron_payload.json")
if err != nil {
fmt.Println("Error reading thermos payload file: ", err)
os.Exit(1)
}
job := NewJob().
Environment("prod").
Role("vagrant").
Name("cronsched_job_test").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(thermosCronPayload)).
CPU(1).
RAM(64).
Disk(100).
IsService(true).
InstanceCount(1).
AddPorts(1).
CronSchedule("* * * * *").
IsService(false)
resp, err := r.ScheduleCronJob(job)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
t.Run("TestRealisClient_StartCronJob_Thermos", func(t *testing.T) {
resp, err := r.StartCronJob(job.JobKey())
if err != nil {
fmt.Println(err)
os.Exit(1)
}
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
})
t.Run("TestRealisClient_DeschedulerCronJob_Thermos", func(t *testing.T) {
resp, err := r.DescheduleCronJob(job.JobKey())
if err != nil {
fmt.Println(err)
os.Exit(1)
}
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
})
}

View file

@ -17,37 +17,24 @@ package response
import (
"bytes"
"errors"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/rdelval/gorealis/gen-go/apache/aurora"
"github.com/pkg/errors"
)
// Get key from a response created by a StartJobUpdate call
func JobUpdateKey(resp *aurora.Response) *aurora.JobUpdateKey {
return resp.GetResult_().GetStartJobUpdateResult_().GetKey()
return resp.Result_.StartJobUpdateResult_.GetKey()
}
func JobUpdateDetails(resp *aurora.Response) []*aurora.JobUpdateDetails {
return resp.GetResult_().GetGetJobUpdateDetailsResult_().GetDetailsList()
return resp.Result_.GetJobUpdateDetailsResult_.DetailsList
}
func ScheduleStatusResult(resp *aurora.Response) *aurora.ScheduleStatusResult_ {
return resp.GetResult_().GetScheduleStatusResult_()
}
func JobUpdateSummaries(resp *aurora.Response) []*aurora.JobUpdateSummary {
if resp.GetResult_() == nil || resp.GetResult_().GetGetJobUpdateSummariesResult_() == nil {
return nil
}
return resp.GetResult_().GetGetJobUpdateSummariesResult_().GetUpdateSummaries()
}
// Deprecated: Replaced by checks done inside of thriftCallHelper
func ResponseCodeCheck(resp *aurora.Response) (*aurora.Response, error) {
if resp == nil {
return resp, errors.New("Response is nil")
}
if resp.GetResponseCode() != aurora.ResponseCode_OK {
return resp, errors.New(CombineMessage(resp))
}
@ -63,7 +50,7 @@ func CombineMessage(resp *aurora.Response) string {
}
if buffer.Len() > 0 {
buffer.Truncate(buffer.Len() - 2) // Get rid of trailing comma + space
buffer.Truncate(buffer.Len()-2) // Get rid of trailing comma + space
}
return buffer.String()
}

294
retry.go
View file

@ -1,294 +0,0 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"io"
"math/rand"
"net/url"
"time"
"github.com/apache/thrift/lib/go/thrift"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/paypal/gorealis/response"
"github.com/pkg/errors"
)
// Backoff determines how the retry mechanism should react after each failure and how many failures it should
// tolerate.
type Backoff struct {
Duration time.Duration // the base duration
Factor float64 // Duration is multiplied by a factor each iteration
Jitter float64 // The amount of jitter applied each iteration
Steps int // Exit with error after this many steps
}
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
// This allows clients to avoid converging on periodic behavior. If maxFactor
// is 0.0, a suggested default value will be chosen.
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
if maxFactor <= 0.0 {
maxFactor = 1.0
}
wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
return wait
}
// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
// ExponentialBackoff is a modified version of the Kubernetes exponential-backoff code.
// It repeats a condition check with exponential backoff and checks the condition up to
// Steps times, increasing the wait by multiplying the previous duration by Factor.
//
// If Jitter is greater than zero, a random amount of each duration is added
// (between duration and duration*(1+jitter)).
//
// If the condition never returns true, ErrWaitTimeout is returned. Errors
// do not cause the function to return.
func ExponentialBackoff(backoff Backoff, logger logger, condition ConditionFunc) error {
var err error
var ok bool
var curStep int
duration := backoff.Duration
for curStep = 0; curStep < backoff.Steps; curStep++ {
// Only sleep if it's not the first iteration.
if curStep != 0 {
adjusted := duration
if backoff.Jitter > 0.0 {
adjusted = Jitter(duration, backoff.Jitter)
}
logger.Printf(
"A retryable error occurred during function call, backing off for %v before retrying\n", adjusted)
time.Sleep(adjusted)
duration = time.Duration(float64(duration) * backoff.Factor)
}
// Execute function passed in.
ok, err = condition()
// If the function executed says it succeeded, stop retrying
if ok {
return nil
}
if err != nil {
// If the error is temporary, continue retrying.
if !IsTemporary(err) {
return err
}
// Print out the temporary error we experienced.
logger.Println(err)
}
}
if curStep > 1 {
logger.Printf("retried this function call %d time(s)", curStep)
}
// Provide more information to the user wherever possible
if err != nil {
return newRetryError(errors.Wrap(err, "ran out of retries"), curStep)
}
return newRetryError(errors.New("ran out of retries"), curStep)
}
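A sketch of driving ExponentialBackoff directly from inside the realis package; the probe URL is illustrative and NewTemporaryError (from this package's error helpers) is assumed to mark an error as retryable:

	backoff := Backoff{Duration: time.Second, Factor: 2.0, Jitter: 0.1, Steps: 5}
	err := ExponentialBackoff(backoff, log.New(os.Stdout, "", log.LstdFlags), func() (bool, error) {
		resp, err := http.Get("http://192.168.33.7:8081/api")
		if err != nil {
			return false, NewTemporaryError(err) // temporary: keep retrying
		}
		resp.Body.Close()
		return resp.StatusCode == http.StatusOK, nil // done once the scheduler answers OK
	})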
type auroraThriftCall func() (resp *aurora.Response, err error)
// verifyOnTimeout defines the type of function that will be used to verify whether a Thrift call to the Scheduler
// made it to the scheduler or not. In general, these types of functions will have to interact with the scheduler
// through the very same Thrift API which previously encountered a time out from the client.
// This means that the functions themselves should be kept to a minimum number of Thrift calls.
// It should also be noted that this is a best effort mechanism and
// is likely to fail for the same reasons that the original call failed.
type verifyOnTimeout func() (*aurora.Response, bool)
// Duplicates the functionality of ExponentialBackoff but is specifically targeted towards ThriftCalls.
func (r *realisClient) thriftCallWithRetries(
returnOnTimeout bool,
thriftCall auroraThriftCall,
verifyOnTimeout verifyOnTimeout) (*aurora.Response, error) {
var resp *aurora.Response
var clientErr error
var curStep int
timeouts := 0
backoff := r.config.backoff
duration := backoff.Duration
for curStep = 0; curStep < backoff.Steps; curStep++ {
// If this isn't our first try, backoff before the next try.
if curStep != 0 {
adjusted := duration
if backoff.Jitter > 0.0 {
adjusted = Jitter(duration, backoff.Jitter)
}
r.logger.Printf(
"A retryable error occurred during thrift call, backing off for %v before retry %v",
adjusted,
curStep)
time.Sleep(adjusted)
duration = time.Duration(float64(duration) * backoff.Factor)
}
// Only allow one goroutine to use or modify the thrift client connection at a time.
// Placing this in an anonymous function in order to create a new, short-lived stack allowing unlock
// to be run in case of a panic inside of thriftCall.
func() {
r.lock.Lock()
defer r.lock.Unlock()
resp, clientErr = thriftCall()
r.logger.tracePrintf("Aurora Thrift Call ended resp: %v clientErr: %v", resp, clientErr)
}()
// Check if our thrift call is returning an error.
if clientErr != nil {
// Print out the error to the user
r.logger.Printf("Client Error: %v", clientErr)
temporary, timedout := isConnectionError(clientErr)
if !temporary && r.RealisConfig().failOnPermanentErrors {
return nil, errors.Wrap(clientErr, "permanent connection error")
}
// There exists a corner case where thrift payload was received by Aurora but
// connection timed out before Aurora was able to reply.
// Users can take special action on a timeout by using IsTimedout and reacting accordingly
// if they have configured the client to return on a timeout.
if timedout && returnOnTimeout {
return resp, newTimedoutError(errors.New("client connection closed before server answer"))
}
// In the future, reestablish connection should be able to check if it is actually possible
// to make a thrift call to Aurora. For now, a reconnect should always lead to a retry.
// Ignoring error due to the fact that an error should be retried regardless
reestablishErr := r.ReestablishConn()
if reestablishErr != nil {
r.logger.debugPrintf("error re-establishing connection ", reestablishErr)
}
// If users did not opt for a return on timeout in order to react to a timedout error,
// attempt to verify that the call made it to the scheduler after the connection was re-established.
if timedout {
timeouts++
r.logger.debugPrintf(
"Client closed connection %d times before server responded, "+
"consider increasing connection timeout",
timeouts)
// Allow caller to provide a function which checks if the original call was successful before
// it timed out.
if verifyOnTimeout != nil {
if verifyResp, ok := verifyOnTimeout(); ok {
r.logger.Print("verified that the call went through successfully after a client timeout")
// Response here might be different than the original as it is no longer constructed
// by the scheduler but mimicked.
// This is OK since the scheduler is very unlikely to change responses at this point in its
// development cycle but we must be careful to not return an incorrectly constructed response.
return verifyResp, nil
}
}
}
// Retry the thrift payload
continue
}
// If there was no client error, but the response is nil, something went wrong.
// Ideally, we'll never encounter this but we're placing a safeguard here.
if resp == nil {
return nil, errors.New("response from aurora is nil")
}
// Check Response Code from thrift and make a decision to continue retrying or not.
switch responseCode := resp.GetResponseCode(); responseCode {
// If the thrift call succeeded, stop retrying
case aurora.ResponseCode_OK:
return resp, nil
// If the response code is transient, continue retrying
case aurora.ResponseCode_ERROR_TRANSIENT:
r.logger.Println("Aurora replied with Transient error code, retrying")
continue
// Failure scenarios, these indicate a bad payload or a bad config. Stop retrying.
case aurora.ResponseCode_INVALID_REQUEST,
aurora.ResponseCode_ERROR,
aurora.ResponseCode_AUTH_FAILED,
aurora.ResponseCode_JOB_UPDATING_ERROR:
r.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
return resp, errors.New(response.CombineMessage(resp))
// The only case that should fall down to here is a WARNING response code.
// It is currently not used as a response in the scheduler so it is unknown how to handle it.
default:
r.logger.debugPrintf("unhandled response code %v received from Aurora\n", responseCode)
return nil, errors.Errorf("unhandled response code from Aurora %v", responseCode.String())
}
}
if curStep > 1 {
r.config.logger.Printf("this thrift call was retried %d time(s)", curStep)
}
// Provide more information to the user wherever possible.
if clientErr != nil {
return nil, newRetryError(errors.Wrap(clientErr, "ran out of retries, including latest error"), curStep)
}
return nil, newRetryError(errors.New("ran out of retries"), curStep)
}
// isConnectionError processes the error received by the client.
// The return values indicate whether this was determined to be a temporary error
// and whether it was determined to be a timeout error
func isConnectionError(err error) (bool, bool) {
// Determine if error is a temporary URL error by going up the stack
transportException, ok := err.(thrift.TTransportException)
if !ok {
return false, false
}
urlError, ok := transportException.Err().(*url.Error)
if !ok {
return false, false
}
// EOF error occurs when the server closes the read buffer of the client. This is common
// when the server is overloaded and we consider it temporary.
// All other errors that are not temporary, as reported by the member function Temporary(),
// are considered permanent.
if urlError.Err != io.EOF && !urlError.Temporary() {
return false, false
}
return true, urlError.Timeout()
}

View file

@ -1,13 +0,0 @@
#!/bin/bash
docker-compose up -d
# If running docker-compose up gives any error, don't do anything.
if [ $? -ne 0 ]; then
exit
fi
# Since we run our docker compose setup in bridge mode to be able to run on MacOS, we have to launch a Docker container within the bridge network in order to avoid any routing issues.
docker run --rm -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10-stretch go test -v github.com/paypal/gorealis $@
docker-compose down

View file

@ -1,4 +0,0 @@
#!/bin/bash
# Since we run our docker compose setup in bridge mode to be able to run on MacOS, we have to launch a Docker container within the bridge network in order to avoid any routing issues.
docker run --rm -t -w /gorealis -v $GOPATH/pkg:/go/pkg -v $(pwd):/gorealis --network gorealis_aurora_cluster golang:1.16-buster go test -v github.com/paypal/gorealis $@

View file

@ -15,174 +15,95 @@
package realis
import (
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/rdelval/gorealis/gen-go/apache/aurora"
)
// UpdateJob is a structure to collect all information required to create job update.
// Structure to collect all information required to create job update
type UpdateJob struct {
Job // SetInstanceCount for job is hidden, access via full qualifier
req *aurora.JobUpdateRequest
}
// NewDefaultUpdateJob creates an UpdateJob object with opinionated default settings.
func NewDefaultUpdateJob(config *aurora.TaskConfig) *UpdateJob {
// Create a default UpdateJob object.
func NewUpdateJob(config *aurora.TaskConfig) *UpdateJob {
req := aurora.NewJobUpdateRequest()
req.TaskConfig = config
req.Settings = NewUpdateSettings()
job, ok := NewJob().(*AuroraJob)
if !ok {
// This should never happen but it is here as a safeguard
return nil
}
req.Settings = aurora.NewJobUpdateSettings()
job := NewJob().(AuroraJob)
job.jobConfig.TaskConfig = config
// Rebuild resource map from TaskConfig
for _, ptr := range config.Resources {
for ptr := range config.Resources {
if ptr.NumCpus != nil {
job.resources[CPU].NumCpus = ptr.NumCpus
job.resources["cpu"].NumCpus = ptr.NumCpus
continue // Guard against Union violations that Go won't enforce
}
if ptr.RamMb != nil {
job.resources[RAM].RamMb = ptr.RamMb
job.resources["ram"].RamMb = ptr.RamMb
continue
}
if ptr.DiskMb != nil {
job.resources[DISK].DiskMb = ptr.DiskMb
continue
}
if ptr.NumGpus != nil {
job.resources[GPU] = &aurora.Resource{NumGpus: ptr.NumGpus}
job.resources["disk"].DiskMb = ptr.DiskMb
continue
}
}
// Mirrors defaults set by Pystachio
req.Settings.UpdateOnlyTheseInstances = make(map[*aurora.Range]bool)
req.Settings.UpdateGroupSize = 1
req.Settings.WaitForBatchCompletion = false
req.Settings.MinWaitInInstanceRunningMs = 45000
req.Settings.MinWaitInInstanceRunningMs = 45000 // Deprecated
req.Settings.MaxPerInstanceFailures = 0
req.Settings.MaxFailedInstances = 0
req.Settings.RollbackOnFailure = true
req.Settings.WaitForBatchCompletion = false
//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
return &UpdateJob{Job: job, req: req}
return &UpdateJob{job, req}
}
// NewUpdateJob creates an UpdateJob object without default settings.
func NewUpdateJob(config *aurora.TaskConfig, settings *aurora.JobUpdateSettings) *UpdateJob {
req := aurora.NewJobUpdateRequest()
req.TaskConfig = config
req.Settings = settings
job, ok := NewJob().(*AuroraJob)
if !ok {
// This should never happen but it is here as a safeguard
return nil
}
job.jobConfig.TaskConfig = config
// Rebuild resource map from TaskConfig
for _, ptr := range config.Resources {
if ptr.NumCpus != nil {
job.resources[CPU].NumCpus = ptr.NumCpus
continue // Guard against Union violations that Go won't enforce
}
if ptr.RamMb != nil {
job.resources[RAM].RamMb = ptr.RamMb
continue
}
if ptr.DiskMb != nil {
job.resources[DISK].DiskMb = ptr.DiskMb
continue
}
if ptr.NumGpus != nil {
job.resources[GPU] = &aurora.Resource{}
job.resources[GPU].NumGpus = ptr.NumGpus
continue // Guard against Union violations that Go won't enforce
}
}
//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
return &UpdateJob{Job: job, req: req}
}
// InstanceCount sets instance count the job will have after the update.
// Set instance count the job will have after the update.
func (u *UpdateJob) InstanceCount(inst int32) *UpdateJob {
u.req.InstanceCount = inst
return u
}
// BatchSize sets the max number of instances being updated at any given moment.
// Max number of instances being updated at any given moment.
func (u *UpdateJob) BatchSize(size int32) *UpdateJob {
u.req.Settings.UpdateGroupSize = size
return u
}
// WatchTime sets the minimum number of milliseconds a shard must remain in RUNNING state before considered a success.
func (u *UpdateJob) WatchTime(ms int32) *UpdateJob {
u.req.Settings.MinWaitInInstanceRunningMs = ms
// Minimum number of milliseconds a shard must remain in RUNNING state before considered a success.
func (u *UpdateJob) WatchTime(milliseconds int32) *UpdateJob {
u.req.Settings.MinWaitInInstanceRunningMs = milliseconds
return u
}
// WaitForBatchCompletion configures the job update to wait for all instances in a group to be done before moving on.
// Wait for all instances in a group to be done before moving on.
func (u *UpdateJob) WaitForBatchCompletion(batchWait bool) *UpdateJob {
u.req.Settings.WaitForBatchCompletion = batchWait
return u
}
// MaxPerInstanceFailures sets the max number of instance failures to tolerate before marking instance as FAILED.
// Max number of instance failures to tolerate before marking instance as FAILED.
func (u *UpdateJob) MaxPerInstanceFailures(inst int32) *UpdateJob {
u.req.Settings.MaxPerInstanceFailures = inst
return u
}
// MaxFailedInstances sets the max number of FAILED instances to tolerate before terminating the update.
// Max number of FAILED instances to tolerate before terminating the update.
func (u *UpdateJob) MaxFailedInstances(inst int32) *UpdateJob {
u.req.Settings.MaxFailedInstances = inst
return u
}
// RollbackOnFail configures the job to roll back automatically after a job update fails.
// When False, prevents auto rollback of a failed update.
func (u *UpdateJob) RollbackOnFail(rollback bool) *UpdateJob {
u.req.Settings.RollbackOnFailure = rollback
return u
}
// NewUpdateSettings returns an opinionated set of job update settings.
func (u *UpdateJob) BatchUpdateStrategy(strategy aurora.BatchJobUpdateStrategy) *UpdateJob {
u.req.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{BatchStrategy: &strategy}
return u
}
func (u *UpdateJob) QueueUpdateStrategy(strategy aurora.QueueJobUpdateStrategy) *UpdateJob {
u.req.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{QueueStrategy: &strategy}
return u
}
func (u *UpdateJob) VariableBatchStrategy(strategy aurora.VariableBatchJobUpdateStrategy) *UpdateJob {
u.req.Settings.UpdateStrategy = &aurora.JobUpdateStrategy{VarBatchStrategy: &strategy}
return u
}
func NewUpdateSettings() *aurora.JobUpdateSettings {
us := new(aurora.JobUpdateSettings)
// Mirrors defaults set by Pystachio
us.UpdateGroupSize = 1
us.WaitForBatchCompletion = false
us.MinWaitInInstanceRunningMs = 45000
us.MaxPerInstanceFailures = 0
us.MaxFailedInstances = 0
us.RollbackOnFailure = true
return us
}
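A usage sketch combining the pieces above; the counts are illustrative, job is assumed to be an existing AuroraJob, and StartJobUpdate comes from the suppressed realis.go:

	update := realis.NewDefaultUpdateJob(job.TaskConfig()).
		InstanceCount(3).
		BatchSize(2).
		WatchTime(30000). // 30s in RUNNING before a shard counts as healthy
		MaxFailedInstances(1).
		RollbackOnFail(true)
	resp, err := r.StartJobUpdate(update, "")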

167
util.go
View file

@ -1,167 +0,0 @@
package realis
import (
"crypto/x509"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/pkg/errors"
)
const apiPath = "/api"
// ActiveStates - States a task may be in when active.
var ActiveStates = make(map[aurora.ScheduleStatus]bool)
// SlaveAssignedStates - States a task may be in when it has already been assigned to a Mesos agent.
var SlaveAssignedStates = make(map[aurora.ScheduleStatus]bool)
// LiveStates - States a task may be in when it is live (e.g. able to take traffic)
var LiveStates = make(map[aurora.ScheduleStatus]bool)
// TerminalStates - Set of states a task may not transition away from.
var TerminalStates = make(map[aurora.ScheduleStatus]bool)
// ActiveJobUpdateStates - States a Job Update may be in where it is considered active.
var ActiveJobUpdateStates = make(map[aurora.JobUpdateStatus]bool)
// TerminalUpdateStates returns a slice containing all the terminal states an update may be in.
// This is a function in order to avoid having a slice that can be accidentally mutated.
func TerminalUpdateStates() []aurora.JobUpdateStatus {
return []aurora.JobUpdateStatus{
aurora.JobUpdateStatus_ROLLED_FORWARD,
aurora.JobUpdateStatus_ROLLED_BACK,
aurora.JobUpdateStatus_ABORTED,
aurora.JobUpdateStatus_ERROR,
aurora.JobUpdateStatus_FAILED,
}
}
// AwaitingPulseJobUpdateStates - States a job update may be in where it is waiting for a pulse.
var AwaitingPulseJobUpdateStates = make(map[aurora.JobUpdateStatus]bool)
func init() {
for _, status := range aurora.ACTIVE_STATES {
ActiveStates[status] = true
}
for _, status := range aurora.SLAVE_ASSIGNED_STATES {
SlaveAssignedStates[status] = true
}
for _, status := range aurora.LIVE_STATES {
LiveStates[status] = true
}
for _, status := range aurora.TERMINAL_STATES {
TerminalStates[status] = true
}
for _, status := range aurora.ACTIVE_JOB_UPDATE_STATES {
ActiveJobUpdateStates[status] = true
}
for _, status := range aurora.AWAITNG_PULSE_JOB_UPDATE_STATES {
AwaitingPulseJobUpdateStates[status] = true
}
}
// createCertPool will attempt to load certificates into a certificate pool from a given directory.
// Only files with an extension contained in the extension map are considered.
// This function ignores any files that cannot be read successfully or cannot be added to the certPool
// successfully.
func createCertPool(path string, extensions map[string]struct{}) (*x509.CertPool, error) {
_, err := os.Stat(path)
if err != nil {
return nil, errors.Wrap(err, "unable to load certificates")
}
caFiles, err := ioutil.ReadDir(path)
if err != nil {
return nil, err
}
certPool := x509.NewCertPool()
loadedCerts := 0
for _, cert := range caFiles {
// Skip directories
if cert.IsDir() {
continue
}
// Skip any files that do not contain the right extension
if _, ok := extensions[filepath.Ext(cert.Name())]; !ok {
continue
}
pem, err := ioutil.ReadFile(filepath.Join(path, cert.Name()))
if err != nil {
continue
}
if certPool.AppendCertsFromPEM(pem) {
loadedCerts++
}
}
if loadedCerts == 0 {
return nil, errors.New("no certificates were able to be successfully loaded")
}
return certPool, nil
}
func validateAuroraURL(location string) (string, error) {
// If no protocol defined, assume http
if !strings.Contains(location, "://") {
location = "http://" + location
}
u, err := url.Parse(location)
if err != nil {
return "", errors.Wrap(err, "error parsing url")
}
// If no path provided assume /api
if u.Path == "" {
u.Path = "/api"
}
// If no port provided, assume default 8081
if u.Port() == "" {
u.Host = u.Host + ":8081"
}
if !(u.Scheme == "http" || u.Scheme == "https") {
return "", errors.Errorf("only protocols http and https are supported %v\n", u.Scheme)
}
// This could theoretically be elsewhere but we'll be strict for the sake of simplicity
if u.Path != apiPath {
return "", errors.Errorf("expected /api path %v\n", u.Path)
}
return u.String(), nil
}
func calculateCurrentBatch(updatingInstances int32, batchSizes []int32) int {
for i, size := range batchSizes {
updatingInstances -= size
if updatingInstances <= 0 {
return i
}
}
// Overflow batches
batchCount := len(batchSizes) - 1
lastBatchIndex := len(batchSizes) - 1
batchCount += int(updatingInstances / batchSizes[lastBatchIndex])
if updatingInstances%batchSizes[lastBatchIndex] != 0 {
batchCount++
}
return batchCount
}
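To make the overflow arithmetic concrete, a walkthrough of one case exercised by the tests below:

	// calculateCurrentBatch(5, []int32{2, 1}):
	//   loop: 5-2=3 (past batch 0), 3-1=2 (past batch 1), both > 0
	//   overflow: batchCount = 1, then += 2/1 = 2 -> 3; 2%1 == 0 adds nothing
	//   result: 3, matching the moreInstancesThanBatchesDecreasing test.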

View file

@ -1,114 +0,0 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestAuroraURLValidator(t *testing.T) {
t.Run("badURL", func(t *testing.T) {
url, err := validateAuroraURL("http://badurl.com/badpath")
assert.Empty(t, url)
assert.Error(t, err)
})
t.Run("URLHttp", func(t *testing.T) {
url, err := validateAuroraURL("http://goodurl.com:8081/api")
assert.Equal(t, "http://goodurl.com:8081/api", url)
assert.NoError(t, err)
})
t.Run("URLHttps", func(t *testing.T) {
url, err := validateAuroraURL("https://goodurl.com:8081/api")
assert.Equal(t, "https://goodurl.com:8081/api", url)
assert.NoError(t, err)
})
t.Run("URLNoPath", func(t *testing.T) {
url, err := validateAuroraURL("http://goodurl.com:8081")
assert.Equal(t, "http://goodurl.com:8081/api", url)
assert.NoError(t, err)
})
t.Run("ipAddrNoPath", func(t *testing.T) {
url, err := validateAuroraURL("http://192.168.1.33:8081")
assert.Equal(t, "http://192.168.1.33:8081/api", url)
assert.NoError(t, err)
})
t.Run("URLNoProtocol", func(t *testing.T) {
url, err := validateAuroraURL("goodurl.com:8081/api")
assert.Equal(t, "http://goodurl.com:8081/api", url)
assert.NoError(t, err)
})
t.Run("URLNoProtocolNoPathNoPort", func(t *testing.T) {
url, err := validateAuroraURL("goodurl.com")
assert.Equal(t, "http://goodurl.com:8081/api", url)
assert.NoError(t, err)
})
}
func TestCurrentBatchCalculator(t *testing.T) {
t.Run("singleBatchOverflow", func(t *testing.T) {
curBatch := calculateCurrentBatch(10, []int32{2})
assert.Equal(t, 4, curBatch)
})
t.Run("noInstancesUpdating", func(t *testing.T) {
curBatch := calculateCurrentBatch(0, []int32{2})
assert.Equal(t, 0, curBatch)
})
t.Run("evenMatchSingleBatch", func(t *testing.T) {
curBatch := calculateCurrentBatch(2, []int32{2})
assert.Equal(t, 0, curBatch)
})
t.Run("moreInstancesThanBatches", func(t *testing.T) {
curBatch := calculateCurrentBatch(5, []int32{1, 2})
assert.Equal(t, 2, curBatch)
})
t.Run("moreInstancesThanBatchesDecreasing", func(t *testing.T) {
curBatch := calculateCurrentBatch(5, []int32{2, 1})
assert.Equal(t, 3, curBatch)
})
t.Run("unevenFit", func(t *testing.T) {
curBatch := calculateCurrentBatch(2, []int32{1, 2})
assert.Equal(t, 1, curBatch)
})
t.Run("halfWay", func(t *testing.T) {
curBatch := calculateCurrentBatch(1, []int32{1, 2})
assert.Equal(t, 0, curBatch)
})
}
func TestCertPoolCreator(t *testing.T) {
extensions := map[string]struct{}{".crt": {}}
_, err := createCertPool("examples/certs", extensions)
assert.NoError(t, err)
t.Run("badDir", func(t *testing.T) {
_, err := createCertPool("idontexist", extensions)
assert.Error(t, err)
})
}

239 vendor/git.apache.org/thrift.git/LICENSE generated vendored Normal file
View file

@ -0,0 +1,239 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------
SOFTWARE DISTRIBUTED WITH THRIFT:
The Apache Thrift software includes a number of subcomponents with
separate copyright notices and license terms. Your use of the source
code for the these subcomponents is subject to the terms and
conditions of the following licenses.
--------------------------------------------------
Portions of the following files are licensed under the MIT License:
lib/erl/src/Makefile.am
Please see doc/otp-base-license.txt for the full terms of this license.
--------------------------------------------------
For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
# Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
#
# Copying and distribution of this file, with or without
# modification, are permitted in any medium without royalty provided
# the copyright notice and this notice are preserved.
--------------------------------------------------
For the lib/nodejs/lib/thrift/json_parse.js:
/*
json_parse.js
2015-05-02
Public Domain.
NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
*/
(By Douglas Crockford <douglas@crockford.com>)
--------------------------------------------------

5 vendor/git.apache.org/thrift.git/NOTICE generated vendored Normal file
View file

@ -0,0 +1,5 @@
Apache Thrift
Copyright 2006-2010 The Apache Software Foundation.
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View file

@ -30,22 +30,11 @@ const (
PROTOCOL_ERROR = 7
)
var defaultApplicationExceptionMessage = map[int32]string{
UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception",
UNKNOWN_METHOD: "unknown method",
INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
WRONG_METHOD_NAME: "wrong method name",
BAD_SEQUENCE_ID: "bad sequence ID",
MISSING_RESULT: "missing result",
INTERNAL_ERROR: "unknown internal error",
PROTOCOL_ERROR: "unknown protocol error",
}
// Application level Thrift exception
type TApplicationException interface {
TException
TypeId() int32
Read(iprot TProtocol) error
Read(iprot TProtocol) (TApplicationException, error)
Write(oprot TProtocol) error
}
@ -55,10 +44,7 @@ type tApplicationException struct {
}
func (e tApplicationException) Error() string {
if e.message != "" {
return e.message
}
return defaultApplicationExceptionMessage[e.type_]
return e.message
}
func NewTApplicationException(type_ int32, message string) TApplicationException {
@ -69,11 +55,10 @@ func (p *tApplicationException) TypeId() int32 {
return p.type_
}
func (p *tApplicationException) Read(iprot TProtocol) error {
// TODO: this should really be generated by the compiler
func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) {
_, err := iprot.ReadStructBegin()
if err != nil {
return err
return nil, err
}
message := ""
@ -82,7 +67,7 @@ func (p *tApplicationException) Read(iprot TProtocol) error {
for {
_, ttype, id, err := iprot.ReadFieldBegin()
if err != nil {
return err
return nil, err
}
if ttype == STOP {
break
@ -91,40 +76,33 @@ func (p *tApplicationException) Read(iprot TProtocol) error {
case 1:
if ttype == STRING {
if message, err = iprot.ReadString(); err != nil {
return err
return nil, err
}
} else {
if err = SkipDefaultDepth(iprot, ttype); err != nil {
return err
return nil, err
}
}
case 2:
if ttype == I32 {
if type_, err = iprot.ReadI32(); err != nil {
return err
return nil, err
}
} else {
if err = SkipDefaultDepth(iprot, ttype); err != nil {
return err
return nil, err
}
}
default:
if err = SkipDefaultDepth(iprot, ttype); err != nil {
return err
return nil, err
}
}
if err = iprot.ReadFieldEnd(); err != nil {
return err
return nil, err
}
}
if err := iprot.ReadStructEnd(); err != nil {
return err
}
p.message = message
p.type_ = type_
return nil
return NewTApplicationException(type_, message), iprot.ReadStructEnd()
}
func (p *tApplicationException) Write(oprot TProtocol) (err error) {

View file

@ -21,7 +21,6 @@ package thrift
import (
"bytes"
"context"
"encoding/binary"
"errors"
"fmt"
@ -448,6 +447,9 @@ func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
if size < 0 {
return nil, invalidDataLength
}
if uint64(size) > p.trans.RemainingBytes() {
return nil, invalidDataLength
}
isize := int(size)
buf := make([]byte, isize)
@ -455,8 +457,8 @@ func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
return buf, NewTProtocolException(err)
}
func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
return NewTProtocolException(p.trans.Flush(ctx))
func (p *TBinaryProtocol) Flush() (err error) {
return NewTProtocolException(p.trans.Flush())
}
func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
@ -478,6 +480,9 @@ func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
if size < 0 {
return "", nil
}
if uint64(size) > p.trans.RemainingBytes() {
return "", invalidDataLength
}
var (
buf bytes.Buffer
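
The v1.0.5 side adds a bounds check before allocating: if the declared length exceeds the bytes the transport can still supply, it fails fast rather than allocating an attacker-controlled buffer. Below is a self-contained sketch of that guard (all names here are illustrative, not thrift's).

package main

import (
	"errors"
	"fmt"
)

var errInvalidLength = errors.New("invalid data length")

// readBlob allocates only after validating the declared size against
// what the source can actually supply.
func readBlob(declared int32, remaining uint64) ([]byte, error) {
	if declared < 0 {
		return nil, errInvalidLength
	}
	if uint64(declared) > remaining {
		return nil, errInvalidLength
	}
	return make([]byte, declared), nil
}

func main() {
	_, err := readBlob(1<<30, 16)
	fmt.Println(err) // invalid data length
}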

View file

@ -21,7 +21,6 @@ package thrift
import (
"bufio"
"context"
)
type TBufferedTransportFactory struct {
@ -33,8 +32,8 @@ type TBufferedTransport struct {
tp TTransport
}
func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
return NewTBufferedTransport(trans, p.size), nil
func (p *TBufferedTransportFactory) GetTransport(trans TTransport) TTransport {
return NewTBufferedTransport(trans, p.size)
}
func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
@ -79,12 +78,12 @@ func (p *TBufferedTransport) Write(b []byte) (int, error) {
return n, err
}
func (p *TBufferedTransport) Flush(ctx context.Context) error {
func (p *TBufferedTransport) Flush() error {
if err := p.ReadWriter.Flush(); err != nil {
p.ReadWriter.Writer.Reset(p.tp)
return err
}
return p.tp.Flush(ctx)
return p.tp.Flush()
}
func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {

View file

@ -20,7 +20,6 @@
package thrift
import (
"context"
"encoding/binary"
"fmt"
"io"
@ -562,6 +561,9 @@ func (p *TCompactProtocol) ReadString() (value string, err error) {
if length < 0 {
return "", invalidDataLength
}
if uint64(length) > p.trans.RemainingBytes() {
return "", invalidDataLength
}
if length == 0 {
return "", nil
@ -588,14 +590,17 @@ func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
if length < 0 {
return nil, invalidDataLength
}
if uint64(length) > p.trans.RemainingBytes() {
return nil, invalidDataLength
}
buf := make([]byte, length)
_, e = io.ReadFull(p.trans, buf)
return buf, NewTProtocolException(e)
}
func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
return NewTProtocolException(p.trans.Flush(ctx))
func (p *TCompactProtocol) Flush() (err error) {
return NewTProtocolException(p.trans.Flush())
}
func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
@ -801,7 +806,7 @@ func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
case COMPACT_STRUCT:
return STRUCT, nil
}
return STOP, TException(fmt.Errorf("don't know what type: %v", t&0x0f))
return STOP, TException(fmt.Errorf("don't know what type: %s", t&0x0f))
}
// Given a TType value, find the appropriate TCompactProtocol.Types constant.

View file

@ -20,7 +20,6 @@
package thrift
import (
"context"
"log"
)
@ -259,8 +258,8 @@ func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) {
log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
return
}
func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) {
err = tdp.Delegate.Flush(ctx)
func (tdp *TDebugProtocol) Flush() (err error) {
err = tdp.Delegate.Flush()
log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
return
}

View file

@ -22,7 +22,6 @@ package thrift
import (
"bufio"
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
@ -52,12 +51,8 @@ func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength ui
return &tFramedTransportFactory{factory: factory, maxLength: maxLength}
}
func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) {
tt, err := p.factory.GetTransport(base)
if err != nil {
return nil, err
}
return NewTFramedTransportMaxLength(tt, p.maxLength), nil
func (p *tFramedTransportFactory) GetTransport(base TTransport) TTransport {
return NewTFramedTransportMaxLength(p.factory.GetTransport(base), p.maxLength)
}
func NewTFramedTransport(transport TTransport) *TFramedTransport {
@ -136,23 +131,21 @@ func (p *TFramedTransport) WriteString(s string) (n int, err error) {
return p.buf.WriteString(s)
}
func (p *TFramedTransport) Flush(ctx context.Context) error {
func (p *TFramedTransport) Flush() error {
size := p.buf.Len()
buf := p.buffer[:4]
binary.BigEndian.PutUint32(buf, uint32(size))
_, err := p.transport.Write(buf)
if err != nil {
p.buf.Truncate(0)
return NewTTransportExceptionFromError(err)
}
if size > 0 {
if n, err := p.buf.WriteTo(p.transport); err != nil {
print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
p.buf.Truncate(0)
return NewTTransportExceptionFromError(err)
}
}
err = p.transport.Flush(ctx)
err = p.transport.Flush()
return NewTTransportExceptionFromError(err)
}

View file

@ -21,7 +21,6 @@ package thrift
import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http"
@ -47,16 +46,27 @@ type THttpClient struct {
type THttpClientTransportFactory struct {
options THttpClientOptions
url string
isPost bool
}
func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
func (p *THttpClientTransportFactory) GetTransport(trans TTransport) TTransport {
if trans != nil {
t, ok := trans.(*THttpClient)
if ok && t.url != nil {
return NewTHttpClientWithOptions(t.url.String(), p.options)
if t.requestBuffer != nil {
t2, _ := NewTHttpPostClientWithOptions(t.url.String(), p.options)
return t2
}
t2, _ := NewTHttpClientWithOptions(t.url.String(), p.options)
return t2
}
}
return NewTHttpClientWithOptions(p.url, p.options)
if p.isPost {
s, _ := NewTHttpPostClientWithOptions(p.url, p.options)
return s
}
s, _ := NewTHttpClientWithOptions(p.url, p.options)
return s
}
type THttpClientOptions struct {
@ -69,10 +79,39 @@ func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
}
func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
return &THttpClientTransportFactory{url: url, options: options}
return &THttpClientTransportFactory{url: url, isPost: false, options: options}
}
func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
return NewTHttpPostClientTransportFactoryWithOptions(url, THttpClientOptions{})
}
func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
return &THttpClientTransportFactory{url: url, isPost: true, options: options}
}
func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
parsedURL, err := url.Parse(urlstr)
if err != nil {
return nil, err
}
response, err := http.Get(urlstr)
if err != nil {
return nil, err
}
client := options.Client
if client == nil {
client = DefaultHttpClient
}
httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}}
return &THttpClient{client: client, response: response, url: parsedURL, header: httpHeader}, nil
}
func NewTHttpClient(urlstr string) (TTransport, error) {
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
}
func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
parsedURL, err := url.Parse(urlstr)
if err != nil {
return nil, err
@ -82,12 +121,12 @@ func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTran
if client == nil {
client = DefaultHttpClient
}
httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}}
httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}}
return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
}
func NewTHttpClient(urlstr string) (TTransport, error) {
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
func NewTHttpPostClient(urlstr string) (TTransport, error) {
return NewTHttpPostClientWithOptions(urlstr, THttpClientOptions{})
}
// Set the HTTP Header for this specific Thrift Transport
@ -182,7 +221,7 @@ func (p *THttpClient) WriteString(s string) (n int, err error) {
return p.requestBuffer.WriteString(s)
}
func (p *THttpClient) Flush(ctx context.Context) error {
func (p *THttpClient) Flush() error {
// Close any previous response body to avoid leaking connections.
p.closeResponse()
@ -191,9 +230,6 @@ func (p *THttpClient) Flush(ctx context.Context) error {
return NewTTransportExceptionFromError(err)
}
req.Header = p.header
if ctx != nil {
req = req.WithContext(ctx)
}
response, err := p.client.Do(req)
if err != nil {
return NewTTransportExceptionFromError(err)
@ -220,23 +256,3 @@ func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
const maxSize = ^uint64(0)
return maxSize // the truth is, we just don't know unless framed is used
}
// Deprecated: Use NewTHttpClientTransportFactory instead.
func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
}
// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead.
func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
return NewTHttpClientTransportFactoryWithOptions(url, options)
}
// Deprecated: Use NewTHttpClientWithOptions instead.
func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
return NewTHttpClientWithOptions(urlstr, options)
}
// Deprecated: Use NewTHttpClient instead.
func NewTHttpPostClient(urlstr string) (TTransport, error) {
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
}
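
On main, the old Post constructors survive as thin aliases marked with the "// Deprecated:" comment prefix that Go tooling (godoc, staticcheck) recognizes. A minimal sketch of the pattern (names illustrative):

package main

import "fmt"

func newClient(url string) string { return "client for " + url }

// Deprecated: Use newClient instead.
func newPostClient(url string) string { return newClient(url) }

func main() {
	fmt.Println(newPostClient("http://example.com")) // client for http://example.com
}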

View file

@ -19,10 +19,16 @@
package thrift
import (
"testing"
)
import "net/http"
func TestNothing(t *testing.T) {
// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function
func NewThriftHandlerFunc(processor TProcessor,
inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/x-thrift")
transport := NewStreamTransport(r.Body, w)
processor.Process(inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
}
}

View file

@ -21,7 +21,6 @@ package thrift
import (
"bufio"
"context"
"io"
)
@ -39,38 +38,38 @@ type StreamTransportFactory struct {
isReadWriter bool
}
func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
func (p *StreamTransportFactory) GetTransport(trans TTransport) TTransport {
if trans != nil {
t, ok := trans.(*StreamTransport)
if ok {
if t.isReadWriter {
return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil
return NewStreamTransportRW(t.Reader.(io.ReadWriter))
}
if t.Reader != nil && t.Writer != nil {
return NewStreamTransport(t.Reader, t.Writer), nil
return NewStreamTransport(t.Reader, t.Writer)
}
if t.Reader != nil && t.Writer == nil {
return NewStreamTransportR(t.Reader), nil
return NewStreamTransportR(t.Reader)
}
if t.Reader == nil && t.Writer != nil {
return NewStreamTransportW(t.Writer), nil
return NewStreamTransportW(t.Writer)
}
return &StreamTransport{}, nil
return &StreamTransport{}
}
}
if p.isReadWriter {
return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil
return NewStreamTransportRW(p.Reader.(io.ReadWriter))
}
if p.Reader != nil && p.Writer != nil {
return NewStreamTransport(p.Reader, p.Writer), nil
return NewStreamTransport(p.Reader, p.Writer)
}
if p.Reader != nil && p.Writer == nil {
return NewStreamTransportR(p.Reader), nil
return NewStreamTransportR(p.Reader)
}
if p.Reader == nil && p.Writer != nil {
return NewStreamTransportW(p.Writer), nil
return NewStreamTransportW(p.Writer)
}
return &StreamTransport{}, nil
return &StreamTransport{}
}
func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
@ -139,7 +138,7 @@ func (p *StreamTransport) Close() error {
}
// Flushes the underlying output stream if not null.
func (p *StreamTransport) Flush(ctx context.Context) error {
func (p *StreamTransport) Flush() error {
if p.Writer == nil {
return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
}

View file

@ -20,7 +20,6 @@
package thrift
import (
"context"
"encoding/base64"
"fmt"
)
@ -439,10 +438,10 @@ func (p *TJSONProtocol) ReadBinary() ([]byte, error) {
return v, p.ParsePostValue()
}
func (p *TJSONProtocol) Flush(ctx context.Context) (err error) {
func (p *TJSONProtocol) Flush() (err error) {
err = p.writer.Flush()
if err == nil {
err = p.trans.Flush(ctx)
err = p.trans.Flush()
}
return NewTProtocolException(err)
}

View file

@ -21,7 +21,6 @@ package thrift
import (
"bytes"
"context"
)
// Memory buffer-based implementation of the TTransport interface.
@ -34,14 +33,14 @@ type TMemoryBufferTransportFactory struct {
size int
}
func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport {
if trans != nil {
t, ok := trans.(*TMemoryBuffer)
if ok && t.size > 0 {
return NewTMemoryBufferLen(t.size), nil
return NewTMemoryBufferLen(t.size)
}
}
return NewTMemoryBufferLen(p.size), nil
return NewTMemoryBufferLen(p.size)
}
func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
@ -71,7 +70,7 @@ func (p *TMemoryBuffer) Close() error {
}
// Flushing a memory buffer is a no-op
func (p *TMemoryBuffer) Flush(ctx context.Context) error {
func (p *TMemoryBuffer) Flush() error {
return nil
}

View file

@ -20,7 +20,6 @@
package thrift
import (
"context"
"fmt"
"strings"
)
@ -128,7 +127,7 @@ func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProces
t.serviceProcessorMap[name] = processor
}
func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
func (t *TMultiplexedProcessor) Process(in, out TProtocol) (bool, TException) {
name, typeId, seqid, err := in.ReadMessageBegin()
if err != nil {
return false, err
@ -141,7 +140,7 @@ func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol)
if len(v) != 2 {
if t.DefaultProcessor != nil {
smb := NewStoredMessageProtocol(in, name, typeId, seqid)
return t.DefaultProcessor.Process(ctx, smb, out)
return t.DefaultProcessor.Process(smb, out)
}
return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name)
}
@ -150,7 +149,7 @@ func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol)
return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0])
}
smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
return actualProcessor.Process(ctx, smb, out)
return actualProcessor.Process(smb, out)
}
// Protocol that uses a stored message for ReadMessageBegin
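
For context, thrift multiplexing composes message names as "service:method" (the separator is an assumption based on the upstream convention); the Process implementation above splits that composite name and routes to the registered processor, as this self-contained sketch illustrates:

package main

import (
	"fmt"
	"strings"
)

// route mimics the name-splitting step of TMultiplexedProcessor.Process.
func route(name string) (service, method string, ok bool) {
	v := strings.SplitN(name, ":", 2)
	if len(v) != 2 {
		return "", "", false
	}
	return v[0], v[1], true
}

func main() {
	fmt.Println(route("Calculator:add")) // Calculator add true
}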

View file

@ -19,12 +19,12 @@
package thrift
import "context"
type mockProcessor struct {
ProcessFunc func(in, out TProtocol) (bool, TException)
// A processor is a generic object which operates upon an input stream and
// writes to some output stream.
type TProcessor interface {
Process(in, out TProtocol) (bool, TException)
}
func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
return m.ProcessFunc(in, out)
type TProcessorFunction interface {
Process(seqId int32, in, out TProtocol) (bool, TException)
}

View file

@ -19,18 +19,6 @@
package thrift
import "context"
// A processor is a generic object which operates upon an input stream and
// writes to some output stream.
type TProcessor interface {
Process(ctx context.Context, in, out TProtocol) (bool, TException)
}
type TProcessorFunction interface {
Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
}
// The default processor factory just returns a singleton
// instance.
type TProcessorFactory interface {

View file

@ -20,9 +20,7 @@
package thrift
import (
"context"
"errors"
"fmt"
)
const (
@ -75,7 +73,7 @@ type TProtocol interface {
ReadBinary() (value []byte, err error)
Skip(fieldType TType) (err error)
Flush(ctx context.Context) (err error)
Flush() (err error)
Transport() TTransport
}
@ -172,8 +170,6 @@ func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
}
}
return self.ReadListEnd()
default:
return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType)))
}
return nil
}

View file

@ -19,10 +19,6 @@
package thrift
import (
"context"
)
type TSerializer struct {
Transport *TMemoryBuffer
Protocol TProtocol
@ -42,35 +38,35 @@ func NewTSerializer() *TSerializer {
protocol}
}
func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
func (t *TSerializer) WriteString(msg TStruct) (s string, err error) {
t.Transport.Reset()
if err = msg.Write(t.Protocol); err != nil {
return
}
if err = t.Protocol.Flush(ctx); err != nil {
if err = t.Protocol.Flush(); err != nil {
return
}
if err = t.Transport.Flush(ctx); err != nil {
if err = t.Transport.Flush(); err != nil {
return
}
return t.Transport.String(), nil
}
func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
func (t *TSerializer) Write(msg TStruct) (b []byte, err error) {
t.Transport.Reset()
if err = msg.Write(t.Protocol); err != nil {
return
}
if err = t.Protocol.Flush(ctx); err != nil {
if err = t.Protocol.Flush(); err != nil {
return
}
if err = t.Transport.Flush(ctx); err != nil {
if err = t.Transport.Flush(); err != nil {
return
}

View file

@ -47,14 +47,7 @@ func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*T
return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil
}
// Creates a TServerSocket from a net.Addr
func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket {
return &TServerSocket{addr: addr, clientTimeout: clientTimeout}
}
func (p *TServerSocket) Listen() error {
p.mu.Lock()
defer p.mu.Unlock()
if p.IsListening() {
return nil
}
@ -74,15 +67,10 @@ func (p *TServerSocket) Accept() (TTransport, error) {
if interrupted {
return nil, errTransportInterrupted
}
p.mu.Lock()
listener := p.listener
p.mu.Unlock()
if listener == nil {
if p.listener == nil {
return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
}
conn, err := listener.Accept()
conn, err := p.listener.Accept()
if err != nil {
return nil, NewTTransportExceptionFromError(err)
}
@ -96,8 +84,6 @@ func (p *TServerSocket) IsListening() bool {
// Connects the socket, creating a new socket object if necessary.
func (p *TServerSocket) Open() error {
p.mu.Lock()
defer p.mu.Unlock()
if p.IsListening() {
return NewTTransportException(ALREADY_OPEN, "Server socket already open")
}
@ -117,21 +103,20 @@ func (p *TServerSocket) Addr() net.Addr {
}
func (p *TServerSocket) Close() error {
var err error
p.mu.Lock()
if p.IsListening() {
err = p.listener.Close()
defer func() {
p.listener = nil
}()
if p.IsListening() {
return p.listener.Close()
}
p.mu.Unlock()
return err
return nil
}
func (p *TServerSocket) Interrupt() error {
p.mu.Lock()
p.interrupted = true
p.mu.Unlock()
p.Close()
p.mu.Unlock()
return nil
}

View file

@ -22,7 +22,6 @@ package thrift
import (
"bufio"
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
@ -553,7 +552,7 @@ func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) {
return v, p.ParsePostValue()
}
func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) {
func (p *TSimpleJSONProtocol) Flush() (err error) {
return NewTProtocolException(p.writer.Flush())
}
@ -1065,7 +1064,7 @@ func (p *TSimpleJSONProtocol) ParseListEnd() error {
for _, char := range line {
switch char {
default:
e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line)
e := fmt.Errorf("Expecting end of list \"]\", but found: \"", line, "\"")
return NewTProtocolExceptionWithType(INVALID_DATA, e)
case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
break

View file

@ -22,19 +22,13 @@ package thrift
import (
"log"
"runtime/debug"
"sync"
"sync/atomic"
)
/*
* This is not a typical TSimpleServer, as it does not block after accepting a socket.
* It is more like a TThreadedServer that can handle different connections in different goroutines.
* This works if the golang user implements a conn-pool-like mechanism on the client side.
*/
// Simple, non-concurrent server for testing.
type TSimpleServer struct {
closed int32
wg sync.WaitGroup
mu sync.Mutex
quit chan struct{}
stopped int64
processorFactory TProcessorFactory
serverTransport TServerTransport
@ -94,6 +88,7 @@ func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTranspor
outputTransportFactory: outputTransportFactory,
inputProtocolFactory: inputProtocolFactory,
outputProtocolFactory: outputProtocolFactory,
quit: make(chan struct{}, 1),
}
}
@ -125,37 +120,23 @@ func (p *TSimpleServer) Listen() error {
return p.serverTransport.Listen()
}
func (p *TSimpleServer) innerAccept() (int32, error) {
client, err := p.serverTransport.Accept()
p.mu.Lock()
defer p.mu.Unlock()
closed := atomic.LoadInt32(&p.closed)
if closed != 0 {
return closed, nil
}
if err != nil {
return 0, err
}
if client != nil {
p.wg.Add(1)
go func() {
defer p.wg.Done()
if err := p.processRequests(client); err != nil {
log.Println("error processing request:", err)
}
}()
}
return 0, nil
}
func (p *TSimpleServer) AcceptLoop() error {
for {
closed, err := p.innerAccept()
client, err := p.serverTransport.Accept()
if err != nil {
select {
case <-p.quit:
return nil
default:
}
return err
}
if closed != 0 {
return nil
if client != nil {
go func() {
if err := p.processRequests(client); err != nil {
log.Println("error processing request:", err)
}
}()
}
}
}
@ -170,27 +151,17 @@ func (p *TSimpleServer) Serve() error {
}
func (p *TSimpleServer) Stop() error {
p.mu.Lock()
defer p.mu.Unlock()
if atomic.LoadInt32(&p.closed) != 0 {
return nil
if atomic.CompareAndSwapInt64(&p.stopped, 0, 1) {
p.quit <- struct{}{}
p.serverTransport.Interrupt()
}
atomic.StoreInt32(&p.closed, 1)
p.serverTransport.Interrupt()
p.wg.Wait()
return nil
}
func (p *TSimpleServer) processRequests(client TTransport) error {
processor := p.processorFactory.GetProcessor(client)
inputTransport, err := p.inputTransportFactory.GetTransport(client)
if err != nil {
return err
}
outputTransport, err := p.outputTransportFactory.GetTransport(client)
if err != nil {
return err
}
inputTransport := p.inputTransportFactory.GetTransport(client)
outputTransport := p.outputTransportFactory.GetTransport(client)
inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport)
defer func() {
@ -198,7 +169,6 @@ func (p *TSimpleServer) processRequests(client TTransport) error {
log.Printf("panic in processor: %s: %s", e, debug.Stack())
}
}()
if inputTransport != nil {
defer inputTransport.Close()
}
@ -206,19 +176,13 @@ func (p *TSimpleServer) processRequests(client TTransport) error {
defer outputTransport.Close()
}
for {
if atomic.LoadInt32(&p.closed) != 0 {
return nil
}
ok, err := processor.Process(defaultCtx, inputProtocol, outputProtocol)
ok, err := processor.Process(inputProtocol, outputProtocol)
if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE {
return nil
} else if err != nil {
log.Printf("error processing request: %s", err)
return err
}
if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD {
continue
}
if !ok {
break
}
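
The two Stop implementations above guard shutdown differently; the v1.0.5 side relies on a compare-and-swap plus a buffered quit channel so the interrupt fires exactly once even under concurrent calls. A self-contained sketch of that pattern (names illustrative, not thrift's):

package main

import (
	"fmt"
	"sync/atomic"
)

type server struct {
	stopped int64
	quit    chan struct{}
}

// Stop signals shutdown exactly once: only the caller that wins the
// CompareAndSwap sends on quit.
func (s *server) Stop() {
	if atomic.CompareAndSwapInt64(&s.stopped, 0, 1) {
		s.quit <- struct{}{}
		fmt.Println("interrupt sent")
	}
}

func main() {
	s := &server{quit: make(chan struct{}, 1)}
	s.Stop()
	s.Stop() // no-op: the CAS fails the second time
}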

View file

@ -20,7 +20,6 @@
package thrift
import (
"context"
"net"
"time"
)
@ -149,7 +148,7 @@ func (p *TSocket) Write(buf []byte) (int, error) {
return p.conn.Write(buf)
}
func (p *TSocket) Flush(ctx context.Context) error {
func (p *TSocket) Flush() error {
return nil
}

View file

@ -38,9 +38,6 @@ func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket,
}
func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) {
if cfg.MinVersion == 0 {
cfg.MinVersion = tls.VersionTLS10
}
addr, err := net.ResolveTCPAddr("tcp", listenAddr)
if err != nil {
return nil, err

View file

@ -20,7 +20,6 @@
package thrift
import (
"context"
"crypto/tls"
"net"
"time"
@ -49,9 +48,6 @@ func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port
// it also accepts a tls Configuration and a timeout as a time.Duration
func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) {
if cfg.MinVersion == 0 {
cfg.MinVersion = tls.VersionTLS10
}
return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil
}
@ -91,8 +87,7 @@ func (p *TSSLSocket) Open() error {
// If we have a hostname, we need to pass the hostname to tls.Dial for
// certificate hostname checks.
if p.hostPort != "" {
if p.conn, err = tls.DialWithDialer(&net.Dialer{
Timeout: p.timeout}, "tcp", p.hostPort, p.cfg); err != nil {
if p.conn, err = tls.Dial("tcp", p.hostPort, p.cfg); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
} else {
@ -108,8 +103,7 @@ func (p *TSSLSocket) Open() error {
if len(p.addr.String()) == 0 {
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
}
if p.conn, err = tls.DialWithDialer(&net.Dialer{
Timeout: p.timeout}, p.addr.Network(), p.addr.String(), p.cfg); err != nil {
if p.conn, err = tls.Dial(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
return NewTTransportException(NOT_OPEN, err.Error())
}
}
@ -159,7 +153,7 @@ func (p *TSSLSocket) Write(buf []byte) (int, error) {
return p.conn.Write(buf)
}
func (p *TSSLSocket) Flush(ctx context.Context) error {
func (p *TSSLSocket) Flush() error {
return nil
}

View file

@ -20,7 +20,6 @@
package thrift
import (
"context"
"errors"
"io"
)
@ -31,10 +30,6 @@ type Flusher interface {
Flush() (err error)
}
type ContextFlusher interface {
Flush(ctx context.Context) (err error)
}
type ReadSizeProvider interface {
RemainingBytes() (num_bytes uint64)
}
@ -42,7 +37,7 @@ type ReadSizeProvider interface {
// Encapsulates the I/O layer
type TTransport interface {
io.ReadWriteCloser
ContextFlusher
Flusher
ReadSizeProvider
// Opens the transport for communication
@ -65,6 +60,6 @@ type TRichTransport interface {
io.ByteReader
io.ByteWriter
stringWriter
ContextFlusher
Flusher
ReadSizeProvider
}
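
The hunk above names the signature change that drives most of this vendored diff: transports on main implement ContextFlusher, while v1.0.5's implement Flusher. A self-contained sketch of the call-site difference (the demo type and main function are assumptions; the two interfaces are copied from the diff):

package main

import (
	"context"
	"fmt"
)

// Interfaces as shown in the hunk above.
type contextFlusher interface {
	Flush(ctx context.Context) error
}

type flusher interface {
	Flush() error
}

// demoTransport satisfies the main-branch shape.
type demoTransport struct{}

func (demoTransport) Flush(ctx context.Context) error { return nil }

func main() {
	var t contextFlusher = demoTransport{}
	fmt.Println(t.Flush(context.Background())) // <nil>
}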

View file

@ -24,14 +24,14 @@ package thrift
// a ServerTransport and then may want to mutate them (i.e. create
// a BufferedTransport from the underlying base transport)
type TTransportFactory interface {
GetTransport(trans TTransport) (TTransport, error)
GetTransport(trans TTransport) TTransport
}
type tTransportFactory struct{}
// Return a wrapped instance of the base Transport.
func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
return trans, nil
func (p *tTransportFactory) GetTransport(trans TTransport) TTransport {
return trans
}
func NewTTransportFactory() TTransportFactory {
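
The comment in the hunk above describes factories that wrap a base transport, e.g. in a buffering layer. Here is a self-contained sketch of that wrapping pattern against the main-branch error-returning signature (all types are stand-ins for thrift's TTransport/TTransportFactory):

package main

import "fmt"

type transport interface{ Label() string }

type baseTransport struct{}

func (baseTransport) Label() string { return "base" }

type bufferedTransport struct{ inner transport }

func (b bufferedTransport) Label() string { return "buffered(" + b.inner.Label() + ")" }

// bufferedFactory wraps whatever transport it is given.
type bufferedFactory struct{}

func (bufferedFactory) GetTransport(t transport) (transport, error) {
	return bufferedTransport{inner: t}, nil
}

func main() {
	t, err := bufferedFactory{}.GetTransport(baseTransport{})
	fmt.Println(t.Label(), err) // buffered(base) <nil>
}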

View file

@ -21,15 +21,13 @@ package thrift
import (
"compress/zlib"
"context"
"io"
"log"
)
// TZlibTransportFactory is a factory for TZlibTransport instances
type TZlibTransportFactory struct {
level int
factory TTransportFactory
level int
}
// TZlibTransport is a TTransport implementation that makes use of zlib compression.
@ -40,27 +38,14 @@ type TZlibTransport struct {
}
// GetTransport constructs a new instance of NewTZlibTransport
func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
if p.factory != nil {
// wrap other factory
var err error
trans, err = p.factory.GetTransport(trans)
if err != nil {
return nil, err
}
}
return NewTZlibTransport(trans, p.level)
func (p *TZlibTransportFactory) GetTransport(trans TTransport) TTransport {
t, _ := NewTZlibTransport(trans, p.level)
return t
}
// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory
func NewTZlibTransportFactory(level int) *TZlibTransportFactory {
return &TZlibTransportFactory{level: level, factory: nil}
}
// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory
// as a wrapper over existing transport factory
func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory {
return &TZlibTransportFactory{level: level, factory: factory}
return &TZlibTransportFactory{level: level}
}
// NewTZlibTransport constructs a new instance of TZlibTransport
@ -92,11 +77,11 @@ func (z *TZlibTransport) Close() error {
}
// Flush flushes the writer and its underlying transport.
func (z *TZlibTransport) Flush(ctx context.Context) error {
func (z *TZlibTransport) Flush() error {
if err := z.writer.Flush(); err != nil {
return err
}
return z.transport.Flush(ctx)
return z.transport.Flush()
}
// IsOpen returns true if the transport is open

View file

@ -1,41 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"testing"
)
func TestTApplicationException(t *testing.T) {
exc := NewTApplicationException(UNKNOWN_APPLICATION_EXCEPTION, "")
if exc.Error() != defaultApplicationExceptionMessage[UNKNOWN_APPLICATION_EXCEPTION] {
t.Fatalf("Expected empty string for exception but found '%s'", exc.Error())
}
if exc.TypeId() != UNKNOWN_APPLICATION_EXCEPTION {
t.Fatalf("Expected type UNKNOWN for exception but found '%v'", exc.TypeId())
}
exc = NewTApplicationException(WRONG_METHOD_NAME, "junk_method")
if exc.Error() != "junk_method" {
t.Fatalf("Expected 'junk_method' for exception but found '%s'", exc.Error())
}
if exc.TypeId() != WRONG_METHOD_NAME {
t.Fatalf("Expected type WRONG_METHOD_NAME for exception but found '%v'", exc.TypeId())
}
}

View file

@ -1,28 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"testing"
)
func TestReadWriteBinaryProtocol(t *testing.T) {
ReadWriteProtocolTest(t, NewTBinaryProtocolFactoryDefault())
}

View file

@ -1,29 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"testing"
)
func TestBufferedTransport(t *testing.T) {
trans := NewTBufferedTransport(NewTMemoryBuffer(), 10240)
TransportTest(t, trans, trans)
}

Some files were not shown because too many files have changed in this diff.