Checking in vendor folder for ease of using go get.

Renan DelValle 2018-10-23 23:32:59 -07:00
parent 7a1251853b
commit cdb4b5a1d0
No known key found for this signature in database
GPG key ID: C240AD6D6F443EC9
3554 changed files with 1270116 additions and 0 deletions

1
vendor/github.com/paypal/gorealis/.auroraversion generated vendored Normal file

@@ -0,0 +1 @@
0.21.0

28
vendor/github.com/paypal/gorealis/.gitignore generated vendored Normal file

@@ -0,0 +1,28 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
.idea
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# Output of the go coverage tool, specifically when used with LiteIDE
*.out

26
vendor/github.com/paypal/gorealis/.travis.yml generated vendored Normal file

@@ -0,0 +1,26 @@
sudo: required
language: go
go:
- "1.10.x"
env:
global:
- GO_USR_DIRS=$(go list -f {{.Dir}} ./... | grep -E -v "/gen-go/|/vendor/")
services:
- docker
before_install:
- go get golang.org/x/tools/cmd/goimports
- test -z "`for d in $GO_USR_DIRS; do goimports -d $d/*.go | tee /dev/stderr; done`"
install:
- docker-compose up -d
script:
- go test -race -coverprofile=coverage.txt -covermode=atomic -v github.com/paypal/gorealis
after_success:
- bash <(curl -s https://codecov.io/bash)

44
vendor/github.com/paypal/gorealis/Gopkg.lock generated vendored Normal file

@@ -0,0 +1,44 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "0.10.0-http-client-fix"
name = "git.apache.org/thrift.git"
packages = ["lib/go/thrift"]
revision = "cb1afec972a85791e9b24a04b60fc9dbbfc3cda3"
source = "github.com/rdelval/thrift"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/samuel/go-zookeeper"
packages = ["zk"]
revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
[[projects]]
name = "github.com/stretchr/testify"
packages = ["assert"]
revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c"
version = "v1.2.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "fdb631400420ca7299ad56b66175c9157ca073e3de52f666a84fc1d6fa893978"
solver-name = "gps-cdcl"
solver-version = 1

17
vendor/github.com/paypal/gorealis/Gopkg.toml generated vendored Normal file

@@ -0,0 +1,17 @@
[[constraint]]
name = "git.apache.org/thrift.git"
branch = "0.10.0-http-client-fix"
source = "github.com/rdelval/thrift"
[[constraint]]
name = "github.com/pkg/errors"
revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
[[constraint]]
name = "github.com/samuel/go-zookeeper"
revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.0"

201
vendor/github.com/paypal/gorealis/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

21
vendor/github.com/paypal/gorealis/README.md generated vendored Normal file

@@ -0,0 +1,21 @@
# gorealis [![GoDoc](https://godoc.org/github.com/paypal/gorealis?status.svg)](https://godoc.org/github.com/paypal/gorealis) [![Build Status](https://travis-ci.org/paypal/gorealis.svg?branch=master)](https://travis-ci.org/paypal/gorealis) [![codecov](https://codecov.io/gh/paypal/gorealis/branch/master/graph/badge.svg)](https://codecov.io/gh/paypal/gorealis)
Go library for interacting with [Apache Aurora](https://github.com/apache/aurora).
### Aurora version compatibility
Please see [.auroraversion](./.auroraversion) for the latest Aurora version against which this
library has been tested.
## Usage
* [Getting started](docs/getting-started.md)
* [Using the sample client](docs/using-the-sample-client.md)
* [Leveraging the library](docs/leveraging-the-library.md)
## Projects using gorealis
* [australis](https://github.com/rdelval/australis)
## Contributions
Contributions are always welcome. Please raise an issue to discuss a contribution before it is made.

1217
vendor/github.com/paypal/gorealis/auroraAPI.thrift generated vendored Normal file

File diff suppressed because it is too large

56
vendor/github.com/paypal/gorealis/clusters.go generated vendored Normal file

@@ -0,0 +1,56 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"encoding/json"
"os"
"github.com/pkg/errors"
)
type Cluster struct {
Name string `json:"name"`
AgentRoot string `json:"slave_root"`
AgentRunDir string `json:"slave_run_directory"`
ZK string `json:"zk"`
ZKPort int `json:"zk_port"`
SchedZKPath string `json:"scheduler_zk_path"`
SchedURI string `json:"scheduler_uri"`
ProxyURL string `json:"proxy_url"`
AuthMechanism string `json:"auth_mechanism"`
}
// Loads clusters.json file traditionally located at /etc/aurora/clusters.json
func LoadClusters(config string) (map[string]Cluster, error) {
file, err := os.Open(config)
if err != nil {
return nil, errors.Wrap(err, "Error opening file "+config)
}
clusters := make([]Cluster, 1)
err = json.NewDecoder(file).Decode(&clusters)
if err != nil {
return nil, errors.Wrap(err, "Error parsing file "+config)
}
m := make(map[string]Cluster)
for _, cluster := range clusters {
m[cluster.Name] = cluster
}
return m, nil
}
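A minimal usage sketch (not part of the vendored file): LoadClusters pairs with LeaderFromZK, as the sample client does. The config path and cluster name below are assumptions, and the snippet assumes log and fmt are imported alongside github.com/paypal/gorealis.
```
// Sketch: resolve the scheduler leader from a clusters.json file.
clusters, err := realis.LoadClusters("/etc/aurora/clusters.json")
if err != nil {
	log.Fatalln(err)
}
cluster, ok := clusters["devcluster"]
if !ok {
	log.Fatalln("cluster not found in clusters.json")
}
// LeaderFromZK asks ZooKeeper for the current scheduler leader.
url, err := realis.LeaderFromZK(cluster)
if err != nil {
	log.Fatalln(err)
}
fmt.Println("scheduler leader: ", url)
```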

38
vendor/github.com/paypal/gorealis/clusters_test.go generated vendored Normal file

@@ -0,0 +1,38 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis_test
import (
"fmt"
"testing"
"github.com/paypal/gorealis"
"github.com/stretchr/testify/assert"
)
func TestLoadClusters(t *testing.T) {
clusters, err := realis.LoadClusters("examples/clusters.json")
if err != nil {
fmt.Print(err)
}
assert.Equal(t, clusters["devcluster"].Name, "devcluster")
assert.Equal(t, clusters["devcluster"].ZK, "192.168.33.7")
assert.Equal(t, clusters["devcluster"].SchedZKPath, "/aurora/scheduler")
assert.Equal(t, clusters["devcluster"].AuthMechanism, "UNAUTHENTICATED")
assert.Equal(t, clusters["devcluster"].AgentRunDir, "latest")
assert.Equal(t, clusters["devcluster"].AgentRoot, "/var/lib/mesos")
}

78
vendor/github.com/paypal/gorealis/container.go generated vendored Normal file

@@ -0,0 +1,78 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
type Container interface {
Build() *aurora.Container
}
type MesosContainer struct {
container *aurora.MesosContainer
}
type DockerContainer struct {
container *aurora.DockerContainer
}
func NewDockerContainer() DockerContainer {
return DockerContainer{container: aurora.NewDockerContainer()}
}
func (c DockerContainer) Build() *aurora.Container {
return &aurora.Container{Docker: c.container}
}
func (c DockerContainer) Image(image string) DockerContainer {
c.container.Image = image
return c
}
func (c DockerContainer) AddParameter(name, value string) DockerContainer {
c.container.Parameters = append(c.container.Parameters, &aurora.DockerParameter{
Name: name,
Value: value,
})
return c
}
func NewMesosContainer() MesosContainer {
return MesosContainer{container: aurora.NewMesosContainer()}
}
func (c MesosContainer) Build() *aurora.Container {
return &aurora.Container{Mesos: c.container}
}
func (c MesosContainer) DockerImage(name, tag string) MesosContainer {
if c.container.Image == nil {
c.container.Image = aurora.NewImage()
}
c.container.Image.Docker = &aurora.DockerImage{Name: name, Tag: tag}
return c
}
func (c MesosContainer) AppcImage(name, imageId string) MesosContainer {
if c.container.Image == nil {
c.container.Image = aurora.NewImage()
}
c.container.Image.Appc = &aurora.AppcImage{Name: name, ImageId: imageId}
return c
}
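A hedged sketch of how these builders are consumed, mirroring the sample client; it assumes a realis.Job value named job already exists:
```
// Mesos containerizer running a Docker image; the name and tag are illustrative.
job.Container(realis.NewMesosContainer().DockerImage("python", "2.7"))

// Alternatively, the Docker containerizer with an extra Docker parameter.
job.Container(realis.NewDockerContainer().Image("python:2.7").AddParameter("network", "host"))
```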

82
vendor/github.com/paypal/gorealis/docker-compose.yml generated vendored Normal file

@@ -0,0 +1,82 @@
version: "2"
services:
zk:
image: rdelvalle/zookeeper
restart: on-failure
ports:
- "2181:2181"
environment:
ZK_CONFIG: tickTime=2000,initLimit=10,syncLimit=5,maxClientCnxns=128,forceSync=no,clientPort=2181
ZK_ID: 1
networks:
aurora_cluster:
ipv4_address: 192.168.33.2
master:
image: rdelvalle/mesos-master:1.5.1
restart: on-failure
ports:
- "5050:5050"
environment:
MESOS_ZK: zk://192.168.33.2:2181/mesos
MESOS_QUORUM: 1
MESOS_HOSTNAME: localhost
MESOS_CLUSTER: test-cluster
MESOS_REGISTRY: replicated_log
MESOS_WORK_DIR: /tmp/mesos
networks:
aurora_cluster:
ipv4_address: 192.168.33.3
depends_on:
- zk
agent-one:
image: rdelvalle/mesos-agent:1.5.1
pid: host
restart: on-failure
ports:
- "5051:5051"
environment:
MESOS_MASTER: zk://192.168.33.2:2181/mesos
MESOS_CONTAINERIZERS: docker,mesos
MESOS_PORT: 5051
MESOS_HOSTNAME: localhost
MESOS_RESOURCES: ports(*):[11000-11999]
MESOS_SYSTEMD_ENABLE_SUPPORT: 'false'
MESOS_WORK_DIR: /tmp/mesos
networks:
aurora_cluster:
ipv4_address: 192.168.33.4
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- zk
aurora-one:
image: rdelvalle/aurora:0.21.0
pid: host
ports:
- "8081:8081"
restart: on-failure
environment:
CLUSTER_NAME: test-cluster
ZK_ENDPOINTS: "192.168.33.2:2181"
MESOS_MASTER: "zk://192.168.33.2:2181/mesos"
networks:
aurora_cluster:
ipv4_address: 192.168.33.7
depends_on:
- zk
- master
- agent-one
networks:
aurora_cluster:
driver: bridge
ipam:
config:
- subnet: 192.168.33.0/16
gateway: 192.168.33.1

90
vendor/github.com/paypal/gorealis/docs/developing.md generated vendored Normal file

@@ -0,0 +1,90 @@
# Developing gorealis
### Installing Docker
For our developer environment we leverage Docker containers.
First you must have Docker installed. Instructions on how to install Docker
vary from platform to platform and can be found [here](https://docs.docker.com/install/).
### Installing docker-compose
To make the creation of our developer environment as simple as possible, we leverage
docker-compose to bring up all independent components separately.
This also allows us to delete and recreate our development cluster very quickly.
To install docker-compose please follow the instructions for your platform
[here](https://docs.docker.com/compose/install/).
### Getting the source code
As of Go 1.10.x, GOPATH is still relevant. This may change in the future, but
for the sake of making development less error-prone, it is suggested that the following
directories be created:
`$ mkdir -p $GOPATH/src/github.com/paypal`
And then clone the master branch into the newly created folder:
`$ cd $GOPATH/src/github.com/paypal; git clone git@github.com:paypal/gorealis.git`
Since we check in our vendor folder, no further setup of gorealis is needed.
### Bringing up the cluster
To develop gorealis, you will need a fully functioning Mesos cluster along with
Apache Aurora.
In order to bring up our docker-compose setup, execute the following command from the root
of the git repository:
`$ docker-compose up -d`
### Testing code
Since Docker does not work well using host mode under MacOS, a workaround has been employed:
docker-compose brings up a bridged network.
* Port 8081 is exposed for Aurora. http://localhost:8081 will load the Aurora Web UI.
* Port 5050 is exposed for Mesos. http://localhost:5050 will load the Mesos Web UI.
#### Note for developers on MacOS:
Running the cluster using a bridged network on MacOS has some side effects.
Since Aurora exposes its internal IP address through Zookeeper, gorealis will determine
the address to be 192.168.33.7. The address 192.168.33.7 is valid when running in a Linux
environment but not when running under MacOS. To run code involving the ZK leader fetcher
(such as the tests), a container connected to the network needs to be launched.
For example, running the tests in a container can be done through the following command from
the root of the git repository:
`$ docker run -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10.3-alpine go test github.com/paypal/gorealis`
Or
`$ ./runTestsMac.sh`
Alternatively, if an interactive shell is necessary, the following command may be used:
`$ docker run -it -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10.3-alpine /bin/sh`
### Cleaning up the cluster
If something went wrong while developing and a clean environment is desired, run the
following command from the root of the git directory:
`$ docker-compose down && docker-compose up -d`
### Tearing down the cluster
Once development is done, the environment may be torn down by executing (from the root of the
git directory):
`$ docker-compose down`


@@ -0,0 +1,341 @@
# Running custom executors on Aurora
In this document we will be using the docker-compose executor to demonstrate
how Aurora can use multiple executors on a single Scheduler. For this guide,
we will be using a vagrant instance to demonstrate the setup process. Many of the same
steps also apply to an Aurora installation made via a package manager. Differences in how to configure
the cluster between the vagrant image and the package manager will be clarified when necessary.
## Configuring Aurora manually
### Spinning up an Aurora instance with Vagrant
Follow the guide at http://aurora.apache.org/documentation/latest/getting-started/vagrant/
until the end of step 4 (Start the local cluster) and then skip to configuring the Docker-Compose executor.
### Installing Aurora through a package manager
Follow the guide at http://aurora.apache.org/documentation/latest/operations/installation/
### Configuring Scheduler to use Docker-Compose executor
In order to use the docker compose executor with Aurora, we must first give the scheduler
a configuration file that contains information on how to run the executor.
#### Configuration file
The configuration is a JSON file that specifies where to find the executor and how to run it.
More information about how an executor may be configured for consumption by Aurora can be found [here](https://github.com/apache/aurora/blob/master/docs/operations/configuration.md#custom-executors)
under the custom executors section.
A sample config file for the docker-compose executor looks like this:
```
[
{
"executor":{
"command":{
"value":"java -jar docker-compose-executor_0.1.0.jar",
"shell":"true",
"uris":[
{
"cache":false,
"executable":true,
"extract":false,
"value":"https://github.com/mesos/docker-compose-executor/releases/download/0.1.0/docker-compose-executor_0.1.0.jar"
}
]
},
"name":"docker-compose-executor",
"resources":[]
},
"task_prefix":"compose-"
}
]
```
#### Configuring the Scheduler to run a custom executor
##### Setting the proper flags
Some flags need to be set on the Aurora scheduler in order for custom executors to work properly.
The `-custom_executor_config` flag must point to the location of the JSON blob.
The `-enable_mesos_fetcher` flag must be set to true in order to allow jobs to fetch resources.
##### On vagrant
* Log into the vagrant image by going to the folder at which the Aurora repository
was cloned and running:
```
$ vagrant ssh
```
* Write the sample JSON blob provided above to a file inside the vagrant image.
* Inside the vagrant image, modify the file `/etc/init/aurora-scheduler.conf` to include:
```
-custom_executor_config=<Location of JSON blob> \
-enable_mesos_fetcher=true
```
##### On a scheduler installed via package manager
* Write the sample JSON blob provided above to a file on the same machine where the scheduler is running.
* Modify `EXTRA_SCHEDULER_ARGS` in the file `/etc/default/aurora-scheduler` to be:
```
EXTRA_SCHEDULER_ARGS="-custom_executor_config=<Location of JSON blob> -enable_mesos_fetcher=true"
```
For these configurations to take effect, the aurora-scheduler must be restarted.
Depending on the distribution being used, this command may look a bit different.
On Ubuntu, restarting the aurora-scheduler can be achieved by running the following command:
```
$ sudo service aurora-scheduler restart
```
### Using a custom client
Pystachio does not yet support launching tasks using custom executors. Therefore, a custom
client must be used in order to launch tasks using a custom executor. In this case,
we will be using [gorealis](https://github.com/paypal/gorealis) to launch a task with
the compose executor on Aurora.
## Using [dce-go](https://github.com/paypal/dce-go)
Instead of manually configuring Aurora to run the docker-compose executor, one can follow the instructions provided [here](https://github.com/paypal/dce-go/blob/develop/docs/environment.md) to quickly create a DCE environment that includes Mesos, Aurora, Go 1.7, Docker, docker-compose, and DCE.
Please note that when using dce-go, the endpoints will be as shown below:
```
Aurora endpoint --> http://192.168.33.8:8081
Mesos endpoint --> http://192.168.33.8:5050
```
## Configuring the system to run a custom client and docker-compose executor
### Installing Go
#### Linux
##### Ubuntu
###### Adding a PPA and installing via apt-get
```
$ sudo add-apt-repository ppa:ubuntu-lxc/lxd-stable
$ sudo apt-get update
$ sudo apt-get install golang
```
###### Configuring the GOPATH
Configure the environment to be able to compile and run Go code.
```
$ mkdir $HOME/go
$ echo export GOPATH=$HOME/go >> $HOME/.bashrc
$ echo export GOROOT=/usr/lib/go >> $HOME/.bashrc
$ echo export PATH=$PATH:$GOPATH/bin >> $HOME/.bashrc
$ echo export PATH=$PATH:$GOROOT/bin >> $HOME/.bashrc
```
Finally we must reload the .bashrc configuration:
```
$ source $HOME/.bashrc
```
#### OS X
One way to install go on OS X is by using [Homebrew](http://brew.sh/)
##### Installing Homebrew
Run the following command from the terminal to install Homebrew:
```
$ /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
```
##### Installing Go using Homebrew
Run the following command from the terminal to install Go:
```
$ brew install go
```
##### Configuring the GOPATH
Configure the environment to be able to compile and run Go code.
```
$ mkdir $HOME/go
$ echo export GOPATH=$HOME/go >> $HOME/.profile
$ echo export GOROOT=/usr/local/opt/go/libexec >> $HOME/.profile
$ echo export PATH=$PATH:$GOPATH/bin >> $HOME/.profile
$ echo export PATH=$PATH:$GOROOT/bin >> $HOME/.profile
```
Finally we must reload the .profile configuration:
```
$ source $HOME/.profile
```
#### Windows
Download and run the msi installer from https://golang.org/dl/
## Installing Docker Compose (if manually configured Aurora)
To show Aurora's new multi-executor feature, we need to use at least one custom executor.
In this case, we will be using the [docker-compose-executor](https://github.com/mesos/docker-compose-executor).
In order to run the docker-compose executor, each agent must have docker-compose installed on it.
This can be done using pip:
```
$ sudo pip install docker-compose
```
## Downloading gorealis
Finally, we must get `gorealis` using the `go get` command:
```
go get github.com/paypal/gorealis
```
# Creating Aurora Jobs
## Creating a thermos job
To demonstrate that we are able to run jobs using different executors on the
same scheduler, we'll first launch a thermos job using the default Aurora Client.
We can use a sample job for this:
hello_world.aurora
```
hello = Process(
name = 'hello',
cmdline = """
while true; do
echo hello world
sleep 10
done
""")
task = SequentialTask(
processes = [hello],
resources = Resources(cpu = 1.0, ram = 128*MB, disk = 128*MB))
jobs = [Service(
task = task, cluster = 'devcluster', role = 'www-data', environment = 'prod', name = 'hello')]
```
Now we create the job:
```
aurora job create devcluster/www-data/prod/hello hello_world.aurora
```
Note that user `www-data` must exist on the Agent on which the task will be run.
If that user doesn't exist, please modify the role value inside the .aurora file as well
as the command to a user that exists on the machine on which the task will be run.
## Creating a docker-compose-executor job
Now that we have a thermos job running, it's time to launch a docker-compose job.
We'll be using the gorealis library sample client to send a create job request
to the scheduler, specifying that we would like to use the docker-compose executor.
Furthermore, we will be specifying what resources we need to download in order to
successfully run a docker compose job.
For example, the job configuration in the sample client looks like this:
```
job = realis.NewJob().
Environment("prod").
Role("vagrant").
Name("docker-compose").
ExecutorName("docker-compose-executor").
ExecutorData("{}").
CPU(1).
RAM(64).
Disk(100).
IsService(false).
InstanceCount(1).
AddPorts(1).
AddLabel("fileName", "sample-app/docker-compose.yml").
AddURIs(true, true, "https://github.com/mesos/docker-compose-executor/releases/download/0.1.0/sample-app.tar.gz")
```
Using a vagrant setup as an example, we can run the following command to create a compose job:
```
go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
```
If everything went according to plan, a new job will be shown in the Aurora UI.
We can further investigate inside the Mesos task sandbox. Inside the sandbox, under
the sample-app folder, we can find a docker-compose.yml-generated.yml. If we inspect this file,
we can find the port at which the web server we launched is running.
Under Web->Ports, we find the port Mesos allocated. We can then navigate to:
`<agent address>:<assigned port>`. (In vagrant's case the agent address is `192.168.33.7`)
A message from the executor should greet us.
## Creating a Thermos job using gorealis
It is also possible to create a thermos job using gorealis. To do this, however,
a thermos payload is required. A thermos payload consists of a JSON blob that details
the entire task as it exists inside the Aurora Scheduler. *Creating the blob is unfortunately
out of the scope of what gorealis does*, so a thermos payload must be generated beforehand or
retrieved from the structdump of an existing task for testing purposes.
A sample thermos JSON payload may be found [here](../examples/thermos_payload.json) in the examples folder.
The job struct configuration for a Thermos job looks something like this:
```
payload, err := ioutil.ReadFile("examples/thermos_payload.json")
job = realis.NewJob().
Environment("prod").
Role("vagrant").
Name("hello_world_from_gorealis").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(payload)).
CPU(1).
RAM(64).
Disk(100).
IsService(true).
InstanceCount(1).
AddPorts(1)
```
Using a vagrant setup as an example, we can run the following command to create a Thermos job:
```
$ cd $GOPATH/src/github.com/paypal/gorealis
$ go run examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=create
```
## Creating jobs using gorealis JSON client
We can also use the [JSON client](../examples/jsonClient.go) to create Aurora jobs using gorealis.
If using _dce-go_, then use `http://192.168.33.8:8081` as the scheduler URL.
```
$ cd $GOPATH/src/github.com/paypal/gorealis/examples
```
To launch a job using the Thermos executor,
```
$ go run jsonClient.go -job=job_thermos.json -config=config.json
```
To launch a job using docker-compose executor,
```
$ go run jsonClient.go -job=job_dce.json -config=config.json
```
# Cleaning up
To stop the jobs we've launched, we need to send a job kill request to Aurora.
It should be noted that although we can't create jobs using a custom executor with the default Aurora client,
we *can* use the default Aurora client to kill them. Additionally, we can use gorealis to perform the cleanup as well.
## Using the Default Client (if manually configured Aurora)
```
$ aurora job killall devcluster/www-data/prod/hello
$ aurora job killall devcluster/vagrant/prod/docker-compose
```
## Using gorealis
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
```


@@ -0,0 +1,60 @@
# How to leverage the library (based on the [sample client](../examples/client.go))
For a more complete look at the API, please visit https://godoc.org/github.com/paypal/gorealis
* Create a default configuration file (alternatively, manually create your own Config):
```
config, err := realis.NewDefaultConfig(*url)
```
* Create a new Realis client by passing the configuration struct in:
```
r := realis.NewClient(config)
defer r.Close()
```
* Construct a job using a Job struct:
```
job = realis.NewJob().
Environment("prod").
Role("vagrant").
Name("docker-compose").
ExecutorName("docker-compose-executor").
ExecutorData("{}").
CPU(1).
RAM(64).
Disk(100).
IsService(false).
InstanceCount(1).
AddPorts(1).
AddLabel("fileName", "sample-app/docker-compose.yml").
AddURIs(true, true, "https://github.com/mesos/docker-compose-executor/releases/download/0.1.0/sample-app.tar.gz")
```
* Use client to send a job to Aurora:
```
r.CreateJob(job)
```
* Killing an Aurora Job:
```
r.KillJob(job.GetKey())
```
* Restarting all instances of an Aurora Job:
```
r.RestartJob(job.GetKey())
```
* Adding instances (based on config of instance 0) to Aurora:
```
r.AddInstances(aurora.InstanceKey{JobKey: job.GetKey(), InstanceId: 0}, 5)
```
* Updating the job configuration of a service job:
```
updateJob := realis.NewUpdateJob(job)
updateJob.InstanceCount(1)
updateJob.RAM(128)
msg, err := r.UpdateJob(updateJob, "")
```
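Putting the snippets above together, a minimal end-to-end program might look like the following sketch. It is based on the options-based constructor used by the sample client rather than a prescribed pattern; the scheduler URL and credentials are assumptions:
```
package main

import (
	"log"

	"github.com/paypal/gorealis"
)

func main() {
	// Build a client with the options-based constructor used by the sample client.
	r, err := realis.NewRealisClient(
		realis.SchedulerUrl("http://192.168.33.7:8081"), // assumed vagrant address
		realis.BasicAuth("aurora", "secret"),            // assumed credentials
		realis.TimeoutMS(20000),
	)
	if err != nil {
		log.Fatalln(err)
	}
	defer r.Close()

	// Define a job that runs a Docker image via the Mesos containerizer.
	job := realis.NewJob().
		Environment("prod").
		Role("vagrant").
		Name("hello_world_from_gorealis").
		CPU(0.25).
		RAM(64).
		Disk(100).
		InstanceCount(1)
	job.Container(realis.NewMesosContainer().DockerImage("python", "2.7"))

	// Send the job to Aurora.
	if _, err := r.CreateJob(job); err != nil {
		log.Fatalln(err)
	}
}
```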


@@ -0,0 +1,49 @@
# Using the Sample client
## Usage:
```
Usage of ./client:
-cluster string
Name of cluster to run job on (default "devcluster")
-clusters string
Location of the clusters.json file used by aurora.
-cmd string
Job request type to send to Aurora Scheduler
-executor string
Executor to use (default "thermos")
-password string
Password to use for authorization (default "secret")
-updateId string
Update ID to operate on
-url string
URL at which the Aurora Scheduler exists as [url]:[port]
-username string
Username to use for authorization (default "aurora")
```
## Sample commands:
These commands are set to run on a vagrant box. To be able to run the docker-compose
executor examples, the vagrant box must be configured properly to use that executor.
### Thermos
#### Creating a Thermos job
```
$ cd $GOPATH/src/github.com/paypal/gorealis/examples
$ go run client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=create
```
#### Kill a Thermos job
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=thermos -url=http://192.168.33.7:8081 -cmd=kill
```
### Docker Compose executor (custom executor)
#### Creating Docker Compose executor job
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=create
```
#### Kill a Docker Compose executor job
```
$ go run $GOPATH/src/github.com/paypal/gorealis/examples/client.go -executor=compose -url=http://192.168.33.7:8081 -cmd=kill
```

88
vendor/github.com/paypal/gorealis/errors.go generated vendored Normal file

@@ -0,0 +1,88 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
// Using a pattern described by Dave Cheney to differentiate errors
// https://dave.cheney.net/2016/04/27/dont-just-check-errors-handle-them-gracefully
// Timeout errors are returned when a function is unable to continue executing due
// to a time constraint or meeting a set number of retries.
type timeout interface {
Timedout() bool
}
func IsTimeout(err error) bool {
temp, ok := err.(timeout)
return ok && temp.Timedout()
}
// retryErr is a superset of timeout which includes extra context
// about our retry mechanism. It exists so that our tests can verify
// the retry mechanism works as expected; it should never be relied on
// or used directly. It is not made part of the public API on purpose.
type retryErr struct {
error
timedout bool
retryCount int // How many times did the mechanism retry the command
}
// Retry error is a timeout type error with added context.
func (r *retryErr) Timedout() bool {
return r.timedout
}
func (r *retryErr) RetryCount() int {
return r.retryCount
}
// Helper function for test verification that avoids whitebox testing
// while keeping retryErr private.
// Should NOT be used in any other context.
func ToRetryCount(err error) *retryErr {
if retryErr, ok := err.(*retryErr); ok {
return retryErr
} else {
return nil
}
}
func newRetryError(err error, retryCount int) *retryErr {
return &retryErr{error: err, timedout: true, retryCount: retryCount}
}
// Temporary errors indicate that the action may and should be retried.
type temporary interface {
Temporary() bool
}
func IsTemporary(err error) bool {
temp, ok := err.(temporary)
return ok && temp.Temporary()
}
type TemporaryErr struct {
error
temporary bool
}
func (t *TemporaryErr) Temporary() bool {
return t.temporary
}
// Retrying after receiving this error is advised
func NewTemporaryError(err error) *TemporaryErr {
return &TemporaryErr{error: err, temporary: true}
}
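A short sketch of how a caller might branch on these error classes; callAurora is a hypothetical stand-in for any gorealis call that can fail:
```
// callAurora is a hypothetical placeholder for any gorealis call.
if err := callAurora(); err != nil {
	switch {
	case realis.IsTemporary(err):
		// The error is marked temporary; retrying the call is advised.
	case realis.IsTimeout(err):
		// The retry mechanism exhausted its time or retry budget.
	default:
		// Treat as a permanent failure.
	}
}
```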


@@ -0,0 +1,22 @@
-----BEGIN CERTIFICATE-----
MIIDrTCCApWgAwIBAgIJAM+bKx50CY9JMA0GCSqGSIb3DQEBCwUAMG0xCzAJBgNV
BAYTAkdCMQ8wDQYDVQQIDAZMb25kb24xDzANBgNVBAcMBkxvbmRvbjEYMBYGA1UE
CgwPR2xvYmFsIFNlY3VyaXR5MRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MQowCAYD
VQQDDAEqMB4XDTE3MTIwODIwNTMwMVoXDTI3MTIwNjIwNTMwMVowbTELMAkGA1UE
BhMCR0IxDzANBgNVBAgMBkxvbmRvbjEPMA0GA1UEBwwGTG9uZG9uMRgwFgYDVQQK
DA9HbG9iYWwgU2VjdXJpdHkxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxCjAIBgNV
BAMMASowggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhdN0KH80BF3Dk
RQqAARcf7F87uNhQM05HXK8ffpESvKhzrO9BHuDZ0yS3il0BK9XpTyTtHSLIbphk
rO3BOsmPj0zhaM20LsPtwy8GmMCym3hVNSYYyP5XCdjA3uZIYq2R8ruk+vZTe4Zr
F8GHV/xGYU4zKPMGzsQbICjZhj0yiYF9UQ2J+xw79nsqPTmo8+EdVuunLz39dt2o
SbDA01g/kPTIg9K2CAUH0mm4zegiqytwpn2JKVoemmgrDYECWnhLprWlvN9t/fX9
IgprDAHN1BsMrzfmfQXZpVmbIlTriVSdYVeTwG8rT7Tg8soIHqBrnJ1ykTpY4VrO
6tc2z4kTAgMBAAGjUDBOMB0GA1UdDgQWBBSLvwax1Zd6ZiE7TjRklWYNPwgZ2zAf
BgNVHSMEGDAWgBSLvwax1Zd6ZiE7TjRklWYNPwgZ2zAMBgNVHRMEBTADAQH/MA0G
CSqGSIb3DQEBCwUAA4IBAQCJY/EJxlyiSrnO82QcsWm9cT/ciU/G7Y4vX/tGs74C
tNxuBpc0vMfW4a9u6tmi3cW3EXD/KRvPwKZXxzTOhoQY9ZpbZLZ6VvCQ+aWQaXWT
664IS/mrEUZ/p3pgqTNtifdpPAZqVqNdS+Od8/B3/nWUn6JBkDZ4WaFQgfsSulxK
yzYN6UbwhLHfQUupFFhPfvYIVLH9ErGzcv5ZCHX9FornCc0W/8hL4EdjmpTW2ML2
hM5aTKynMiR1GuGSdSpJ+BOeiUI7Go1jGwjV+H9Pw/kfmooq2wuuUGti5dr0Qq7h
CQx1a14BmDBwGoMIOdjFATRwnami5e188fAJozL++i+s
-----END CERTIFICATE-----


@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDhdN0KH80BF3Dk
RQqAARcf7F87uNhQM05HXK8ffpESvKhzrO9BHuDZ0yS3il0BK9XpTyTtHSLIbphk
rO3BOsmPj0zhaM20LsPtwy8GmMCym3hVNSYYyP5XCdjA3uZIYq2R8ruk+vZTe4Zr
F8GHV/xGYU4zKPMGzsQbICjZhj0yiYF9UQ2J+xw79nsqPTmo8+EdVuunLz39dt2o
SbDA01g/kPTIg9K2CAUH0mm4zegiqytwpn2JKVoemmgrDYECWnhLprWlvN9t/fX9
IgprDAHN1BsMrzfmfQXZpVmbIlTriVSdYVeTwG8rT7Tg8soIHqBrnJ1ykTpY4VrO
6tc2z4kTAgMBAAECggEBAMZL7SY8dikhnu+HMgcH7njrg4+ZsthHZ/AoOvcucRbT
zC2ByyWxrP6pUUAFeGvRTGHadJYA7FjxvSO/XZZ4yFN2LJ6NeW+jOjzjUXcx3zq4
t4vqJUnjbqDLTlPFOTItaJBXuGcRPJqMqNuEl3kdEAwvBYLF34r7TUy2and4NFc/
JziGljkiucoNBk62TCDrffnvxMJXht+ab6PMWO87PzMVs4xUFPe0ezv4O54btUcV
EJsU58013EHeCai8AnxjcIPlMlB+lg4Y3C4VXf0mJ//cBvbCp+kyWybMw/e+e222
xq/98vnCOIqcy4u+9ENPLJQe7hXZ3Sqh38kf0GuOh8ECgYEA+VFvuuBP0OQHTxeE
dUizR3Iz/xkeGDUZ/8Ix4TCUmRRuhEXrV7ShwUmuanO3pNhChW6hXZ6qj/yuhfOC
D4V4upEnJDccz/cbH1PdBsfALhC8/C0WSGvnEWZMw/SggmY4KwReqWwN9aA8qjdq
kFTOJc2Js+dCHP9kn9J3U16A+oMCgYEA53+2lhckAI8bsrbCayWRZAVx7hUNPijt
MQvH+PCJ3QeZ0z801zk+4ny5WQ1BT0vRzwj8an4Byi2ZuTQU//N4oawDK0JVYi7q
rjKX/AhAx/puoGAgqiS1nDmuiUiplW06HqayCFbpJ1CoXz8+MwdRXiJ8dgioafVJ
+7wHZDVmMjECgYEAoULxd/ia58x2hcv6Wzo469+MjlYaxyGhvXJIfRXFJ/a1PU1U
Whh1/+W+sRBEGpXfARt7uGhmfle8Mtw8pfl5C4PTw3L6afG1U2AVOMt/HMyq0JoB
LbrNbM20nZLfNzkS35AmAoPny5ZnZtoNTWntJTp69SiB9OuklFO35u7bki0CgYAL
qQYkVzQMBylI/iWaygChvhh3+n15RQx1bPd8lXkMNgbMeiGKOaruM4QOdTl16ga+
W+CC6KfkbBmTF4l7PuMzmXtrYWL1mBFgBtJa8nt41yddUpoyl7jCDrG43n0UNrU3
uAO9ocsKnOhuK7xRS6wQhsIoG9WHyMAaOuVQadQk8QKBgQDVibcvOPXNcF1aRMG7
V24nBb+YYz+00g/cLRkDnBX9/HORle0HSfeT70ctRhuFCoHHbHF4fnp/iAwDgxdB
dNufthftTZTtFGITUsJDN36fSXNjEvKzmKEAlEYkGAYijLlDwknPB+bf4NQ6T0R+
AtnKQY6G4kFSfw9AKgWGy7ZKfg==
-----END PRIVATE KEY-----


@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC6zCCAdOgAwIBAgIJAMgY8gND5lFnMA0GCSqGSIb3DQEBCwUAMAwxCjAIBgNV
BAMMASowHhcNMTcxMjA4MjA1MTQyWhcNMjcxMjA2MjA1MTQyWjAMMQowCAYDVQQD
DAEqMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwKcyyXg90wen25yh
QA17MDyzjzBsIL9+kznzRD1azoNqA3RShAAWXn5a81HeWvncpVL+TKPMU3UC02XT
I6GtX1U7xmdKstBLKiHQxGWX04DshSrVgcVzLUI6OHBG6feoL1mGAa8jB2UEE6ER
uXdgYgKbLUrvduSn4fBvPIhhXg9YL2n2TVujkaY9bPZ9M5tQ5K+g4wRwCAYgjTUN
55J82uzAsLCs+AQi9D4bLJmw0z2H7enRLkd9sRE2pArhXm4LLg/QlL8I5ZHv7vfl
RYdOoC3bjgKk+OVOmb2Fb/dWVlOMcnO8qeo9WyQbhAcjNK2W9Tqk5E5orGZ/bkw/
iZc0MwIDAQABo1AwTjAdBgNVHQ4EFgQUA0xmNKQqxUQgaM9ceCzFyocn9jswHwYD
VR0jBBgwFoAUA0xmNKQqxUQgaM9ceCzFyocn9jswDAYDVR0TBAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAQEAnL7VvBTcFyLeNeuTAWmM0bjlwWsuL9Va2LZitnATgzE7
ACS+ZNURnpK/o3UHGc2ePDCFgPsF2mnh4Jmye2tl5uPxQS2zR96hp16ZGVi9N1gx
4aQyknKt6UFRP/cvWwgDN5N3pnRZQ7J0kaAWCPtAIldeGK7UDjOJ1DLDVLeByr7x
27TCt69ysisTtz6Tzr5vvVDEtu2yNIf/uGk3od+pe/0E1UXVCTItvwM30wvfcTPU
aMZXBYNmSrjnJ4k/9FSjZYNKPtK1c/JR+zUng1h+I3b7itY5VBGdzdq9fEk20PHm
Xdg1Ptbebtl6PJqWX+rydXuen6SUt8vFJE89MkbWSw==
-----END CERTIFICATE-----

625
vendor/github.com/paypal/gorealis/examples/client.go generated vendored Normal file

@@ -0,0 +1,625 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"strings"
"time"
"github.com/paypal/gorealis"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/paypal/gorealis/response"
)
var cmd, executor, url, clustersConfig, clusterName, updateId, username, password, zkUrl, hostList, role string
var caCertsPath string
var clientKey, clientCert string
var CONNECTION_TIMEOUT = 20000
func init() {
flag.StringVar(&cmd, "cmd", "", "Job request type to send to Aurora Scheduler")
flag.StringVar(&executor, "executor", "thermos", "Executor to use")
flag.StringVar(&url, "url", "", "URL at which the Aurora Scheduler exists as [url]:[port]")
flag.StringVar(&clustersConfig, "clusters", "", "Location of the clusters.json file used by aurora.")
flag.StringVar(&clusterName, "cluster", "devcluster", "Name of cluster to run job on (only necessary if clusters is set)")
flag.StringVar(&updateId, "updateId", "", "Update ID to operate on")
flag.StringVar(&username, "username", "aurora", "Username to use for authorization")
flag.StringVar(&password, "password", "secret", "Password to use for authorization")
flag.StringVar(&zkUrl, "zkurl", "", "zookeeper url")
flag.StringVar(&hostList, "hostList", "", "Comma separated list of hosts to operate on")
flag.StringVar(&role, "role", "", "owner role to use")
flag.StringVar(&caCertsPath, "caCertsPath", "", "Path to CA certs on local machine.")
flag.StringVar(&clientCert, "clientCert", "", "Client certificate to use to connect to Aurora.")
flag.StringVar(&clientKey, "clientKey", "", "Client private key to use to connect to Aurora.")
flag.Parse()
// Attempt to load leader from zookeeper using a
// cluster.json file used for the default aurora client if provided.
// This will override the provided url in the arguments
if clustersConfig != "" {
clusters, err := realis.LoadClusters(clustersConfig)
if err != nil {
log.Fatalln(err)
}
cluster, ok := clusters[clusterName]
if !ok {
log.Fatalf("Cluster %s doesn't exist in the file provided\n", clusterName)
}
url, err = realis.LeaderFromZK(cluster)
if err != nil {
log.Fatalln(err)
}
}
}
func main() {
var job realis.Job
var err error
var monitor *realis.Monitor
var r realis.Realis
clientOptions := []realis.ClientOption{
realis.BasicAuth(username, password),
realis.ThriftJSON(),
realis.TimeoutMS(CONNECTION_TIMEOUT),
realis.BackOff(realis.Backoff{
Steps: 2,
Duration: 10 * time.Second,
Factor: 2.0,
Jitter: 0.1,
}),
realis.Debug(),
}
// Check if zkUrl is available.
if zkUrl != "" {
fmt.Println("zkUrl: ", zkUrl)
clientOptions = append(clientOptions, realis.ZKUrl(zkUrl))
} else {
clientOptions = append(clientOptions, realis.SchedulerUrl(url))
}
if caCertsPath != "" {
clientOptions = append(clientOptions, realis.Certspath(caCertsPath))
}
if clientKey != "" && clientCert != "" {
clientOptions = append(clientOptions, realis.ClientCerts(clientKey, clientCert))
}
r, err = realis.NewRealisClient(clientOptions...)
if err != nil {
log.Fatalln(err)
}
monitor = &realis.Monitor{r}
defer r.Close()
switch executor {
case "thermos":
payload, err := ioutil.ReadFile("examples/thermos_payload.json")
if err != nil {
log.Fatalln("Error reading json config file: ", err)
}
job = realis.NewJob().
Environment("prod").
Role("vagrant").
Name("hello_world_from_gorealis").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(payload)).
CPU(1).
RAM(64).
Disk(100).
IsService(true).
InstanceCount(1).
AddPorts(1)
case "compose":
job = realis.NewJob().
Environment("prod").
Role("vagrant").
Name("docker-compose-test").
ExecutorName("docker-compose-executor").
ExecutorData("{}").
CPU(0.25).
RAM(512).
Disk(100).
IsService(true).
InstanceCount(1).
AddPorts(4).
AddLabel("fileName", "sample-app/docker-compose.yml").
AddURIs(true, true, "https://github.com/mesos/docker-compose-executor/releases/download/0.1.0/sample-app.tar.gz")
case "none":
job = realis.NewJob().
Environment("prod").
Role("vagrant").
Name("docker_as_task").
CPU(1).
RAM(64).
Disk(100).
IsService(true).
InstanceCount(1).
AddPorts(1)
default:
log.Fatalln("Only thermos, compose, and none are supported for now")
}
switch cmd {
case "create":
fmt.Println("Creating job")
resp, err := r.CreateJob(job)
if err != nil {
log.Fatalln(err)
}
fmt.Println(resp.String())
if ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50); !ok || mErr != nil {
_, err := r.KillJob(job.JobKey())
if err != nil {
log.Fatalln(err)
}
log.Fatalf("ok: %v\n err: %v", ok, mErr)
}
case "createService":
// Create a service with three instances using the update API instead of the createJob API
fmt.Println("Creating service")
settings := realis.NewUpdateSettings()
job.InstanceCount(3)
resp, result, err := r.CreateService(job, settings)
if err != nil {
log.Println("error: ", err)
log.Fatal("response: ", resp.String())
}
fmt.Println(result.String())
if ok, mErr := monitor.JobUpdate(*result.GetKey(), 5, 180); !ok || mErr != nil {
if _, err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out"); err != nil {
log.Fatal(err)
}
if _, err := r.KillJob(job.JobKey()); err != nil {
log.Fatal(err)
}
log.Fatalf("ok: %v\n err: %v", ok, mErr)
}
case "createDocker":
fmt.Println("Creating a docker based job")
container := realis.NewDockerContainer().Image("python:2.7").AddParameter("network", "host")
job.Container(container)
resp, err := r.CreateJob(job)
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
_, err := r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
}
case "createMesosContainer":
fmt.Println("Creating a docker based job")
container := realis.NewMesosContainer().DockerImage("python", "2.7")
job.Container(container)
resp, err := r.CreateJob(job)
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
_, err := r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
}
case "scheduleCron":
fmt.Println("Scheduling a Cron job")
// Cron config
job.CronSchedule("* * * * *")
job.IsService(false)
resp, err := r.ScheduleCronJob(job)
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "startCron":
fmt.Println("Starting a Cron job")
resp, err := r.StartCronJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "descheduleCron":
fmt.Println("Descheduling a Cron job")
resp, err := r.DescheduleCronJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "kill":
fmt.Println("Killing job")
resp, err := r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
if ok, err := monitor.Instances(job.JobKey(), 0, 5, 50); !ok || err != nil {
log.Fatal("Unable to kill all instances of job")
}
fmt.Println(resp.String())
case "restart":
fmt.Println("Restarting job")
resp, err := r.RestartJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "liveCount":
fmt.Println("Getting instance count")
live, err := r.GetInstanceIds(job.JobKey(), aurora.LIVE_STATES)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Live instances: %+v\n", live)
case "activeCount":
fmt.Println("Getting instance count")
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
log.Fatal(err)
}
fmt.Println("Number of live instances: ", len(live))
case "flexUp":
fmt.Println("Flexing up job")
numOfInstances := int32(4)
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
log.Fatal(err)
}
currInstances := int32(len(live))
fmt.Println("Current num of instances: ", currInstances)
var instId int32
for k := range live {
instId = k
}
resp, err := r.AddInstances(aurora.InstanceKey{
JobKey: job.JobKey(),
InstanceId: instId,
},
numOfInstances)
if err != nil {
log.Fatal(err)
}
if ok, err := monitor.Instances(job.JobKey(), currInstances+numOfInstances, 5, 50); !ok || err != nil {
fmt.Println("Flexing up failed")
}
fmt.Println(resp.String())
case "flexDown":
fmt.Println("Flexing down job")
numOfInstances := int32(2)
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
log.Fatal(err)
}
currInstances := int32(len(live))
fmt.Println("Current num of instances: ", currInstances)
resp, err := r.RemoveInstances(job.JobKey(), numOfInstances)
if err != nil {
log.Fatal(err)
}
if ok, err := monitor.Instances(job.JobKey(), currInstances-numOfInstances, 5, 100); !ok || err != nil {
fmt.Println("flexDown failed")
}
fmt.Println(resp.String())
case "update":
fmt.Println("Updating a job with with more RAM and to 5 instances")
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
log.Fatal(err)
}
var instId int32
for k := range live {
instId = k
}
taskConfig, err := r.FetchTaskConfig(aurora.InstanceKey{
JobKey: job.JobKey(),
InstanceId: instId,
})
if err != nil {
log.Fatal(err)
}
updateJob := realis.NewDefaultUpdateJob(taskConfig)
updateJob.InstanceCount(5).RAM(128)
resp, err := r.StartJobUpdate(updateJob, "")
if err != nil {
log.Fatal(err)
}
jobUpdateKey := response.JobUpdateKey(resp)
monitor.JobUpdate(*jobUpdateKey, 5, 500)
case "pauseJobUpdate":
resp, err := r.PauseJobUpdate(&aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
}, "")
if err != nil {
log.Fatal(err)
}
fmt.Println("PauseJobUpdate response: ", resp.String())
case "resumeJobUpdate":
resp, err := r.ResumeJobUpdate(&aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
}, "")
if err != nil {
log.Fatal(err)
}
fmt.Println("ResumeJobUpdate response: ", resp.String())
case "pulseJobUpdate":
resp, err := r.PulseJobUpdate(&aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
})
if err != nil {
log.Fatal(err)
}
fmt.Println("PulseJobUpdate response: ", resp.String())
case "updateDetails":
resp, err := r.JobUpdateDetails(aurora.JobUpdateQuery{
Key: &aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
},
Limit: 1,
})
if err != nil {
log.Fatal(err)
}
fmt.Println(response.JobUpdateDetails(resp))
case "abortUpdate":
fmt.Println("Abort update")
resp, err := r.AbortJobUpdate(aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
},
"")
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "rollbackUpdate":
fmt.Println("Abort update")
resp, err := r.RollbackJobUpdate(aurora.JobUpdateKey{
Job: job.JobKey(),
ID: updateId,
},
"")
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "taskConfig":
fmt.Println("Getting job info")
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
log.Fatal(err)
}
var instId int32
for k := range live {
instId = k
break
}
config, err := r.FetchTaskConfig(aurora.InstanceKey{
JobKey: job.JobKey(),
InstanceId: instId,
})
if err != nil {
log.Fatal(err)
}
log.Println(config.String())
case "updatesummary":
fmt.Println("Getting job update summary")
jobquery := &aurora.JobUpdateQuery{
Role: &job.JobKey().Role,
JobKey: job.JobKey(),
}
updatesummary, err := r.GetJobUpdateSummaries(jobquery)
if err != nil {
log.Fatalf("error while getting update summary: %v", err)
}
fmt.Println(updatesummary)
case "taskStatus":
fmt.Println("Getting task status")
taskQ := &aurora.TaskQuery{
Role: &job.JobKey().Role,
Environment: &job.JobKey().Environment,
JobName: &job.JobKey().Name,
}
tasks, err := r.GetTaskStatus(taskQ)
if err != nil {
log.Fatalf("error: %+v\n ", err)
}
fmt.Printf("length: %d\n ", len(tasks))
fmt.Printf("tasks: %+v\n", tasks)
case "tasksWithoutConfig":
fmt.Println("Getting task status")
taskQ := &aurora.TaskQuery{
Role: &job.JobKey().Role,
Environment: &job.JobKey().Environment,
JobName: &job.JobKey().Name,
}
tasks, err := r.GetTasksWithoutConfigs(taskQ)
if err != nil {
log.Fatalf("error: %+v\n ", err)
}
fmt.Printf("length: %d\n ", len(tasks))
fmt.Printf("tasks: %+v\n", tasks)
case "drainHosts":
fmt.Println("Setting hosts to DRAINING")
if hostList == "" {
log.Fatal("No hosts specified to drain")
}
hosts := strings.Split(hostList, ",")
_, result, err := r.DrainHosts(hosts...)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
// Monitor change to DRAINING and DRAINED mode
hostResult, err := monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
5,
10)
if err != nil {
for host, ok := range hostResult {
if !ok {
fmt.Printf("Host %s did not transtion into desired mode(s)\n", host)
}
}
log.Fatalf("error: %+v\n", err.Error())
}
fmt.Print(result.String())
case "endMaintenance":
fmt.Println("Setting hosts to ACTIVE")
if hostList == "" {
log.Fatal("No hosts specified to drain")
}
hosts := strings.Split(hostList, ",")
_, result, err := r.EndMaintenance(hosts...)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
// Monitor change back to ACTIVE (NONE) mode
hostResult, err := monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
5,
10)
if err != nil {
for host, ok := range hostResult {
if !ok {
fmt.Printf("Host %s did not transtion into desired mode(s)\n", host)
}
}
log.Fatalf("error: %+v\n", err.Error())
}
fmt.Print(result.String())
case "getJobs":
fmt.Println("GetJobs...role: ", role)
_, result, err := r.GetJobs(role)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
fmt.Println("map size: ", len(result.Configs))
fmt.Println(result.String())
case "snapshot":
fmt.Println("Forcing scheduler to write snapshot to mesos replicated log")
err := r.Snapshot()
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
case "performBackup":
fmt.Println("Writing Backup of Snapshot to file system")
err := r.PerformBackup()
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
case "forceExplicitRecon":
fmt.Println("Force an explicit recon")
err := r.ForceExplicitTaskReconciliation(nil)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
case "forceImplicitRecon":
fmt.Println("Force an implicit recon")
err := r.ForceImplicitTaskReconciliation()
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
default:
log.Fatal("Command not supported")
}
}
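The flexUp and flexDown cases above share one pattern: read the current active instance count, change it with AddInstances or RemoveInstances, then block on the monitor until the scheduler converges. A minimal sketch of the scale-up half, assuming r, monitor, and job are wired up exactly as in this example; the scaleUp helper itself is hypothetical:
package main
import (
	"errors"
	realis "github.com/paypal/gorealis"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)
// scaleUp (hypothetical helper) adds delta instances to job and waits for them to go live.
func scaleUp(r realis.Realis, monitor *realis.Monitor, job realis.Job, delta int32) error {
	live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
	if err != nil {
		return err
	}
	// Any active instance can serve as the template for the new ones.
	var instID int32
	for k := range live {
		instID = k
		break
	}
	if _, err := r.AddInstances(aurora.InstanceKey{JobKey: job.JobKey(), InstanceId: instID}, delta); err != nil {
		return err
	}
	// Poll every 5 seconds, at most 50 times, for the new count to be reached.
	if ok, err := monitor.Instances(job.JobKey(), int32(len(live))+delta, 5, 50); !ok || err != nil {
		return errors.New("job did not reach the desired instance count")
	}
	return nil
}
func main() {
	// Wire up a client, monitor, and job as in the example above, then call scaleUp.
}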


@ -0,0 +1,8 @@
[{
"name": "devcluster",
"zk": "192.168.33.7",
"scheduler_zk_path": "/aurora/scheduler",
"auth_mechanism": "UNAUTHENTICATED",
"slave_run_directory": "latest",
"slave_root": "/var/lib/mesos"
}]
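clusters.json is the input for ZooKeeper-based leader discovery: each entry unmarshals into a realis.Cluster, which is then handed to the ZKCluster client option. A sketch of that wiring; the LoadClusters helper name and signature are assumed from the gorealis clusters code, so treat them as illustrative:
package main
import (
	"log"
	realis "github.com/paypal/gorealis"
)
func main() {
	// Assumed helper: reads a clusters.json like the one above into a map keyed by cluster name.
	clusters, err := realis.LoadClusters("examples/clusters.json")
	if err != nil {
		log.Fatal(err)
	}
	cluster, ok := clusters["devcluster"]
	if !ok {
		log.Fatal("devcluster not found in clusters.json")
	}
	// Let the client discover the scheduler leader through ZooKeeper.
	r, err := realis.NewRealisClient(realis.ZKCluster(&cluster))
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()
}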

13
vendor/github.com/paypal/gorealis/examples/config.json generated vendored Normal file

@ -0,0 +1,13 @@
{
"username": "aurora",
"password": "secret",
"sched_url": "http://192.168.33.7:8081",
"cluster" : {
"name": "devcluster",
"zk": "192.168.33.7",
"scheduler_zk_path": "/aurora/scheduler",
"auth_mechanism": "UNAUTHENTICATED",
"slave_run_directory": "latest",
"slave_root": "/var/lib/mesos"
}
}

21
vendor/github.com/paypal/gorealis/examples/job.json generated vendored Normal file

@ -0,0 +1,21 @@
{
"name": "sample",
"cpu": 1.4,
"ram_mb": 128,
"disk_mb": 64,
"executor": "docker-compose-executor",
"service": false,
"ports": 1,
"instances": 1,
"uris": [
{
"uri": "https://github.com/mesos/docker-compose-executor/releases/download/0.1.0/sample-app.tar.gz",
"extract": true,
"cache": true
}
],
"labels":{
"fileName":"sample-app/docker-compose.yml"
}
}
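This file is what the JSON client later in this diff decodes before building an Aurora job. A minimal sketch of that decode step, using a pared-down copy of the client's JobJson struct (the file path is assumed to be relative to the working directory):
package main
import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)
// JobJson mirrors the struct defined in the JSON client below, trimmed to
// the fields needed for this sketch.
type JobJson struct {
	Name      string  `json:"name"`
	CPU       float64 `json:"cpu"`
	RAM       int64   `json:"ram_mb"`
	Disk      int64   `json:"disk_mb"`
	Instances int32   `json:"instances"`
}
func main() {
	f, err := os.Open("examples/job.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	var job JobJson
	if err := json.NewDecoder(f).Decode(&job); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s: %.2f cpu, %d MB ram, %d MB disk, %d instance(s)\n",
		job.Name, job.CPU, job.RAM, job.Disk, job.Instances)
}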


@ -0,0 +1,21 @@
{
"name": "sampleapp",
"cpu": 0.25,
"ram_mb": 256,
"disk_mb": 100,
"executor": "docker-compose-executor",
"service": true,
"ports": 4,
"instances": 1,
"uris": [
{
"uri": "http://192.168.33.8/app.tar.gz",
"extract": true,
"cache": false
}
],
"labels":{
"fileName":"sampleapp/docker-compose.yml,sampleapp/docker-compose-healthcheck.yml"
}
}


@ -0,0 +1,11 @@
{
"name": "hello_world_from_gorealis",
"cpu": 1.0,
"ram_mb": 64,
"disk_mb": 100,
"executor": "thermos",
"exec_data_file": "examples/thermos_payload.json",
"service": true,
"ports": 1,
"instances": 1
}


@ -0,0 +1,226 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"time"
"github.com/paypal/gorealis"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/pkg/errors"
)
type URIJson struct {
URI string `json:"uri"`
Extract bool `json:"extract"`
Cache bool `json:"cache"`
}
type JobJson struct {
Name string `json:"name"`
CPU float64 `json:"cpu"`
RAM int64 `json:"ram_mb"`
Disk int64 `json:"disk_mb"`
Executor string `json:"executor"`
ExecutorDataFile string `json:"exec_data_file,omitempty"`
Instances int32 `json:"instances"`
URIs []URIJson `json:"uris"`
Labels map[string]string `json:"labels"`
Service bool `json:"service"`
Ports int `json:"ports"`
}
func (j *JobJson) Validate() bool {
if j.Name == "" {
return false
}
if j.CPU <= 0.0 {
return false
}
if j.RAM <= 0 {
return false
}
if j.Disk <= 0 {
return false
}
return true
}
type Config struct {
realis.Cluster `json:"cluster"`
Username string `json:"username"`
Password string `json:"password"`
SchedUrl string `json:"sched_url"`
Transport string `json:"transport,omitempty"`
Debug bool `json:"debug,omitempty"`
}
// Command-line arguments for config and job JSON files.
var configJSONFile, jobJSONFile string
var job *JobJson
var config *Config
// Read and validate the command line arguments.
// If the Aurora scheduler URL is not provided, ZooKeeper is used to locate the leader.
func init() {
flag.StringVar(&configJSONFile, "config", "./config.json", "The config file that contains username, password, and the cluster configuration information.")
flag.StringVar(&jobJSONFile, "job", "./job.json", "JSON file containing job definitions.")
flag.Parse()
job = new(JobJson)
config = new(Config)
if jobsFile, jobJSONReadErr := os.Open(jobJSONFile); jobJSONReadErr != nil {
flag.Usage()
fmt.Println("Error reading the job JSON file: ", jobJSONReadErr)
os.Exit(1)
} else {
if unmarshallErr := json.NewDecoder(jobsFile).Decode(job); unmarshallErr != nil {
flag.Usage()
fmt.Println("Error parsing job json file: ", unmarshallErr)
os.Exit(1)
}
// Need to validate the job JSON file.
if !job.Validate() {
fmt.Println("Invalid Job.")
os.Exit(1)
}
}
if configFile, configJSONErr := os.Open(configJSONFile); configJSONErr != nil {
flag.Usage()
fmt.Println("Error reading the config JSON file: ", configJSONErr)
os.Exit(1)
} else {
if unmarshallErr := json.NewDecoder(configFile).Decode(config); unmarshallErr != nil {
fmt.Println("Error parsing config JSON file: ", unmarshallErr)
os.Exit(1)
}
}
}
func CreateRealisClient(config *Config) (realis.Realis, error) {
var transportOption realis.ClientOption
// Configure the transport protocol. If no transport is provided, JSON is used as the
// default transport protocol.
switch config.Transport {
case "binary":
transportOption = realis.ThriftBinary()
case "json", "":
transportOption = realis.ThriftJSON()
default:
fmt.Println("Invalid transport option provided!")
os.Exit(1)
}
clientOptions := []realis.ClientOption{
realis.BasicAuth(config.Username, config.Password),
transportOption,
realis.ZKCluster(&config.Cluster),
// realis.SchedulerUrl(config.SchedUrl),
realis.SetLogger(log.New(os.Stdout, "realis-debug: ", log.Ldate)),
realis.BackOff(realis.Backoff{
Steps: 2,
Duration: 10 * time.Second,
Factor: 2.0,
Jitter: 0.1,
}),
}
if config.Debug {
clientOptions = append(clientOptions, realis.Debug())
}
return realis.NewRealisClient(clientOptions...)
}
func main() {
if r, clientCreationErr := CreateRealisClient(config); clientCreationErr != nil {
fmt.Println(clientCreationErr)
os.Exit(1)
} else {
monitor := &realis.Monitor{Client: r}
defer r.Close()
uris := job.URIs
labels := job.Labels
auroraJob := realis.NewJob().
Environment("prod").
Role("vagrant").
Name(job.Name).
CPU(job.CPU).
RAM(job.RAM).
Disk(job.Disk).
IsService(job.Service).
InstanceCount(job.Instances).
AddPorts(job.Ports)
// If using the Thermos executor, read in the Thermos payload.
if (job.Executor == aurora.AURORA_EXECUTOR_NAME) || (job.Executor == "thermos") {
payload, err := ioutil.ReadFile(job.ExecutorDataFile)
if err != nil {
fmt.Println(errors.Wrap(err, "Invalid thermos payload file!"))
os.Exit(1)
}
auroraJob.ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(payload))
} else {
auroraJob.ExecutorName(job.Executor)
}
// Adding URIs.
for _, uri := range uris {
auroraJob.AddURIs(uri.Extract, uri.Cache, uri.URI)
}
// Adding Labels.
for key, value := range labels {
auroraJob.AddLabel(key, value)
}
fmt.Println("Creating Job...")
if resp, jobCreationErr := r.CreateJob(auroraJob); jobCreationErr != nil {
fmt.Println("Error creating Aurora job: ", jobCreationErr)
os.Exit(1)
} else {
if resp.ResponseCode == aurora.ResponseCode_OK {
if ok, monitorErr := monitor.Instances(auroraJob.JobKey(), auroraJob.GetInstanceCount(), 5, 50); !ok || monitorErr != nil {
if _, jobErr := r.KillJob(auroraJob.JobKey()); jobErr !=
nil {
fmt.Println(jobErr)
os.Exit(1)
} else {
fmt.Println("ok: ", ok)
fmt.Println("jobErr: ", jobErr)
}
}
}
}
}
}


@ -0,0 +1,62 @@
{
"environment": "prod",
"health_check_config": {
"initial_interval_secs": 15.0,
"health_checker": {
"http": {
"expected_response_code": 0,
"endpoint": "/health",
"expected_response": "ok"
}
},
"interval_secs": 10.0,
"timeout_secs": 1.0,
"max_consecutive_failures": 0
},
"name": "hello_world_from_gorealis",
"service": false,
"max_task_failures": 1,
"cron_collision_policy": "KILL_EXISTING",
"enable_hooks": false,
"cluster": "devcluster",
"task": {
"processes": [
{
"daemon": false,
"name": "hello",
"ephemeral": false,
"max_failures": 1,
"min_duration": 5,
"cmdline": "echo hello world from gorealis; sleep 10",
"final": false
}
],
"name": "hello",
"finalization_wait": 30,
"max_failures": 1,
"max_concurrency": 0,
"resources": {
"gpu": 0,
"disk": 134217728,
"ram": 134217728,
"cpu": 1.0
},
"constraints": [
{
"order": [
"hello"
]
}
]
},
"production": false,
"role": "vagrant",
"lifecycle": {
"http": {
"graceful_shutdown_endpoint": "/quitquitquit",
"port": "health",
"shutdown_endpoint": "/abortabortabort"
}
},
"priority": 0
}
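gorealis never interprets this payload; it is read as opaque bytes and attached as executor data, exactly as the JSON client above does. A standalone sketch of that wiring, with values mirroring the Thermos job.json earlier in this diff:
package main
import (
	"fmt"
	"io/ioutil"
	"log"
	realis "github.com/paypal/gorealis"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)
func main() {
	// The scheduler hands the payload to the Thermos executor untouched.
	payload, err := ioutil.ReadFile("examples/thermos_payload.json")
	if err != nil {
		log.Fatal(err)
	}
	job := realis.NewJob().
		Environment("prod").
		Role("vagrant").
		Name("hello_world_from_gorealis").
		CPU(1.0).
		RAM(64).
		Disk(100).
		ExecutorName(aurora.AURORA_EXECUTOR_NAME).
		ExecutorData(string(payload)).
		InstanceCount(1).
		AddPorts(1)
	// Pass job to a realis client's CreateJob, as in the examples above.
	fmt.Println(job.JobKey())
}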


@ -0,0 +1,62 @@
{
"environment": "prod",
"health_check_config": {
"initial_interval_secs": 15.0,
"health_checker": {
"http": {
"expected_response_code": 0,
"endpoint": "/health",
"expected_response": "ok"
}
},
"interval_secs": 10.0,
"timeout_secs": 1.0,
"max_consecutive_failures": 0
},
"name": "hello_world_from_gorealis",
"service": false,
"max_task_failures": 1,
"cron_collision_policy": "KILL_EXISTING",
"enable_hooks": false,
"cluster": "devcluster",
"task": {
"processes": [
{
"daemon": false,
"name": "hello",
"ephemeral": false,
"max_failures": 1,
"min_duration": 5,
"cmdline": "\n while true; do\n echo hello world from gorealis\n sleep 10\n done\n ",
"final": false
}
],
"name": "hello",
"finalization_wait": 30,
"max_failures": 1,
"max_concurrency": 0,
"resources": {
"gpu": 0,
"disk": 134217728,
"ram": 134217728,
"cpu": 1.0
},
"constraints": [
{
"order": [
"hello"
]
}
]
},
"production": false,
"role": "vagrant",
"lifecycle": {
"http": {
"graceful_shutdown_endpoint": "/quitquitquit",
"port": "health",
"shutdown_endpoint": "/abortabortabort"
}
},
"priority": 0
}

File diff suppressed because it is too large


@ -0,0 +1,801 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package main
import (
"apache/aurora"
"flag"
"fmt"
"git.apache.org/thrift.git/lib/go/thrift"
"math"
"net"
"net/url"
"os"
"strconv"
"strings"
)
func Usage() {
fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, "\nFunctions:")
fmt.Fprintln(os.Stderr, " Response createJob(JobConfiguration description)")
fmt.Fprintln(os.Stderr, " Response scheduleCronJob(JobConfiguration description)")
fmt.Fprintln(os.Stderr, " Response descheduleCronJob(JobKey job)")
fmt.Fprintln(os.Stderr, " Response startCronJob(JobKey job)")
fmt.Fprintln(os.Stderr, " Response restartShards(JobKey job, shardIds)")
fmt.Fprintln(os.Stderr, " Response killTasks(JobKey job, instances, string message)")
fmt.Fprintln(os.Stderr, " Response addInstances(InstanceKey key, i32 count)")
fmt.Fprintln(os.Stderr, " Response replaceCronTemplate(JobConfiguration config)")
fmt.Fprintln(os.Stderr, " Response startJobUpdate(JobUpdateRequest request, string message)")
fmt.Fprintln(os.Stderr, " Response pauseJobUpdate(JobUpdateKey key, string message)")
fmt.Fprintln(os.Stderr, " Response resumeJobUpdate(JobUpdateKey key, string message)")
fmt.Fprintln(os.Stderr, " Response abortJobUpdate(JobUpdateKey key, string message)")
fmt.Fprintln(os.Stderr, " Response rollbackJobUpdate(JobUpdateKey key, string message)")
fmt.Fprintln(os.Stderr, " Response pulseJobUpdate(JobUpdateKey key)")
fmt.Fprintln(os.Stderr, " Response getRoleSummary()")
fmt.Fprintln(os.Stderr, " Response getJobSummary(string role)")
fmt.Fprintln(os.Stderr, " Response getTasksStatus(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getTasksWithoutConfigs(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getPendingReason(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getConfigSummary(JobKey job)")
fmt.Fprintln(os.Stderr, " Response getJobs(string ownerRole)")
fmt.Fprintln(os.Stderr, " Response getQuota(string ownerRole)")
fmt.Fprintln(os.Stderr, " Response populateJobConfig(JobConfiguration description)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateSummaries(JobUpdateQuery jobUpdateQuery)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateDetails(JobUpdateQuery query)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateDiff(JobUpdateRequest request)")
fmt.Fprintln(os.Stderr, " Response getTierConfigs()")
fmt.Fprintln(os.Stderr)
os.Exit(0)
}
func main() {
flag.Usage = Usage
var host string
var port int
var protocol string
var urlString string
var framed bool
var useHttp bool
var parsedUrl url.URL
var trans thrift.TTransport
_ = strconv.Atoi
_ = math.Abs
flag.Usage = Usage
flag.StringVar(&host, "h", "localhost", "Specify host and port")
flag.IntVar(&port, "p", 9090, "Specify port")
flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
flag.StringVar(&urlString, "u", "", "Specify the url")
flag.BoolVar(&framed, "framed", false, "Use framed transport")
flag.BoolVar(&useHttp, "http", false, "Use http")
flag.Parse()
if len(urlString) > 0 {
parsedUrl, err := url.Parse(urlString)
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
host = parsedUrl.Host
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http"
} else if useHttp {
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
}
cmd := flag.Arg(0)
var err error
if useHttp {
trans, err = thrift.NewTHttpClient(parsedUrl.String())
} else {
portStr := fmt.Sprint(port)
if strings.Contains(host, ":") {
host, portStr, err = net.SplitHostPort(host)
if err != nil {
fmt.Fprintln(os.Stderr, "error with host:", err)
os.Exit(1)
}
}
trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
if err != nil {
fmt.Fprintln(os.Stderr, "error resolving address:", err)
os.Exit(1)
}
if framed {
trans = thrift.NewTFramedTransport(trans)
}
}
if err != nil {
fmt.Fprintln(os.Stderr, "Error creating transport", err)
os.Exit(1)
}
defer trans.Close()
var protocolFactory thrift.TProtocolFactory
switch protocol {
case "compact":
protocolFactory = thrift.NewTCompactProtocolFactory()
break
case "simplejson":
protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
break
case "json":
protocolFactory = thrift.NewTJSONProtocolFactory()
break
case "binary", "":
protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
break
default:
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
Usage()
os.Exit(1)
}
client := aurora.NewAuroraSchedulerManagerClientFactory(trans, protocolFactory)
if err := trans.Open(); err != nil {
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
os.Exit(1)
}
switch cmd {
case "createJob":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "CreateJob requires 1 args")
flag.Usage()
}
arg163 := flag.Arg(1)
mbTrans164 := thrift.NewTMemoryBufferLen(len(arg163))
defer mbTrans164.Close()
_, err165 := mbTrans164.WriteString(arg163)
if err165 != nil {
Usage()
return
}
factory166 := thrift.NewTSimpleJSONProtocolFactory()
jsProt167 := factory166.GetProtocol(mbTrans164)
argvalue0 := aurora.NewJobConfiguration()
err168 := argvalue0.Read(jsProt167)
if err168 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.CreateJob(value0))
fmt.Print("\n")
break
case "scheduleCronJob":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "ScheduleCronJob requires 1 args")
flag.Usage()
}
arg169 := flag.Arg(1)
mbTrans170 := thrift.NewTMemoryBufferLen(len(arg169))
defer mbTrans170.Close()
_, err171 := mbTrans170.WriteString(arg169)
if err171 != nil {
Usage()
return
}
factory172 := thrift.NewTSimpleJSONProtocolFactory()
jsProt173 := factory172.GetProtocol(mbTrans170)
argvalue0 := aurora.NewJobConfiguration()
err174 := argvalue0.Read(jsProt173)
if err174 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.ScheduleCronJob(value0))
fmt.Print("\n")
break
case "descheduleCronJob":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "DescheduleCronJob requires 1 args")
flag.Usage()
}
arg175 := flag.Arg(1)
mbTrans176 := thrift.NewTMemoryBufferLen(len(arg175))
defer mbTrans176.Close()
_, err177 := mbTrans176.WriteString(arg175)
if err177 != nil {
Usage()
return
}
factory178 := thrift.NewTSimpleJSONProtocolFactory()
jsProt179 := factory178.GetProtocol(mbTrans176)
argvalue0 := aurora.NewJobKey()
err180 := argvalue0.Read(jsProt179)
if err180 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.DescheduleCronJob(value0))
fmt.Print("\n")
break
case "startCronJob":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "StartCronJob requires 1 args")
flag.Usage()
}
arg181 := flag.Arg(1)
mbTrans182 := thrift.NewTMemoryBufferLen(len(arg181))
defer mbTrans182.Close()
_, err183 := mbTrans182.WriteString(arg181)
if err183 != nil {
Usage()
return
}
factory184 := thrift.NewTSimpleJSONProtocolFactory()
jsProt185 := factory184.GetProtocol(mbTrans182)
argvalue0 := aurora.NewJobKey()
err186 := argvalue0.Read(jsProt185)
if err186 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.StartCronJob(value0))
fmt.Print("\n")
break
case "restartShards":
if flag.NArg()-1 != 2 {
fmt.Fprintln(os.Stderr, "RestartShards requires 2 args")
flag.Usage()
}
arg187 := flag.Arg(1)
mbTrans188 := thrift.NewTMemoryBufferLen(len(arg187))
defer mbTrans188.Close()
_, err189 := mbTrans188.WriteString(arg187)
if err189 != nil {
Usage()
return
}
factory190 := thrift.NewTSimpleJSONProtocolFactory()
jsProt191 := factory190.GetProtocol(mbTrans188)
argvalue0 := aurora.NewJobKey()
err192 := argvalue0.Read(jsProt191)
if err192 != nil {
Usage()
return
}
value0 := argvalue0
arg193 := flag.Arg(2)
mbTrans194 := thrift.NewTMemoryBufferLen(len(arg193))
defer mbTrans194.Close()
_, err195 := mbTrans194.WriteString(arg193)
if err195 != nil {
Usage()
return
}
factory196 := thrift.NewTSimpleJSONProtocolFactory()
jsProt197 := factory196.GetProtocol(mbTrans194)
containerStruct1 := aurora.NewAuroraSchedulerManagerRestartShardsArgs()
err198 := containerStruct1.ReadField2(jsProt197)
if err198 != nil {
Usage()
return
}
argvalue1 := containerStruct1.ShardIds
value1 := argvalue1
fmt.Print(client.RestartShards(value0, value1))
fmt.Print("\n")
break
case "killTasks":
if flag.NArg()-1 != 3 {
fmt.Fprintln(os.Stderr, "KillTasks requires 3 args")
flag.Usage()
}
arg199 := flag.Arg(1)
mbTrans200 := thrift.NewTMemoryBufferLen(len(arg199))
defer mbTrans200.Close()
_, err201 := mbTrans200.WriteString(arg199)
if err201 != nil {
Usage()
return
}
factory202 := thrift.NewTSimpleJSONProtocolFactory()
jsProt203 := factory202.GetProtocol(mbTrans200)
argvalue0 := aurora.NewJobKey()
err204 := argvalue0.Read(jsProt203)
if err204 != nil {
Usage()
return
}
value0 := argvalue0
arg205 := flag.Arg(2)
mbTrans206 := thrift.NewTMemoryBufferLen(len(arg205))
defer mbTrans206.Close()
_, err207 := mbTrans206.WriteString(arg205)
if err207 != nil {
Usage()
return
}
factory208 := thrift.NewTSimpleJSONProtocolFactory()
jsProt209 := factory208.GetProtocol(mbTrans206)
containerStruct1 := aurora.NewAuroraSchedulerManagerKillTasksArgs()
err210 := containerStruct1.ReadField2(jsProt209)
if err210 != nil {
Usage()
return
}
argvalue1 := containerStruct1.Instances
value1 := argvalue1
argvalue2 := flag.Arg(3)
value2 := argvalue2
fmt.Print(client.KillTasks(value0, value1, value2))
fmt.Print("\n")
break
case "addInstances":
if flag.NArg()-1 != 2 {
fmt.Fprintln(os.Stderr, "AddInstances requires 2 args")
flag.Usage()
}
arg212 := flag.Arg(1)
mbTrans213 := thrift.NewTMemoryBufferLen(len(arg212))
defer mbTrans213.Close()
_, err214 := mbTrans213.WriteString(arg212)
if err214 != nil {
Usage()
return
}
factory215 := thrift.NewTSimpleJSONProtocolFactory()
jsProt216 := factory215.GetProtocol(mbTrans213)
argvalue0 := aurora.NewInstanceKey()
err217 := argvalue0.Read(jsProt216)
if err217 != nil {
Usage()
return
}
value0 := argvalue0
tmp1, err218 := (strconv.Atoi(flag.Arg(2)))
if err218 != nil {
Usage()
return
}
argvalue1 := int32(tmp1)
value1 := argvalue1
fmt.Print(client.AddInstances(value0, value1))
fmt.Print("\n")
break
case "replaceCronTemplate":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "ReplaceCronTemplate requires 1 args")
flag.Usage()
}
arg219 := flag.Arg(1)
mbTrans220 := thrift.NewTMemoryBufferLen(len(arg219))
defer mbTrans220.Close()
_, err221 := mbTrans220.WriteString(arg219)
if err221 != nil {
Usage()
return
}
factory222 := thrift.NewTSimpleJSONProtocolFactory()
jsProt223 := factory222.GetProtocol(mbTrans220)
argvalue0 := aurora.NewJobConfiguration()
err224 := argvalue0.Read(jsProt223)
if err224 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.ReplaceCronTemplate(value0))
fmt.Print("\n")
break
case "startJobUpdate":
if flag.NArg()-1 != 2 {
fmt.Fprintln(os.Stderr, "StartJobUpdate requires 2 args")
flag.Usage()
}
arg225 := flag.Arg(1)
mbTrans226 := thrift.NewTMemoryBufferLen(len(arg225))
defer mbTrans226.Close()
_, err227 := mbTrans226.WriteString(arg225)
if err227 != nil {
Usage()
return
}
factory228 := thrift.NewTSimpleJSONProtocolFactory()
jsProt229 := factory228.GetProtocol(mbTrans226)
argvalue0 := aurora.NewJobUpdateRequest()
err230 := argvalue0.Read(jsProt229)
if err230 != nil {
Usage()
return
}
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.StartJobUpdate(value0, value1))
fmt.Print("\n")
break
case "pauseJobUpdate":
if flag.NArg()-1 != 2 {
fmt.Fprintln(os.Stderr, "PauseJobUpdate requires 2 args")
flag.Usage()
}
arg232 := flag.Arg(1)
mbTrans233 := thrift.NewTMemoryBufferLen(len(arg232))
defer mbTrans233.Close()
_, err234 := mbTrans233.WriteString(arg232)
if err234 != nil {
Usage()
return
}
factory235 := thrift.NewTSimpleJSONProtocolFactory()
jsProt236 := factory235.GetProtocol(mbTrans233)
argvalue0 := aurora.NewJobUpdateKey()
err237 := argvalue0.Read(jsProt236)
if err237 != nil {
Usage()
return
}
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.PauseJobUpdate(value0, value1))
fmt.Print("\n")
break
case "resumeJobUpdate":
if flag.NArg()-1 != 2 {
fmt.Fprintln(os.Stderr, "ResumeJobUpdate requires 2 args")
flag.Usage()
}
arg239 := flag.Arg(1)
mbTrans240 := thrift.NewTMemoryBufferLen(len(arg239))
defer mbTrans240.Close()
_, err241 := mbTrans240.WriteString(arg239)
if err241 != nil {
Usage()
return
}
factory242 := thrift.NewTSimpleJSONProtocolFactory()
jsProt243 := factory242.GetProtocol(mbTrans240)
argvalue0 := aurora.NewJobUpdateKey()
err244 := argvalue0.Read(jsProt243)
if err244 != nil {
Usage()
return
}
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.ResumeJobUpdate(value0, value1))
fmt.Print("\n")
break
case "abortJobUpdate":
if flag.NArg()-1 != 2 {
fmt.Fprintln(os.Stderr, "AbortJobUpdate requires 2 args")
flag.Usage()
}
arg246 := flag.Arg(1)
mbTrans247 := thrift.NewTMemoryBufferLen(len(arg246))
defer mbTrans247.Close()
_, err248 := mbTrans247.WriteString(arg246)
if err248 != nil {
Usage()
return
}
factory249 := thrift.NewTSimpleJSONProtocolFactory()
jsProt250 := factory249.GetProtocol(mbTrans247)
argvalue0 := aurora.NewJobUpdateKey()
err251 := argvalue0.Read(jsProt250)
if err251 != nil {
Usage()
return
}
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.AbortJobUpdate(value0, value1))
fmt.Print("\n")
break
case "rollbackJobUpdate":
if flag.NArg()-1 != 2 {
fmt.Fprintln(os.Stderr, "RollbackJobUpdate requires 2 args")
flag.Usage()
}
arg253 := flag.Arg(1)
mbTrans254 := thrift.NewTMemoryBufferLen(len(arg253))
defer mbTrans254.Close()
_, err255 := mbTrans254.WriteString(arg253)
if err255 != nil {
Usage()
return
}
factory256 := thrift.NewTSimpleJSONProtocolFactory()
jsProt257 := factory256.GetProtocol(mbTrans254)
argvalue0 := aurora.NewJobUpdateKey()
err258 := argvalue0.Read(jsProt257)
if err258 != nil {
Usage()
return
}
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.RollbackJobUpdate(value0, value1))
fmt.Print("\n")
break
case "pulseJobUpdate":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "PulseJobUpdate requires 1 args")
flag.Usage()
}
arg260 := flag.Arg(1)
mbTrans261 := thrift.NewTMemoryBufferLen(len(arg260))
defer mbTrans261.Close()
_, err262 := mbTrans261.WriteString(arg260)
if err262 != nil {
Usage()
return
}
factory263 := thrift.NewTSimpleJSONProtocolFactory()
jsProt264 := factory263.GetProtocol(mbTrans261)
argvalue0 := aurora.NewJobUpdateKey()
err265 := argvalue0.Read(jsProt264)
if err265 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.PulseJobUpdate(value0))
fmt.Print("\n")
break
case "getRoleSummary":
if flag.NArg()-1 != 0 {
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
flag.Usage()
}
fmt.Print(client.GetRoleSummary())
fmt.Print("\n")
break
case "getJobSummary":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobSummary requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobSummary(value0))
fmt.Print("\n")
break
case "getTasksStatus":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
flag.Usage()
}
arg267 := flag.Arg(1)
mbTrans268 := thrift.NewTMemoryBufferLen(len(arg267))
defer mbTrans268.Close()
_, err269 := mbTrans268.WriteString(arg267)
if err269 != nil {
Usage()
return
}
factory270 := thrift.NewTSimpleJSONProtocolFactory()
jsProt271 := factory270.GetProtocol(mbTrans268)
argvalue0 := aurora.NewTaskQuery()
err272 := argvalue0.Read(jsProt271)
if err272 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetTasksStatus(value0))
fmt.Print("\n")
break
case "getTasksWithoutConfigs":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
flag.Usage()
}
arg273 := flag.Arg(1)
mbTrans274 := thrift.NewTMemoryBufferLen(len(arg273))
defer mbTrans274.Close()
_, err275 := mbTrans274.WriteString(arg273)
if err275 != nil {
Usage()
return
}
factory276 := thrift.NewTSimpleJSONProtocolFactory()
jsProt277 := factory276.GetProtocol(mbTrans274)
argvalue0 := aurora.NewTaskQuery()
err278 := argvalue0.Read(jsProt277)
if err278 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetTasksWithoutConfigs(value0))
fmt.Print("\n")
break
case "getPendingReason":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
flag.Usage()
}
arg279 := flag.Arg(1)
mbTrans280 := thrift.NewTMemoryBufferLen(len(arg279))
defer mbTrans280.Close()
_, err281 := mbTrans280.WriteString(arg279)
if err281 != nil {
Usage()
return
}
factory282 := thrift.NewTSimpleJSONProtocolFactory()
jsProt283 := factory282.GetProtocol(mbTrans280)
argvalue0 := aurora.NewTaskQuery()
err284 := argvalue0.Read(jsProt283)
if err284 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetPendingReason(value0))
fmt.Print("\n")
break
case "getConfigSummary":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
flag.Usage()
}
arg285 := flag.Arg(1)
mbTrans286 := thrift.NewTMemoryBufferLen(len(arg285))
defer mbTrans286.Close()
_, err287 := mbTrans286.WriteString(arg285)
if err287 != nil {
Usage()
return
}
factory288 := thrift.NewTSimpleJSONProtocolFactory()
jsProt289 := factory288.GetProtocol(mbTrans286)
argvalue0 := aurora.NewJobKey()
err290 := argvalue0.Read(jsProt289)
if err290 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetConfigSummary(value0))
fmt.Print("\n")
break
case "getJobs":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobs requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobs(value0))
fmt.Print("\n")
break
case "getQuota":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetQuota requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetQuota(value0))
fmt.Print("\n")
break
case "populateJobConfig":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
flag.Usage()
}
arg293 := flag.Arg(1)
mbTrans294 := thrift.NewTMemoryBufferLen(len(arg293))
defer mbTrans294.Close()
_, err295 := mbTrans294.WriteString(arg293)
if err295 != nil {
Usage()
return
}
factory296 := thrift.NewTSimpleJSONProtocolFactory()
jsProt297 := factory296.GetProtocol(mbTrans294)
argvalue0 := aurora.NewJobConfiguration()
err298 := argvalue0.Read(jsProt297)
if err298 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.PopulateJobConfig(value0))
fmt.Print("\n")
break
case "getJobUpdateSummaries":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
flag.Usage()
}
arg299 := flag.Arg(1)
mbTrans300 := thrift.NewTMemoryBufferLen(len(arg299))
defer mbTrans300.Close()
_, err301 := mbTrans300.WriteString(arg299)
if err301 != nil {
Usage()
return
}
factory302 := thrift.NewTSimpleJSONProtocolFactory()
jsProt303 := factory302.GetProtocol(mbTrans300)
argvalue0 := aurora.NewJobUpdateQuery()
err304 := argvalue0.Read(jsProt303)
if err304 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateSummaries(value0))
fmt.Print("\n")
break
case "getJobUpdateDetails":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
flag.Usage()
}
arg305 := flag.Arg(1)
mbTrans306 := thrift.NewTMemoryBufferLen(len(arg305))
defer mbTrans306.Close()
_, err307 := mbTrans306.WriteString(arg305)
if err307 != nil {
Usage()
return
}
factory308 := thrift.NewTSimpleJSONProtocolFactory()
jsProt309 := factory308.GetProtocol(mbTrans306)
argvalue0 := aurora.NewJobUpdateQuery()
err310 := argvalue0.Read(jsProt309)
if err310 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDetails(value0))
fmt.Print("\n")
break
case "getJobUpdateDiff":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
flag.Usage()
}
arg311 := flag.Arg(1)
mbTrans312 := thrift.NewTMemoryBufferLen(len(arg311))
defer mbTrans312.Close()
_, err313 := mbTrans312.WriteString(arg311)
if err313 != nil {
Usage()
return
}
factory314 := thrift.NewTSimpleJSONProtocolFactory()
jsProt315 := factory314.GetProtocol(mbTrans312)
argvalue0 := aurora.NewJobUpdateRequest()
err316 := argvalue0.Read(jsProt315)
if err316 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDiff(value0))
fmt.Print("\n")
break
case "getTierConfigs":
if flag.NArg()-1 != 0 {
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
flag.Usage()
}
fmt.Print(client.GetTierConfigs())
fmt.Print("\n")
break
case "":
Usage()
break
default:
fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,86 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package aurora
import (
"bytes"
"fmt"
"git.apache.org/thrift.git/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
const AURORA_EXECUTOR_NAME = "AuroraExecutor"
var ACTIVE_STATES map[ScheduleStatus]bool
var SLAVE_ASSIGNED_STATES map[ScheduleStatus]bool
var LIVE_STATES map[ScheduleStatus]bool
var TERMINAL_STATES map[ScheduleStatus]bool
const GOOD_IDENTIFIER_PATTERN = "^[\\w\\-\\.]+$"
const GOOD_IDENTIFIER_PATTERN_JVM = "^[\\w\\-\\.]+$"
const GOOD_IDENTIFIER_PATTERN_PYTHON = "^[\\w\\-\\.]+$"
var ACTIVE_JOB_UPDATE_STATES map[JobUpdateStatus]bool
var AWAITNG_PULSE_JOB_UPDATE_STATES map[JobUpdateStatus]bool
const BYPASS_LEADER_REDIRECT_HEADER_NAME = "Bypass-Leader-Redirect"
const TASK_FILESYSTEM_MOUNT_POINT = "taskfs"
func init() {
ACTIVE_STATES = map[ScheduleStatus]bool{
9: true,
17: true,
6: true,
0: true,
13: true,
12: true,
2: true,
1: true,
16: true,
}
SLAVE_ASSIGNED_STATES = map[ScheduleStatus]bool{
9: true,
17: true,
6: true,
13: true,
12: true,
2: true,
1: true,
}
LIVE_STATES = map[ScheduleStatus]bool{
6: true,
13: true,
12: true,
17: true,
2: true,
}
TERMINAL_STATES = map[ScheduleStatus]bool{
4: true,
3: true,
5: true,
7: true,
}
ACTIVE_JOB_UPDATE_STATES = map[JobUpdateStatus]bool{
0: true,
1: true,
2: true,
3: true,
9: true,
10: true,
}
AWAITNG_PULSE_JOB_UPDATE_STATES = map[JobUpdateStatus]bool{
9: true,
10: true,
}
}
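These sets are plain map[Status]bool values: the client passes them directly as filters to calls such as GetInstanceIds, and a membership test is a constant-time map lookup. A small sketch of the lookup side:
package main
import (
	"fmt"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)
func main() {
	// RUNNING (value 2) is both a live and an active state, but not a terminal one.
	status := aurora.ScheduleStatus_RUNNING
	fmt.Println("live: ", aurora.LIVE_STATES[status])
	fmt.Println("active: ", aurora.ACTIVE_STATES[status])
	fmt.Println("terminal:", aurora.TERMINAL_STATES[status])
}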


@ -0,0 +1,382 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package main
import (
"apache/aurora"
"flag"
"fmt"
"git.apache.org/thrift.git/lib/go/thrift"
"math"
"net"
"net/url"
"os"
"strconv"
"strings"
)
func Usage() {
fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
flag.PrintDefaults()
fmt.Fprintln(os.Stderr, "\nFunctions:")
fmt.Fprintln(os.Stderr, " Response getRoleSummary()")
fmt.Fprintln(os.Stderr, " Response getJobSummary(string role)")
fmt.Fprintln(os.Stderr, " Response getTasksStatus(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getTasksWithoutConfigs(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getPendingReason(TaskQuery query)")
fmt.Fprintln(os.Stderr, " Response getConfigSummary(JobKey job)")
fmt.Fprintln(os.Stderr, " Response getJobs(string ownerRole)")
fmt.Fprintln(os.Stderr, " Response getQuota(string ownerRole)")
fmt.Fprintln(os.Stderr, " Response populateJobConfig(JobConfiguration description)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateSummaries(JobUpdateQuery jobUpdateQuery)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateDetails(JobUpdateQuery query)")
fmt.Fprintln(os.Stderr, " Response getJobUpdateDiff(JobUpdateRequest request)")
fmt.Fprintln(os.Stderr, " Response getTierConfigs()")
fmt.Fprintln(os.Stderr)
os.Exit(0)
}
func main() {
flag.Usage = Usage
var host string
var port int
var protocol string
var urlString string
var framed bool
var useHttp bool
var parsedUrl url.URL
var trans thrift.TTransport
_ = strconv.Atoi
_ = math.Abs
flag.Usage = Usage
flag.StringVar(&host, "h", "localhost", "Specify host and port")
flag.IntVar(&port, "p", 9090, "Specify port")
flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
flag.StringVar(&urlString, "u", "", "Specify the url")
flag.BoolVar(&framed, "framed", false, "Use framed transport")
flag.BoolVar(&useHttp, "http", false, "Use http")
flag.Parse()
if len(urlString) > 0 {
parsedUrl, err := url.Parse(urlString)
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
host = parsedUrl.Host
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http"
} else if useHttp {
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
}
cmd := flag.Arg(0)
var err error
if useHttp {
trans, err = thrift.NewTHttpClient(parsedUrl.String())
} else {
portStr := fmt.Sprint(port)
if strings.Contains(host, ":") {
host, portStr, err = net.SplitHostPort(host)
if err != nil {
fmt.Fprintln(os.Stderr, "error with host:", err)
os.Exit(1)
}
}
trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
if err != nil {
fmt.Fprintln(os.Stderr, "error resolving address:", err)
os.Exit(1)
}
if framed {
trans = thrift.NewTFramedTransport(trans)
}
}
if err != nil {
fmt.Fprintln(os.Stderr, "Error creating transport", err)
os.Exit(1)
}
defer trans.Close()
var protocolFactory thrift.TProtocolFactory
switch protocol {
case "compact":
protocolFactory = thrift.NewTCompactProtocolFactory()
break
case "simplejson":
protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
break
case "json":
protocolFactory = thrift.NewTJSONProtocolFactory()
break
case "binary", "":
protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
break
default:
fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
Usage()
os.Exit(1)
}
client := aurora.NewReadOnlySchedulerClientFactory(trans, protocolFactory)
if err := trans.Open(); err != nil {
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
os.Exit(1)
}
switch cmd {
case "getRoleSummary":
if flag.NArg()-1 != 0 {
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
flag.Usage()
}
fmt.Print(client.GetRoleSummary())
fmt.Print("\n")
break
case "getJobSummary":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobSummary requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobSummary(value0))
fmt.Print("\n")
break
case "getTasksStatus":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetTasksStatus requires 1 args")
flag.Usage()
}
arg82 := flag.Arg(1)
mbTrans83 := thrift.NewTMemoryBufferLen(len(arg82))
defer mbTrans83.Close()
_, err84 := mbTrans83.WriteString(arg82)
if err84 != nil {
Usage()
return
}
factory85 := thrift.NewTSimpleJSONProtocolFactory()
jsProt86 := factory85.GetProtocol(mbTrans83)
argvalue0 := aurora.NewTaskQuery()
err87 := argvalue0.Read(jsProt86)
if err87 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetTasksStatus(value0))
fmt.Print("\n")
break
case "getTasksWithoutConfigs":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetTasksWithoutConfigs requires 1 args")
flag.Usage()
}
arg88 := flag.Arg(1)
mbTrans89 := thrift.NewTMemoryBufferLen(len(arg88))
defer mbTrans89.Close()
_, err90 := mbTrans89.WriteString(arg88)
if err90 != nil {
Usage()
return
}
factory91 := thrift.NewTSimpleJSONProtocolFactory()
jsProt92 := factory91.GetProtocol(mbTrans89)
argvalue0 := aurora.NewTaskQuery()
err93 := argvalue0.Read(jsProt92)
if err93 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetTasksWithoutConfigs(value0))
fmt.Print("\n")
break
case "getPendingReason":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetPendingReason requires 1 args")
flag.Usage()
}
arg94 := flag.Arg(1)
mbTrans95 := thrift.NewTMemoryBufferLen(len(arg94))
defer mbTrans95.Close()
_, err96 := mbTrans95.WriteString(arg94)
if err96 != nil {
Usage()
return
}
factory97 := thrift.NewTSimpleJSONProtocolFactory()
jsProt98 := factory97.GetProtocol(mbTrans95)
argvalue0 := aurora.NewTaskQuery()
err99 := argvalue0.Read(jsProt98)
if err99 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetPendingReason(value0))
fmt.Print("\n")
break
case "getConfigSummary":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetConfigSummary requires 1 args")
flag.Usage()
}
arg100 := flag.Arg(1)
mbTrans101 := thrift.NewTMemoryBufferLen(len(arg100))
defer mbTrans101.Close()
_, err102 := mbTrans101.WriteString(arg100)
if err102 != nil {
Usage()
return
}
factory103 := thrift.NewTSimpleJSONProtocolFactory()
jsProt104 := factory103.GetProtocol(mbTrans101)
argvalue0 := aurora.NewJobKey()
err105 := argvalue0.Read(jsProt104)
if err105 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetConfigSummary(value0))
fmt.Print("\n")
break
case "getJobs":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobs requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobs(value0))
fmt.Print("\n")
break
case "getQuota":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetQuota requires 1 args")
flag.Usage()
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetQuota(value0))
fmt.Print("\n")
break
case "populateJobConfig":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "PopulateJobConfig requires 1 args")
flag.Usage()
}
arg108 := flag.Arg(1)
mbTrans109 := thrift.NewTMemoryBufferLen(len(arg108))
defer mbTrans109.Close()
_, err110 := mbTrans109.WriteString(arg108)
if err110 != nil {
Usage()
return
}
factory111 := thrift.NewTSimpleJSONProtocolFactory()
jsProt112 := factory111.GetProtocol(mbTrans109)
argvalue0 := aurora.NewJobConfiguration()
err113 := argvalue0.Read(jsProt112)
if err113 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.PopulateJobConfig(value0))
fmt.Print("\n")
break
case "getJobUpdateSummaries":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateSummaries requires 1 args")
flag.Usage()
}
arg114 := flag.Arg(1)
mbTrans115 := thrift.NewTMemoryBufferLen(len(arg114))
defer mbTrans115.Close()
_, err116 := mbTrans115.WriteString(arg114)
if err116 != nil {
Usage()
return
}
factory117 := thrift.NewTSimpleJSONProtocolFactory()
jsProt118 := factory117.GetProtocol(mbTrans115)
argvalue0 := aurora.NewJobUpdateQuery()
err119 := argvalue0.Read(jsProt118)
if err119 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateSummaries(value0))
fmt.Print("\n")
break
case "getJobUpdateDetails":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateDetails requires 1 args")
flag.Usage()
}
arg120 := flag.Arg(1)
mbTrans121 := thrift.NewTMemoryBufferLen(len(arg120))
defer mbTrans121.Close()
_, err122 := mbTrans121.WriteString(arg120)
if err122 != nil {
Usage()
return
}
factory123 := thrift.NewTSimpleJSONProtocolFactory()
jsProt124 := factory123.GetProtocol(mbTrans121)
argvalue0 := aurora.NewJobUpdateQuery()
err125 := argvalue0.Read(jsProt124)
if err125 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDetails(value0))
fmt.Print("\n")
break
case "getJobUpdateDiff":
if flag.NArg()-1 != 1 {
fmt.Fprintln(os.Stderr, "GetJobUpdateDiff requires 1 args")
flag.Usage()
}
arg126 := flag.Arg(1)
mbTrans127 := thrift.NewTMemoryBufferLen(len(arg126))
defer mbTrans127.Close()
_, err128 := mbTrans127.WriteString(arg126)
if err128 != nil {
Usage()
return
}
factory129 := thrift.NewTSimpleJSONProtocolFactory()
jsProt130 := factory129.GetProtocol(mbTrans127)
argvalue0 := aurora.NewJobUpdateRequest()
err131 := argvalue0.Read(jsProt130)
if err131 != nil {
Usage()
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDiff(value0))
fmt.Print("\n")
break
case "getTierConfigs":
if flag.NArg()-1 != 0 {
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
flag.Usage()
}
fmt.Print(client.GetTierConfigs())
fmt.Print("\n")
break
case "":
Usage()
break
default:
fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

11
vendor/github.com/paypal/gorealis/generateBindings.sh generated vendored Executable file

@ -0,0 +1,11 @@
#! /bin/bash
THRIFT_VER=0.9.3
if [[ $(thrift -version | grep -e $THRIFT_VER -c) -ne 1 ]]; then
echo "Warning: This wrapper has only been tested with version" $THRIFT_VER;
fi
echo "Generating bindings...";
thrift -o ./ -r -gen go:package=apache.aurora auroraAPI.thrift;
echo "Done";

320
vendor/github.com/paypal/gorealis/job.go generated vendored Normal file

@ -0,0 +1,320 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"strconv"
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
type Job interface {
// Set Job Key environment.
Environment(env string) Job
Role(role string) Job
Name(name string) Job
CPU(cpus float64) Job
CronSchedule(cron string) Job
CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job
Disk(disk int64) Job
RAM(ram int64) Job
ExecutorName(name string) Job
ExecutorData(data string) Job
AddPorts(num int) Job
AddLabel(key string, value string) Job
AddNamedPorts(names ...string) Job
AddLimitConstraint(name string, limit int32) Job
AddValueConstraint(name string, negated bool, values ...string) Job
// From Aurora Docs:
// dedicated attribute. Aurora treats this specially, and only allows matching jobs
// to run on these machines, and will only schedule matching jobs on these machines.
// When a job is created, the scheduler requires that the $role component matches
// the role field in the job configuration, and will reject the job creation otherwise.
// A wildcard (*) may be used for the role portion of the dedicated attribute, which
// will allow any owner to elect for a job to run on the host(s)
AddDedicatedConstraint(role, name string) Job
AddURIs(extract bool, cache bool, values ...string) Job
JobKey() *aurora.JobKey
JobConfig() *aurora.JobConfiguration
TaskConfig() *aurora.TaskConfig
IsService(isService bool) Job
InstanceCount(instCount int32) Job
GetInstanceCount() int32
MaxFailure(maxFail int32) Job
Container(container Container) Job
}
// Structure to collect all information pertaining to an Aurora job.
type AuroraJob struct {
jobConfig *aurora.JobConfiguration
resources map[string]*aurora.Resource
portCount int
}
// Create a Job object with everything initialized.
func NewJob() Job {
jobConfig := aurora.NewJobConfiguration()
taskConfig := aurora.NewTaskConfig()
jobKey := aurora.NewJobKey()
// Job Config
jobConfig.Key = jobKey
jobConfig.TaskConfig = taskConfig
// Task Config
taskConfig.Job = jobKey
taskConfig.Container = aurora.NewContainer()
taskConfig.Container.Mesos = aurora.NewMesosContainer()
taskConfig.MesosFetcherUris = make(map[*aurora.MesosFetcherURI]bool)
taskConfig.Metadata = make(map[*aurora.Metadata]bool)
taskConfig.Constraints = make(map[*aurora.Constraint]bool)
// Resources
numCpus := aurora.NewResource()
ramMb := aurora.NewResource()
diskMb := aurora.NewResource()
resources := make(map[string]*aurora.Resource)
resources["cpu"] = numCpus
resources["ram"] = ramMb
resources["disk"] = diskMb
taskConfig.Resources = make(map[*aurora.Resource]bool)
taskConfig.Resources[numCpus] = true
taskConfig.Resources[ramMb] = true
taskConfig.Resources[diskMb] = true
numCpus.NumCpus = new(float64)
ramMb.RamMb = new(int64)
diskMb.DiskMb = new(int64)
return &AuroraJob{
jobConfig: jobConfig,
resources: resources,
portCount: 0,
}
}
// Set Job Key environment.
func (j *AuroraJob) Environment(env string) Job {
j.jobConfig.Key.Environment = env
return j
}
// Set Job Key Role.
func (j *AuroraJob) Role(role string) Job {
j.jobConfig.Key.Role = role
//Will be deprecated
identity := &aurora.Identity{User: role}
j.jobConfig.Owner = identity
j.jobConfig.TaskConfig.Owner = identity
return j
}
// Set Job Key Name.
func (j *AuroraJob) Name(name string) Job {
j.jobConfig.Key.Name = name
return j
}
// Set the name of the executor the task will be configured to use.
func (j *AuroraJob) ExecutorName(name string) Job {
if j.jobConfig.TaskConfig.ExecutorConfig == nil {
j.jobConfig.TaskConfig.ExecutorConfig = aurora.NewExecutorConfig()
}
j.jobConfig.TaskConfig.ExecutorConfig.Name = name
return j
}
// Set the executor data, which is serialized and stored as part of the task by the scheduler.
func (j *AuroraJob) ExecutorData(data string) Job {
if j.jobConfig.TaskConfig.ExecutorConfig == nil {
j.jobConfig.TaskConfig.ExecutorConfig = aurora.NewExecutorConfig()
}
j.jobConfig.TaskConfig.ExecutorConfig.Data = data
return j
}
func (j *AuroraJob) CPU(cpus float64) Job {
*j.resources["cpu"].NumCpus = cpus
j.jobConfig.TaskConfig.NumCpus = cpus //Will be deprecated soon
return j
}
func (j *AuroraJob) RAM(ram int64) Job {
*j.resources["ram"].RamMb = ram
j.jobConfig.TaskConfig.RamMb = ram //Will be deprecated soon
return j
}
func (j *AuroraJob) Disk(disk int64) Job {
*j.resources["disk"].DiskMb = disk
j.jobConfig.TaskConfig.DiskMb = disk //Will be deprecated
return j
}
// How many failures to tolerate before giving up.
func (j *AuroraJob) MaxFailure(maxFail int32) Job {
j.jobConfig.TaskConfig.MaxTaskFailures = maxFail
return j
}
// How many instances of the job to run
func (j *AuroraJob) InstanceCount(instCount int32) Job {
j.jobConfig.InstanceCount = instCount
return j
}
func (j *AuroraJob) CronSchedule(cron string) Job {
j.jobConfig.CronSchedule = &cron
return j
}
func (j *AuroraJob) CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job {
j.jobConfig.CronCollisionPolicy = policy
return j
}
// Get the number of instances of the job that will run.
func (j *AuroraJob) GetInstanceCount() int32 {
return j.jobConfig.InstanceCount
}
// Restart the job's tasks if they fail
func (j *AuroraJob) IsService(isService bool) Job {
j.jobConfig.TaskConfig.IsService = isService
return j
}
// Get the current job configuration's key, used for some realis calls.
func (j *AuroraJob) JobKey() *aurora.JobKey {
return j.jobConfig.Key
}
// Get the current job configuration.
func (j *AuroraJob) JobConfig() *aurora.JobConfiguration {
return j.jobConfig
}
func (j *AuroraJob) TaskConfig() *aurora.TaskConfig {
return j.jobConfig.TaskConfig
}
// Add a list of URIs with the same extract and cache configuration. Scheduler must have
// --enable_mesos_fetcher flag enabled. Currently there is no duplicate detection.
func (j *AuroraJob) AddURIs(extract bool, cache bool, values ...string) Job {
for _, value := range values {
j.jobConfig.TaskConfig.MesosFetcherUris[&aurora.MesosFetcherURI{
Value: value,
Extract: &extract,
Cache: &cache,
}] = true
}
return j
}
// Adds a Mesos label to the job. Note that Aurora will add the
// prefix "org.apache.aurora.metadata." to the beginning of each key.
func (j *AuroraJob) AddLabel(key string, value string) Job {
j.jobConfig.TaskConfig.Metadata[&aurora.Metadata{Key: key, Value: value}] = true
return j
}
// Add a named port to the job configuration. These are random ports, as it's
// not currently possible to request specific ports using Aurora.
func (j *AuroraJob) AddNamedPorts(names ...string) Job {
j.portCount += len(names)
for _, name := range names {
j.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &name}] = true
}
return j
}
// Adds a request for a number of ports to the job configuration. The names chosen for these ports
// will be org.apache.aurora.port.X, where X is the current port count for the job configuration
// starting at 0. These are random ports as it's not currently possible to request
// specific ports using Aurora.
func (j *AuroraJob) AddPorts(num int) Job {
start := j.portCount
j.portCount += num
for i := start; i < j.portCount; i++ {
portName := "org.apache.aurora.port." + strconv.Itoa(i)
j.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &portName}] = true
}
return j
}
// From Aurora Docs:
// Add a Value constraint.
// name - Mesos slave attribute that the constraint is matched against.
// If negated = true, treat this as a 'not', i.e. avoid the specified values.
// values - list of values we look for in the attribute name.
func (j *AuroraJob) AddValueConstraint(name string, negated bool, values ...string) Job {
constraintValues := make(map[string]bool)
for _, value := range values {
constraintValues[value] = true
}
j.jobConfig.TaskConfig.Constraints[&aurora.Constraint{
Name: name,
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: negated,
Values: constraintValues,
},
Limit: nil,
},
}] = true
return j
}
// From Aurora Docs:
// A constraint that specifies the maximum number of active tasks on a host with
// a matching attribute that may be scheduled simultaneously.
func (j *AuroraJob) AddLimitConstraint(name string, limit int32) Job {
j.jobConfig.TaskConfig.Constraints[&aurora.Constraint{
Name: name,
Constraint: &aurora.TaskConstraint{
Value: nil,
Limit: &aurora.LimitConstraint{Limit: limit},
},
}] = true
return j
}
func (j *AuroraJob) AddDedicatedConstraint(role, name string) Job {
j.AddValueConstraint("dedicated", false, role+"/"+name)
return j
}
// Set the container for the job configuration's tasks to run in.
func (j *AuroraJob) Container(container Container) Job {
j.jobConfig.TaskConfig.Container = container.Build()
return j
}
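A short sketch of the fluent builder above with illustrative values; every call is part of the Job interface at the top of this file, and the job name is made up for the demo:
package main
import (
	"fmt"
	realis "github.com/paypal/gorealis"
)
func main() {
	job := realis.NewJob().
		Environment("prod").
		Role("vagrant").
		Name("builder_demo"). // illustrative name
		CPU(0.5).
		RAM(128).
		Disk(256).
		IsService(true).
		InstanceCount(2).
		AddPorts(1).                                   // becomes org.apache.aurora.port.0
		AddLabel("owner", "gorealis").                 // key gains the org.apache.aurora.metadata. prefix
		AddValueConstraint("zone", false, "us-east-1") // schedule only on agents whose zone attribute matches
	fmt.Println(job.JobKey())
}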

59
vendor/github.com/paypal/gorealis/logger.go generated vendored Normal file

@ -0,0 +1,59 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
type Logger interface {
Println(v ...interface{})
Printf(format string, v ...interface{})
Print(v ...interface{})
}
type NoopLogger struct{}
func (NoopLogger) Printf(format string, a ...interface{}) {}
func (NoopLogger) Print(a ...interface{}) {}
func (NoopLogger) Println(a ...interface{}) {}
type LevelLogger struct {
Logger
debug bool
}
func (l *LevelLogger) EnableDebug(enable bool) {
l.debug = enable
}
func (l LevelLogger) DebugPrintf(format string, a ...interface{}) {
if l.debug {
l.Print("[DEBUG] ")
l.Printf(format, a...)
}
}
func (l LevelLogger) DebugPrint(a ...interface{}) {
if l.debug {
l.Print("[DEBUG] ")
l.Print(a...)
}
}
func (l LevelLogger) DebugPrintln(a ...interface{}) {
if l.debug {
l.Print("[DEBUG] ")
l.Println(a...)
}
}
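A minimal sketch of wrapping a standard library logger in LevelLogger; debug output stays silent until EnableDebug is called:
package main
import (
	"log"
	"os"
	realis "github.com/paypal/gorealis"
)
func main() {
	l := realis.LevelLogger{Logger: log.New(os.Stdout, "realis: ", log.Ldate)}
	l.DebugPrintln("dropped: debug is off")
	l.EnableDebug(true)
	l.DebugPrintln("printed: debug is on")
}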

183
vendor/github.com/paypal/gorealis/monitors.go generated vendored Normal file

@ -0,0 +1,183 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Collection of monitors used to make asynchronous scheduler operations synchronous.
package realis
import (
"time"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/paypal/gorealis/response"
"github.com/pkg/errors"
)
const (
UpdateFailed = "update failed"
RolledBack = "update rolled back"
Timeout = "timeout"
)
type Monitor struct {
Client Realis
}
// Polls the scheduler at the given interval to check whether the update has succeeded.
func (m *Monitor) JobUpdate(updateKey aurora.JobUpdateKey, interval int, timeout int) (bool, error) {
updateQ := aurora.JobUpdateQuery{
Key: &updateKey,
Limit: 1,
}
ticker := time.NewTicker(time.Second * time.Duration(interval))
defer ticker.Stop()
timer := time.NewTimer(time.Second * time.Duration(timeout))
defer timer.Stop()
var cliErr error
var respDetail *aurora.Response
for {
select {
case <-ticker.C:
respDetail, cliErr = m.Client.JobUpdateDetails(updateQ)
if cliErr != nil {
return false, cliErr
}
updateDetail := response.JobUpdateDetails(respDetail)
if len(updateDetail) == 0 {
m.Client.RealisConfig().logger.Println("No update found")
return false, errors.New("No update found for " + updateKey.String())
}
status := updateDetail[0].Update.Summary.State.Status
if _, ok := aurora.ACTIVE_JOB_UPDATE_STATES[status]; !ok {
// ROLLED_FORWARD is the only inactive state that indicates a successful update;
// any other inactive state means the update failed.
switch status {
case aurora.JobUpdateStatus_ROLLED_FORWARD:
m.Client.RealisConfig().logger.Println("Update succeeded")
return true, nil
case aurora.JobUpdateStatus_FAILED:
m.Client.RealisConfig().logger.Println("Update failed")
return false, errors.New(UpdateFailed)
case aurora.JobUpdateStatus_ROLLED_BACK:
m.Client.RealisConfig().logger.Println("rolled back")
return false, errors.New(RolledBack)
default:
return false, nil
}
}
case <-timer.C:
return false, errors.New(Timeout)
}
}
}
// Monitor a Job until all instances enter one of the LIVE_STATES
func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval, timeout int) (bool, error) {
return m.ScheduleStatus(key, instances, aurora.LIVE_STATES, interval, timeout)
}
// Monitor a Job until all instances enter a desired status.
// Default sets of desired statuses provided by the thrift API include:
// ACTIVE_STATES, SLAVE_ASSIGNED_STATES, LIVE_STATES, and TERMINAL_STATES
func (m *Monitor) ScheduleStatus(key *aurora.JobKey, instanceCount int32, desiredStatuses map[aurora.ScheduleStatus]bool, interval, timeout int) (bool, error) {
ticker := time.NewTicker(time.Second * time.Duration(interval))
defer ticker.Stop()
timer := time.NewTimer(time.Second * time.Duration(timeout))
defer timer.Stop()
for {
select {
case <-ticker.C:
// Query Aurora for the state of the job key every interval
instCount, cliErr := m.Client.GetInstanceIds(key, desiredStatuses)
if cliErr != nil {
return false, errors.Wrap(cliErr, "Unable to communicate with Aurora")
}
if len(instCount) == int(instanceCount) {
return true, nil
}
case <-timer.C:
// If the timer runs out, return a timeout error to user
return false, errors.New(Timeout)
}
}
}
// Monitor host status until all hosts match the status provided. Returns a map where the value is true if the host
// is in one of the desired mode(s) or false if it is not as of the time when the monitor exited.
func (m *Monitor) HostMaintenance(hosts []string, modes []aurora.MaintenanceMode, interval, timeout int) (map[string]bool, error) {
// Transform modes to monitor for into a set for easy lookup
desiredMode := make(map[aurora.MaintenanceMode]struct{})
for _, mode := range modes {
desiredMode[mode] = struct{}{}
}
// Turn slice into a host set to eliminate duplicates.
// We also can't use a simple count because multiple modes mean we can have multiple matches for a single host.
// I.e. host A transitions from ACTIVE to DRAINING to DRAINED while monitored
remainingHosts := make(map[string]struct{})
for _, host := range hosts {
remainingHosts[host] = struct{}{}
}
hostResult := make(map[string]bool)
ticker := time.NewTicker(time.Second * time.Duration(interval))
defer ticker.Stop()
timer := time.NewTimer(time.Second * time.Duration(timeout))
defer timer.Stop()
for {
select {
case <-ticker.C:
// Client call has multiple retries internally
_, result, err := m.Client.MaintenanceStatus(hosts...)
if err != nil {
// Error is either a payload error or a severe connection error
for host := range remainingHosts {
hostResult[host] = false
}
return hostResult, errors.Wrap(err, "client error in monitor")
}
for status := range result.GetStatuses() {
if _, ok := desiredMode[status.GetMode()]; ok {
hostResult[status.GetHost()] = true
delete(remainingHosts, status.GetHost())
if len(remainingHosts) == 0 {
return hostResult, nil
}
}
}
case <-timer.C:
for host := range remainingHosts {
hostResult[host] = false
}
return hostResult, errors.New(Timeout)
}
}
}
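
Each monitor blocks the calling goroutine until the watched state is reached or the timeout fires, so the usual pattern is to submit work and then wait on a monitor before inspecting the result. A minimal sketch, assuming an already-connected client (editorial, not part of the vendored source):

package main

import realis "github.com/paypal/gorealis"

// waitForLive polls every 5 seconds until all of the job's instances reach a
// LIVE state, giving up after 120 seconds.
func waitForLive(r realis.Realis, job realis.Job) (bool, error) {
	monitor := &realis.Monitor{Client: r}
	return monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 120)
}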

1151
vendor/github.com/paypal/gorealis/realis.go generated vendored Normal file

File diff suppressed because it is too large

539
vendor/github.com/paypal/gorealis/realis_e2e_test.go generated vendored Normal file
View file

@ -0,0 +1,539 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis_test
import (
"fmt"
"io/ioutil"
"os"
"sync"
"testing"
"time"
"github.com/paypal/gorealis"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/paypal/gorealis/response"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
var r realis.Realis
var monitor *realis.Monitor
var thermosPayload []byte
func TestMain(m *testing.M) {
var err error
// New configuration to connect to docker container
r, err = realis.NewRealisClient(realis.SchedulerUrl("http://192.168.33.7:8081"),
realis.BasicAuth("aurora", "secret"),
realis.TimeoutMS(20000))
if err != nil {
fmt.Println("Please run docker-compose up -d before running test suite")
os.Exit(1)
}
defer r.Close()
// Create monitor
monitor = &realis.Monitor{Client: r}
thermosPayload, err = ioutil.ReadFile("examples/thermos_payload.json")
if err != nil {
fmt.Println("Error reading thermos payload file: ", err)
os.Exit(1)
}
os.Exit(m.Run())
}
func TestNonExistentEndpoint(t *testing.T) {
backoff := realis.Backoff{ // Reduce penalties for this test to make it quick
Steps: 5,
Duration: 1 * time.Second,
Factor: 1.0,
Jitter: 0.1}
// Attempt to connect to a bad endpoint
r, err := realis.NewRealisClient(realis.SchedulerUrl("http://192.168.33.7:8081/doesntexist/"),
realis.TimeoutMS(200),
realis.BackOff(backoff),
)
defer r.Close()
taskQ := &aurora.TaskQuery{}
_, err = r.GetTasksWithoutConfigs(taskQ)
// Check that we do error out of retrying
assert.Error(t, err)
// Check that the error before this one was a retry error
// TODO: Consider bubbling up timeout behaving error all the way up to the user.
retryErr := realis.ToRetryCount(errors.Cause(err))
assert.NotNil(t, retryErr, "error passed in is not a retry error")
assert.Equal(t, backoff.Steps, retryErr.RetryCount(), "retry count is incorrect")
}
func TestLeaderFromZK(t *testing.T) {
cluster := realis.GetDefaultClusterFromZKUrl("192.168.33.2:2181")
url, err := realis.LeaderFromZK(*cluster)
assert.NoError(t, err)
assert.Equal(t, "http://192.168.33.7:8081", url)
}
func TestRealisClient_ReestablishConn(t *testing.T) {
// Test that we're able to tear down the old connection and create a new one.
err := r.ReestablishConn()
assert.NoError(t, err)
}
func TestGetCACerts(t *testing.T) {
certs, err := realis.GetCerts("./examples/certs")
assert.NoError(t, err)
assert.Equal(t, len(certs.Subjects()), 2)
}
func TestRealisClient_CreateJob_Thermos(t *testing.T) {
role := "vagrant"
job := realis.NewJob().
Environment("prod").
Role(role).
Name("create_thermos_job_test").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(thermosPayload)).
CPU(1).
RAM(64).
Disk(100).
IsService(true).
InstanceCount(1).
AddPorts(1)
start := time.Now()
resp, err := r.CreateJob(job)
end := time.Now()
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
fmt.Printf("Create call took %d ns\n", (end.UnixNano() - start.UnixNano()))
// Test Instances Monitor
success, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 1, 50)
assert.True(t, success)
assert.NoError(t, err)
// Fetch all Jobs
_, result, err := r.GetJobs(role)
fmt.Printf("GetJobs length: %+v \n", len(result.Configs))
assert.Equal(t, len(result.Configs), 1)
assert.NoError(t, err)
// Test asking the scheduler to perform a Snapshot
t.Run("TestRealisClient_Snapshot", func(t *testing.T) {
err := r.Snapshot()
assert.NoError(t, err)
})
// Test asking the scheduler to backup a Snapshot
t.Run("TestRealisClient_PerformBackup", func(t *testing.T) {
err := r.PerformBackup()
assert.NoError(t, err)
})
// Tasks must exist for the job to be killed
t.Run("TestRealisClient_KillJob_Thermos", func(t *testing.T) {
start := time.Now()
resp, err := r.KillJob(job.JobKey())
end := time.Now()
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
fmt.Printf("Kill call took %d ns\n", (end.UnixNano() - start.UnixNano()))
})
}
// Test configuring an executor that doesn't exist for CreateJob API
func TestRealisClient_CreateJob_ExecutorDoesNotExist(t *testing.T) {
// Create a single job
job := realis.NewJob().
Environment("prod").
Role("vagrant").
Name("executordoesntexist").
ExecutorName("idontexist").
ExecutorData("").
CPU(.25).
RAM(4).
Disk(10).
InstanceCount(1)
resp, err := r.CreateJob(job)
assert.Error(t, err)
assert.Equal(t, aurora.ResponseCode_INVALID_REQUEST, resp.GetResponseCode())
}
func TestRealisClient_CreateService_WithPulse_Thermos(t *testing.T) {
fmt.Println("Creating service")
role := "vagrant"
job := realis.NewJob().
Environment("prod").
Role(role).
Name("create_thermos_job_test").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(thermosPayload)).
CPU(.5).
RAM(64).
Disk(100).
IsService(true).
InstanceCount(1).
AddPorts(1).
AddLabel("currentTime", time.Now().String())
pulse := int32(30)
timeout := 300
settings := realis.NewUpdateSettings()
settings.BlockIfNoPulsesAfterMs = &pulse
settings.UpdateGroupSize = 1
settings.WaitForBatchCompletion = true
job.InstanceCount(2)
resp, result, err := r.CreateService(job, settings)
fmt.Println(result.String())
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
updateQ := aurora.JobUpdateQuery{
Key: result.GetKey(),
Limit: 1,
}
start := time.Now()
for i := 0; i*int(pulse) <= timeout; i++ {
fmt.Println("sending PulseJobUpdate....")
resp, err = r.PulseJobUpdate(result.GetKey())
assert.NotNil(t, resp)
assert.Nil(t, err)
respDetail, err := r.JobUpdateDetails(updateQ)
assert.Nil(t, err)
updateDetail := response.JobUpdateDetails(respDetail)
if len(updateDetail) == 0 {
fmt.Println("No update found")
assert.NotEqual(t, len(updateDetail), 0)
}
status := updateDetail[0].Update.Summary.State.Status
if _, ok := aurora.ACTIVE_JOB_UPDATE_STATES[status]; !ok {
// ROLLED_FORWARD is the only inactive state that indicates a successful update;
// any other inactive state means the update failed.
if status == aurora.JobUpdateStatus_ROLLED_FORWARD {
fmt.Println("Update succeeded")
break
} else {
fmt.Println("Update failed")
break
}
}
fmt.Println("Polling, update still active...")
time.Sleep(time.Duration(pulse) * time.Second)
}
end := time.Now()
fmt.Printf("Update call took %d ns\n", (end.UnixNano() - start.UnixNano()))
t.Run("TestRealisClient_KillJob_Thermos", func(t *testing.T) {
start := time.Now()
resp, err := r.KillJob(job.JobKey())
end := time.Now()
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
fmt.Printf("Kill call took %d ns\n", (end.UnixNano() - start.UnixNano()))
})
}
// Test creating a service and monitoring the update until it completes
func TestRealisClient_CreateService(t *testing.T) {
// Create a single job
job := realis.NewJob().
Environment("prod").
Role("vagrant").
Name("create_service_test").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(thermosPayload)).
CPU(.25).
RAM(4).
Disk(10).
InstanceCount(3).
IsService(true)
settings := realis.NewUpdateSettings()
settings.UpdateGroupSize = 2
job.InstanceCount(3)
resp, result, err := r.CreateService(job, settings)
assert.NoError(t, err)
assert.NotNil(t, result)
assert.Equal(t, aurora.ResponseCode_OK, resp.GetResponseCode())
var ok bool
var mErr error
if ok, mErr = monitor.JobUpdate(*result.GetKey(), 5, 180); !ok || mErr != nil {
// Update may already be in a terminal state so don't check for error
_, err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out.")
_, err = r.KillJob(job.JobKey())
assert.NoError(t, err)
}
assert.True(t, ok)
assert.NoError(t, mErr)
// Kill task test task after confirming it came up fine
_, err = r.KillJob(job.JobKey())
assert.NoError(t, err)
}
// Test configuring an executor that doesn't exist for CreateService API
func TestRealisClient_CreateService_ExecutorDoesNotExist(t *testing.T) {
// Create a single job
job := realis.NewJob().
Environment("prod").
Role("vagrant").
Name("executordoesntexist").
ExecutorName("idontexist").
ExecutorData("").
CPU(.25).
RAM(4).
Disk(10).
InstanceCount(1)
settings := realis.NewUpdateSettings()
job.InstanceCount(3)
resp, result, err := r.CreateService(job, settings)
assert.Error(t, err)
assert.Nil(t, result)
assert.Equal(t, aurora.ResponseCode_INVALID_REQUEST, resp.GetResponseCode())
}
func TestRealisClient_ScheduleCronJob_Thermos(t *testing.T) {
thermosCronPayload, err := ioutil.ReadFile("examples/thermos_cron_payload.json")
assert.NoError(t, err)
job := realis.NewJob().
Environment("prod").
Role("vagrant").
Name("cronsched_job_test").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(thermosCronPayload)).
CPU(1).
RAM(64).
Disk(100).
IsService(true).
InstanceCount(1).
AddPorts(1).
CronSchedule("* * * * *").
IsService(false)
resp, err := r.ScheduleCronJob(job)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
t.Run("TestRealisClient_StartCronJob_Thermos", func(t *testing.T) {
start := time.Now()
resp, err := r.StartCronJob(job.JobKey())
end := time.Now()
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
fmt.Printf("Schedule cron call took %d ns\n", (end.UnixNano() - start.UnixNano()))
})
t.Run("TestRealisClient_DeschedulerCronJob_Thermos", func(t *testing.T) {
start := time.Now()
resp, err := r.DescheduleCronJob(job.JobKey())
end := time.Now()
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
fmt.Printf("Deschedule cron call took %d ns\n", (end.UnixNano() - start.UnixNano()))
})
}
func TestRealisClient_DrainHosts(t *testing.T) {
hosts := []string{"localhost"}
_, _, err := r.DrainHosts(hosts...)
if err != nil {
fmt.Printf("error: %+v\n", err.Error())
os.Exit(1)
}
// Monitor change to DRAINING and DRAINED mode
hostResults, err := monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1,
50)
assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
assert.NoError(t, err)
t.Run("TestRealisClient_MonitorNontransitioned", func(t *testing.T) {
// Monitor change to DRAINING and DRAINED mode
hostResults, err := monitor.HostMaintenance(
append(hosts, "IMAGINARY_HOST"),
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1,
1)
// Assert monitor returned an error that was not nil, and also a list of the non-transitioned hosts
assert.Error(t, err)
assert.Equal(t, map[string]bool{"localhost": true, "IMAGINARY_HOST": false}, hostResults)
})
t.Run("TestRealisClient_EndMaintenance", func(t *testing.T) {
_, _, err := r.EndMaintenance(hosts...)
if err != nil {
fmt.Printf("error: %+v\n", err.Error())
os.Exit(1)
}
// Monitor change to DRAINING and DRAINED mode
_, err = monitor.HostMaintenance(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
5,
10)
assert.NoError(t, err)
})
}
// Test multiple go routines using a single connection
func TestRealisClient_SessionThreadSafety(t *testing.T) {
// Create a single job
job := realis.NewJob().
Environment("prod").
Role("vagrant").
Name("create_thermos_job_test_multi").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(thermosPayload)).
CPU(.25).
RAM(4).
Disk(10).
InstanceCount(1000) // Impossible amount to go live on any sane machine
resp, err := r.CreateJob(job)
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
wg := sync.WaitGroup{}
for i := 0; i < 20; i++ {
wg.Add(1)
// Launch multiple monitors that will poll every second
go func() {
defer wg.Done()
// Test Schedule status monitor for terminal state and timing out after 30 seconds
success, err := monitor.ScheduleStatus(job.JobKey(), job.GetInstanceCount(), aurora.LIVE_STATES, 1, 30)
assert.False(t, success)
assert.Error(t, err)
resp, err := r.KillJob(job.JobKey())
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
}()
}
wg.Wait()
}
// Test setting and getting the quota
func TestRealisClient_SetQuota(t *testing.T) {
var cpu = 3.5
var ram int64 = 20480
var disk int64 = 10240
resp, err := r.SetQuota("vagrant", &cpu, &ram, &disk)
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
t.Run("TestRealisClient_GetQuota", func(t *testing.T) {
// Test GetQuota based on previously set values
var result *aurora.GetQuotaResult_
resp, err = r.GetQuota("vagrant")
if resp.GetResult_() != nil {
result = resp.GetResult_().GetQuotaResult_
}
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
for res := range result.Quota.GetResources() {
switch {
case res.DiskMb != nil:
assert.Equal(t, disk, *res.DiskMb)
case res.NumCpus != nil:
assert.Equal(t, cpu, *res.NumCpus)
case res.RamMb != nil:
assert.Equal(t, ram, *res.RamMb)
}
}
fmt.Print("GetQuota Result", result.String())
})
}
func TestRealisClient_ForceImplicitTaskReconciliation(t *testing.T) {
err := r.ForceImplicitTaskReconciliation()
assert.NoError(t, err)
}
func TestRealisClient_ForceExplicitTaskReconciliation(t *testing.T) {
// Default value
err := r.ForceExplicitTaskReconciliation(nil)
assert.NoError(t, err)
// Custom batch value
var batchSize int32 = 32
err = r.ForceExplicitTaskReconciliation(&batchSize)
assert.NoError(t, err)
}

65
vendor/github.com/paypal/gorealis/response/response.go generated vendored Normal file
View file

@ -0,0 +1,65 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Helper functions to process aurora.Response
package response
import (
"bytes"
"errors"
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
// Get key from a response created by a StartJobUpdate call
func JobUpdateKey(resp *aurora.Response) *aurora.JobUpdateKey {
return resp.GetResult_().GetStartJobUpdateResult_().GetKey()
}
func JobUpdateDetails(resp *aurora.Response) []*aurora.JobUpdateDetails {
return resp.GetResult_().GetGetJobUpdateDetailsResult_().GetDetailsList()
}
func ScheduleStatusResult(resp *aurora.Response) *aurora.ScheduleStatusResult_ {
return resp.GetResult_().GetScheduleStatusResult_()
}
func JobUpdateSummaries(resp *aurora.Response) []*aurora.JobUpdateSummary {
return resp.GetResult_().GetGetJobUpdateSummariesResult_().GetUpdateSummaries()
}
// Deprecated: Replaced by checks done inside of thriftCallHelper
func ResponseCodeCheck(resp *aurora.Response) (*aurora.Response, error) {
if resp == nil {
return resp, errors.New("Response is nil")
}
if resp.GetResponseCode() != aurora.ResponseCode_OK {
return resp, errors.New(CombineMessage(resp))
}
return resp, nil
}
// Based on aurora client: src/main/python/apache/aurora/client/base.py
func CombineMessage(resp *aurora.Response) string {
var buffer bytes.Buffer
for _, detail := range resp.GetDetails() {
buffer.WriteString(detail.GetMessage() + ", ")
}
if buffer.Len() > 0 {
buffer.Truncate(buffer.Len() - 2) // Get rid of trailing comma + space
}
return buffer.String()
}
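
These helpers only unwrap the thrift union; they assume the response code has already been checked (the retry layer in retry.go does this for client calls). A minimal sketch of flattening a response for logging (editorial, not part of the vendored source):

package main

import (
	"fmt"

	"github.com/paypal/gorealis/gen-go/apache/aurora"
	"github.com/paypal/gorealis/response"
)

// describe turns a response into a one-line summary using CombineMessage.
func describe(resp *aurora.Response) string {
	if resp.GetResponseCode() != aurora.ResponseCode_OK {
		return fmt.Sprintf("call failed: %s", response.CombineMessage(resp))
	}
	return "call succeeded"
}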

215
vendor/github.com/paypal/gorealis/retry.go generated vendored Normal file
View file

@ -0,0 +1,215 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"math/rand"
"time"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/paypal/gorealis/response"
"github.com/pkg/errors"
)
type Backoff struct {
Duration time.Duration // the base duration
Factor float64 // Duration is multiplied by factor each iteration
Jitter float64 // The amount of jitter applied each iteration
Steps int // Exit with error after this many steps
}
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
// This allows clients to avoid converging on periodic behavior. If maxFactor
// is 0.0, a suggested default value will be chosen.
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
if maxFactor <= 0.0 {
maxFactor = 1.0
}
wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
return wait
}
// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
// Modified version of the Kubernetes exponential-backoff code.
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It checks the condition up to Steps times, increasing the wait by multiplying
// the previous duration by Factor.
//
// If Jitter is greater than zero, a random amount of each duration is added
// (between duration and duration*(1+jitter)).
//
// If the condition never returns true, a retry error is returned. Temporary
// errors do not cause the function to return early; any other error aborts
// the retries immediately.
func ExponentialBackoff(backoff Backoff, logger Logger, condition ConditionFunc) error {
var err error
var ok bool
var curStep int
duration := backoff.Duration
for curStep = 0; curStep < backoff.Steps; curStep++ {
// Only sleep if it's not the first iteration.
if curStep != 0 {
adjusted := duration
if backoff.Jitter > 0.0 {
adjusted = Jitter(duration, backoff.Jitter)
}
logger.Printf("A retriable error occurred during function call, backing off for %v before retrying\n", adjusted)
time.Sleep(adjusted)
duration = time.Duration(float64(duration) * backoff.Factor)
}
// Execute function passed in.
ok, err = condition()
// If the function executed says it succeeded, stop retrying
if ok {
return nil
}
if err != nil {
// If the error is temporary, continue retrying.
if !IsTemporary(err) {
return err
} else {
// Print out the temporary error we experienced.
logger.Println(err)
}
}
}
if curStep > 1 {
logger.Printf("retried this function call %d time(s)", curStep)
}
// Provide more information to the user wherever possible
if err != nil {
return newRetryError(errors.Wrap(err, "ran out of retries"), curStep)
} else {
return newRetryError(errors.New("ran out of retries"), curStep)
}
}
type auroraThriftCall func() (resp *aurora.Response, err error)
// Duplicates the functionality of ExponentialBackoff but is specifically targeted towards Thrift calls.
func (r *realisClient) thriftCallWithRetries(thriftCall auroraThriftCall) (*aurora.Response, error) {
var resp *aurora.Response
var clientErr error
var curStep int
backoff := r.config.backoff
duration := backoff.Duration
for curStep = 0; curStep < backoff.Steps; curStep++ {
// If this isn't our first try, backoff before the next try.
if curStep != 0 {
adjusted := duration
if backoff.Jitter > 0.0 {
adjusted = Jitter(duration, backoff.Jitter)
}
r.logger.Printf("A retriable error occurred during thrift call, backing off for %v before retry %v\n", adjusted, curStep)
time.Sleep(adjusted)
duration = time.Duration(float64(duration) * backoff.Factor)
}
// Only allow one goroutine at a time to use or modify the thrift client connection.
// Placing this in an anonymous function in order to create a new, short-lived stack allowing unlock
// to be run in case of a panic inside of thriftCall.
func() {
r.lock.Lock()
defer r.lock.Unlock()
resp, clientErr = thriftCall()
r.logger.DebugPrintf("Aurora Thrift Call ended resp: %v clientErr: %v\n", resp, clientErr)
}()
// Check if our thrift call is returning an error. This is a retriable event as we don't know
// if it was caused by network issues.
if clientErr != nil {
// Print out the error to the user
r.logger.Printf("Client Error: %v\n", clientErr)
// In the future, reestablish connection should be able to check if it is actually possible
// to make a thrift call to Aurora. For now, a reconnect should always lead to a retry.
r.ReestablishConn()
} else {
// If there was no client error, but the response is nil, something went wrong.
// Ideally, we'll never encounter this but we're placing a safeguard here.
if resp == nil {
return nil, errors.New("Response from aurora is nil")
}
// Check Response Code from thrift and make a decision to continue retrying or not.
switch responseCode := resp.GetResponseCode(); responseCode {
// If the thrift call succeeded, stop retrying
case aurora.ResponseCode_OK:
return resp, nil
// If the response code is transient, continue retrying
case aurora.ResponseCode_ERROR_TRANSIENT:
r.logger.Println("Aurora replied with Transient error code, retrying")
continue
// Failure scenarios, these indicate a bad payload or a bad config. Stop retrying.
case aurora.ResponseCode_INVALID_REQUEST,
aurora.ResponseCode_ERROR,
aurora.ResponseCode_AUTH_FAILED,
aurora.ResponseCode_JOB_UPDATING_ERROR:
r.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
return resp, errors.New(response.CombineMessage(resp))
// The only case that should fall down to here is a WARNING response code.
// It is currently not used as a response in the scheduler so it is unknown how to handle it.
default:
r.logger.DebugPrintf("unhandled response code %v received from Aurora\n", responseCode)
return nil, errors.Errorf("unhandled response code from Aurora %v\n", responseCode.String())
}
}
}
r.logger.DebugPrintf("it took %v retries to complete this operation\n", curStep)
if curStep > 1 {
r.config.logger.Printf("retried this thrift call %d time(s)", curStep)
}
// Provide more information to the user wherever possible.
if clientErr != nil {
return nil, newRetryError(errors.Wrap(clientErr, "ran out of retries, including latest error"), curStep)
} else {
return nil, newRetryError(errors.New("ran out of retries"), curStep)
}
}
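
ExponentialBackoff is exported, so callers can reuse the same retry policy for their own checks; only errors wrapped as temporary (NewTemporaryError) keep the loop going. A minimal sketch with a hypothetical caller-supplied readiness check (editorial, not part of the vendored source):

package main

import (
	"time"

	realis "github.com/paypal/gorealis"
)

// waitUntilReady retries a caller-supplied check (hypothetical) with the same
// backoff shape the client uses for thrift calls.
func waitUntilReady(ready func() (bool, error)) error {
	backoff := realis.Backoff{
		Duration: 1 * time.Second, // base wait before the second attempt
		Factor:   2.0,             // double the wait each iteration
		Jitter:   0.1,             // add up to 10% random jitter
		Steps:    5,               // give up after 5 attempts
	}
	return realis.ExponentialBackoff(backoff, realis.NoopLogger{}, ready)
}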

4
vendor/github.com/paypal/gorealis/runTestsMac.sh generated vendored Executable file
View file

@ -0,0 +1,4 @@
#!/bin/bash
# Since we run our docker-compose setup in bridge mode to be able to run on macOS, we have to launch a Docker container inside the bridge network to avoid routing issues.
docker run -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10.3-stretch go test -v github.com/paypal/gorealis

154
vendor/github.com/paypal/gorealis/updatejob.go generated vendored Normal file
View file

@ -0,0 +1,154 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
// Structure to collect all information required to create a job update
type UpdateJob struct {
Job // SetInstanceCount for job is hidden, access via full qualifier
req *aurora.JobUpdateRequest
}
// Create a default UpdateJob object.
func NewDefaultUpdateJob(config *aurora.TaskConfig) *UpdateJob {
req := aurora.NewJobUpdateRequest()
req.TaskConfig = config
req.Settings = NewUpdateSettings()
job := NewJob().(*AuroraJob)
job.jobConfig.TaskConfig = config
// Rebuild resource map from TaskConfig
for ptr := range config.Resources {
if ptr.NumCpus != nil {
job.resources["cpu"].NumCpus = ptr.NumCpus
continue // Guard against Union violations that Go won't enforce
}
if ptr.RamMb != nil {
job.resources["ram"].RamMb = ptr.RamMb
continue
}
if ptr.DiskMb != nil {
job.resources["disk"].DiskMb = ptr.DiskMb
continue
}
}
// Mirrors defaults set by Pystachio
req.Settings.UpdateOnlyTheseInstances = make(map[*aurora.Range]bool)
req.Settings.UpdateGroupSize = 1
req.Settings.WaitForBatchCompletion = false
req.Settings.MinWaitInInstanceRunningMs = 45000
req.Settings.MaxPerInstanceFailures = 0
req.Settings.MaxFailedInstances = 0
req.Settings.RollbackOnFailure = true
//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
return &UpdateJob{Job: job, req: req}
}
func NewUpdateJob(config *aurora.TaskConfig, settings *aurora.JobUpdateSettings) *UpdateJob {
req := aurora.NewJobUpdateRequest()
req.TaskConfig = config
req.Settings = settings
job := NewJob().(*AuroraJob)
job.jobConfig.TaskConfig = config
// Rebuild resource map from TaskConfig
for ptr := range config.Resources {
if ptr.NumCpus != nil {
job.resources["cpu"].NumCpus = ptr.NumCpus
continue // Guard against Union violations that Go won't enforce
}
if ptr.RamMb != nil {
job.resources["ram"].RamMb = ptr.RamMb
continue
}
if ptr.DiskMb != nil {
job.resources["disk"].DiskMb = ptr.DiskMb
continue
}
}
//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
return &UpdateJob{Job: job, req: req}
}
// Set the instance count the job will have after the update.
func (u *UpdateJob) InstanceCount(inst int32) *UpdateJob {
u.req.InstanceCount = inst
return u
}
// Max number of instances being updated at any given moment.
func (u *UpdateJob) BatchSize(size int32) *UpdateJob {
u.req.Settings.UpdateGroupSize = size
return u
}
// Minimum number of milliseconds a shard must remain in RUNNING state before it is considered a success.
func (u *UpdateJob) WatchTime(ms int32) *UpdateJob {
u.req.Settings.MinWaitInInstanceRunningMs = ms
return u
}
// Wait for all instances in a group to be done before moving on.
func (u *UpdateJob) WaitForBatchCompletion(batchWait bool) *UpdateJob {
u.req.Settings.WaitForBatchCompletion = batchWait
return u
}
// Max number of instance failures to tolerate before marking instance as FAILED.
func (u *UpdateJob) MaxPerInstanceFailures(inst int32) *UpdateJob {
u.req.Settings.MaxPerInstanceFailures = inst
return u
}
// Max number of FAILED instances to tolerate before terminating the update.
func (u *UpdateJob) MaxFailedInstances(inst int32) *UpdateJob {
u.req.Settings.MaxFailedInstances = inst
return u
}
// When false, prevents auto rollback of a failed update.
func (u *UpdateJob) RollbackOnFail(rollback bool) *UpdateJob {
u.req.Settings.RollbackOnFailure = rollback
return u
}
func NewUpdateSettings() *aurora.JobUpdateSettings {
us := new(aurora.JobUpdateSettings)
// Mirrors defaults set by Pystachio
us.UpdateOnlyTheseInstances = make(map[*aurora.Range]bool)
us.UpdateGroupSize = 1
us.WaitForBatchCompletion = false
us.MinWaitInInstanceRunningMs = 45000
us.MaxPerInstanceFailures = 0
us.MaxFailedInstances = 0
us.RollbackOnFailure = true
return us
}
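
A minimal sketch of the builder in use (editorial, not part of the vendored source; taskConfig is assumed to come from a previously fetched job, and StartJobUpdate is assumed to have the client signature from realis.go, whose diff is suppressed above):

package main

import (
	realis "github.com/paypal/gorealis"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

// rollingUpdate grows the job to 5 instances, 2 at a time, requiring 30 s of
// RUNNING time per instance and tolerating at most 1 failed instance.
// StartJobUpdate's signature is assumed from the suppressed realis.go diff.
func rollingUpdate(r realis.Realis, taskConfig *aurora.TaskConfig) (*aurora.Response, error) {
	update := realis.NewDefaultUpdateJob(taskConfig).
		InstanceCount(5).
		BatchSize(2).
		WatchTime(30000).
		MaxFailedInstances(1)
	return r.StartJobUpdate(update, "")
}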

174
vendor/github.com/paypal/gorealis/zk.go generated vendored Normal file
View file

@ -0,0 +1,174 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"encoding/json"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/samuel/go-zookeeper/zk"
)
type Endpoint struct {
Host string `json:"host"`
Port int `json:"port"`
}
type ServiceInstance struct {
Service Endpoint `json:"serviceEndpoint"`
AdditionalEndpoints map[string]Endpoint `json:"additionalEndpoints"`
Status string `json:"status"`
}
type zkConfig struct {
endpoints []string
path string
backoff Backoff
timeout time.Duration
logger Logger
}
type ZKOpt func(z *zkConfig)
func ZKEndpoints(endpoints ...string) ZKOpt {
return func(z *zkConfig) {
z.endpoints = endpoints
}
}
func ZKPath(path string) ZKOpt {
return func(z *zkConfig) {
z.path = path
}
}
func ZKBackoff(b Backoff) ZKOpt {
return func(z *zkConfig) {
z.backoff = b
}
}
func ZKTimeout(d time.Duration) ZKOpt {
return func(z *zkConfig) {
z.timeout = d
}
}
func ZKLogger(l Logger) ZKOpt {
return func(z *zkConfig) {
z.logger = l
}
}
// Retrieves current Aurora leader from ZK.
func LeaderFromZK(cluster Cluster) (string, error) {
return LeaderFromZKOpts(ZKEndpoints(strings.Split(cluster.ZK, ",")...), ZKPath(cluster.SchedZKPath))
}
// Retrieves current Aurora leader from ZK with a custom configuration.
func LeaderFromZKOpts(options ...ZKOpt) (string, error) {
var leaderURL string
// Load the default Zookeeper configuration, then override values with those provided by the caller.
config := &zkConfig{backoff: defaultBackoff, timeout: time.Second * 10, logger: NoopLogger{}}
for _, opt := range options {
opt(config)
}
if len(config.endpoints) == 0 {
return "", errors.New("no Zookeeper endpoints supplied")
}
if config.path == "" {
return "", errors.New("no Zookeeper path supplied")
}
// Create a closure that allows us to use the ExponentialBackoff function.
retryErr := ExponentialBackoff(config.backoff, config.logger, func() (bool, error) {
c, _, err := zk.Connect(config.endpoints, config.timeout, func(c *zk.Conn) { c.SetLogger(config.logger) })
if err != nil {
return false, NewTemporaryError(errors.Wrap(err, "Failed to connect to Zookeeper"))
}
defer c.Close()
// Open up descriptor for the ZK path given
children, _, _, err := c.ChildrenW(config.path)
if err != nil {
// Sentinel error check as there is no other way to check.
if err == zk.ErrInvalidPath {
return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", config.path)
}
return false, NewTemporaryError(errors.Wrapf(err, "Path %s doesn't exist on Zookeeper ", config.path))
}
// Search for the leader through all the children in the given path
serviceInst := new(ServiceInstance)
for _, child := range children {
// Only the leader will start with member_
if strings.HasPrefix(child, "member_") {
childPath := config.path + "/" + child
data, _, err := c.Get(childPath)
if err != nil {
if err == zk.ErrInvalidPath {
return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", childPath)
}
return false, NewTemporaryError(errors.Wrap(err, "Error fetching contents of leader"))
}
err = json.Unmarshal(data, serviceInst)
if err != nil {
return false, NewTemporaryError(errors.Wrap(err, "Unable to unmarshal contents of leader"))
}
// Should only be one endpoint.
// This should never be encountered as it would indicate Aurora
// writing bad info into Zookeeper but is kept here as a safety net.
if len(serviceInst.AdditionalEndpoints) > 1 {
return false, NewTemporaryError(errors.New("ambiguous endpoints in json blob, Aurora wrote bad info to ZK"))
}
var scheme, host, port string
for k, v := range serviceInst.AdditionalEndpoints {
scheme = k
host = v.Host
port = strconv.Itoa(v.Port)
}
leaderURL = scheme + "://" + host + ":" + port
return true, nil
}
}
// Leader data might not be available yet, try to fetch again.
return false, NewTemporaryError(errors.New("No leader found"))
})
if retryErr != nil {
config.logger.Printf("Failed to determine leader after %v attempts", config.backoff.Steps)
return "", retryErr
}
return leaderURL, nil
}
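
A minimal sketch of resolving the leader with custom options (editorial, not part of the vendored source; the endpoint is illustrative and "/aurora/scheduler" is Aurora's conventional scheduler group path):

package main

import (
	"fmt"
	"time"

	realis "github.com/paypal/gorealis"
)

func main() {
	url, err := realis.LeaderFromZKOpts(
		realis.ZKEndpoints("192.168.33.2:2181"), // illustrative endpoint
		realis.ZKPath("/aurora/scheduler"),
		realis.ZKTimeout(5*time.Second),
	)
	if err != nil {
		fmt.Println("no leader found:", err)
		return
	}
	fmt.Println("leading scheduler at", url)
}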

69
vendor/github.com/paypal/gorealis/zk_test.go generated vendored Normal file
View file

@ -0,0 +1,69 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis_test
import (
"log"
"os"
"testing"
"time"
"github.com/paypal/gorealis"
"github.com/stretchr/testify/assert"
)
var backoff realis.Backoff = realis.Backoff{ // Reduce penalties for this test to make it quick
Steps: 5,
Duration: 1 * time.Second,
Factor: 1.0,
Jitter: 0.1}
// Test for behavior when no endpoints are given to the ZK leader finding function.
func TestZKNoEndpoints(t *testing.T) {
_, err := realis.LeaderFromZKOpts()
assert.Error(t, err)
}
// Test for behavior when no path is given to the ZK leader finding function.
func TestZKNoPath(t *testing.T) {
_, err := realis.LeaderFromZKOpts(realis.ZKEndpoints("127.0.0.1:2181"))
assert.Error(t, err)
}
// Test for behavior when a valid but non-existent path is given to the ZK leader finding function.
func TestZKPathDoesntExist(t *testing.T) {
_, err := realis.LeaderFromZKOpts(realis.ZKEndpoints("127.0.0.1:2181"),
realis.ZKPath("/somepath"),
realis.ZKBackoff(backoff),
realis.ZKLogger(log.New(os.Stdout, "realis-debug: ", log.Ldate)))
assert.True(t, realis.IsTimeout(err), "a non-existent path should result in a timeout behaving error")
retryErr := realis.ToRetryCount(err)
assert.NotNil(t, retryErr, "conversion to retry error failed")
assert.Equal(t, backoff.Steps, retryErr.RetryCount(), "retry count is off")
}
// Test for behavior when an invalid Zookeeper path is passed. Should fail right away.
func TestZKBadPath(t *testing.T) {
_, err := realis.LeaderFromZKOpts(realis.ZKEndpoints("127.0.0.1:2181"),
realis.ZKPath("invalidpath"),
realis.ZKBackoff(backoff),
realis.ZKLogger(log.New(os.Stdout, "realis-debug: ", log.Ldate)))
assert.False(t, realis.IsTimeout(err), "a bad path should result in a NON-timeout behaving error")
}