Checking in vendor folder for ease of using go get.

Renan DelValle 2018-10-23 23:32:59 -07:00
parent 7a1251853b
commit cdb4b5a1d0
No known key found for this signature in database
GPG key ID: C240AD6D6F443EC9
3554 changed files with 1270116 additions and 0 deletions

@@ -0,0 +1,22 @@
-----BEGIN CERTIFICATE-----
MIIDrTCCApWgAwIBAgIJAM+bKx50CY9JMA0GCSqGSIb3DQEBCwUAMG0xCzAJBgNV
BAYTAkdCMQ8wDQYDVQQIDAZMb25kb24xDzANBgNVBAcMBkxvbmRvbjEYMBYGA1UE
CgwPR2xvYmFsIFNlY3VyaXR5MRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MQowCAYD
VQQDDAEqMB4XDTE3MTIwODIwNTMwMVoXDTI3MTIwNjIwNTMwMVowbTELMAkGA1UE
BhMCR0IxDzANBgNVBAgMBkxvbmRvbjEPMA0GA1UEBwwGTG9uZG9uMRgwFgYDVQQK
DA9HbG9iYWwgU2VjdXJpdHkxFjAUBgNVBAsMDUlUIERlcGFydG1lbnQxCjAIBgNV
BAMMASowggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhdN0KH80BF3Dk
RQqAARcf7F87uNhQM05HXK8ffpESvKhzrO9BHuDZ0yS3il0BK9XpTyTtHSLIbphk
rO3BOsmPj0zhaM20LsPtwy8GmMCym3hVNSYYyP5XCdjA3uZIYq2R8ruk+vZTe4Zr
F8GHV/xGYU4zKPMGzsQbICjZhj0yiYF9UQ2J+xw79nsqPTmo8+EdVuunLz39dt2o
SbDA01g/kPTIg9K2CAUH0mm4zegiqytwpn2JKVoemmgrDYECWnhLprWlvN9t/fX9
IgprDAHN1BsMrzfmfQXZpVmbIlTriVSdYVeTwG8rT7Tg8soIHqBrnJ1ykTpY4VrO
6tc2z4kTAgMBAAGjUDBOMB0GA1UdDgQWBBSLvwax1Zd6ZiE7TjRklWYNPwgZ2zAf
BgNVHSMEGDAWgBSLvwax1Zd6ZiE7TjRklWYNPwgZ2zAMBgNVHRMEBTADAQH/MA0G
CSqGSIb3DQEBCwUAA4IBAQCJY/EJxlyiSrnO82QcsWm9cT/ciU/G7Y4vX/tGs74C
tNxuBpc0vMfW4a9u6tmi3cW3EXD/KRvPwKZXxzTOhoQY9ZpbZLZ6VvCQ+aWQaXWT
664IS/mrEUZ/p3pgqTNtifdpPAZqVqNdS+Od8/B3/nWUn6JBkDZ4WaFQgfsSulxK
yzYN6UbwhLHfQUupFFhPfvYIVLH9ErGzcv5ZCHX9FornCc0W/8hL4EdjmpTW2ML2
hM5aTKynMiR1GuGSdSpJ+BOeiUI7Go1jGwjV+H9Pw/kfmooq2wuuUGti5dr0Qq7h
CQx1a14BmDBwGoMIOdjFATRwnami5e188fAJozL++i+s
-----END CERTIFICATE-----

@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDhdN0KH80BF3Dk
RQqAARcf7F87uNhQM05HXK8ffpESvKhzrO9BHuDZ0yS3il0BK9XpTyTtHSLIbphk
rO3BOsmPj0zhaM20LsPtwy8GmMCym3hVNSYYyP5XCdjA3uZIYq2R8ruk+vZTe4Zr
F8GHV/xGYU4zKPMGzsQbICjZhj0yiYF9UQ2J+xw79nsqPTmo8+EdVuunLz39dt2o
SbDA01g/kPTIg9K2CAUH0mm4zegiqytwpn2JKVoemmgrDYECWnhLprWlvN9t/fX9
IgprDAHN1BsMrzfmfQXZpVmbIlTriVSdYVeTwG8rT7Tg8soIHqBrnJ1ykTpY4VrO
6tc2z4kTAgMBAAECggEBAMZL7SY8dikhnu+HMgcH7njrg4+ZsthHZ/AoOvcucRbT
zC2ByyWxrP6pUUAFeGvRTGHadJYA7FjxvSO/XZZ4yFN2LJ6NeW+jOjzjUXcx3zq4
t4vqJUnjbqDLTlPFOTItaJBXuGcRPJqMqNuEl3kdEAwvBYLF34r7TUy2and4NFc/
JziGljkiucoNBk62TCDrffnvxMJXht+ab6PMWO87PzMVs4xUFPe0ezv4O54btUcV
EJsU58013EHeCai8AnxjcIPlMlB+lg4Y3C4VXf0mJ//cBvbCp+kyWybMw/e+e222
xq/98vnCOIqcy4u+9ENPLJQe7hXZ3Sqh38kf0GuOh8ECgYEA+VFvuuBP0OQHTxeE
dUizR3Iz/xkeGDUZ/8Ix4TCUmRRuhEXrV7ShwUmuanO3pNhChW6hXZ6qj/yuhfOC
D4V4upEnJDccz/cbH1PdBsfALhC8/C0WSGvnEWZMw/SggmY4KwReqWwN9aA8qjdq
kFTOJc2Js+dCHP9kn9J3U16A+oMCgYEA53+2lhckAI8bsrbCayWRZAVx7hUNPijt
MQvH+PCJ3QeZ0z801zk+4ny5WQ1BT0vRzwj8an4Byi2ZuTQU//N4oawDK0JVYi7q
rjKX/AhAx/puoGAgqiS1nDmuiUiplW06HqayCFbpJ1CoXz8+MwdRXiJ8dgioafVJ
+7wHZDVmMjECgYEAoULxd/ia58x2hcv6Wzo469+MjlYaxyGhvXJIfRXFJ/a1PU1U
Whh1/+W+sRBEGpXfARt7uGhmfle8Mtw8pfl5C4PTw3L6afG1U2AVOMt/HMyq0JoB
LbrNbM20nZLfNzkS35AmAoPny5ZnZtoNTWntJTp69SiB9OuklFO35u7bki0CgYAL
qQYkVzQMBylI/iWaygChvhh3+n15RQx1bPd8lXkMNgbMeiGKOaruM4QOdTl16ga+
W+CC6KfkbBmTF4l7PuMzmXtrYWL1mBFgBtJa8nt41yddUpoyl7jCDrG43n0UNrU3
uAO9ocsKnOhuK7xRS6wQhsIoG9WHyMAaOuVQadQk8QKBgQDVibcvOPXNcF1aRMG7
V24nBb+YYz+00g/cLRkDnBX9/HORle0HSfeT70ctRhuFCoHHbHF4fnp/iAwDgxdB
dNufthftTZTtFGITUsJDN36fSXNjEvKzmKEAlEYkGAYijLlDwknPB+bf4NQ6T0R+
AtnKQY6G4kFSfw9AKgWGy7ZKfg==
-----END PRIVATE KEY-----

@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC6zCCAdOgAwIBAgIJAMgY8gND5lFnMA0GCSqGSIb3DQEBCwUAMAwxCjAIBgNV
BAMMASowHhcNMTcxMjA4MjA1MTQyWhcNMjcxMjA2MjA1MTQyWjAMMQowCAYDVQQD
DAEqMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwKcyyXg90wen25yh
QA17MDyzjzBsIL9+kznzRD1azoNqA3RShAAWXn5a81HeWvncpVL+TKPMU3UC02XT
I6GtX1U7xmdKstBLKiHQxGWX04DshSrVgcVzLUI6OHBG6feoL1mGAa8jB2UEE6ER
uXdgYgKbLUrvduSn4fBvPIhhXg9YL2n2TVujkaY9bPZ9M5tQ5K+g4wRwCAYgjTUN
55J82uzAsLCs+AQi9D4bLJmw0z2H7enRLkd9sRE2pArhXm4LLg/QlL8I5ZHv7vfl
RYdOoC3bjgKk+OVOmb2Fb/dWVlOMcnO8qeo9WyQbhAcjNK2W9Tqk5E5orGZ/bkw/
iZc0MwIDAQABo1AwTjAdBgNVHQ4EFgQUA0xmNKQqxUQgaM9ceCzFyocn9jswHwYD
VR0jBBgwFoAUA0xmNKQqxUQgaM9ceCzFyocn9jswDAYDVR0TBAUwAwEB/zANBgkq
hkiG9w0BAQsFAAOCAQEAnL7VvBTcFyLeNeuTAWmM0bjlwWsuL9Va2LZitnATgzE7
ACS+ZNURnpK/o3UHGc2ePDCFgPsF2mnh4Jmye2tl5uPxQS2zR96hp16ZGVi9N1gx
4aQyknKt6UFRP/cvWwgDN5N3pnRZQ7J0kaAWCPtAIldeGK7UDjOJ1DLDVLeByr7x
27TCt69ysisTtz6Tzr5vvVDEtu2yNIf/uGk3od+pe/0E1UXVCTItvwM30wvfcTPU
aMZXBYNmSrjnJ4k/9FSjZYNKPtK1c/JR+zUng1h+I3b7itY5VBGdzdq9fEk20PHm
Xdg1Ptbebtl6PJqWX+rydXuen6SUt8vFJE89MkbWSw==
-----END CERTIFICATE-----

vendor/github.com/paypal/gorealis/examples/client.go (generated, vendored; new file, 625 lines)
@@ -0,0 +1,625 @@
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"strings"
	"time"

	"github.com/paypal/gorealis"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
	"github.com/paypal/gorealis/response"
)

var cmd, executor, url, clustersConfig, clusterName, updateId, username, password, zkUrl, hostList, role string
var caCertsPath string
var clientKey, clientCert string
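
// Connection timeout in milliseconds, passed to realis.TimeoutMS below.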
var CONNECTION_TIMEOUT = 20000

func init() {
	flag.StringVar(&cmd, "cmd", "", "Job request type to send to Aurora Scheduler")
	flag.StringVar(&executor, "executor", "thermos", "Executor to use")
	flag.StringVar(&url, "url", "", "URL at which the Aurora Scheduler exists as [url]:[port]")
	flag.StringVar(&clustersConfig, "clusters", "", "Location of the clusters.json file used by aurora.")
	flag.StringVar(&clusterName, "cluster", "devcluster", "Name of cluster to run job on (only necessary if clusters is set)")
	flag.StringVar(&updateId, "updateId", "", "Update ID to operate on")
	flag.StringVar(&username, "username", "aurora", "Username to use for authorization")
	flag.StringVar(&password, "password", "secret", "Password to use for authorization")
	flag.StringVar(&zkUrl, "zkurl", "", "zookeeper url")
	flag.StringVar(&hostList, "hostList", "", "Comma separated list of hosts to operate on")
	flag.StringVar(&role, "role", "", "owner role to use")
	flag.StringVar(&caCertsPath, "caCertsPath", "", "Path to CA certs on local machine.")
	flag.StringVar(&clientCert, "clientCert", "", "Client certificate to use to connect to Aurora.")
	flag.StringVar(&clientKey, "clientKey", "", "Client private key to use to connect to Aurora.")
	flag.Parse()

	// Attempt to load the leader from ZooKeeper using a clusters.json file
	// (as used by the default Aurora client) if one is provided.
	// This overrides any url passed in the arguments.
	if clustersConfig != "" {
		clusters, err := realis.LoadClusters(clustersConfig)
		if err != nil {
			log.Fatalln(err)
		}

		cluster, ok := clusters[clusterName]
		if !ok {
			log.Fatalf("Cluster %s doesn't exist in the file provided\n", clusterName)
		}

		url, err = realis.LeaderFromZK(cluster)
		if err != nil {
			log.Fatalln(err)
		}
	}
}

func main() {
	var job realis.Job
	var err error
	var monitor *realis.Monitor
	var r realis.Realis

	clientOptions := []realis.ClientOption{
		realis.BasicAuth(username, password),
		realis.ThriftJSON(),
		realis.TimeoutMS(CONNECTION_TIMEOUT),
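		// Retry with exponential backoff: 2 steps, 10s initial duration,
		// factor 2.0, and 10% jitter, per the realis.Backoff fields below.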
		realis.BackOff(realis.Backoff{
			Steps:    2,
			Duration: 10 * time.Second,
			Factor:   2.0,
			Jitter:   0.1,
		}),
		realis.Debug(),
	}

	// Check if zkUrl is available.
	if zkUrl != "" {
		fmt.Println("zkUrl: ", zkUrl)
		clientOptions = append(clientOptions, realis.ZKUrl(zkUrl))
	} else {
		clientOptions = append(clientOptions, realis.SchedulerUrl(url))
	}

	if caCertsPath != "" {
		clientOptions = append(clientOptions, realis.Certspath(caCertsPath))
	}

	if clientKey != "" && clientCert != "" {
		clientOptions = append(clientOptions, realis.ClientCerts(clientKey, clientCert))
	}

	r, err = realis.NewRealisClient(clientOptions...)
	if err != nil {
		log.Fatalln(err)
	}
	monitor = &realis.Monitor{r}
	defer r.Close()

	switch executor {
	case "thermos":
		payload, err := ioutil.ReadFile("examples/thermos_payload.json")
		if err != nil {
			log.Fatalln("Error reading json config file: ", err)
		}

		job = realis.NewJob().
			Environment("prod").
			Role("vagrant").
			Name("hello_world_from_gorealis").
			ExecutorName(aurora.AURORA_EXECUTOR_NAME).
			ExecutorData(string(payload)).
			CPU(1).
			RAM(64).
			Disk(100).
			IsService(true).
			InstanceCount(1).
			AddPorts(1)
	case "compose":
		job = realis.NewJob().
			Environment("prod").
			Role("vagrant").
			Name("docker-compose-test").
			ExecutorName("docker-compose-executor").
			ExecutorData("{}").
			CPU(0.25).
			RAM(512).
			Disk(100).
			IsService(true).
			InstanceCount(1).
			AddPorts(4).
			AddLabel("fileName", "sample-app/docker-compose.yml").
			AddURIs(true, true, "https://github.com/mesos/docker-compose-executor/releases/download/0.1.0/sample-app.tar.gz")
	case "none":
		job = realis.NewJob().
			Environment("prod").
			Role("vagrant").
			Name("docker_as_task").
			CPU(1).
			RAM(64).
			Disk(100).
			IsService(true).
			InstanceCount(1).
			AddPorts(1)
	default:
		log.Fatalln("Only thermos, compose, and none are supported for now")
	}

	switch cmd {
	case "create":
		fmt.Println("Creating job")
		resp, err := r.CreateJob(job)
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Println(resp.String())
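		// Wait for all instances to come up; if the monitor fails or times
		// out, kill the job so no partially created job is left behind.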
		if ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50); !ok || mErr != nil {
			_, err := r.KillJob(job.JobKey())
			if err != nil {
				log.Fatalln(err)
			}
			log.Fatalf("ok: %v\n err: %v", ok, mErr)
		}
	case "createService":
		// Create a service with three instances using the update API instead of the createJob API
		fmt.Println("Creating service")
		settings := realis.NewUpdateSettings()
		job.InstanceCount(3)
		resp, result, err := r.CreateService(job, settings)
		if err != nil {
			log.Println("error: ", err)
			log.Fatal("response: ", resp.String())
		}
		fmt.Println(result.String())
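		// If the update does not complete within the monitor window, abort
		// the update and kill the job.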
		if ok, mErr := monitor.JobUpdate(*result.GetKey(), 5, 180); !ok || mErr != nil {
			_, err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out")
			_, err = r.KillJob(job.JobKey())
			if err != nil {
				log.Fatal(err)
			}
			log.Fatalf("ok: %v\n err: %v", ok, mErr)
		}
	case "createDocker":
		fmt.Println("Creating a docker based job")
		container := realis.NewDockerContainer().Image("python:2.7").AddParameter("network", "host")
		job.Container(container)
		resp, err := r.CreateJob(job)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(resp.String())

		if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
			_, err := r.KillJob(job.JobKey())
			if err != nil {
				log.Fatal(err)
			}
		}
	case "createMesosContainer":
		fmt.Println("Creating a mesos container based job")
		container := realis.NewMesosContainer().DockerImage("python", "2.7")
		job.Container(container)
		resp, err := r.CreateJob(job)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(resp.String())

		if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
			_, err := r.KillJob(job.JobKey())
			if err != nil {
				log.Fatal(err)
			}
		}
	case "scheduleCron":
		fmt.Println("Scheduling a Cron job")
		// Cron config
		job.CronSchedule("* * * * *")
		job.IsService(false)
		resp, err := r.ScheduleCronJob(job)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(resp.String())
	case "startCron":
		fmt.Println("Starting a Cron job")
		resp, err := r.StartCronJob(job.JobKey())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(resp.String())
	case "descheduleCron":
		fmt.Println("Descheduling a Cron job")
		resp, err := r.DescheduleCronJob(job.JobKey())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(resp.String())
	case "kill":
		fmt.Println("Killing job")
		resp, err := r.KillJob(job.JobKey())
		if err != nil {
			log.Fatal(err)
		}

		if ok, err := monitor.Instances(job.JobKey(), 0, 5, 50); !ok || err != nil {
			log.Fatal("Unable to kill all instances of job")
		}
		fmt.Println(resp.String())
	case "restart":
		fmt.Println("Restarting job")
		resp, err := r.RestartJob(job.JobKey())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(resp.String())
	case "liveCount":
		fmt.Println("Getting instance count")
		live, err := r.GetInstanceIds(job.JobKey(), aurora.LIVE_STATES)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("Live instances: %+v\n", live)
	case "activeCount":
		fmt.Println("Getting instance count")
		live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("Number of live instances: ", len(live))
	case "flexUp":
		fmt.Println("Flexing up job")
		numOfInstances := int32(4)

		live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
		if err != nil {
			log.Fatal(err)
		}
		currInstances := int32(len(live))
		fmt.Println("Current num of instances: ", currInstances)

		var instId int32
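		// Pick an arbitrary instance ID from the live set (iteration order
		// is not specified).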
		for k := range live {
			instId = k
		}

		resp, err := r.AddInstances(aurora.InstanceKey{
			JobKey:     job.JobKey(),
			InstanceId: instId,
		},
			numOfInstances)
		if err != nil {
			log.Fatal(err)
		}

		if ok, err := monitor.Instances(job.JobKey(), currInstances+numOfInstances, 5, 50); !ok || err != nil {
			fmt.Println("Flexing up failed")
		}
		fmt.Println(resp.String())
	case "flexDown":
		fmt.Println("Flexing down job")
		numOfInstances := int32(2)

		live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
		if err != nil {
			log.Fatal(err)
		}
		currInstances := int32(len(live))
		fmt.Println("Current num of instances: ", currInstances)

		resp, err := r.RemoveInstances(job.JobKey(), numOfInstances)
		if err != nil {
			log.Fatal(err)
		}

		if ok, err := monitor.Instances(job.JobKey(), currInstances-numOfInstances, 5, 100); !ok || err != nil {
			fmt.Println("flexDown failed")
		}
		fmt.Println(resp.String())
	case "update":
		fmt.Println("Updating a job with more RAM and to 5 instances")
		live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
		if err != nil {
			log.Fatal(err)
		}

		var instId int32
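		// As above, pick an arbitrary live instance ID; its task config
		// seeds the update below.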
		for k := range live {
			instId = k
		}

		taskConfig, err := r.FetchTaskConfig(aurora.InstanceKey{
			JobKey:     job.JobKey(),
			InstanceId: instId,
		})
		if err != nil {
			log.Fatal(err)
		}

		updateJob := realis.NewDefaultUpdateJob(taskConfig)
		updateJob.InstanceCount(5).RAM(128)

		resp, err := r.StartJobUpdate(updateJob, "")
		if err != nil {
			log.Fatal(err)
		}

		jobUpdateKey := response.JobUpdateKey(resp)
		monitor.JobUpdate(*jobUpdateKey, 5, 500)
	case "pauseJobUpdate":
		resp, err := r.PauseJobUpdate(&aurora.JobUpdateKey{
			Job: job.JobKey(),
			ID:  updateId,
		}, "")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("PauseJobUpdate response: ", resp.String())
	case "resumeJobUpdate":
		resp, err := r.ResumeJobUpdate(&aurora.JobUpdateKey{
			Job: job.JobKey(),
			ID:  updateId,
		}, "")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("ResumeJobUpdate response: ", resp.String())
	case "pulseJobUpdate":
		resp, err := r.PulseJobUpdate(&aurora.JobUpdateKey{
			Job: job.JobKey(),
			ID:  updateId,
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("PulseJobUpdate response: ", resp.String())
	case "updateDetails":
		resp, err := r.JobUpdateDetails(aurora.JobUpdateQuery{
			Key: &aurora.JobUpdateKey{
				Job: job.JobKey(),
				ID:  updateId,
			},
			Limit: 1,
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(response.JobUpdateDetails(resp))
	case "abortUpdate":
		fmt.Println("Abort update")
		resp, err := r.AbortJobUpdate(aurora.JobUpdateKey{
			Job: job.JobKey(),
			ID:  updateId,
		},
			"")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(resp.String())
	case "rollbackUpdate":
		fmt.Println("Rollback update")
		resp, err := r.RollbackJobUpdate(aurora.JobUpdateKey{
			Job: job.JobKey(),
			ID:  updateId,
		},
			"")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(resp.String())
	case "taskConfig":
		fmt.Println("Getting job info")
		live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
		if err != nil {
			log.Fatal(err)
		}

		var instId int32
		for k := range live {
			instId = k
			break
		}

		config, err := r.FetchTaskConfig(aurora.InstanceKey{
			JobKey:     job.JobKey(),
			InstanceId: instId,
		})
		if err != nil {
			log.Fatal(err)
		}
		log.Println(config.String())
	case "updatesummary":
		fmt.Println("Getting job update summary")
		jobquery := &aurora.JobUpdateQuery{
			Role:   &job.JobKey().Role,
			JobKey: job.JobKey(),
		}
		updatesummary, err := r.GetJobUpdateSummaries(jobquery)
		if err != nil {
			log.Fatalf("error while getting update summary: %v", err)
		}
		fmt.Println(updatesummary)
	case "taskStatus":
		fmt.Println("Getting task status")
		taskQ := &aurora.TaskQuery{
			Role:        &job.JobKey().Role,
			Environment: &job.JobKey().Environment,
			JobName:     &job.JobKey().Name,
		}
		tasks, err := r.GetTaskStatus(taskQ)
		if err != nil {
			log.Fatalf("error: %+v\n ", err)
		}
		fmt.Printf("length: %d\n ", len(tasks))
		fmt.Printf("tasks: %+v\n", tasks)
	case "tasksWithoutConfig":
		fmt.Println("Getting task status")
		taskQ := &aurora.TaskQuery{
			Role:        &job.JobKey().Role,
			Environment: &job.JobKey().Environment,
			JobName:     &job.JobKey().Name,
		}
		tasks, err := r.GetTasksWithoutConfigs(taskQ)
		if err != nil {
			log.Fatalf("error: %+v\n ", err)
		}
		fmt.Printf("length: %d\n ", len(tasks))
		fmt.Printf("tasks: %+v\n", tasks)
	case "drainHosts":
		fmt.Println("Setting hosts to DRAINING")
		if hostList == "" {
			log.Fatal("No hosts specified to drain")
		}
		hosts := strings.Split(hostList, ",")
		_, result, err := r.DrainHosts(hosts...)
		if err != nil {
			log.Fatalf("error: %+v\n", err.Error())
		}

		// Monitor change to DRAINING and DRAINED mode
		hostResult, err := monitor.HostMaintenance(
			hosts,
			[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
			5,
			10)
		if err != nil {
			for host, ok := range hostResult {
				if !ok {
					fmt.Printf("Host %s did not transition into desired mode(s)\n", host)
				}
			}
			log.Fatalf("error: %+v\n", err.Error())
		}
		fmt.Print(result.String())
	case "endMaintenance":
		fmt.Println("Setting hosts to ACTIVE")
		if hostList == "" {
			log.Fatal("No hosts specified to end maintenance on")
		}
		hosts := strings.Split(hostList, ",")
		_, result, err := r.EndMaintenance(hosts...)
		if err != nil {
			log.Fatalf("error: %+v\n", err.Error())
		}

		// Monitor change back to NONE (active) mode
		hostResult, err := monitor.HostMaintenance(
			hosts,
			[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
			5,
			10)
		if err != nil {
			for host, ok := range hostResult {
				if !ok {
					fmt.Printf("Host %s did not transition into desired mode(s)\n", host)
				}
			}
			log.Fatalf("error: %+v\n", err.Error())
		}
		fmt.Print(result.String())
	case "getJobs":
		fmt.Println("GetJobs...role: ", role)
		_, result, err := r.GetJobs(role)
		if err != nil {
			log.Fatalf("error: %+v\n", err.Error())
		}
		fmt.Println("map size: ", len(result.Configs))
		fmt.Println(result.String())
	case "snapshot":
		fmt.Println("Forcing scheduler to write snapshot to mesos replicated log")
		err := r.Snapshot()
		if err != nil {
			log.Fatalf("error: %+v\n", err.Error())
		}
	case "performBackup":
		fmt.Println("Writing Backup of Snapshot to file system")
		err := r.PerformBackup()
		if err != nil {
			log.Fatalf("error: %+v\n", err.Error())
		}
	case "forceExplicitRecon":
		fmt.Println("Force an explicit recon")
		err := r.ForceExplicitTaskReconciliation(nil)
		if err != nil {
			log.Fatalf("error: %+v\n", err.Error())
		}
	case "forceImplicitRecon":
		fmt.Println("Force an implicit recon")
		err := r.ForceImplicitTaskReconciliation()
		if err != nil {
			log.Fatalf("error: %+v\n", err.Error())
		}
	default:
		log.Fatal("Command not supported")
	}
}

@@ -0,0 +1,8 @@
[{
  "name": "devcluster",
  "zk": "192.168.33.7",
  "scheduler_zk_path": "/aurora/scheduler",
  "auth_mechanism": "UNAUTHENTICATED",
  "slave_run_directory": "latest",
  "slave_root": "/var/lib/mesos"
}]

vendor/github.com/paypal/gorealis/examples/config.json (generated, vendored; new file, 13 lines)
@@ -0,0 +1,13 @@
{
  "username": "aurora",
  "password": "secret",
  "sched_url": "http://192.168.33.7:8081",
  "cluster": {
    "name": "devcluster",
    "zk": "192.168.33.7",
    "scheduler_zk_path": "/aurora/scheduler",
    "auth_mechanism": "UNAUTHENTICATED",
    "slave_run_directory": "latest",
    "slave_root": "/var/lib/mesos"
  }
}

vendor/github.com/paypal/gorealis/examples/job.json (generated, vendored; new file, 21 lines)
@@ -0,0 +1,21 @@
{
  "name": "sample",
  "cpu": 1.4,
  "ram_mb": 128,
  "disk_mb": 64,
  "executor": "docker-compose-executor",
  "service": false,
  "ports": 1,
  "instances": 1,
  "uris": [
    {
      "uri": "https://github.com/mesos/docker-compose-executor/releases/download/0.1.0/sample-app.tar.gz",
      "extract": true,
      "cache": true
    }
  ],
  "labels": {
    "fileName": "sample-app/docker-compose.yml"
  }
}

@@ -0,0 +1,21 @@
{
  "name": "sampleapp",
  "cpu": 0.25,
  "ram_mb": 256,
  "disk_mb": 100,
  "executor": "docker-compose-executor",
  "service": true,
  "ports": 4,
  "instances": 1,
  "uris": [
    {
      "uri": "http://192.168.33.8/app.tar.gz",
      "extract": true,
      "cache": false
    }
  ],
  "labels": {
    "fileName": "sampleapp/docker-compose.yml,sampleapp/docker-compose-healthcheck.yml"
  }
}

@@ -0,0 +1,11 @@
{
  "name": "hello_world_from_gorealis",
  "cpu": 1.0,
  "ram_mb": 64,
  "disk_mb": 100,
  "executor": "thermos",
  "exec_data_file": "examples/thermos_payload.json",
  "service": true,
  "ports": 1,
  "instances": 1
}

@@ -0,0 +1,226 @@
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"time"

	"github.com/paypal/gorealis"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
	"github.com/pkg/errors"
)

type URIJson struct {
	URI     string `json:"uri"`
	Extract bool   `json:"extract"`
	Cache   bool   `json:"cache"`
}

type JobJson struct {
	Name             string            `json:"name"`
	CPU              float64           `json:"cpu"`
	RAM              int64             `json:"ram_mb"`
	Disk             int64             `json:"disk_mb"`
	Executor         string            `json:"executor"`
	ExecutorDataFile string            `json:"exec_data_file,omitempty"`
	Instances        int32             `json:"instances"`
	URIs             []URIJson         `json:"uris"`
	Labels           map[string]string `json:"labels"`
	Service          bool              `json:"service"`
	Ports            int               `json:"ports"`
}

func (j *JobJson) Validate() bool {
	if j.Name == "" {
		return false
	}
	if j.CPU <= 0.0 {
		return false
	}
	if j.RAM <= 0 {
		return false
	}
	if j.Disk <= 0 {
		return false
	}
	return true
}

type Config struct {
	realis.Cluster `json:"cluster"`
	Username       string `json:"username"`
	Password       string `json:"password"`
	SchedUrl       string `json:"sched_url"`
	Transport      string `json:"transport,omitempty"`
	Debug          bool   `json:"debug,omitempty"`
}

// Command-line arguments for config and job JSON files.
var configJSONFile, jobJSONFile string

var job *JobJson
var config *Config

// Read and validate the command-line arguments.
// If no Aurora scheduler URL is provided, use ZooKeeper to locate the leader.
func init() {
	flag.StringVar(&configJSONFile, "config", "./config.json", "The config file that contains username, password, and the cluster configuration information.")
	flag.StringVar(&jobJSONFile, "job", "./job.json", "JSON file containing job definitions.")
	flag.Parse()

	job = new(JobJson)
	config = new(Config)

	if jobsFile, jobJSONReadErr := os.Open(jobJSONFile); jobJSONReadErr != nil {
		flag.Usage()
		fmt.Println("Error reading the job JSON file: ", jobJSONReadErr)
		os.Exit(1)
	} else {
		if unmarshallErr := json.NewDecoder(jobsFile).Decode(job); unmarshallErr != nil {
			flag.Usage()
			fmt.Println("Error parsing job json file: ", unmarshallErr)
			os.Exit(1)
		}

		// Validate the job JSON file.
		if !job.Validate() {
			fmt.Println("Invalid Job.")
			os.Exit(1)
		}
	}

	if configFile, configJSONErr := os.Open(configJSONFile); configJSONErr != nil {
		flag.Usage()
		fmt.Println("Error reading the config JSON file: ", configJSONErr)
		os.Exit(1)
	} else {
		if unmarshallErr := json.NewDecoder(configFile).Decode(config); unmarshallErr != nil {
			fmt.Println("Error parsing config JSON file: ", unmarshallErr)
			os.Exit(1)
		}
	}
}

func CreateRealisClient(config *Config) (realis.Realis, error) {
	var transportOption realis.ClientOption
	// Configure the transport protocol. If no transport is provided,
	// default to the JSON transport.
	switch config.Transport {
	case "binary":
		transportOption = realis.ThriftBinary()
	case "json", "":
		transportOption = realis.ThriftJSON()
	default:
		fmt.Println("Invalid transport option provided!")
		os.Exit(1)
	}
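
	// The scheduler leader is discovered through ZooKeeper via the cluster
	// in the config file; realis.SchedulerUrl(config.SchedUrl) could be
	// used instead to connect to a fixed endpoint.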
	clientOptions := []realis.ClientOption{
		realis.BasicAuth(config.Username, config.Password),
		transportOption,
		realis.ZKCluster(&config.Cluster),
		// realis.SchedulerUrl(config.SchedUrl),
		realis.SetLogger(log.New(os.Stdout, "realis-debug: ", log.Ldate)),
		realis.BackOff(realis.Backoff{
			Steps:    2,
			Duration: 10 * time.Second,
			Factor:   2.0,
			Jitter:   0.1,
		}),
	}

	if config.Debug {
		clientOptions = append(clientOptions, realis.Debug())
	}

	return realis.NewRealisClient(clientOptions...)
}

func main() {
	if r, clientCreationErr := CreateRealisClient(config); clientCreationErr != nil {
		fmt.Println(clientCreationErr)
		os.Exit(1)
	} else {
		monitor := &realis.Monitor{Client: r}
		defer r.Close()

		uris := job.URIs
		labels := job.Labels

		auroraJob := realis.NewJob().
			Environment("prod").
			Role("vagrant").
			Name(job.Name).
			CPU(job.CPU).
			RAM(job.RAM).
			Disk(job.Disk).
			IsService(job.Service).
			InstanceCount(job.Instances).
			AddPorts(job.Ports)

		// If the thermos executor is requested, read in the thermos payload.
		if (job.Executor == aurora.AURORA_EXECUTOR_NAME) || (job.Executor == "thermos") {
			payload, err := ioutil.ReadFile(job.ExecutorDataFile)
			if err != nil {
				fmt.Println(errors.Wrap(err, "Invalid thermos payload file!"))
				os.Exit(1)
			}
			auroraJob.ExecutorName(aurora.AURORA_EXECUTOR_NAME).
				ExecutorData(string(payload))
		} else {
			auroraJob.ExecutorName(job.Executor)
		}

		// Adding URIs.
		for _, uri := range uris {
			auroraJob.AddURIs(uri.Extract, uri.Cache, uri.URI)
		}

		// Adding Labels.
		for key, value := range labels {
			auroraJob.AddLabel(key, value)
		}

		fmt.Println("Creating Job...")
		if resp, jobCreationErr := r.CreateJob(auroraJob); jobCreationErr != nil {
			fmt.Println("Error creating Aurora job: ", jobCreationErr)
			os.Exit(1)
		} else {
			if resp.ResponseCode == aurora.ResponseCode_OK {
				if ok, monitorErr := monitor.Instances(auroraJob.JobKey(), auroraJob.GetInstanceCount(), 5, 50); !ok || monitorErr != nil {
					if _, jobErr := r.KillJob(auroraJob.JobKey()); jobErr != nil {
						fmt.Println(jobErr)
						os.Exit(1)
					} else {
						fmt.Println("ok: ", ok)
						fmt.Println("jobErr: ", jobErr)
					}
				}
			}
		}
	}
}

@@ -0,0 +1,62 @@
{
  "environment": "prod",
  "health_check_config": {
    "initial_interval_secs": 15.0,
    "health_checker": {
      "http": {
        "expected_response_code": 0,
        "endpoint": "/health",
        "expected_response": "ok"
      }
    },
    "interval_secs": 10.0,
    "timeout_secs": 1.0,
    "max_consecutive_failures": 0
  },
  "name": "hello_world_from_gorealis",
  "service": false,
  "max_task_failures": 1,
  "cron_collision_policy": "KILL_EXISTING",
  "enable_hooks": false,
  "cluster": "devcluster",
  "task": {
    "processes": [
      {
        "daemon": false,
        "name": "hello",
        "ephemeral": false,
        "max_failures": 1,
        "min_duration": 5,
        "cmdline": "echo hello world from gorealis; sleep 10",
        "final": false
      }
    ],
    "name": "hello",
    "finalization_wait": 30,
    "max_failures": 1,
    "max_concurrency": 0,
    "resources": {
      "gpu": 0,
      "disk": 134217728,
      "ram": 134217728,
      "cpu": 1.0
    },
    "constraints": [
      {
        "order": [
          "hello"
        ]
      }
    ]
  },
  "production": false,
  "role": "vagrant",
  "lifecycle": {
    "http": {
      "graceful_shutdown_endpoint": "/quitquitquit",
      "port": "health",
      "shutdown_endpoint": "/abortabortabort"
    }
  },
  "priority": 0
}

@@ -0,0 +1,62 @@
{
  "environment": "prod",
  "health_check_config": {
    "initial_interval_secs": 15.0,
    "health_checker": {
      "http": {
        "expected_response_code": 0,
        "endpoint": "/health",
        "expected_response": "ok"
      }
    },
    "interval_secs": 10.0,
    "timeout_secs": 1.0,
    "max_consecutive_failures": 0
  },
  "name": "hello_world_from_gorealis",
  "service": false,
  "max_task_failures": 1,
  "cron_collision_policy": "KILL_EXISTING",
  "enable_hooks": false,
  "cluster": "devcluster",
  "task": {
    "processes": [
      {
        "daemon": false,
        "name": "hello",
        "ephemeral": false,
        "max_failures": 1,
        "min_duration": 5,
        "cmdline": "\n while true; do\n echo hello world from gorealis\n sleep 10\n done\n ",
        "final": false
      }
    ],
    "name": "hello",
    "finalization_wait": 30,
    "max_failures": 1,
    "max_concurrency": 0,
    "resources": {
      "gpu": 0,
      "disk": 134217728,
      "ram": 134217728,
      "cpu": 1.0
    },
    "constraints": [
      {
        "order": [
          "hello"
        ]
      }
    ]
  },
  "production": false,
  "role": "vagrant",
  "lifecycle": {
    "http": {
      "graceful_shutdown_endpoint": "/quitquitquit",
      "port": "health",
      "shutdown_endpoint": "/abortabortabort"
    }
  },
  "priority": 0
}