gorealis v2 refactor (#5)

* Change the default timeout for start maintenance.

* Upgrade dependencies to gorealis v2 and Thrift 0.12.0.

* Refactor the codebase to use gorealis v2.
Renan DelValle 2018-12-27 11:31:51 -08:00 committed by GitHub
parent ad4dd9606e
commit 6ab5c9334d
1335 changed files with 137431 additions and 61530 deletions


@ -0,0 +1,5 @@
[users]
aurora = secret, admin
[roles]
admin = *
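
Reviewer note: a minimal sketch of the v2 client construction this PR migrates to, using the credentials from the security.ini above and the ZooKeeper endpoint from the docker-compose file later in this diff (treat both as test fixtures, not production values):

package main

import (
	"log"
	"time"

	realis "github.com/paypal/gorealis"
)

func main() {
	// v1 used realis.NewRealisClient(...) and realis.TimeoutMS(20000);
	// v2 uses realis.NewClient(...) with a time.Duration-based option.
	r, err := realis.NewClient(
		realis.ZKUrl("192.168.33.2:2181"),
		realis.BasicAuth("aurora", "secret"),
		realis.ThriftJSON(),
		realis.Timeout(20*time.Second),
	)
	if err != nil {
		log.Fatalln(err)
	}
	defer r.Close()
}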

vendor/github.com/paypal/gorealis/.gitattributes (generated, vendored)

@ -0,0 +1,3 @@
gen-go/ linguist-generated=true
vendor/ linguist-generated=true
Gopkg.lock linguist-generated=true


@ -3,7 +3,7 @@ sudo: required
language: go
go:
- "1.10.x"
- "1.11.x"
env:
global:


@ -2,43 +2,59 @@
[[projects]]
branch = "0.10.0-http-client-fix"
branch = "0.12.0"
digest = "1:0d6d7a897b900dd5924e36bfa05dea429d7049b4b0b87bd3f93cb5f0acc35a21"
name = "git.apache.org/thrift.git"
packages = ["lib/go/thrift"]
revision = "cb1afec972a85791e9b24a04b60fc9dbbfc3cda3"
source = "github.com/rdelval/thrift"
pruneopts = ""
revision = "5c1ecb67cde4d9aff7ed3188ab11566184b27bf0"
[[projects]]
digest = "1:56c130d885a4aacae1dd9c7b71cfe39912c7ebc1ff7d2b46083c8812996dc43b"
name = "github.com/davecgh/go-spew"
packages = ["spew"]
pruneopts = ""
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
digest = "1:df48fb76fb2a40edea0c9b3d960bc95e326660d82ff1114e1f88001f7a236b40"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = ""
revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
[[projects]]
digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411"
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
pruneopts = ""
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
digest = "1:78bea5e26e82826dacc5fd64a1013a6711b7075ec8072819b89e6ad76cb8196d"
name = "github.com/samuel/go-zookeeper"
packages = ["zk"]
pruneopts = ""
revision = "471cd4e61d7a78ece1791fa5faa0345dc8c7d5a5"
[[projects]]
digest = "1:2d0dc026c4aef5e2f3a0e06a4dabe268b840d8f63190cf6894e02134a03f52c5"
name = "github.com/stretchr/testify"
packages = ["assert"]
pruneopts = ""
revision = "b91bfb9ebec76498946beb6af7c0230c7cc7ba6c"
version = "v1.2.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "fdb631400420ca7299ad56b66175c9157ca073e3de52f666a84fc1d6fa893978"
input-imports = [
"git.apache.org/thrift.git/lib/go/thrift",
"github.com/pkg/errors",
"github.com/samuel/go-zookeeper/zk",
"github.com/stretchr/testify/assert",
]
solver-name = "gps-cdcl"
solver-version = 1


@ -1,7 +1,6 @@
[[constraint]]
name = "git.apache.org/thrift.git"
branch = "0.10.0-http-client-fix"
source = "github.com/rdelval/thrift"
branch = "0.12.0"
[[constraint]]
name = "github.com/pkg/errors"


@ -54,3 +54,14 @@ func LoadClusters(config string) (map[string]Cluster, error) {
return m, nil
}
func GetDefaultClusterFromZKUrl(zkURL string) *Cluster {
return &Cluster{
Name: "defaultCluster",
AuthMechanism: "UNAUTHENTICATED",
ZK: zkURL,
SchedZKPath: "/aurora/scheduler",
AgentRunDir: "latest",
AgentRoot: "/var/lib/mesos",
}
}
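
Reviewer note: a quick usage sketch of the new helper, assuming it lives in the realis package alongside LoadClusters above (the ZooKeeper URL is a placeholder):

package main

import (
	"fmt"

	realis "github.com/paypal/gorealis"
)

func main() {
	// Any host:port ZooKeeper URL works; the remaining fields get defaults.
	cluster := realis.GetDefaultClusterFromZKUrl("192.168.33.2:2181")
	fmt.Println(cluster.Name, cluster.SchedZKPath) // defaultCluster /aurora/scheduler
}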


@ -18,7 +18,7 @@ import (
"fmt"
"testing"
"github.com/paypal/gorealis"
realis "github.com/paypal/gorealis"
"github.com/stretchr/testify/assert"
)


@ -22,28 +22,24 @@ type Container interface {
Build() *aurora.Container
}
type MesosContainer struct {
container *aurora.MesosContainer
}
type DockerContainer struct {
container *aurora.DockerContainer
}
func NewDockerContainer() DockerContainer {
return DockerContainer{container: aurora.NewDockerContainer()}
func NewDockerContainer() *DockerContainer {
return &DockerContainer{container: aurora.NewDockerContainer()}
}
func (c DockerContainer) Build() *aurora.Container {
func (c *DockerContainer) Build() *aurora.Container {
return &aurora.Container{Docker: c.container}
}
func (c DockerContainer) Image(image string) DockerContainer {
func (c *DockerContainer) Image(image string) *DockerContainer {
c.container.Image = image
return c
}
func (c DockerContainer) AddParameter(name, value string) DockerContainer {
func (c *DockerContainer) AddParameter(name, value string) *DockerContainer {
c.container.Parameters = append(c.container.Parameters, &aurora.DockerParameter{
Name: name,
Value: value,
@ -51,15 +47,19 @@ func (c DockerContainer) AddParameter(name, value string) DockerContainer {
return c
}
func NewMesosContainer() MesosContainer {
return MesosContainer{container: aurora.NewMesosContainer()}
type MesosContainer struct {
container *aurora.MesosContainer
}
func (c MesosContainer) Build() *aurora.Container {
func NewMesosContainer() *MesosContainer {
return &MesosContainer{container: aurora.NewMesosContainer()}
}
func (c *MesosContainer) Build() *aurora.Container {
return &aurora.Container{Mesos: c.container}
}
func (c MesosContainer) DockerImage(name, tag string) MesosContainer {
func (c *MesosContainer) DockerImage(name, tag string) *MesosContainer {
if c.container.Image == nil {
c.container.Image = aurora.NewImage()
}
@ -68,7 +68,7 @@ func (c MesosContainer) DockerImage(name, tag string) MesosContainer {
return c
}
func (c MesosContainer) AppcImage(name, imageId string) MesosContainer {
func (c *MesosContainer) AppcImage(name, imageId string) *MesosContainer {
if c.container.Image == nil {
c.container.Image = aurora.NewImage()
}
@ -76,3 +76,12 @@ func (c MesosContainer) AppcImage(name, imageId string) MesosContainer {
c.container.Image.Appc = &aurora.AppcImage{Name: name, ImageId: imageId}
return c
}
func (c *MesosContainer) AddVolume(hostPath, containerPath string, mode aurora.Mode) *MesosContainer {
c.container.Volumes = append(c.container.Volumes, &aurora.Volume{
HostPath: hostPath,
ContainerPath: containerPath,
Mode: mode})
return c
}
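
Reviewer note: with the builders switched to pointer receivers, chained calls now mutate a single container instead of copying it at each step. A small sketch; aurora.Mode_RW is the thrift-generated enum constant and is assumed here:

package main

import (
	realis "github.com/paypal/gorealis"
	"github.com/paypal/gorealis/gen-go/apache/aurora"
)

func main() {
	// Each call returns the same *MesosContainer, so the chain is cheap.
	container := realis.NewMesosContainer().
		DockerImage("python", "2.7").
		AddVolume("/tmp/host", "/tmp/task", aurora.Mode_RW) // assumed enum name
	_ = container.Build() // yields an *aurora.Container with Mesos set
}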


@ -65,6 +65,12 @@ services:
CLUSTER_NAME: test-cluster
ZK_ENDPOINTS: "192.168.33.2:2181"
MESOS_MASTER: "zk://192.168.33.2:2181/mesos"
EXTRA_SCHEDULER_ARGS: >
-http_authentication_mechanism=BASIC
-shiro_realm_modules=INI_AUTHNZ
-shiro_ini_path=/etc/aurora/security.ini
volumes:
- ./.aurora-config:/etc/aurora
networks:
aurora_cluster:
ipv4_address: 192.168.33.7


@ -17,7 +17,7 @@ package realis
// Using a pattern described by Dave Cheney to differentiate errors
// https://dave.cheney.net/2016/04/27/dont-just-check-errors-handle-them-gracefully
// Timeout errors are returned when a function is unable to continue executing due
// Timedout errors are returned when a function is unable to continue executing due
// to a time constraint or meeting a set number of retries.
type timeout interface {
Timedout() bool
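
Reviewer note: a sketch of how callers consume this behavior interface, per the Cheney pattern linked above: assert on the behavior, not on a concrete error type.

package main

// timedOut reports whether err is a retry/timeout exhaustion error from
// this package, without importing its concrete error types.
func timedOut(err error) bool {
	type timeouter interface {
		Timedout() bool
	}
	t, ok := err.(timeouter)
	return ok && t.Timedout()
}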


@ -22,19 +22,18 @@ import (
"strings"
"time"
"github.com/paypal/gorealis"
realis "github.com/paypal/gorealis"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/paypal/gorealis/response"
)
var cmd, executor, url, clustersConfig, clusterName, updateId, username, password, zkUrl, hostList, role string
var caCertsPath string
var clientKey, clientCert string
var CONNECTION_TIMEOUT = 20000
var ConnectionTimeout = 20 * time.Second
func init() {
flag.StringVar(&cmd, "cmd", "", "Job request type to send to Aurora Scheduler")
flag.StringVar(&cmd, "cmd", "", "Aurora Job request type to send to Aurora Scheduler")
flag.StringVar(&executor, "executor", "thermos", "Executor to use")
flag.StringVar(&url, "url", "", "URL at which the Aurora Scheduler exists as [url]:[port]")
flag.StringVar(&clustersConfig, "clusters", "", "Location of the clusters.json file used by aurora.")
@ -74,15 +73,14 @@ func init() {
func main() {
var job realis.Job
var job *realis.AuroraJob
var err error
var monitor *realis.Monitor
var r realis.Realis
var r *realis.Client
clientOptions := []realis.ClientOption{
realis.BasicAuth(username, password),
realis.ThriftJSON(),
realis.TimeoutMS(CONNECTION_TIMEOUT),
realis.Timeout(ConnectionTimeout),
realis.BackOff(realis.Backoff{
Steps: 2,
Duration: 10 * time.Second,
@ -92,7 +90,7 @@ func main() {
realis.Debug(),
}
//check if zkUrl is available.
// Check if zkUrl is available.
if zkUrl != "" {
fmt.Println("zkUrl: ", zkUrl)
clientOptions = append(clientOptions, realis.ZKUrl(zkUrl))
@ -101,18 +99,17 @@ func main() {
}
if caCertsPath != "" {
clientOptions = append(clientOptions, realis.Certspath(caCertsPath))
clientOptions = append(clientOptions, realis.CertsPath(caCertsPath))
}
if clientKey != "" && clientCert != "" {
clientOptions = append(clientOptions, realis.ClientCerts(clientKey, clientCert))
}
r, err = realis.NewRealisClient(clientOptions...)
r, err = realis.NewClient(clientOptions...)
if err != nil {
log.Fatalln(err)
}
monitor = &realis.Monitor{r}
defer r.Close()
switch executor {
@ -167,14 +164,13 @@ func main() {
switch cmd {
case "create":
fmt.Println("Creating job")
resp, err := r.CreateJob(job)
err := r.CreateJob(job)
if err != nil {
log.Fatalln(err)
}
fmt.Println(resp.String())
if ok, mErr := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 5, 50); !ok || mErr != nil {
_, err := r.KillJob(job.JobKey())
if ok, mErr := r.InstancesMonitor(job.JobKey(), job.GetInstanceCount(), 5*time.Second, 50*time.Second); !ok || mErr != nil {
err := r.KillJob(job.JobKey())
if err != nil {
log.Fatalln(err)
}
@ -184,18 +180,16 @@ func main() {
case "createService":
// Create a service with three instances using the update API instead of the createJob API
fmt.Println("Creating service")
settings := realis.NewUpdateSettings()
job.InstanceCount(3)
resp, result, err := r.CreateService(job, settings)
settings := realis.JobUpdateFromConfig(job.TaskConfig()).InstanceCount(3)
result, err := r.CreateService(settings)
if err != nil {
log.Println("error: ", err)
log.Fatal("response: ", resp.String())
log.Fatal("error: ", err)
}
fmt.Println(result.String())
if ok, mErr := monitor.JobUpdate(*result.GetKey(), 5, 180); !ok || mErr != nil {
_, err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out")
_, err = r.KillJob(job.JobKey())
if ok, mErr := r.JobUpdateMonitor(*result.GetKey(), 5*time.Second, 180*time.Second); !ok || mErr != nil {
err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out")
err = r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
@ -206,14 +200,13 @@ func main() {
fmt.Println("Creating a docker based job")
container := realis.NewDockerContainer().Image("python:2.7").AddParameter("network", "host")
job.Container(container)
resp, err := r.CreateJob(job)
err := r.CreateJob(job)
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
_, err := r.KillJob(job.JobKey())
if ok, err := r.InstancesMonitor(job.JobKey(), job.GetInstanceCount(), 10*time.Second, 300*time.Second); !ok || err != nil {
err := r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
@ -223,14 +216,13 @@ func main() {
fmt.Println("Creating a docker based job")
container := realis.NewMesosContainer().DockerImage("python", "2.7")
job.Container(container)
resp, err := r.CreateJob(job)
err := r.CreateJob(job)
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
if ok, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 10, 300); !ok || err != nil {
_, err := r.KillJob(job.JobKey())
if ok, err := r.InstancesMonitor(job.JobKey(), job.GetInstanceCount(), 10*time.Second, 300*time.Second); !ok || err != nil {
err := r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
@ -241,50 +233,44 @@ func main() {
// Cron config
job.CronSchedule("* * * * *")
job.IsService(false)
resp, err := r.ScheduleCronJob(job)
err := r.ScheduleCronJob(job)
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "startCron":
fmt.Println("Starting a Cron job")
resp, err := r.StartCronJob(job.JobKey())
err := r.StartCronJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "descheduleCron":
fmt.Println("Descheduling a Cron job")
resp, err := r.DescheduleCronJob(job.JobKey())
err := r.DescheduleCronJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "kill":
fmt.Println("Killing job")
resp, err := r.KillJob(job.JobKey())
err := r.KillJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
if ok, err := monitor.Instances(job.JobKey(), 0, 5, 50); !ok || err != nil {
if ok, err := r.InstancesMonitor(job.JobKey(), 0, 5*time.Second, 50*time.Second); !ok || err != nil {
log.Fatal("Unable to kill all instances of job")
}
fmt.Println(resp.String())
case "restart":
fmt.Println("Restarting job")
resp, err := r.RestartJob(job.JobKey())
err := r.RestartJob(job.JobKey())
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "liveCount":
fmt.Println("Getting instance count")
@ -303,114 +289,110 @@ func main() {
log.Fatal(err)
}
fmt.Println("Number of live instances: ", len(live))
fmt.Println("Active instances: ", live)
case "flexUp":
fmt.Println("Flexing up job")
numOfInstances := int32(4)
numOfInstances := 4
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
log.Fatal(err)
}
currInstances := int32(len(live))
currInstances := len(live)
fmt.Println("Current num of instances: ", currInstances)
var instId int32
for k := range live {
instId = k
}
resp, err := r.AddInstances(aurora.InstanceKey{
JobKey: job.JobKey(),
InstanceId: instId,
key := job.JobKey()
err = r.AddInstances(aurora.InstanceKey{
JobKey: &key,
InstanceId: live[0],
},
numOfInstances)
int32(numOfInstances))
if err != nil {
log.Fatal(err)
}
if ok, err := monitor.Instances(job.JobKey(), currInstances+numOfInstances, 5, 50); !ok || err != nil {
if ok, err := r.InstancesMonitor(job.JobKey(), int32(currInstances+numOfInstances), 5*time.Second, 50*time.Second); !ok || err != nil {
fmt.Println("Flexing up failed")
}
fmt.Println(resp.String())
case "flexDown":
fmt.Println("Flexing down job")
numOfInstances := int32(2)
numOfInstances := 2
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
log.Fatal(err)
}
currInstances := int32(len(live))
currInstances := len(live)
fmt.Println("Current num of instances: ", currInstances)
resp, err := r.RemoveInstances(job.JobKey(), numOfInstances)
err = r.RemoveInstances(job.JobKey(), numOfInstances)
if err != nil {
log.Fatal(err)
}
if ok, err := monitor.Instances(job.JobKey(), currInstances-numOfInstances, 5, 100); !ok || err != nil {
if ok, err := r.InstancesMonitor(job.JobKey(), int32(currInstances-numOfInstances), 5*time.Second, 100*time.Second); !ok || err != nil {
fmt.Println("flexDown failed")
}
fmt.Println(resp.String())
case "update":
fmt.Println("Updating a job with with more RAM and to 5 instances")
live, err := r.GetInstanceIds(job.JobKey(), aurora.ACTIVE_STATES)
if err != nil {
log.Fatal(err)
}
var instId int32
for k := range live {
instId = k
}
key := job.JobKey()
taskConfig, err := r.FetchTaskConfig(aurora.InstanceKey{
JobKey: job.JobKey(),
InstanceId: instId,
JobKey: &key,
InstanceId: live[0],
})
if err != nil {
log.Fatal(err)
}
updateJob := realis.NewDefaultUpdateJob(taskConfig)
updateJob.InstanceCount(5).RAM(128)
updateJob := realis.JobUpdateFromConfig(taskConfig).InstanceCount(5).RAM(128)
resp, err := r.StartJobUpdate(updateJob, "")
result, err := r.StartJobUpdate(updateJob, "")
if err != nil {
log.Fatal(err)
}
jobUpdateKey := response.JobUpdateKey(resp)
monitor.JobUpdate(*jobUpdateKey, 5, 500)
jobUpdateKey := result.GetKey()
_, err = r.JobUpdateMonitor(*jobUpdateKey, 5*time.Second, 6*time.Minute)
if err != nil {
log.Fatal(err)
}
case "pauseJobUpdate":
resp, err := r.PauseJobUpdate(&aurora.JobUpdateKey{
Job: job.JobKey(),
key := job.JobKey()
err := r.PauseJobUpdate(&aurora.JobUpdateKey{
Job: &key,
ID: updateId,
}, "")
if err != nil {
log.Fatal(err)
}
fmt.Println("PauseJobUpdate response: ", resp.String())
case "resumeJobUpdate":
resp, err := r.ResumeJobUpdate(&aurora.JobUpdateKey{
Job: job.JobKey(),
key := job.JobKey()
err := r.ResumeJobUpdate(&aurora.JobUpdateKey{
Job: &key,
ID: updateId,
}, "")
if err != nil {
log.Fatal(err)
}
fmt.Println("ResumeJobUpdate response: ", resp.String())
case "pulseJobUpdate":
key := job.JobKey()
resp, err := r.PulseJobUpdate(&aurora.JobUpdateKey{
Job: job.JobKey(),
Job: &key,
ID: updateId,
})
if err != nil {
@ -420,9 +402,10 @@ func main() {
fmt.Println("PulseJobUpdate response: ", resp.String())
case "updateDetails":
resp, err := r.JobUpdateDetails(aurora.JobUpdateQuery{
key := job.JobKey()
result, err := r.JobUpdateDetails(aurora.JobUpdateQuery{
Key: &aurora.JobUpdateKey{
Job: job.JobKey(),
Job: &key,
ID: updateId,
},
Limit: 1,
@ -432,12 +415,13 @@ func main() {
log.Fatal(err)
}
fmt.Println(response.JobUpdateDetails(resp))
fmt.Println(result)
case "abortUpdate":
fmt.Println("Abort update")
resp, err := r.AbortJobUpdate(aurora.JobUpdateKey{
Job: job.JobKey(),
key := job.JobKey()
err := r.AbortJobUpdate(aurora.JobUpdateKey{
Job: &key,
ID: updateId,
},
"")
@ -445,12 +429,12 @@ func main() {
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "rollbackUpdate":
fmt.Println("Abort update")
resp, err := r.RollbackJobUpdate(aurora.JobUpdateKey{
Job: job.JobKey(),
key := job.JobKey()
err := r.RollbackJobUpdate(aurora.JobUpdateKey{
Job: &key,
ID: updateId,
},
"")
@ -458,7 +442,6 @@ func main() {
if err != nil {
log.Fatal(err)
}
fmt.Println(resp.String())
case "taskConfig":
fmt.Println("Getting job info")
@ -467,14 +450,10 @@ func main() {
log.Fatal(err)
}
var instId int32
for k := range live {
instId = k
break
}
key := job.JobKey()
config, err := r.FetchTaskConfig(aurora.InstanceKey{
JobKey: job.JobKey(),
InstanceId: instId,
JobKey: &key,
InstanceId: live[0],
})
if err != nil {
@ -485,9 +464,10 @@ func main() {
case "updatesummary":
fmt.Println("Getting job update summary")
key := job.JobKey()
jobquery := &aurora.JobUpdateQuery{
Role: &job.JobKey().Role,
JobKey: job.JobKey(),
Role: &key.Role,
JobKey: &key,
}
updatesummary, err := r.GetJobUpdateSummaries(jobquery)
if err != nil {
@ -498,10 +478,11 @@ func main() {
case "taskStatus":
fmt.Println("Getting task status")
key := job.JobKey()
taskQ := &aurora.TaskQuery{
Role: &job.JobKey().Role,
Environment: &job.JobKey().Environment,
JobName: &job.JobKey().Name,
Role: &key.Role,
Environment: &key.Environment,
JobName: &key.Name,
}
tasks, err := r.GetTaskStatus(taskQ)
if err != nil {
@ -513,10 +494,11 @@ func main() {
case "tasksWithoutConfig":
fmt.Println("Getting task status")
key := job.JobKey()
taskQ := &aurora.TaskQuery{
Role: &job.JobKey().Role,
Environment: &job.JobKey().Environment,
JobName: &job.JobKey().Name,
Role: &key.Role,
Environment: &key.Environment,
JobName: &key.Name,
}
tasks, err := r.GetTasksWithoutConfigs(taskQ)
if err != nil {
@ -532,17 +514,17 @@ func main() {
log.Fatal("No hosts specified to drain")
}
hosts := strings.Split(hostList, ",")
_, result, err := r.DrainHosts(hosts...)
_, err := r.DrainHosts(hosts...)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
// Monitor change to DRAINING and DRAINED mode
hostResult, err := monitor.HostMaintenance(
hostResult, err := r.HostMaintenanceMonitor(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
5,
10)
5*time.Second,
10*time.Second)
if err != nil {
for host, ok := range hostResult {
if !ok {
@ -552,8 +534,6 @@ func main() {
log.Fatalf("error: %+v\n", err.Error())
}
fmt.Print(result.String())
case "SLADrainHosts":
fmt.Println("Setting hosts to DRAINING using SLA aware draining")
if hostList == "" {
@ -563,17 +543,17 @@ func main() {
policy := aurora.SlaPolicy{PercentageSlaPolicy: &aurora.PercentageSlaPolicy{Percentage: 50.0}}
result, err := r.SLADrainHosts(&policy, 30, hosts...)
_, err := r.SLADrainHosts(&policy, 30, hosts...)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
// Monitor change to DRAINING and DRAINED mode
hostResult, err := monitor.HostMaintenance(
hostResult, err := r.HostMaintenanceMonitor(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
5,
10)
5*time.Second,
10*time.Second)
if err != nil {
for host, ok := range hostResult {
if !ok {
@ -583,25 +563,23 @@ func main() {
log.Fatalf("error: %+v\n", err.Error())
}
fmt.Print(result.String())
case "endMaintenance":
fmt.Println("Setting hosts to ACTIVE")
if hostList == "" {
log.Fatal("No hosts specified to drain")
}
hosts := strings.Split(hostList, ",")
_, result, err := r.EndMaintenance(hosts...)
_, err := r.EndMaintenance(hosts...)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
// Monitor change back to ACTIVE (MaintenanceMode_NONE)
hostResult, err := monitor.HostMaintenance(
hostResult, err := r.HostMaintenanceMonitor(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
5,
10)
5*time.Second,
10*time.Second)
if err != nil {
for host, ok := range hostResult {
if !ok {
@ -611,14 +589,13 @@ func main() {
log.Fatalf("error: %+v\n", err.Error())
}
fmt.Print(result.String())
case "getPendingReasons":
fmt.Println("Getting pending reasons")
key := job.JobKey()
taskQ := &aurora.TaskQuery{
Role: &job.JobKey().Role,
Environment: &job.JobKey().Environment,
JobName: &job.JobKey().Name,
Role: &key.Role,
Environment: &key.Environment,
JobName: &key.Name,
}
reasons, err := r.GetPendingReason(taskQ)
if err != nil {
@ -630,7 +607,7 @@ func main() {
case "getJobs":
fmt.Println("GetJobs...role: ", role)
_, result, err := r.GetJobs(role)
result, err := r.GetJobs(role)
if err != nil {
log.Fatalf("error: %+v\n", err.Error())
}
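
Reviewer note: the standalone realis.Monitor is gone in v2; monitors are now methods on *realis.Client and take time.Duration arguments instead of bare ints. A condensed sketch of the create/monitor/kill flow used throughout this file:

package main

import (
	"log"
	"time"

	realis "github.com/paypal/gorealis"
)

// waitOrKill mirrors the v2 flow above: monitor the instances, kill on failure.
// (v1: monitor := &realis.Monitor{r}; monitor.Instances(key, count, 5, 50))
func waitOrKill(r *realis.Client, job *realis.AuroraJob) {
	ok, err := r.InstancesMonitor(job.JobKey(), job.GetInstanceCount(), 5*time.Second, 50*time.Second)
	if !ok || err != nil {
		if err := r.KillJob(job.JobKey()); err != nil {
			log.Fatalln(err)
		}
	}
}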


@ -23,7 +23,7 @@ import (
"os"
"time"
"github.com/paypal/gorealis"
realis "github.com/paypal/gorealis"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/pkg/errors"
)
@ -125,7 +125,7 @@ func init() {
}
}
func CreateRealisClient(config *Config) (realis.Realis, error) {
func CreateRealisClient(config *Config) (*realis.Client, error) {
var transportOption realis.ClientOption
// Configuring transport protocol. If no transport is provided, then using JSON as the
// default transport protocol.
@ -157,7 +157,7 @@ func CreateRealisClient(config *Config) (realis.Realis, error) {
clientOptions = append(clientOptions, realis.Debug())
}
return realis.NewRealisClient(clientOptions...)
return realis.NewClient(clientOptions...)
}
func main() {
@ -165,7 +165,6 @@ func main() {
fmt.Println(clientCreationErr)
os.Exit(1)
} else {
monitor := &realis.Monitor{Client: r}
defer r.Close()
uris := job.URIs
labels := job.Labels
@ -205,20 +204,18 @@ func main() {
}
fmt.Println("Creating Job...")
if resp, jobCreationErr := r.CreateJob(auroraJob); jobCreationErr != nil {
if jobCreationErr := r.CreateJob(auroraJob); jobCreationErr != nil {
fmt.Println("Error creating Aurora job: ", jobCreationErr)
os.Exit(1)
} else {
if resp.ResponseCode == aurora.ResponseCode_OK {
if ok, monitorErr := monitor.Instances(auroraJob.JobKey(), auroraJob.GetInstanceCount(), 5, 50); !ok || monitorErr != nil {
if _, jobErr := r.KillJob(auroraJob.JobKey()); jobErr !=
nil {
fmt.Println(jobErr)
os.Exit(1)
} else {
fmt.Println("ok: ", ok)
fmt.Println("jobErr: ", jobErr)
}
if ok, monitorErr := r.InstancesMonitor(auroraJob.JobKey(), auroraJob.GetInstanceCount(), 5, 50); !ok || monitorErr != nil {
if jobErr := r.KillJob(auroraJob.JobKey()); jobErr !=
nil {
fmt.Println(jobErr)
os.Exit(1)
} else {
fmt.Println("ok: ", ok)
fmt.Println("jobErr: ", jobErr)
}
}
}


@ -0,0 +1,7 @@
// Autogenerated by Thrift Compiler (1.0.0-dev)
// Autogenerated by Thrift Compiler (0.12.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package aurora
var GoUnusedProtection__ int


@ -0,0 +1,58 @@
// Autogenerated by Thrift Compiler (0.12.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package aurora
import (
"bytes"
"context"
"fmt"
"reflect"
"git.apache.org/thrift.git/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = context.Background
var _ = reflect.DeepEqual
var _ = bytes.Equal
const AURORA_EXECUTOR_NAME = "AuroraExecutor"
var ACTIVE_STATES []ScheduleStatus
var SLAVE_ASSIGNED_STATES []ScheduleStatus
var LIVE_STATES []ScheduleStatus
var TERMINAL_STATES []ScheduleStatus
const GOOD_IDENTIFIER_PATTERN = "^[\\w\\-\\.]+$"
const GOOD_IDENTIFIER_PATTERN_JVM = "^[\\w\\-\\.]+$"
const GOOD_IDENTIFIER_PATTERN_PYTHON = "^[\\w\\-\\.]+$"
var ACTIVE_JOB_UPDATE_STATES []JobUpdateStatus
var AWAITNG_PULSE_JOB_UPDATE_STATES []JobUpdateStatus
const BYPASS_LEADER_REDIRECT_HEADER_NAME = "Bypass-Leader-Redirect"
const TASK_FILESYSTEM_MOUNT_POINT = "taskfs"
func init() {
ACTIVE_STATES = []ScheduleStatus{
9, 17, 6, 0, 13, 12, 2, 1, 18, 16}
SLAVE_ASSIGNED_STATES = []ScheduleStatus{
9, 17, 6, 13, 12, 2, 18, 1}
LIVE_STATES = []ScheduleStatus{
6, 13, 12, 17, 18, 2}
TERMINAL_STATES = []ScheduleStatus{
4, 3, 5, 7}
ACTIVE_JOB_UPDATE_STATES = []JobUpdateStatus{
0, 1, 2, 3, 9, 10}
AWAITNG_PULSE_JOB_UPDATE_STATES = []JobUpdateStatus{
9, 10}
}

File diff suppressed because it is too large.


@ -1,19 +1,21 @@
// Autogenerated by Thrift Compiler (0.9.3)
// Autogenerated by Thrift Compiler (0.12.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package main
import (
"apache/aurora"
"context"
"flag"
"fmt"
"git.apache.org/thrift.git/lib/go/thrift"
"math"
"net"
"net/url"
"os"
"strconv"
"strings"
"git.apache.org/thrift.git/lib/go/thrift"
)
func Usage() {
@ -69,6 +71,22 @@ func Usage() {
os.Exit(0)
}
type httpHeaders map[string]string
func (h httpHeaders) String() string {
var m map[string]string = h
return fmt.Sprintf("%s", m)
}
func (h httpHeaders) Set(value string) error {
parts := strings.Split(value, ": ")
if len(parts) != 2 {
return fmt.Errorf("header should be of format 'Key: Value'")
}
h[parts[0]] = parts[1]
return nil
}
func main() {
flag.Usage = Usage
var host string
@ -77,7 +95,8 @@ func main() {
var urlString string
var framed bool
var useHttp bool
var parsedUrl url.URL
headers := make(httpHeaders)
var parsedUrl *url.URL
var trans thrift.TTransport
_ = strconv.Atoi
_ = math.Abs
@ -88,16 +107,18 @@ func main() {
flag.StringVar(&urlString, "u", "", "Specify the url")
flag.BoolVar(&framed, "framed", false, "Use framed transport")
flag.BoolVar(&useHttp, "http", false, "Use http")
flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")")
flag.Parse()
if len(urlString) > 0 {
parsedUrl, err := url.Parse(urlString)
var err error
parsedUrl, err = url.Parse(urlString)
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
host = parsedUrl.Host
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http"
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https"
} else if useHttp {
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
if err != nil {
@ -110,6 +131,12 @@ func main() {
var err error
if useHttp {
trans, err = thrift.NewTHttpClient(parsedUrl.String())
if len(headers) > 0 {
httptrans := trans.(*thrift.THttpClient)
for key, value := range headers {
httptrans.SetHeader(key, value)
}
}
} else {
portStr := fmt.Sprint(port)
if strings.Contains(host, ":") {
@ -152,7 +179,9 @@ func main() {
Usage()
os.Exit(1)
}
client := aurora.NewAuroraAdminClientFactory(trans, protocolFactory)
iprot := protocolFactory.GetProtocol(trans)
oprot := protocolFactory.GetProtocol(trans)
client := aurora.NewAuroraAdminClient(thrift.NewTStandardClient(iprot, oprot))
if err := trans.Open(); err != nil {
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
os.Exit(1)
@ -174,7 +203,7 @@ func main() {
Usage()
return
}
factory357 := thrift.NewTSimpleJSONProtocolFactory()
factory357 := thrift.NewTJSONProtocolFactory()
jsProt358 := factory357.GetProtocol(mbTrans355)
argvalue1 := aurora.NewResourceAggregate()
err359 := argvalue1.Read(jsProt358)
@ -183,7 +212,7 @@ func main() {
return
}
value1 := argvalue1
fmt.Print(client.SetQuota(value0, value1))
fmt.Print(client.SetQuota(context.Background(), value0, value1))
fmt.Print("\n")
break
case "forceTaskState":
@ -200,7 +229,7 @@ func main() {
}
argvalue1 := aurora.ScheduleStatus(tmp1)
value1 := argvalue1
fmt.Print(client.ForceTaskState(value0, value1))
fmt.Print(client.ForceTaskState(context.Background(), value0, value1))
fmt.Print("\n")
break
case "performBackup":
@ -208,7 +237,7 @@ func main() {
fmt.Fprintln(os.Stderr, "PerformBackup requires 0 args")
flag.Usage()
}
fmt.Print(client.PerformBackup())
fmt.Print(client.PerformBackup(context.Background()))
fmt.Print("\n")
break
case "listBackups":
@ -216,7 +245,7 @@ func main() {
fmt.Fprintln(os.Stderr, "ListBackups requires 0 args")
flag.Usage()
}
fmt.Print(client.ListBackups())
fmt.Print(client.ListBackups(context.Background()))
fmt.Print("\n")
break
case "stageRecovery":
@ -226,7 +255,7 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.StageRecovery(value0))
fmt.Print(client.StageRecovery(context.Background(), value0))
fmt.Print("\n")
break
case "queryRecovery":
@ -242,7 +271,7 @@ func main() {
Usage()
return
}
factory365 := thrift.NewTSimpleJSONProtocolFactory()
factory365 := thrift.NewTJSONProtocolFactory()
jsProt366 := factory365.GetProtocol(mbTrans363)
argvalue0 := aurora.NewTaskQuery()
err367 := argvalue0.Read(jsProt366)
@ -251,7 +280,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.QueryRecovery(value0))
fmt.Print(client.QueryRecovery(context.Background(), value0))
fmt.Print("\n")
break
case "deleteRecoveryTasks":
@ -267,7 +296,7 @@ func main() {
Usage()
return
}
factory371 := thrift.NewTSimpleJSONProtocolFactory()
factory371 := thrift.NewTJSONProtocolFactory()
jsProt372 := factory371.GetProtocol(mbTrans369)
argvalue0 := aurora.NewTaskQuery()
err373 := argvalue0.Read(jsProt372)
@ -276,7 +305,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.DeleteRecoveryTasks(value0))
fmt.Print(client.DeleteRecoveryTasks(context.Background(), value0))
fmt.Print("\n")
break
case "commitRecovery":
@ -284,7 +313,7 @@ func main() {
fmt.Fprintln(os.Stderr, "CommitRecovery requires 0 args")
flag.Usage()
}
fmt.Print(client.CommitRecovery())
fmt.Print(client.CommitRecovery(context.Background()))
fmt.Print("\n")
break
case "unloadRecovery":
@ -292,7 +321,7 @@ func main() {
fmt.Fprintln(os.Stderr, "UnloadRecovery requires 0 args")
flag.Usage()
}
fmt.Print(client.UnloadRecovery())
fmt.Print(client.UnloadRecovery(context.Background()))
fmt.Print("\n")
break
case "startMaintenance":
@ -308,7 +337,7 @@ func main() {
Usage()
return
}
factory377 := thrift.NewTSimpleJSONProtocolFactory()
factory377 := thrift.NewTJSONProtocolFactory()
jsProt378 := factory377.GetProtocol(mbTrans375)
argvalue0 := aurora.NewHosts()
err379 := argvalue0.Read(jsProt378)
@ -317,7 +346,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.StartMaintenance(value0))
fmt.Print(client.StartMaintenance(context.Background(), value0))
fmt.Print("\n")
break
case "drainHosts":
@ -333,7 +362,7 @@ func main() {
Usage()
return
}
factory383 := thrift.NewTSimpleJSONProtocolFactory()
factory383 := thrift.NewTJSONProtocolFactory()
jsProt384 := factory383.GetProtocol(mbTrans381)
argvalue0 := aurora.NewHosts()
err385 := argvalue0.Read(jsProt384)
@ -342,7 +371,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.DrainHosts(value0))
fmt.Print(client.DrainHosts(context.Background(), value0))
fmt.Print("\n")
break
case "maintenanceStatus":
@ -358,7 +387,7 @@ func main() {
Usage()
return
}
factory389 := thrift.NewTSimpleJSONProtocolFactory()
factory389 := thrift.NewTJSONProtocolFactory()
jsProt390 := factory389.GetProtocol(mbTrans387)
argvalue0 := aurora.NewHosts()
err391 := argvalue0.Read(jsProt390)
@ -367,7 +396,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.MaintenanceStatus(value0))
fmt.Print(client.MaintenanceStatus(context.Background(), value0))
fmt.Print("\n")
break
case "endMaintenance":
@ -383,7 +412,7 @@ func main() {
Usage()
return
}
factory395 := thrift.NewTSimpleJSONProtocolFactory()
factory395 := thrift.NewTJSONProtocolFactory()
jsProt396 := factory395.GetProtocol(mbTrans393)
argvalue0 := aurora.NewHosts()
err397 := argvalue0.Read(jsProt396)
@ -392,7 +421,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.EndMaintenance(value0))
fmt.Print(client.EndMaintenance(context.Background(), value0))
fmt.Print("\n")
break
case "slaDrainHosts":
@ -408,7 +437,7 @@ func main() {
Usage()
return
}
factory401 := thrift.NewTSimpleJSONProtocolFactory()
factory401 := thrift.NewTJSONProtocolFactory()
jsProt402 := factory401.GetProtocol(mbTrans399)
argvalue0 := aurora.NewHosts()
err403 := argvalue0.Read(jsProt402)
@ -425,7 +454,7 @@ func main() {
Usage()
return
}
factory407 := thrift.NewTSimpleJSONProtocolFactory()
factory407 := thrift.NewTJSONProtocolFactory()
jsProt408 := factory407.GetProtocol(mbTrans405)
argvalue1 := aurora.NewSlaPolicy()
err409 := argvalue1.Read(jsProt408)
@ -440,7 +469,7 @@ func main() {
return
}
value2 := argvalue2
fmt.Print(client.SlaDrainHosts(value0, value1, value2))
fmt.Print(client.SlaDrainHosts(context.Background(), value0, value1, value2))
fmt.Print("\n")
break
case "snapshot":
@ -448,7 +477,7 @@ func main() {
fmt.Fprintln(os.Stderr, "Snapshot requires 0 args")
flag.Usage()
}
fmt.Print(client.Snapshot())
fmt.Print(client.Snapshot(context.Background()))
fmt.Print("\n")
break
case "triggerExplicitTaskReconciliation":
@ -464,7 +493,7 @@ func main() {
Usage()
return
}
factory414 := thrift.NewTSimpleJSONProtocolFactory()
factory414 := thrift.NewTJSONProtocolFactory()
jsProt415 := factory414.GetProtocol(mbTrans412)
argvalue0 := aurora.NewExplicitReconciliationSettings()
err416 := argvalue0.Read(jsProt415)
@ -473,7 +502,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.TriggerExplicitTaskReconciliation(value0))
fmt.Print(client.TriggerExplicitTaskReconciliation(context.Background(), value0))
fmt.Print("\n")
break
case "triggerImplicitTaskReconciliation":
@ -481,7 +510,7 @@ func main() {
fmt.Fprintln(os.Stderr, "TriggerImplicitTaskReconciliation requires 0 args")
flag.Usage()
}
fmt.Print(client.TriggerImplicitTaskReconciliation())
fmt.Print(client.TriggerImplicitTaskReconciliation(context.Background()))
fmt.Print("\n")
break
case "pruneTasks":
@ -497,7 +526,7 @@ func main() {
Usage()
return
}
factory420 := thrift.NewTSimpleJSONProtocolFactory()
factory420 := thrift.NewTJSONProtocolFactory()
jsProt421 := factory420.GetProtocol(mbTrans418)
argvalue0 := aurora.NewTaskQuery()
err422 := argvalue0.Read(jsProt421)
@ -506,7 +535,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.PruneTasks(value0))
fmt.Print(client.PruneTasks(context.Background(), value0))
fmt.Print("\n")
break
case "createJob":
@ -522,7 +551,7 @@ func main() {
Usage()
return
}
factory426 := thrift.NewTSimpleJSONProtocolFactory()
factory426 := thrift.NewTJSONProtocolFactory()
jsProt427 := factory426.GetProtocol(mbTrans424)
argvalue0 := aurora.NewJobConfiguration()
err428 := argvalue0.Read(jsProt427)
@ -531,7 +560,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.CreateJob(value0))
fmt.Print(client.CreateJob(context.Background(), value0))
fmt.Print("\n")
break
case "scheduleCronJob":
@ -547,7 +576,7 @@ func main() {
Usage()
return
}
factory432 := thrift.NewTSimpleJSONProtocolFactory()
factory432 := thrift.NewTJSONProtocolFactory()
jsProt433 := factory432.GetProtocol(mbTrans430)
argvalue0 := aurora.NewJobConfiguration()
err434 := argvalue0.Read(jsProt433)
@ -556,7 +585,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.ScheduleCronJob(value0))
fmt.Print(client.ScheduleCronJob(context.Background(), value0))
fmt.Print("\n")
break
case "descheduleCronJob":
@ -572,7 +601,7 @@ func main() {
Usage()
return
}
factory438 := thrift.NewTSimpleJSONProtocolFactory()
factory438 := thrift.NewTJSONProtocolFactory()
jsProt439 := factory438.GetProtocol(mbTrans436)
argvalue0 := aurora.NewJobKey()
err440 := argvalue0.Read(jsProt439)
@ -581,7 +610,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.DescheduleCronJob(value0))
fmt.Print(client.DescheduleCronJob(context.Background(), value0))
fmt.Print("\n")
break
case "startCronJob":
@ -597,7 +626,7 @@ func main() {
Usage()
return
}
factory444 := thrift.NewTSimpleJSONProtocolFactory()
factory444 := thrift.NewTJSONProtocolFactory()
jsProt445 := factory444.GetProtocol(mbTrans442)
argvalue0 := aurora.NewJobKey()
err446 := argvalue0.Read(jsProt445)
@ -606,7 +635,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.StartCronJob(value0))
fmt.Print(client.StartCronJob(context.Background(), value0))
fmt.Print("\n")
break
case "restartShards":
@ -622,7 +651,7 @@ func main() {
Usage()
return
}
factory450 := thrift.NewTSimpleJSONProtocolFactory()
factory450 := thrift.NewTJSONProtocolFactory()
jsProt451 := factory450.GetProtocol(mbTrans448)
argvalue0 := aurora.NewJobKey()
err452 := argvalue0.Read(jsProt451)
@ -639,7 +668,7 @@ func main() {
Usage()
return
}
factory456 := thrift.NewTSimpleJSONProtocolFactory()
factory456 := thrift.NewTJSONProtocolFactory()
jsProt457 := factory456.GetProtocol(mbTrans454)
containerStruct1 := aurora.NewAuroraAdminRestartShardsArgs()
err458 := containerStruct1.ReadField2(jsProt457)
@ -649,7 +678,7 @@ func main() {
}
argvalue1 := containerStruct1.ShardIds
value1 := argvalue1
fmt.Print(client.RestartShards(value0, value1))
fmt.Print(client.RestartShards(context.Background(), value0, value1))
fmt.Print("\n")
break
case "killTasks":
@ -665,7 +694,7 @@ func main() {
Usage()
return
}
factory462 := thrift.NewTSimpleJSONProtocolFactory()
factory462 := thrift.NewTJSONProtocolFactory()
jsProt463 := factory462.GetProtocol(mbTrans460)
argvalue0 := aurora.NewJobKey()
err464 := argvalue0.Read(jsProt463)
@ -682,7 +711,7 @@ func main() {
Usage()
return
}
factory468 := thrift.NewTSimpleJSONProtocolFactory()
factory468 := thrift.NewTJSONProtocolFactory()
jsProt469 := factory468.GetProtocol(mbTrans466)
containerStruct1 := aurora.NewAuroraAdminKillTasksArgs()
err470 := containerStruct1.ReadField2(jsProt469)
@ -694,7 +723,7 @@ func main() {
value1 := argvalue1
argvalue2 := flag.Arg(3)
value2 := argvalue2
fmt.Print(client.KillTasks(value0, value1, value2))
fmt.Print(client.KillTasks(context.Background(), value0, value1, value2))
fmt.Print("\n")
break
case "addInstances":
@ -710,7 +739,7 @@ func main() {
Usage()
return
}
factory475 := thrift.NewTSimpleJSONProtocolFactory()
factory475 := thrift.NewTJSONProtocolFactory()
jsProt476 := factory475.GetProtocol(mbTrans473)
argvalue0 := aurora.NewInstanceKey()
err477 := argvalue0.Read(jsProt476)
@ -726,7 +755,7 @@ func main() {
}
argvalue1 := int32(tmp1)
value1 := argvalue1
fmt.Print(client.AddInstances(value0, value1))
fmt.Print(client.AddInstances(context.Background(), value0, value1))
fmt.Print("\n")
break
case "replaceCronTemplate":
@ -742,7 +771,7 @@ func main() {
Usage()
return
}
factory482 := thrift.NewTSimpleJSONProtocolFactory()
factory482 := thrift.NewTJSONProtocolFactory()
jsProt483 := factory482.GetProtocol(mbTrans480)
argvalue0 := aurora.NewJobConfiguration()
err484 := argvalue0.Read(jsProt483)
@ -751,7 +780,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.ReplaceCronTemplate(value0))
fmt.Print(client.ReplaceCronTemplate(context.Background(), value0))
fmt.Print("\n")
break
case "startJobUpdate":
@ -767,7 +796,7 @@ func main() {
Usage()
return
}
factory488 := thrift.NewTSimpleJSONProtocolFactory()
factory488 := thrift.NewTJSONProtocolFactory()
jsProt489 := factory488.GetProtocol(mbTrans486)
argvalue0 := aurora.NewJobUpdateRequest()
err490 := argvalue0.Read(jsProt489)
@ -778,7 +807,7 @@ func main() {
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.StartJobUpdate(value0, value1))
fmt.Print(client.StartJobUpdate(context.Background(), value0, value1))
fmt.Print("\n")
break
case "pauseJobUpdate":
@ -794,7 +823,7 @@ func main() {
Usage()
return
}
factory495 := thrift.NewTSimpleJSONProtocolFactory()
factory495 := thrift.NewTJSONProtocolFactory()
jsProt496 := factory495.GetProtocol(mbTrans493)
argvalue0 := aurora.NewJobUpdateKey()
err497 := argvalue0.Read(jsProt496)
@ -805,7 +834,7 @@ func main() {
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.PauseJobUpdate(value0, value1))
fmt.Print(client.PauseJobUpdate(context.Background(), value0, value1))
fmt.Print("\n")
break
case "resumeJobUpdate":
@ -821,7 +850,7 @@ func main() {
Usage()
return
}
factory502 := thrift.NewTSimpleJSONProtocolFactory()
factory502 := thrift.NewTJSONProtocolFactory()
jsProt503 := factory502.GetProtocol(mbTrans500)
argvalue0 := aurora.NewJobUpdateKey()
err504 := argvalue0.Read(jsProt503)
@ -832,7 +861,7 @@ func main() {
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.ResumeJobUpdate(value0, value1))
fmt.Print(client.ResumeJobUpdate(context.Background(), value0, value1))
fmt.Print("\n")
break
case "abortJobUpdate":
@ -848,7 +877,7 @@ func main() {
Usage()
return
}
factory509 := thrift.NewTSimpleJSONProtocolFactory()
factory509 := thrift.NewTJSONProtocolFactory()
jsProt510 := factory509.GetProtocol(mbTrans507)
argvalue0 := aurora.NewJobUpdateKey()
err511 := argvalue0.Read(jsProt510)
@ -859,7 +888,7 @@ func main() {
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.AbortJobUpdate(value0, value1))
fmt.Print(client.AbortJobUpdate(context.Background(), value0, value1))
fmt.Print("\n")
break
case "rollbackJobUpdate":
@ -875,7 +904,7 @@ func main() {
Usage()
return
}
factory516 := thrift.NewTSimpleJSONProtocolFactory()
factory516 := thrift.NewTJSONProtocolFactory()
jsProt517 := factory516.GetProtocol(mbTrans514)
argvalue0 := aurora.NewJobUpdateKey()
err518 := argvalue0.Read(jsProt517)
@ -886,7 +915,7 @@ func main() {
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.RollbackJobUpdate(value0, value1))
fmt.Print(client.RollbackJobUpdate(context.Background(), value0, value1))
fmt.Print("\n")
break
case "pulseJobUpdate":
@ -902,7 +931,7 @@ func main() {
Usage()
return
}
factory523 := thrift.NewTSimpleJSONProtocolFactory()
factory523 := thrift.NewTJSONProtocolFactory()
jsProt524 := factory523.GetProtocol(mbTrans521)
argvalue0 := aurora.NewJobUpdateKey()
err525 := argvalue0.Read(jsProt524)
@ -911,7 +940,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.PulseJobUpdate(value0))
fmt.Print(client.PulseJobUpdate(context.Background(), value0))
fmt.Print("\n")
break
case "getRoleSummary":
@ -919,7 +948,7 @@ func main() {
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
flag.Usage()
}
fmt.Print(client.GetRoleSummary())
fmt.Print(client.GetRoleSummary(context.Background()))
fmt.Print("\n")
break
case "getJobSummary":
@ -929,7 +958,7 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobSummary(value0))
fmt.Print(client.GetJobSummary(context.Background(), value0))
fmt.Print("\n")
break
case "getTasksStatus":
@ -945,7 +974,7 @@ func main() {
Usage()
return
}
factory530 := thrift.NewTSimpleJSONProtocolFactory()
factory530 := thrift.NewTJSONProtocolFactory()
jsProt531 := factory530.GetProtocol(mbTrans528)
argvalue0 := aurora.NewTaskQuery()
err532 := argvalue0.Read(jsProt531)
@ -954,7 +983,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetTasksStatus(value0))
fmt.Print(client.GetTasksStatus(context.Background(), value0))
fmt.Print("\n")
break
case "getTasksWithoutConfigs":
@ -970,7 +999,7 @@ func main() {
Usage()
return
}
factory536 := thrift.NewTSimpleJSONProtocolFactory()
factory536 := thrift.NewTJSONProtocolFactory()
jsProt537 := factory536.GetProtocol(mbTrans534)
argvalue0 := aurora.NewTaskQuery()
err538 := argvalue0.Read(jsProt537)
@ -979,7 +1008,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetTasksWithoutConfigs(value0))
fmt.Print(client.GetTasksWithoutConfigs(context.Background(), value0))
fmt.Print("\n")
break
case "getPendingReason":
@ -995,7 +1024,7 @@ func main() {
Usage()
return
}
factory542 := thrift.NewTSimpleJSONProtocolFactory()
factory542 := thrift.NewTJSONProtocolFactory()
jsProt543 := factory542.GetProtocol(mbTrans540)
argvalue0 := aurora.NewTaskQuery()
err544 := argvalue0.Read(jsProt543)
@ -1004,7 +1033,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetPendingReason(value0))
fmt.Print(client.GetPendingReason(context.Background(), value0))
fmt.Print("\n")
break
case "getConfigSummary":
@ -1020,7 +1049,7 @@ func main() {
Usage()
return
}
factory548 := thrift.NewTSimpleJSONProtocolFactory()
factory548 := thrift.NewTJSONProtocolFactory()
jsProt549 := factory548.GetProtocol(mbTrans546)
argvalue0 := aurora.NewJobKey()
err550 := argvalue0.Read(jsProt549)
@ -1029,7 +1058,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetConfigSummary(value0))
fmt.Print(client.GetConfigSummary(context.Background(), value0))
fmt.Print("\n")
break
case "getJobs":
@ -1039,7 +1068,7 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobs(value0))
fmt.Print(client.GetJobs(context.Background(), value0))
fmt.Print("\n")
break
case "getQuota":
@ -1049,7 +1078,7 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetQuota(value0))
fmt.Print(client.GetQuota(context.Background(), value0))
fmt.Print("\n")
break
case "populateJobConfig":
@ -1065,7 +1094,7 @@ func main() {
Usage()
return
}
factory556 := thrift.NewTSimpleJSONProtocolFactory()
factory556 := thrift.NewTJSONProtocolFactory()
jsProt557 := factory556.GetProtocol(mbTrans554)
argvalue0 := aurora.NewJobConfiguration()
err558 := argvalue0.Read(jsProt557)
@ -1074,7 +1103,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.PopulateJobConfig(value0))
fmt.Print(client.PopulateJobConfig(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateSummaries":
@ -1090,7 +1119,7 @@ func main() {
Usage()
return
}
factory562 := thrift.NewTSimpleJSONProtocolFactory()
factory562 := thrift.NewTJSONProtocolFactory()
jsProt563 := factory562.GetProtocol(mbTrans560)
argvalue0 := aurora.NewJobUpdateQuery()
err564 := argvalue0.Read(jsProt563)
@ -1099,7 +1128,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateSummaries(value0))
fmt.Print(client.GetJobUpdateSummaries(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateDetails":
@ -1115,7 +1144,7 @@ func main() {
Usage()
return
}
factory568 := thrift.NewTSimpleJSONProtocolFactory()
factory568 := thrift.NewTJSONProtocolFactory()
jsProt569 := factory568.GetProtocol(mbTrans566)
argvalue0 := aurora.NewJobUpdateQuery()
err570 := argvalue0.Read(jsProt569)
@ -1124,7 +1153,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDetails(value0))
fmt.Print(client.GetJobUpdateDetails(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateDiff":
@ -1140,7 +1169,7 @@ func main() {
Usage()
return
}
factory574 := thrift.NewTSimpleJSONProtocolFactory()
factory574 := thrift.NewTJSONProtocolFactory()
jsProt575 := factory574.GetProtocol(mbTrans572)
argvalue0 := aurora.NewJobUpdateRequest()
err576 := argvalue0.Read(jsProt575)
@ -1149,7 +1178,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDiff(value0))
fmt.Print(client.GetJobUpdateDiff(context.Background(), value0))
fmt.Print("\n")
break
case "getTierConfigs":
@ -1157,7 +1186,7 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
flag.Usage()
}
fmt.Print(client.GetTierConfigs())
fmt.Print(client.GetTierConfigs(context.Background()))
fmt.Print("\n")
break
case "":


@ -1,19 +1,21 @@
// Autogenerated by Thrift Compiler (0.9.3)
// Autogenerated by Thrift Compiler (0.12.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package main
import (
"apache/aurora"
"context"
"flag"
"fmt"
"git.apache.org/thrift.git/lib/go/thrift"
"math"
"net"
"net/url"
"os"
"strconv"
"strings"
"git.apache.org/thrift.git/lib/go/thrift"
)
func Usage() {
@ -51,6 +53,22 @@ func Usage() {
os.Exit(0)
}
type httpHeaders map[string]string
func (h httpHeaders) String() string {
var m map[string]string = h
return fmt.Sprintf("%s", m)
}
func (h httpHeaders) Set(value string) error {
parts := strings.Split(value, ": ")
if len(parts) != 2 {
return fmt.Errorf("header should be of format 'Key: Value'")
}
h[parts[0]] = parts[1]
return nil
}
func main() {
flag.Usage = Usage
var host string
@ -59,7 +77,8 @@ func main() {
var urlString string
var framed bool
var useHttp bool
var parsedUrl url.URL
headers := make(httpHeaders)
var parsedUrl *url.URL
var trans thrift.TTransport
_ = strconv.Atoi
_ = math.Abs
@ -70,16 +89,18 @@ func main() {
flag.StringVar(&urlString, "u", "", "Specify the url")
flag.BoolVar(&framed, "framed", false, "Use framed transport")
flag.BoolVar(&useHttp, "http", false, "Use http")
flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")")
flag.Parse()
if len(urlString) > 0 {
parsedUrl, err := url.Parse(urlString)
var err error
parsedUrl, err = url.Parse(urlString)
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
host = parsedUrl.Host
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http"
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https"
} else if useHttp {
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
if err != nil {
@ -92,6 +113,12 @@ func main() {
var err error
if useHttp {
trans, err = thrift.NewTHttpClient(parsedUrl.String())
if len(headers) > 0 {
httptrans := trans.(*thrift.THttpClient)
for key, value := range headers {
httptrans.SetHeader(key, value)
}
}
} else {
portStr := fmt.Sprint(port)
if strings.Contains(host, ":") {
@ -134,7 +161,9 @@ func main() {
Usage()
os.Exit(1)
}
client := aurora.NewAuroraSchedulerManagerClientFactory(trans, protocolFactory)
iprot := protocolFactory.GetProtocol(trans)
oprot := protocolFactory.GetProtocol(trans)
client := aurora.NewAuroraSchedulerManagerClient(thrift.NewTStandardClient(iprot, oprot))
if err := trans.Open(); err != nil {
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
os.Exit(1)
@ -154,7 +183,7 @@ func main() {
Usage()
return
}
factory165 := thrift.NewTSimpleJSONProtocolFactory()
factory165 := thrift.NewTJSONProtocolFactory()
jsProt166 := factory165.GetProtocol(mbTrans163)
argvalue0 := aurora.NewJobConfiguration()
err167 := argvalue0.Read(jsProt166)
@ -163,7 +192,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.CreateJob(value0))
fmt.Print(client.CreateJob(context.Background(), value0))
fmt.Print("\n")
break
case "scheduleCronJob":
@ -179,7 +208,7 @@ func main() {
Usage()
return
}
factory171 := thrift.NewTSimpleJSONProtocolFactory()
factory171 := thrift.NewTJSONProtocolFactory()
jsProt172 := factory171.GetProtocol(mbTrans169)
argvalue0 := aurora.NewJobConfiguration()
err173 := argvalue0.Read(jsProt172)
@ -188,7 +217,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.ScheduleCronJob(value0))
fmt.Print(client.ScheduleCronJob(context.Background(), value0))
fmt.Print("\n")
break
case "descheduleCronJob":
@ -204,7 +233,7 @@ func main() {
Usage()
return
}
factory177 := thrift.NewTSimpleJSONProtocolFactory()
factory177 := thrift.NewTJSONProtocolFactory()
jsProt178 := factory177.GetProtocol(mbTrans175)
argvalue0 := aurora.NewJobKey()
err179 := argvalue0.Read(jsProt178)
@ -213,7 +242,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.DescheduleCronJob(value0))
fmt.Print(client.DescheduleCronJob(context.Background(), value0))
fmt.Print("\n")
break
case "startCronJob":
@ -229,7 +258,7 @@ func main() {
Usage()
return
}
factory183 := thrift.NewTSimpleJSONProtocolFactory()
factory183 := thrift.NewTJSONProtocolFactory()
jsProt184 := factory183.GetProtocol(mbTrans181)
argvalue0 := aurora.NewJobKey()
err185 := argvalue0.Read(jsProt184)
@ -238,7 +267,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.StartCronJob(value0))
fmt.Print(client.StartCronJob(context.Background(), value0))
fmt.Print("\n")
break
case "restartShards":
@ -254,7 +283,7 @@ func main() {
Usage()
return
}
factory189 := thrift.NewTSimpleJSONProtocolFactory()
factory189 := thrift.NewTJSONProtocolFactory()
jsProt190 := factory189.GetProtocol(mbTrans187)
argvalue0 := aurora.NewJobKey()
err191 := argvalue0.Read(jsProt190)
@ -271,7 +300,7 @@ func main() {
Usage()
return
}
factory195 := thrift.NewTSimpleJSONProtocolFactory()
factory195 := thrift.NewTJSONProtocolFactory()
jsProt196 := factory195.GetProtocol(mbTrans193)
containerStruct1 := aurora.NewAuroraSchedulerManagerRestartShardsArgs()
err197 := containerStruct1.ReadField2(jsProt196)
@ -281,7 +310,7 @@ func main() {
}
argvalue1 := containerStruct1.ShardIds
value1 := argvalue1
fmt.Print(client.RestartShards(value0, value1))
fmt.Print(client.RestartShards(context.Background(), value0, value1))
fmt.Print("\n")
break
case "killTasks":
@ -297,7 +326,7 @@ func main() {
Usage()
return
}
factory201 := thrift.NewTSimpleJSONProtocolFactory()
factory201 := thrift.NewTJSONProtocolFactory()
jsProt202 := factory201.GetProtocol(mbTrans199)
argvalue0 := aurora.NewJobKey()
err203 := argvalue0.Read(jsProt202)
@ -314,7 +343,7 @@ func main() {
Usage()
return
}
factory207 := thrift.NewTSimpleJSONProtocolFactory()
factory207 := thrift.NewTJSONProtocolFactory()
jsProt208 := factory207.GetProtocol(mbTrans205)
containerStruct1 := aurora.NewAuroraSchedulerManagerKillTasksArgs()
err209 := containerStruct1.ReadField2(jsProt208)
@ -326,7 +355,7 @@ func main() {
value1 := argvalue1
argvalue2 := flag.Arg(3)
value2 := argvalue2
fmt.Print(client.KillTasks(value0, value1, value2))
fmt.Print(client.KillTasks(context.Background(), value0, value1, value2))
fmt.Print("\n")
break
case "addInstances":
@ -342,7 +371,7 @@ func main() {
Usage()
return
}
factory214 := thrift.NewTSimpleJSONProtocolFactory()
factory214 := thrift.NewTJSONProtocolFactory()
jsProt215 := factory214.GetProtocol(mbTrans212)
argvalue0 := aurora.NewInstanceKey()
err216 := argvalue0.Read(jsProt215)
@ -358,7 +387,7 @@ func main() {
}
argvalue1 := int32(tmp1)
value1 := argvalue1
fmt.Print(client.AddInstances(value0, value1))
fmt.Print(client.AddInstances(context.Background(), value0, value1))
fmt.Print("\n")
break
case "replaceCronTemplate":
@ -374,7 +403,7 @@ func main() {
Usage()
return
}
factory221 := thrift.NewTSimpleJSONProtocolFactory()
factory221 := thrift.NewTJSONProtocolFactory()
jsProt222 := factory221.GetProtocol(mbTrans219)
argvalue0 := aurora.NewJobConfiguration()
err223 := argvalue0.Read(jsProt222)
@ -383,7 +412,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.ReplaceCronTemplate(value0))
fmt.Print(client.ReplaceCronTemplate(context.Background(), value0))
fmt.Print("\n")
break
case "startJobUpdate":
@ -399,7 +428,7 @@ func main() {
Usage()
return
}
factory227 := thrift.NewTSimpleJSONProtocolFactory()
factory227 := thrift.NewTJSONProtocolFactory()
jsProt228 := factory227.GetProtocol(mbTrans225)
argvalue0 := aurora.NewJobUpdateRequest()
err229 := argvalue0.Read(jsProt228)
@ -410,7 +439,7 @@ func main() {
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.StartJobUpdate(value0, value1))
fmt.Print(client.StartJobUpdate(context.Background(), value0, value1))
fmt.Print("\n")
break
case "pauseJobUpdate":
@ -426,7 +455,7 @@ func main() {
Usage()
return
}
factory234 := thrift.NewTSimpleJSONProtocolFactory()
factory234 := thrift.NewTJSONProtocolFactory()
jsProt235 := factory234.GetProtocol(mbTrans232)
argvalue0 := aurora.NewJobUpdateKey()
err236 := argvalue0.Read(jsProt235)
@ -437,7 +466,7 @@ func main() {
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.PauseJobUpdate(value0, value1))
fmt.Print(client.PauseJobUpdate(context.Background(), value0, value1))
fmt.Print("\n")
break
case "resumeJobUpdate":
@ -453,7 +482,7 @@ func main() {
Usage()
return
}
factory241 := thrift.NewTSimpleJSONProtocolFactory()
factory241 := thrift.NewTJSONProtocolFactory()
jsProt242 := factory241.GetProtocol(mbTrans239)
argvalue0 := aurora.NewJobUpdateKey()
err243 := argvalue0.Read(jsProt242)
@ -464,7 +493,7 @@ func main() {
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.ResumeJobUpdate(value0, value1))
fmt.Print(client.ResumeJobUpdate(context.Background(), value0, value1))
fmt.Print("\n")
break
case "abortJobUpdate":
@ -480,7 +509,7 @@ func main() {
Usage()
return
}
factory248 := thrift.NewTSimpleJSONProtocolFactory()
factory248 := thrift.NewTJSONProtocolFactory()
jsProt249 := factory248.GetProtocol(mbTrans246)
argvalue0 := aurora.NewJobUpdateKey()
err250 := argvalue0.Read(jsProt249)
@ -491,7 +520,7 @@ func main() {
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.AbortJobUpdate(value0, value1))
fmt.Print(client.AbortJobUpdate(context.Background(), value0, value1))
fmt.Print("\n")
break
case "rollbackJobUpdate":
@ -507,7 +536,7 @@ func main() {
Usage()
return
}
factory255 := thrift.NewTSimpleJSONProtocolFactory()
factory255 := thrift.NewTJSONProtocolFactory()
jsProt256 := factory255.GetProtocol(mbTrans253)
argvalue0 := aurora.NewJobUpdateKey()
err257 := argvalue0.Read(jsProt256)
@ -518,7 +547,7 @@ func main() {
value0 := argvalue0
argvalue1 := flag.Arg(2)
value1 := argvalue1
fmt.Print(client.RollbackJobUpdate(value0, value1))
fmt.Print(client.RollbackJobUpdate(context.Background(), value0, value1))
fmt.Print("\n")
break
case "pulseJobUpdate":
@ -534,7 +563,7 @@ func main() {
Usage()
return
}
factory262 := thrift.NewTSimpleJSONProtocolFactory()
factory262 := thrift.NewTJSONProtocolFactory()
jsProt263 := factory262.GetProtocol(mbTrans260)
argvalue0 := aurora.NewJobUpdateKey()
err264 := argvalue0.Read(jsProt263)
@ -543,7 +572,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.PulseJobUpdate(value0))
fmt.Print(client.PulseJobUpdate(context.Background(), value0))
fmt.Print("\n")
break
case "getRoleSummary":
@ -551,7 +580,7 @@ func main() {
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
flag.Usage()
}
fmt.Print(client.GetRoleSummary())
fmt.Print(client.GetRoleSummary(context.Background()))
fmt.Print("\n")
break
case "getJobSummary":
@ -561,7 +590,7 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobSummary(value0))
fmt.Print(client.GetJobSummary(context.Background(), value0))
fmt.Print("\n")
break
case "getTasksStatus":
@ -577,7 +606,7 @@ func main() {
Usage()
return
}
factory269 := thrift.NewTSimpleJSONProtocolFactory()
factory269 := thrift.NewTJSONProtocolFactory()
jsProt270 := factory269.GetProtocol(mbTrans267)
argvalue0 := aurora.NewTaskQuery()
err271 := argvalue0.Read(jsProt270)
@ -586,7 +615,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetTasksStatus(value0))
fmt.Print(client.GetTasksStatus(context.Background(), value0))
fmt.Print("\n")
break
case "getTasksWithoutConfigs":
@ -602,7 +631,7 @@ func main() {
Usage()
return
}
factory275 := thrift.NewTSimpleJSONProtocolFactory()
factory275 := thrift.NewTJSONProtocolFactory()
jsProt276 := factory275.GetProtocol(mbTrans273)
argvalue0 := aurora.NewTaskQuery()
err277 := argvalue0.Read(jsProt276)
@ -611,7 +640,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetTasksWithoutConfigs(value0))
fmt.Print(client.GetTasksWithoutConfigs(context.Background(), value0))
fmt.Print("\n")
break
case "getPendingReason":
@ -627,7 +656,7 @@ func main() {
Usage()
return
}
factory281 := thrift.NewTSimpleJSONProtocolFactory()
factory281 := thrift.NewTJSONProtocolFactory()
jsProt282 := factory281.GetProtocol(mbTrans279)
argvalue0 := aurora.NewTaskQuery()
err283 := argvalue0.Read(jsProt282)
@ -636,7 +665,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetPendingReason(value0))
fmt.Print(client.GetPendingReason(context.Background(), value0))
fmt.Print("\n")
break
case "getConfigSummary":
@ -652,7 +681,7 @@ func main() {
Usage()
return
}
factory287 := thrift.NewTSimpleJSONProtocolFactory()
factory287 := thrift.NewTJSONProtocolFactory()
jsProt288 := factory287.GetProtocol(mbTrans285)
argvalue0 := aurora.NewJobKey()
err289 := argvalue0.Read(jsProt288)
@ -661,7 +690,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetConfigSummary(value0))
fmt.Print(client.GetConfigSummary(context.Background(), value0))
fmt.Print("\n")
break
case "getJobs":
@ -671,7 +700,7 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobs(value0))
fmt.Print(client.GetJobs(context.Background(), value0))
fmt.Print("\n")
break
case "getQuota":
@ -681,7 +710,7 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetQuota(value0))
fmt.Print(client.GetQuota(context.Background(), value0))
fmt.Print("\n")
break
case "populateJobConfig":
@ -697,7 +726,7 @@ func main() {
Usage()
return
}
factory295 := thrift.NewTSimpleJSONProtocolFactory()
factory295 := thrift.NewTJSONProtocolFactory()
jsProt296 := factory295.GetProtocol(mbTrans293)
argvalue0 := aurora.NewJobConfiguration()
err297 := argvalue0.Read(jsProt296)
@ -706,7 +735,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.PopulateJobConfig(value0))
fmt.Print(client.PopulateJobConfig(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateSummaries":
@ -722,7 +751,7 @@ func main() {
Usage()
return
}
factory301 := thrift.NewTSimpleJSONProtocolFactory()
factory301 := thrift.NewTJSONProtocolFactory()
jsProt302 := factory301.GetProtocol(mbTrans299)
argvalue0 := aurora.NewJobUpdateQuery()
err303 := argvalue0.Read(jsProt302)
@ -731,7 +760,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateSummaries(value0))
fmt.Print(client.GetJobUpdateSummaries(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateDetails":
@ -747,7 +776,7 @@ func main() {
Usage()
return
}
factory307 := thrift.NewTSimpleJSONProtocolFactory()
factory307 := thrift.NewTJSONProtocolFactory()
jsProt308 := factory307.GetProtocol(mbTrans305)
argvalue0 := aurora.NewJobUpdateQuery()
err309 := argvalue0.Read(jsProt308)
@ -756,7 +785,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDetails(value0))
fmt.Print(client.GetJobUpdateDetails(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateDiff":
@ -772,7 +801,7 @@ func main() {
Usage()
return
}
factory313 := thrift.NewTSimpleJSONProtocolFactory()
factory313 := thrift.NewTJSONProtocolFactory()
jsProt314 := factory313.GetProtocol(mbTrans311)
argvalue0 := aurora.NewJobUpdateRequest()
err315 := argvalue0.Read(jsProt314)
@ -781,7 +810,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDiff(value0))
fmt.Print(client.GetJobUpdateDiff(context.Background(), value0))
fmt.Print("\n")
break
case "getTierConfigs":
@ -789,7 +818,7 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
flag.Usage()
}
fmt.Print(client.GetTierConfigs())
fmt.Print(client.GetTierConfigs(context.Background()))
fmt.Print("\n")
break
case "":

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,89 +0,0 @@
// Autogenerated by Thrift Compiler (0.9.3)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package aurora
import (
"bytes"
"fmt"
"git.apache.org/thrift.git/lib/go/thrift"
)
// (needed to ensure safety because of naive import list construction.)
var _ = thrift.ZERO
var _ = fmt.Printf
var _ = bytes.Equal
const AURORA_EXECUTOR_NAME = "AuroraExecutor"
var ACTIVE_STATES map[ScheduleStatus]bool
var SLAVE_ASSIGNED_STATES map[ScheduleStatus]bool
var LIVE_STATES map[ScheduleStatus]bool
var TERMINAL_STATES map[ScheduleStatus]bool
const GOOD_IDENTIFIER_PATTERN = "^[\\w\\-\\.]+$"
const GOOD_IDENTIFIER_PATTERN_JVM = "^[\\w\\-\\.]+$"
const GOOD_IDENTIFIER_PATTERN_PYTHON = "^[\\w\\-\\.]+$"
var ACTIVE_JOB_UPDATE_STATES map[JobUpdateStatus]bool
var AWAITNG_PULSE_JOB_UPDATE_STATES map[JobUpdateStatus]bool
const BYPASS_LEADER_REDIRECT_HEADER_NAME = "Bypass-Leader-Redirect"
const TASK_FILESYSTEM_MOUNT_POINT = "taskfs"
func init() {
ACTIVE_STATES = map[ScheduleStatus]bool{
9: true,
17: true,
6: true,
0: true,
13: true,
12: true,
2: true,
1: true,
18: true,
16: true,
}
SLAVE_ASSIGNED_STATES = map[ScheduleStatus]bool{
9: true,
17: true,
6: true,
13: true,
12: true,
2: true,
18: true,
1: true,
}
LIVE_STATES = map[ScheduleStatus]bool{
6: true,
13: true,
12: true,
17: true,
18: true,
2: true,
}
TERMINAL_STATES = map[ScheduleStatus]bool{
4: true,
3: true,
5: true,
7: true,
}
ACTIVE_JOB_UPDATE_STATES = map[JobUpdateStatus]bool{
0: true,
1: true,
2: true,
3: true,
9: true,
10: true,
}
AWAITNG_PULSE_JOB_UPDATE_STATES = map[JobUpdateStatus]bool{
9: true,
10: true,
}
}

View file

@ -1,19 +1,21 @@
// Autogenerated by Thrift Compiler (0.9.3)
// Autogenerated by Thrift Compiler (0.12.0)
// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
package main
import (
"apache/aurora"
"context"
"flag"
"fmt"
"git.apache.org/thrift.git/lib/go/thrift"
"math"
"net"
"net/url"
"os"
"strconv"
"strings"
"git.apache.org/thrift.git/lib/go/thrift"
)
func Usage() {
@ -37,6 +39,22 @@ func Usage() {
os.Exit(0)
}
type httpHeaders map[string]string
func (h httpHeaders) String() string {
var m map[string]string = h
return fmt.Sprintf("%s", m)
}
func (h httpHeaders) Set(value string) error {
parts := strings.Split(value, ": ")
if len(parts) != 2 {
return fmt.Errorf("header should be of format 'Key: Value'")
}
h[parts[0]] = parts[1]
return nil
}
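The new -H flag collects repeated "Key: Value" pairs that main later copies onto the HTTP transport. A minimal sketch of the flag type in isolation (the header value is illustrative):
h := make(httpHeaders)
if err := h.Set("Authorization: Basic dXNlcjpwYXNz"); err != nil {
fmt.Fprintln(os.Stderr, err)
}
// h now holds {"Authorization": "Basic dXNlcjpwYXNz"}; main applies each
// entry to the THttpClient transport below via SetHeader.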
func main() {
flag.Usage = Usage
var host string
@ -45,7 +63,8 @@ func main() {
var urlString string
var framed bool
var useHttp bool
var parsedUrl url.URL
headers := make(httpHeaders)
var parsedUrl *url.URL
var trans thrift.TTransport
_ = strconv.Atoi
_ = math.Abs
@ -56,16 +75,18 @@ func main() {
flag.StringVar(&urlString, "u", "", "Specify the url")
flag.BoolVar(&framed, "framed", false, "Use framed transport")
flag.BoolVar(&useHttp, "http", false, "Use http")
flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")")
flag.Parse()
if len(urlString) > 0 {
parsedUrl, err := url.Parse(urlString)
var err error
parsedUrl, err = url.Parse(urlString)
if err != nil {
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
flag.Usage()
}
host = parsedUrl.Host
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http"
useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https"
} else if useHttp {
_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
if err != nil {
@ -78,6 +99,12 @@ func main() {
var err error
if useHttp {
trans, err = thrift.NewTHttpClient(parsedUrl.String())
if len(headers) > 0 {
httptrans := trans.(*thrift.THttpClient)
for key, value := range headers {
httptrans.SetHeader(key, value)
}
}
} else {
portStr := fmt.Sprint(port)
if strings.Contains(host, ":") {
@ -120,7 +147,9 @@ func main() {
Usage()
os.Exit(1)
}
client := aurora.NewReadOnlySchedulerClientFactory(trans, protocolFactory)
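// thrift 0.12 moves the generated remotes off the New*ClientFactory helpers:
// the client now wraps a TStandardClient built from explicit input and output protocols.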
iprot := protocolFactory.GetProtocol(trans)
oprot := protocolFactory.GetProtocol(trans)
client := aurora.NewReadOnlySchedulerClient(thrift.NewTStandardClient(iprot, oprot))
if err := trans.Open(); err != nil {
fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
os.Exit(1)
@ -132,7 +161,7 @@ func main() {
fmt.Fprintln(os.Stderr, "GetRoleSummary requires 0 args")
flag.Usage()
}
fmt.Print(client.GetRoleSummary())
fmt.Print(client.GetRoleSummary(context.Background()))
fmt.Print("\n")
break
case "getJobSummary":
@ -142,7 +171,7 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobSummary(value0))
fmt.Print(client.GetJobSummary(context.Background(), value0))
fmt.Print("\n")
break
case "getTasksStatus":
@ -158,7 +187,7 @@ func main() {
Usage()
return
}
factory84 := thrift.NewTSimpleJSONProtocolFactory()
factory84 := thrift.NewTJSONProtocolFactory()
jsProt85 := factory84.GetProtocol(mbTrans82)
argvalue0 := aurora.NewTaskQuery()
err86 := argvalue0.Read(jsProt85)
@ -167,7 +196,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetTasksStatus(value0))
fmt.Print(client.GetTasksStatus(context.Background(), value0))
fmt.Print("\n")
break
case "getTasksWithoutConfigs":
@ -183,7 +212,7 @@ func main() {
Usage()
return
}
factory90 := thrift.NewTSimpleJSONProtocolFactory()
factory90 := thrift.NewTJSONProtocolFactory()
jsProt91 := factory90.GetProtocol(mbTrans88)
argvalue0 := aurora.NewTaskQuery()
err92 := argvalue0.Read(jsProt91)
@ -192,7 +221,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetTasksWithoutConfigs(value0))
fmt.Print(client.GetTasksWithoutConfigs(context.Background(), value0))
fmt.Print("\n")
break
case "getPendingReason":
@ -208,7 +237,7 @@ func main() {
Usage()
return
}
factory96 := thrift.NewTSimpleJSONProtocolFactory()
factory96 := thrift.NewTJSONProtocolFactory()
jsProt97 := factory96.GetProtocol(mbTrans94)
argvalue0 := aurora.NewTaskQuery()
err98 := argvalue0.Read(jsProt97)
@ -217,7 +246,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetPendingReason(value0))
fmt.Print(client.GetPendingReason(context.Background(), value0))
fmt.Print("\n")
break
case "getConfigSummary":
@ -233,7 +262,7 @@ func main() {
Usage()
return
}
factory102 := thrift.NewTSimpleJSONProtocolFactory()
factory102 := thrift.NewTJSONProtocolFactory()
jsProt103 := factory102.GetProtocol(mbTrans100)
argvalue0 := aurora.NewJobKey()
err104 := argvalue0.Read(jsProt103)
@ -242,7 +271,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetConfigSummary(value0))
fmt.Print(client.GetConfigSummary(context.Background(), value0))
fmt.Print("\n")
break
case "getJobs":
@ -252,7 +281,7 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetJobs(value0))
fmt.Print(client.GetJobs(context.Background(), value0))
fmt.Print("\n")
break
case "getQuota":
@ -262,7 +291,7 @@ func main() {
}
argvalue0 := flag.Arg(1)
value0 := argvalue0
fmt.Print(client.GetQuota(value0))
fmt.Print(client.GetQuota(context.Background(), value0))
fmt.Print("\n")
break
case "populateJobConfig":
@ -278,7 +307,7 @@ func main() {
Usage()
return
}
factory110 := thrift.NewTSimpleJSONProtocolFactory()
factory110 := thrift.NewTJSONProtocolFactory()
jsProt111 := factory110.GetProtocol(mbTrans108)
argvalue0 := aurora.NewJobConfiguration()
err112 := argvalue0.Read(jsProt111)
@ -287,7 +316,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.PopulateJobConfig(value0))
fmt.Print(client.PopulateJobConfig(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateSummaries":
@ -303,7 +332,7 @@ func main() {
Usage()
return
}
factory116 := thrift.NewTSimpleJSONProtocolFactory()
factory116 := thrift.NewTJSONProtocolFactory()
jsProt117 := factory116.GetProtocol(mbTrans114)
argvalue0 := aurora.NewJobUpdateQuery()
err118 := argvalue0.Read(jsProt117)
@ -312,7 +341,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateSummaries(value0))
fmt.Print(client.GetJobUpdateSummaries(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateDetails":
@ -328,7 +357,7 @@ func main() {
Usage()
return
}
factory122 := thrift.NewTSimpleJSONProtocolFactory()
factory122 := thrift.NewTJSONProtocolFactory()
jsProt123 := factory122.GetProtocol(mbTrans120)
argvalue0 := aurora.NewJobUpdateQuery()
err124 := argvalue0.Read(jsProt123)
@ -337,7 +366,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDetails(value0))
fmt.Print(client.GetJobUpdateDetails(context.Background(), value0))
fmt.Print("\n")
break
case "getJobUpdateDiff":
@ -353,7 +382,7 @@ func main() {
Usage()
return
}
factory128 := thrift.NewTSimpleJSONProtocolFactory()
factory128 := thrift.NewTJSONProtocolFactory()
jsProt129 := factory128.GetProtocol(mbTrans126)
argvalue0 := aurora.NewJobUpdateRequest()
err130 := argvalue0.Read(jsProt129)
@ -362,7 +391,7 @@ func main() {
return
}
value0 := argvalue0
fmt.Print(client.GetJobUpdateDiff(value0))
fmt.Print(client.GetJobUpdateDiff(context.Background(), value0))
fmt.Print("\n")
break
case "getTierConfigs":
@ -370,7 +399,7 @@ func main() {
fmt.Fprintln(os.Stderr, "GetTierConfigs requires 0 args")
flag.Usage()
}
fmt.Print(client.GetTierConfigs())
fmt.Print(client.GetTierConfigs(context.Background()))
fmt.Print("\n")
break
case "":

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,6 +1,6 @@
#! /bin/bash
THRIFT_VER=0.9.3
THRIFT_VER=0.12.0
if [[ $(thrift -version | grep -e $THRIFT_VER -c) -ne 1 ]]; then
echo "Warning: This wrapper has only been tested with version" $THRIFT_VER;

10
vendor/github.com/paypal/gorealis/go.mod generated vendored Normal file
View file

@ -0,0 +1,10 @@
module github.com/paypal/gorealis/v2
require (
git.apache.org/thrift.git v0.0.0-20181016064013-5c1ecb67cde4
github.com/davecgh/go-spew v1.1.0
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e
github.com/pmezard/go-difflib v1.0.0
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a
github.com/stretchr/testify v1.2.0
)
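With the module now declared as github.com/paypal/gorealis/v2, module-aware consumers import it under the /v2 path. A minimal sketch (the scheduler URL mirrors the tests later in this diff):
package main
import (
realis "github.com/paypal/gorealis/v2"
)
func main() {
// Construction mirrors the NewClient calls used in the test suite below.
client, err := realis.NewClient(realis.SchedulerUrl("http://192.168.33.7:8081"))
if err != nil {
panic(err)
}
defer client.Close()
}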

9
vendor/github.com/paypal/gorealis/go.sum generated vendored Normal file
View file

@ -0,0 +1,9 @@
git.apache.org/thrift.git v0.0.0-20181016064013-5c1ecb67cde4 h1:crM1vBe07qSmh3hRVFXp4/TCfWaQkayD6ZOlNbQYtvw=
git.apache.org/thrift.git v0.0.0-20181016064013-5c1ecb67cde4/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e h1:+RHxT/gm0O3UF7nLJbdNzAmULvCFt4XfXHWzh3XI/zs=
github.com/pkg/errors v0.0.0-20171216070316-e881fd58d78e/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a h1:EYL2xz/Zdo0hyqdZMXR4lmT2O11jDLTPCEqIe/FR6W4=
github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=

View file

@ -15,180 +15,72 @@
package realis
import (
"strconv"
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
type Job interface {
// Set Job Key environment.
Environment(env string) Job
Role(role string) Job
Name(name string) Job
CPU(cpus float64) Job
CronSchedule(cron string) Job
CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job
Disk(disk int64) Job
RAM(ram int64) Job
ExecutorName(name string) Job
ExecutorData(data string) Job
AddPorts(num int) Job
AddLabel(key string, value string) Job
AddNamedPorts(names ...string) Job
AddLimitConstraint(name string, limit int32) Job
AddValueConstraint(name string, negated bool, values ...string) Job
// From Aurora Docs:
// dedicated attribute. Aurora treats this specially, and will only allow matching
// jobs to be scheduled on these machines.
// When a job is created, the scheduler requires that the $role component matches
// the role field in the job configuration, and will reject the job creation otherwise.
// A wildcard (*) may be used for the role portion of the dedicated attribute, which
// will allow any owner to elect for a job to run on the host(s)
AddDedicatedConstraint(role, name string) Job
AddURIs(extract bool, cache bool, values ...string) Job
JobKey() *aurora.JobKey
JobConfig() *aurora.JobConfiguration
TaskConfig() *aurora.TaskConfig
IsService(isService bool) Job
InstanceCount(instCount int32) Job
GetInstanceCount() int32
MaxFailure(maxFail int32) Job
Container(container Container) Job
}
// Structure to collect all information pertaining to an Aurora job.
type AuroraJob struct {
jobConfig *aurora.JobConfiguration
resources map[string]*aurora.Resource
portCount int
task *AuroraTask
}
// Create a Job object with everything initialized.
func NewJob() Job {
jobConfig := aurora.NewJobConfiguration()
taskConfig := aurora.NewTaskConfig()
jobKey := aurora.NewJobKey()
// Create an AuroraJob object with everything initialized.
func NewJob() *AuroraJob {
// Job Config
jobConfig.Key = jobKey
jobConfig.TaskConfig = taskConfig
jobKey := &aurora.JobKey{}
// Task Config
taskConfig.Job = jobKey
taskConfig.Container = aurora.NewContainer()
taskConfig.Container.Mesos = aurora.NewMesosContainer()
taskConfig.MesosFetcherUris = make(map[*aurora.MesosFetcherURI]bool)
taskConfig.Metadata = make(map[*aurora.Metadata]bool)
taskConfig.Constraints = make(map[*aurora.Constraint]bool)
// AuroraTask clientConfig
task := NewTask()
task.task.Job = jobKey
// Resources
numCpus := aurora.NewResource()
ramMb := aurora.NewResource()
diskMb := aurora.NewResource()
resources := make(map[string]*aurora.Resource)
resources["cpu"] = numCpus
resources["ram"] = ramMb
resources["disk"] = diskMb
taskConfig.Resources = make(map[*aurora.Resource]bool)
taskConfig.Resources[numCpus] = true
taskConfig.Resources[ramMb] = true
taskConfig.Resources[diskMb] = true
numCpus.NumCpus = new(float64)
ramMb.RamMb = new(int64)
diskMb.DiskMb = new(int64)
// AuroraJob clientConfig
jobConfig := &aurora.JobConfiguration{
Key: jobKey,
TaskConfig: task.TaskConfig(),
}
return &AuroraJob{
jobConfig: jobConfig,
resources: resources,
portCount: 0,
task: task,
}
}
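Every setter now returns the concrete *AuroraJob instead of the Job interface, so calls chain without interface indirection. A minimal sketch of the v2 builder as a caller would write it (all values are illustrative):
job := realis.NewJob().
Environment("prod").
Role("vagrant").
Name("hello_world").
CPU(0.5).
RAM(64).
Disk(100).
IsService(true).
InstanceCount(2).
AddPorts(1)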
// Set Job Key environment.
func (j *AuroraJob) Environment(env string) Job {
// Set AuroraJob Key environment. Explicit changes to AuroraTask's job key are not needed
// because they share a pointer to the same JobKey.
func (j *AuroraJob) Environment(env string) *AuroraJob {
j.jobConfig.Key.Environment = env
return j
}
// Set Job Key Role.
func (j *AuroraJob) Role(role string) Job {
// Set AuroraJob Key Role.
func (j *AuroraJob) Role(role string) *AuroraJob {
j.jobConfig.Key.Role = role
//Will be deprecated
// Will be deprecated
identity := &aurora.Identity{User: role}
j.jobConfig.Owner = identity
j.jobConfig.TaskConfig.Owner = identity
return j
}
// Set Job Key Name.
func (j *AuroraJob) Name(name string) Job {
// Set AuroraJob Key Name.
func (j *AuroraJob) Name(name string) *AuroraJob {
j.jobConfig.Key.Name = name
return j
}
// Set the name of the executor the task will be configured to use.
func (j *AuroraJob) ExecutorName(name string) Job {
if j.jobConfig.TaskConfig.ExecutorConfig == nil {
j.jobConfig.TaskConfig.ExecutorConfig = aurora.NewExecutorConfig()
}
j.jobConfig.TaskConfig.ExecutorConfig.Name = name
return j
}
// Included as part of the entire serialized task handed to the scheduler.
func (j *AuroraJob) ExecutorData(data string) Job {
if j.jobConfig.TaskConfig.ExecutorConfig == nil {
j.jobConfig.TaskConfig.ExecutorConfig = aurora.NewExecutorConfig()
}
j.jobConfig.TaskConfig.ExecutorConfig.Data = data
return j
}
func (j *AuroraJob) CPU(cpus float64) Job {
*j.resources["cpu"].NumCpus = cpus
return j
}
func (j *AuroraJob) RAM(ram int64) Job {
*j.resources["ram"].RamMb = ram
return j
}
func (j *AuroraJob) Disk(disk int64) Job {
*j.resources["disk"].DiskMb = disk
return j
}
// How many failures to tolerate before giving up.
func (j *AuroraJob) MaxFailure(maxFail int32) Job {
j.jobConfig.TaskConfig.MaxTaskFailures = maxFail
return j
}
// How many instances of the job to run
func (j *AuroraJob) InstanceCount(instCount int32) Job {
func (j *AuroraJob) InstanceCount(instCount int32) *AuroraJob {
j.jobConfig.InstanceCount = instCount
return j
}
func (j *AuroraJob) CronSchedule(cron string) Job {
func (j *AuroraJob) CronSchedule(cron string) *AuroraJob {
j.jobConfig.CronSchedule = &cron
return j
}
func (j *AuroraJob) CronCollisionPolicy(policy aurora.CronCollisionPolicy) Job {
func (j *AuroraJob) CronCollisionPolicy(policy aurora.CronCollisionPolicy) *AuroraJob {
j.jobConfig.CronCollisionPolicy = policy
return j
}
@ -198,15 +90,9 @@ func (j *AuroraJob) GetInstanceCount() int32 {
return j.jobConfig.InstanceCount
}
// Restart the job's tasks if they fail
func (j *AuroraJob) IsService(isService bool) Job {
j.jobConfig.TaskConfig.IsService = isService
return j
}
// Get the current job configuration's key to use for some realis calls.
func (j *AuroraJob) JobKey() *aurora.JobKey {
return j.jobConfig.Key
func (j *AuroraJob) JobKey() aurora.JobKey {
return *j.jobConfig.Key
}
// Get the current job configuration to use for some realis calls.
@ -214,104 +100,90 @@ func (j *AuroraJob) JobConfig() *aurora.JobConfiguration {
return j.jobConfig
}
/*
AuroraTask specific API, see task.go for further documentation.
These functions are provided for the convenience of chaining API calls.
*/
func (j *AuroraJob) ExecutorName(name string) *AuroraJob {
j.task.ExecutorName(name)
return j
}
func (j *AuroraJob) ExecutorData(data string) *AuroraJob {
j.task.ExecutorData(data)
return j
}
func (j *AuroraJob) CPU(cpus float64) *AuroraJob {
j.task.CPU(cpus)
return j
}
func (j *AuroraJob) RAM(ram int64) *AuroraJob {
j.task.RAM(ram)
return j
}
func (j *AuroraJob) Disk(disk int64) *AuroraJob {
j.task.Disk(disk)
return j
}
func (j *AuroraJob) Tier(tier string) *AuroraJob {
j.task.Tier(tier)
return j
}
func (j *AuroraJob) MaxFailure(maxFail int32) *AuroraJob {
j.task.MaxFailure(maxFail)
return j
}
func (j *AuroraJob) IsService(isService bool) *AuroraJob {
j.task.IsService(isService)
return j
}
func (j *AuroraJob) TaskConfig() *aurora.TaskConfig {
return j.jobConfig.TaskConfig
return j.task.TaskConfig()
}
// Add a list of URIs with the same extract and cache configuration. Scheduler must have
// --enable_mesos_fetcher flag enabled. Currently there is no duplicate detection.
func (j *AuroraJob) AddURIs(extract bool, cache bool, values ...string) Job {
for _, value := range values {
j.jobConfig.TaskConfig.MesosFetcherUris[&aurora.MesosFetcherURI{
Value: value,
Extract: &extract,
Cache: &cache,
}] = true
}
func (j *AuroraJob) AddURIs(extract bool, cache bool, values ...string) *AuroraJob {
j.task.AddURIs(extract, cache, values...)
return j
}
// Adds a Mesos label to the job. Note that Aurora will add the
// prefix "org.apache.aurora.metadata." to the beginning of each key.
func (j *AuroraJob) AddLabel(key string, value string) Job {
j.jobConfig.TaskConfig.Metadata[&aurora.Metadata{Key: key, Value: value}] = true
func (j *AuroraJob) AddLabel(key string, value string) *AuroraJob {
j.task.AddLabel(key, value)
return j
}
// Add a named port to the job configuration. These are random ports as it's
// not currently possible to request specific ports using Aurora.
func (j *AuroraJob) AddNamedPorts(names ...string) Job {
j.portCount += len(names)
for _, name := range names {
j.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &name}] = true
}
func (j *AuroraJob) AddNamedPorts(names ...string) *AuroraJob {
j.task.AddNamedPorts(names...)
return j
}
// Adds a request for a number of ports to the job configuration. The names chosen for these ports
// will be org.apache.aurora.port.X, where X is the current port count for the job configuration
// starting at 0. These are random ports as it's not currently possible to request
// specific ports using Aurora.
func (j *AuroraJob) AddPorts(num int) Job {
start := j.portCount
j.portCount += num
for i := start; i < j.portCount; i++ {
portName := "org.apache.aurora.port." + strconv.Itoa(i)
j.jobConfig.TaskConfig.Resources[&aurora.Resource{NamedPort: &portName}] = true
}
func (j *AuroraJob) AddPorts(num int) *AuroraJob {
j.task.AddPorts(num)
return j
}
func (j *AuroraJob) AddValueConstraint(name string, negated bool, values ...string) *AuroraJob {
j.task.AddValueConstraint(name, negated, values...)
return j
}
// From Aurora Docs:
// Add a Value constraint
// name - Mesos slave attribute that the constraint is matched against.
// If negated = true, treat this as a 'not' - to avoid specific values.
// Values - list of values we look for in attribute name
func (j *AuroraJob) AddValueConstraint(name string, negated bool, values ...string) Job {
constraintValues := make(map[string]bool)
for _, value := range values {
constraintValues[value] = true
}
j.jobConfig.TaskConfig.Constraints[&aurora.Constraint{
Name: name,
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: negated,
Values: constraintValues,
},
Limit: nil,
},
}] = true
func (j *AuroraJob) AddLimitConstraint(name string, limit int32) *AuroraJob {
j.task.AddLimitConstraint(name, limit)
return j
}
// From Aurora Docs:
// A constraint that specifies the maximum number of active tasks on a host with
// a matching attribute that may be scheduled simultaneously.
func (j *AuroraJob) AddLimitConstraint(name string, limit int32) Job {
j.jobConfig.TaskConfig.Constraints[&aurora.Constraint{
Name: name,
Constraint: &aurora.TaskConstraint{
Value: nil,
Limit: &aurora.LimitConstraint{Limit: limit},
},
}] = true
func (j *AuroraJob) AddDedicatedConstraint(role, name string) *AuroraJob {
j.task.AddDedicatedConstraint(role, name)
return j
}
func (j *AuroraJob) AddDedicatedConstraint(role, name string) Job {
j.AddValueConstraint("dedicated", false, role+"/"+name)
return j
}
// Set a container for the job configuration to run.
func (j *AuroraJob) Container(container Container) Job {
j.jobConfig.TaskConfig.Container = container.Build()
func (j *AuroraJob) Container(container Container) *AuroraJob {
j.task.Container(container)
return j
}
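Container configuration chains the same way. A hedged sketch, assuming the Docker container builder from container.go (NewDockerContainer and its Image setter are assumptions, not shown in this diff):
job := realis.NewJob().
Name("docker_job").
Container(realis.NewDockerContainer().Image("nginx:latest"))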

232
vendor/github.com/paypal/gorealis/jobUpdate.go generated vendored Normal file
View file

@ -0,0 +1,232 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"time"
"git.apache.org/thrift.git/lib/go/thrift"
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
// Structure to collect all information required to create a job update.
type JobUpdate struct {
task *AuroraTask
request *aurora.JobUpdateRequest
}
// Create a default JobUpdate object with an empty task and no fields filled in.
func NewJobUpdate() *JobUpdate {
newTask := NewTask()
req := aurora.JobUpdateRequest{}
req.TaskConfig = newTask.TaskConfig()
req.Settings = newUpdateSettings()
return &JobUpdate{task: newTask, request: &req}
}
func JobUpdateFromAuroraTask(task *AuroraTask) *JobUpdate {
newTask := task.Clone()
req := aurora.JobUpdateRequest{}
req.TaskConfig = newTask.TaskConfig()
req.Settings = newUpdateSettings()
return &JobUpdate{task: newTask, request: &req}
}
func JobUpdateFromConfig(task *aurora.TaskConfig) *JobUpdate {
// Perform a deep copy to avoid unexpected behavior
newTask := TaskFromThrift(task)
req := aurora.JobUpdateRequest{}
req.TaskConfig = newTask.TaskConfig()
req.Settings = newUpdateSettings()
return &JobUpdate{task: newTask, request: &req}
}
// Set the instance count the job will have after the update.
func (j *JobUpdate) InstanceCount(inst int32) *JobUpdate {
j.request.InstanceCount = inst
return j
}
// Max number of instances being updated at any given moment.
func (j *JobUpdate) BatchSize(size int32) *JobUpdate {
j.request.Settings.UpdateGroupSize = size
return j
}
// Minimum amount of time a shard must remain in RUNNING state before it is considered a success.
func (j *JobUpdate) WatchTime(timeout time.Duration) *JobUpdate {
j.request.Settings.MinWaitInInstanceRunningMs = int32(timeout.Seconds() * 1000)
return j
}
// Wait for all instances in a group to be done before moving on.
func (j *JobUpdate) WaitForBatchCompletion(batchWait bool) *JobUpdate {
j.request.Settings.WaitForBatchCompletion = batchWait
return j
}
// Max number of instance failures to tolerate before marking instance as FAILED.
func (j *JobUpdate) MaxPerInstanceFailures(inst int32) *JobUpdate {
j.request.Settings.MaxPerInstanceFailures = inst
return j
}
// Max number of FAILED instances to tolerate before terminating the update.
func (j *JobUpdate) MaxFailedInstances(inst int32) *JobUpdate {
j.request.Settings.MaxFailedInstances = inst
return j
}
// When false, prevents automatic rollback of a failed update.
func (j *JobUpdate) RollbackOnFail(rollback bool) *JobUpdate {
j.request.Settings.RollbackOnFailure = rollback
return j
}
// Sets the interval at which pulses should be received by the job update before timing out.
func (j *JobUpdate) PulseIntervalTimeout(timeout time.Duration) *JobUpdate {
j.request.Settings.BlockIfNoPulsesAfterMs = thrift.Int32Ptr(int32(timeout.Seconds() * 1000))
return j
}
func newUpdateSettings() *aurora.JobUpdateSettings {
us := aurora.JobUpdateSettings{}
// Mirrors defaults set by Pystachio
us.UpdateOnlyTheseInstances = []*aurora.Range{}
us.UpdateGroupSize = 1
us.WaitForBatchCompletion = false
us.MinWaitInInstanceRunningMs = 45000
us.MaxPerInstanceFailures = 0
us.MaxFailedInstances = 0
us.RollbackOnFailure = true
return &us
}
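With these Pystachio-mirroring defaults in place, a caller only overrides what differs. A minimal sketch (sizes and durations are illustrative):
update := realis.NewJobUpdate().
Environment("prod").
Role("vagrant").
Name("hello_world").
CPU(0.25).
RAM(64).
Disk(100).
InstanceCount(3).
BatchSize(1).
WatchTime(45 * time.Second).
MaxFailedInstances(0).
RollbackOnFail(true)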
/*
These methods are provided for user convenience in order to chain
calls for configuration.
The APIs below are wrappers around modifying an AuroraTask instance.
See task.go for further documentation.
*/
func (j *JobUpdate) Environment(env string) *JobUpdate {
j.task.Environment(env)
return j
}
func (j *JobUpdate) Role(role string) *JobUpdate {
j.task.Role(role)
return j
}
func (j *JobUpdate) Name(name string) *JobUpdate {
j.task.Name(name)
return j
}
func (j *JobUpdate) ExecutorName(name string) *JobUpdate {
j.task.ExecutorName(name)
return j
}
func (j *JobUpdate) ExecutorData(data string) *JobUpdate {
j.task.ExecutorData(data)
return j
}
func (j *JobUpdate) CPU(cpus float64) *JobUpdate {
j.task.CPU(cpus)
return j
}
func (j *JobUpdate) RAM(ram int64) *JobUpdate {
j.task.RAM(ram)
return j
}
func (j *JobUpdate) Disk(disk int64) *JobUpdate {
j.task.Disk(disk)
return j
}
func (j *JobUpdate) Tier(tier string) *JobUpdate {
j.task.Tier(tier)
return j
}
func (j *JobUpdate) MaxFailure(maxFail int32) *JobUpdate {
j.task.MaxFailure(maxFail)
return j
}
func (j *JobUpdate) IsService(isService bool) *JobUpdate {
j.task.IsService(isService)
return j
}
func (j *JobUpdate) TaskConfig() *aurora.TaskConfig {
return j.task.TaskConfig()
}
func (j *JobUpdate) AddURIs(extract bool, cache bool, values ...string) *JobUpdate {
j.task.AddURIs(extract, cache, values...)
return j
}
func (j *JobUpdate) AddLabel(key string, value string) *JobUpdate {
j.task.AddLabel(key, value)
return j
}
func (j *JobUpdate) AddNamedPorts(names ...string) *JobUpdate {
j.task.AddNamedPorts(names...)
return j
}
func (j *JobUpdate) AddPorts(num int) *JobUpdate {
j.task.AddPorts(num)
return j
}
func (j *JobUpdate) AddValueConstraint(name string, negated bool, values ...string) *JobUpdate {
j.task.AddValueConstraint(name, negated, values...)
return j
}
func (j *JobUpdate) AddLimitConstraint(name string, limit int32) *JobUpdate {
j.task.AddLimitConstraint(name, limit)
return j
}
func (j *JobUpdate) AddDedicatedConstraint(role, name string) *JobUpdate {
j.task.AddDedicatedConstraint(role, name)
return j
}
func (j *JobUpdate) Container(container Container) *JobUpdate {
j.task.Container(container)
return j
}
func (j *JobUpdate) JobKey() aurora.JobKey {
return j.task.JobKey()
}

View file

@ -40,20 +40,20 @@ func (l *LevelLogger) EnableDebug(enable bool) {
func (l LevelLogger) DebugPrintf(format string, a ...interface{}) {
if l.debug {
l.Print("[DEBUG] ")
l.Printf(format, a)
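// a... spreads the slice into variadic arguments; passing a directly printed
// the slice as a single []interface{} value.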
l.Printf(format, a...)
}
}
func (l LevelLogger) DebugPrint(a ...interface{}) {
if l.debug {
l.Print("[DEBUG] ")
l.Print(a)
l.Print(a...)
}
}
func (l LevelLogger) DebugPrintln(a ...interface{}) {
if l.debug {
l.Print("[DEBUG] ")
l.Println(a)
l.Println(a...)
}
}

View file

@ -19,87 +19,94 @@ import (
"time"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/paypal/gorealis/response"
"github.com/pkg/errors"
)
const (
UpdateFailed = "update failed"
RolledBack = "update rolled back"
Timeout = "timeout"
Timedout = "timeout"
)
type Monitor struct {
Client Realis
}
// Polls the scheduler at the given interval to see whether the update has succeeded
func (m *Monitor) JobUpdate(updateKey aurora.JobUpdateKey, interval int, timeout int) (bool, error) {
func (c *Client) JobUpdateMonitor(updateKey aurora.JobUpdateKey, interval, timeout time.Duration) (bool, error) {
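// Compatibility shim (an inference from this refactor): callers of the old
// int-based API passed bare seconds, so sub-second values are scaled up to seconds.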
if interval < 1*time.Second {
interval = interval * time.Second
}
if timeout < 1*time.Second {
timeout = timeout * time.Second
}
updateQ := aurora.JobUpdateQuery{
Key: &updateKey,
Limit: 1,
}
ticker := time.NewTicker(time.Second * time.Duration(interval))
ticker := time.NewTicker(interval)
defer ticker.Stop()
timer := time.NewTimer(time.Second * time.Duration(timeout))
timer := time.NewTimer(timeout)
defer timer.Stop()
var cliErr error
var respDetail *aurora.Response
for {
select {
case <-ticker.C:
respDetail, cliErr = m.Client.JobUpdateDetails(updateQ)
updateDetail, cliErr := c.JobUpdateDetails(updateQ)
if cliErr != nil {
return false, cliErr
}
updateDetail := response.JobUpdateDetails(respDetail)
if len(updateDetail) == 0 {
m.Client.RealisConfig().logger.Println("No update found")
c.RealisConfig().logger.Println("No update found")
return false, errors.New("No update found for " + updateKey.String())
}
status := updateDetail[0].Update.Summary.State.Status
if _, ok := aurora.ACTIVE_JOB_UPDATE_STATES[status]; !ok {
// ActiveJobUpdateStates is the Thrift set converted to a Go map for quick lookup
if _, ok := ActiveJobUpdateStates[status]; !ok {
// ROLLED_FORWARD is the only state in which an update has been successfully applied;
// if we encounter an inactive state that is not ROLLED_FORWARD, the update failed
switch status {
case aurora.JobUpdateStatus_ROLLED_FORWARD:
m.Client.RealisConfig().logger.Println("Update succeeded")
c.RealisConfig().logger.Println("Update succeeded")
return true, nil
case aurora.JobUpdateStatus_FAILED:
m.Client.RealisConfig().logger.Println("Update failed")
c.RealisConfig().logger.Println("Update failed")
return false, errors.New(UpdateFailed)
case aurora.JobUpdateStatus_ROLLED_BACK:
m.Client.RealisConfig().logger.Println("rolled back")
c.RealisConfig().logger.Println("rolled back")
return false, errors.New(RolledBack)
default:
return false, nil
}
}
case <-timer.C:
return false, errors.New(Timeout)
return false, errors.New(Timedout)
}
}
}
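A caller typically pairs this monitor with CreateService or StartJobUpdate; a minimal sketch following the shapes used in the tests later in this diff (given a configured *JobUpdate named update):
result, err := r.CreateService(update)
if err != nil {
log.Fatal(err)
}
ok, err := r.JobUpdateMonitor(*result.GetKey(), 5*time.Second, 4*time.Minute)
if !ok || err != nil {
// The update may already be in a terminal state, so the abort error is ignored.
_ = r.AbortJobUpdate(*result.GetKey(), "monitor timed out")
}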
// Monitor a Job until all instances enter one of the LIVE_STATES
func (m *Monitor) Instances(key *aurora.JobKey, instances int32, interval, timeout int) (bool, error) {
return m.ScheduleStatus(key, instances, aurora.LIVE_STATES, interval, timeout)
// Monitor an AuroraJob until all instances enter one of the LiveStates
func (c *Client) InstancesMonitor(key aurora.JobKey, instances int32, interval, timeout time.Duration) (bool, error) {
return c.ScheduleStatusMonitor(key, instances, aurora.LIVE_STATES, interval, timeout)
}
// Monitor a Job until all instances enter a desired status.
// Monitor an AuroraJob until all instances enter a desired status.
// Defaults sets of desired statuses provided by the thrift API include:
// ACTIVE_STATES, SLAVE_ASSIGNED_STATES, LIVE_STATES, and TERMINAL_STATES
func (m *Monitor) ScheduleStatus(key *aurora.JobKey, instanceCount int32, desiredStatuses map[aurora.ScheduleStatus]bool, interval, timeout int) (bool, error) {
// ActiveStates, SlaveAssignedStates, LiveStates, and TerminalStates
func (c *Client) ScheduleStatusMonitor(key aurora.JobKey, instanceCount int32, desiredStatuses []aurora.ScheduleStatus, interval, timeout time.Duration) (bool, error) {
if interval < 1*time.Second {
interval = interval * time.Second
}
ticker := time.NewTicker(time.Second * time.Duration(interval))
if timeout < 1*time.Second {
timeout = timeout * time.Second
}
ticker := time.NewTicker(interval)
defer ticker.Stop()
timer := time.NewTimer(time.Second * time.Duration(timeout))
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
@ -107,7 +114,7 @@ func (m *Monitor) ScheduleStatus(key *aurora.JobKey, instanceCount int32, desire
case <-ticker.C:
// Query Aurora for the state of the job key every interval
instCount, cliErr := m.Client.GetInstanceIds(key, desiredStatuses)
instCount, cliErr := c.GetInstanceIds(key, desiredStatuses)
if cliErr != nil {
return false, errors.Wrap(cliErr, "Unable to communicate with Aurora")
}
@ -117,14 +124,21 @@ func (m *Monitor) ScheduleStatus(key *aurora.JobKey, instanceCount int32, desire
case <-timer.C:
// If the timer runs out, return a timeout error to user
return false, errors.New(Timeout)
return false, errors.New(Timedout)
}
}
}
// Monitor host status until all hosts match the status provided. Returns a map where the value is true if the host
// was in one of the desired mode(s) as of the time the monitor exited, and false otherwise.
func (m *Monitor) HostMaintenance(hosts []string, modes []aurora.MaintenanceMode, interval, timeout int) (map[string]bool, error) {
func (c *Client) HostMaintenanceMonitor(hosts []string, modes []aurora.MaintenanceMode, interval, timeout time.Duration) (map[string]bool, error) {
if interval < 1*time.Second {
interval = interval * time.Second
}
if timeout < 1*time.Second {
timeout = timeout * time.Second
}
// Transform modes to monitor for into a set for easy lookup
desiredMode := make(map[aurora.MaintenanceMode]struct{})
@ -142,16 +156,16 @@ func (m *Monitor) HostMaintenance(hosts []string, modes []aurora.MaintenanceMode
hostResult := make(map[string]bool)
ticker := time.NewTicker(time.Second * time.Duration(interval))
ticker := time.NewTicker(interval)
defer ticker.Stop()
timer := time.NewTimer(time.Second * time.Duration(timeout))
timer := time.NewTimer(timeout)
defer timer.Stop()
for {
select {
case <-ticker.C:
// Client call has multiple retries internally
_, result, err := m.Client.MaintenanceStatus(hosts...)
result, err := c.MaintenanceStatus(hosts...)
if err != nil {
// Error is either a payload error or a severe connection error
for host := range remainingHosts {
@ -160,7 +174,7 @@ func (m *Monitor) HostMaintenance(hosts []string, modes []aurora.MaintenanceMode
return hostResult, errors.Wrap(err, "client error in monitor")
}
for status := range result.GetStatuses() {
for _, status := range result.GetStatuses() {
if _, ok := desiredMode[status.GetMode()]; ok {
hostResult[status.GetHost()] = true
@ -177,7 +191,7 @@ func (m *Monitor) HostMaintenance(hosts []string, modes []aurora.MaintenanceMode
hostResult[host] = false
}
return hostResult, errors.New(Timeout)
return hostResult, errors.New(Timedout)
}
}
}
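The maintenance monitor follows the same ticker/timer pattern; a minimal sketch of draining a host and waiting for the transition (mirroring the tests below):
hosts := []string{"localhost"}
if _, err := r.DrainHosts(hosts...); err != nil {
log.Fatal(err)
}
// hostResults marks each host true once it reached DRAINED or DRAINING.
hostResults, err := r.HostMaintenanceMonitor(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1*time.Second,
50*time.Second)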

File diff suppressed because it is too large

View file

@ -22,24 +22,22 @@ import (
"testing"
"time"
"github.com/paypal/gorealis"
realis "github.com/paypal/gorealis"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/paypal/gorealis/response"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
var r realis.Realis
var monitor *realis.Monitor
var r *realis.Client
var thermosPayload []byte
func TestMain(m *testing.M) {
var err error
// New configuration to connect to docker container
r, err = realis.NewRealisClient(realis.SchedulerUrl("http://192.168.33.7:8081"),
r, err = realis.NewClient(realis.SchedulerUrl("http://192.168.33.7:8081"),
realis.BasicAuth("aurora", "secret"),
realis.TimeoutMS(20000))
realis.Timeout(20*time.Second))
if err != nil {
fmt.Println("Please run docker-compose up -d before running test suite")
@ -48,9 +46,6 @@ func TestMain(m *testing.M) {
defer r.Close()
// Create monitor
monitor = &realis.Monitor{Client: r}
thermosPayload, err = ioutil.ReadFile("examples/thermos_payload.json")
if err != nil {
fmt.Println("Error reading thermos payload file: ", err)
@ -68,8 +63,8 @@ func TestNonExistentEndpoint(t *testing.T) {
Jitter: 0.1}
// Attempt to connect to a bad endpoint
r, err := realis.NewRealisClient(realis.SchedulerUrl("http://192.168.33.7:8081/doesntexist/"),
realis.TimeoutMS(200),
r, err := realis.NewClient(realis.SchedulerUrl("http://doesntexist.com:8081/api"),
realis.Timeout(200*time.Millisecond),
realis.BackOff(backoff),
)
defer r.Close()
@ -90,13 +85,38 @@ func TestNonExistentEndpoint(t *testing.T) {
}
func TestBadCredentials(t *testing.T) {
r, err := realis.NewClient(realis.SchedulerUrl("http://192.168.33.7:8081"),
realis.BasicAuth("incorrect", "password"),
realis.Debug())
defer r.Close()
assert.NoError(t, err)
job := realis.NewJob().
Environment("prod").
Role("vagrant").
Name("create_thermos_job_test").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData(string(thermosPayload)).
CPU(.5).
RAM(64).
Disk(100).
IsService(true).
InstanceCount(2).
AddPorts(1)
assert.Error(t, r.CreateJob(job))
}
func TestThriftBinary(t *testing.T) {
r, err := realis.NewRealisClient(realis.SchedulerUrl("http://192.168.33.7:8081"),
r, err := realis.NewClient(realis.SchedulerUrl("http://192.168.33.7:8081"),
realis.BasicAuth("aurora", "secret"),
realis.TimeoutMS(20000),
realis.Timeout(20*time.Second),
realis.ThriftBinary())
assert.NoError(t, err)
defer r.Close()
role := "all"
taskQ := &aurora.TaskQuery{
@ -108,14 +128,12 @@ func TestThriftBinary(t *testing.T) {
assert.NoError(t, err)
r.Close()
}
func TestThriftJSON(t *testing.T) {
r, err := realis.NewRealisClient(realis.SchedulerUrl("http://192.168.33.7:8081"),
r, err := realis.NewClient(realis.SchedulerUrl("http://192.168.33.7:8081"),
realis.BasicAuth("aurora", "secret"),
realis.TimeoutMS(20000),
realis.Timeout(20*time.Second),
realis.ThriftJSON())
assert.NoError(t, err)
@ -135,7 +153,7 @@ func TestThriftJSON(t *testing.T) {
}
func TestNoopLogger(t *testing.T) {
r, err := realis.NewRealisClient(realis.SchedulerUrl("http://192.168.33.7:8081"),
r, err := realis.NewClient(realis.SchedulerUrl("http://192.168.33.7:8081"),
realis.BasicAuth("aurora", "secret"),
realis.SetLogger(realis.NoopLogger{}))
@ -160,6 +178,34 @@ func TestLeaderFromZK(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, "http://192.168.33.7:8081", url)
}
func TestInvalidAuroraURL(t *testing.T) {
for _, url := range []string{
"http://doesntexist.com:8081/apitest",
"test://doesntexist.com:8081",
"https://doesntexist.com:8081/testing/api",
} {
r, err := realis.NewClient(realis.SchedulerUrl(url))
assert.Error(t, err)
assert.Nil(t, r)
}
}
func TestValidAuroraURL(t *testing.T) {
for _, url := range []string{
"http://domain.com:8081/api",
"https://domain.com:8081/api",
"domain.com:8081",
"domain.com",
"192.168.33.7",
"192.168.33.7:8081",
"192.168.33.7:8081/api",
} {
r, err := realis.NewClient(realis.SchedulerUrl(url))
assert.NoError(t, err)
assert.NotNil(t, r)
}
}
func TestRealisClient_ReestablishConn(t *testing.T) {
@ -193,23 +239,21 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
InstanceCount(2).
AddPorts(1)
resp, err := r.CreateJob(job)
err := r.CreateJob(job)
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
// Test Instances Monitor
success, err := monitor.Instances(job.JobKey(), job.GetInstanceCount(), 1, 50)
success, err := r.InstancesMonitor(job.JobKey(), job.GetInstanceCount(), 1*time.Second, 50*time.Second)
assert.True(t, success)
assert.NoError(t, err)
//Fetch all Jobs
_, result, err := r.GetJobs(role)
// Fetch all Jobs
result, err := r.GetJobs(role)
fmt.Printf("GetJobs length: %+v \n", len(result.Configs))
assert.Len(t, result.Configs, 1)
assert.NoError(t, err)
// Test asking the scheduler to perform a Snpshot
// Test asking the scheduler to perform a Snapshot
t.Run("TestRealisClient_Snapshot", func(t *testing.T) {
err := r.Snapshot()
assert.NoError(t, err)
@ -223,12 +267,10 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
// Tasks must exist for them to be killed
t.Run("TestRealisClient_KillJob_Thermos", func(t *testing.T) {
resp, err := r.KillJob(job.JobKey())
err := r.KillJob(job.JobKey())
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
success, err := monitor.Instances(job.JobKey(), 0, 1, 50)
success, err := r.InstancesMonitor(job.JobKey(), 0, 1*time.Second, 50*time.Second)
assert.True(t, success)
assert.NoError(t, err)
})
@ -249,9 +291,8 @@ func TestRealisClient_CreateJob_ExecutorDoesNotExist(t *testing.T) {
Disk(10).
InstanceCount(1)
resp, err := r.CreateJob(job)
err := r.CreateJob(job)
assert.Error(t, err)
assert.Equal(t, aurora.ResponseCode_INVALID_REQUEST, resp.GetResponseCode())
}
// Test configuring an executor that doesn't exist for CreateJob API
@ -273,9 +314,8 @@ func TestRealisClient_GetPendingReason(t *testing.T) {
Disk(100).
InstanceCount(1)
resp, err := r.CreateJob(job)
err := r.CreateJob(job)
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
taskQ := &aurora.TaskQuery{
Role: &role,
@ -287,7 +327,7 @@ func TestRealisClient_GetPendingReason(t *testing.T) {
assert.NoError(t, err)
assert.Len(t, reasons, 1)
resp, err = r.KillJob(job.JobKey())
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
}
@ -295,7 +335,7 @@ func TestRealisClient_CreateService_WithPulse_Thermos(t *testing.T) {
fmt.Println("Creating service")
role := "vagrant"
job := realis.NewJob().
job := realis.NewJobUpdate().
Environment("prod").
Role(role).
Name("create_thermos_job_test").
@ -305,72 +345,61 @@ func TestRealisClient_CreateService_WithPulse_Thermos(t *testing.T) {
RAM(64).
Disk(100).
IsService(true).
InstanceCount(1).
InstanceCount(2).
AddPorts(1).
AddLabel("currentTime", time.Now().String())
AddLabel("currentTime", time.Now().String()).
PulseIntervalTimeout(30 * time.Millisecond).
BatchSize(1).WaitForBatchCompletion(true)
pulse := int32(30)
timeout := 300
settings := realis.NewUpdateSettings()
settings.BlockIfNoPulsesAfterMs = &pulse
settings.UpdateGroupSize = 1
settings.WaitForBatchCompletion = true
job.InstanceCount(2)
resp, result, err := r.CreateService(job, settings)
result, err := r.CreateService(job)
fmt.Println(result.String())
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
updateQ := aurora.JobUpdateQuery{
Key: result.GetKey(),
Limit: 1,
}
start := time.Now()
var updateDetails []*aurora.JobUpdateDetails
for i := 0; i*int(pulse) <= timeout; i++ {
fmt.Println("sending PulseJobUpdate....")
resp, err = r.PulseJobUpdate(result.GetKey())
assert.NotNil(t, resp)
assert.Nil(t, err)
pulseStatus, err := r.PulseJobUpdate(result.GetKey())
respDetail, err := r.JobUpdateDetails(updateQ)
assert.Nil(t, err)
updateDetail := response.JobUpdateDetails(respDetail)
if len(updateDetail) == 0 {
fmt.Println("No update found")
assert.NotEqual(t, len(updateDetail), 0)
if pulseStatus != aurora.JobUpdatePulseStatus_OK && pulseStatus != aurora.JobUpdatePulseStatus_FINISHED {
assert.Fail(t, "received unexpected pulse update status")
}
status := updateDetail[0].Update.Summary.State.Status
if _, ok := aurora.ACTIVE_JOB_UPDATE_STATES[status]; !ok {
updateDetails, err = r.JobUpdateDetails(updateQ)
assert.Nil(t, err)
assert.Equal(t, len(updateDetails), 1, "No update matching query found")
status := updateDetails[0].Update.Summary.State.Status
if _, ok := realis.ActiveJobUpdateStates[status]; !ok {
// ROLLED_FORWARD is the only state in which an update has been successfully applied;
// if we encounter an inactive state that is not ROLLED_FORWARD, the update failed
if status == aurora.JobUpdateStatus_ROLLED_FORWARD {
fmt.Println("Update succeded")
fmt.Println("Update succeeded")
break
} else {
fmt.Println("Update failed")
break
}
}
fmt.Println("Polling, update still active...")
time.Sleep(time.Duration(pulse) * time.Second)
}
end := time.Now()
fmt.Printf("Update call took %d ns\n", (end.UnixNano() - start.UnixNano()))
t.Run("TestRealisClient_KillJob_Thermos", func(t *testing.T) {
start := time.Now()
resp, err := r.KillJob(job.JobKey())
end := time.Now()
err := r.AbortJobUpdate(*updateDetails[0].GetUpdate().GetSummary().GetKey(), "")
assert.NoError(t, err)
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
fmt.Printf("Kill call took %d ns\n", (end.UnixNano() - start.UnixNano()))
})
}
@ -379,7 +408,7 @@ func TestRealisClient_CreateService_WithPulse_Thermos(t *testing.T) {
func TestRealisClient_CreateService(t *testing.T) {
// Create a single job
job := realis.NewJob().
job := realis.NewJobUpdate().
Environment("prod").
Role("vagrant").
Name("create_service_test").
@ -389,25 +418,23 @@ func TestRealisClient_CreateService(t *testing.T) {
RAM(4).
Disk(10).
InstanceCount(3).
IsService(true)
WatchTime(20 * time.Second).
IsService(true).
BatchSize(2)
settings := realis.NewUpdateSettings()
settings.UpdateGroupSize = 2
job.InstanceCount(3)
resp, result, err := r.CreateService(job, settings)
result, err := r.CreateService(job)
assert.NoError(t, err)
assert.NotNil(t, result)
assert.Equal(t, aurora.ResponseCode_OK, resp.GetResponseCode())
var ok bool
var mErr error
if ok, mErr = monitor.JobUpdate(*result.GetKey(), 5, 180); !ok || mErr != nil {
if ok, mErr = r.JobUpdateMonitor(*result.GetKey(), 5*time.Second, 4*time.Minute); !ok || mErr != nil {
// Update may already be in a terminal state so don't check for error
_, err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out.")
err := r.AbortJobUpdate(*result.GetKey(), "Monitor timed out.")
_, err = r.KillJob(job.JobKey())
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
}
@ -416,7 +443,7 @@ func TestRealisClient_CreateService(t *testing.T) {
assert.NoError(t, mErr)
// Kill task test task after confirming it came up fine
_, err = r.KillJob(job.JobKey())
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
}
@ -425,7 +452,7 @@ func TestRealisClient_CreateService(t *testing.T) {
func TestRealisClient_CreateService_ExecutorDoesNotExist(t *testing.T) {
// Create a single job
job := realis.NewJob().
jobUpdate := realis.NewJobUpdate().
Environment("prod").
Role("vagrant").
Name("executordoesntexist").
@ -434,15 +461,12 @@ func TestRealisClient_CreateService_ExecutorDoesNotExist(t *testing.T) {
CPU(.25).
RAM(4).
Disk(10).
InstanceCount(1)
InstanceCount(3)
settings := realis.NewUpdateSettings()
job.InstanceCount(3)
resp, result, err := r.CreateService(job, settings)
result, err := r.CreateService(jobUpdate)
assert.Error(t, err)
assert.Nil(t, result)
assert.Equal(t, aurora.ResponseCode_INVALID_REQUEST, resp.GetResponseCode())
}
func TestRealisClient_ScheduleCronJob_Thermos(t *testing.T) {
@ -465,87 +489,66 @@ func TestRealisClient_ScheduleCronJob_Thermos(t *testing.T) {
CronSchedule("* * * * *").
IsService(false)
resp, err := r.ScheduleCronJob(job)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
err = r.ScheduleCronJob(job)
assert.NoError(t, err)
t.Run("TestRealisClient_StartCronJob_Thermos", func(t *testing.T) {
start := time.Now()
resp, err := r.StartCronJob(job.JobKey())
end := time.Now()
err := r.StartCronJob(job.JobKey())
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
fmt.Printf("Schedule cron call took %d ns\n", (end.UnixNano() - start.UnixNano()))
})
t.Run("TestRealisClient_DeschedulerCronJob_Thermos", func(t *testing.T) {
start := time.Now()
resp, err := r.DescheduleCronJob(job.JobKey())
end := time.Now()
err := r.DescheduleCronJob(job.JobKey())
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
fmt.Printf("Deschedule cron call took %d ns\n", (end.UnixNano() - start.UnixNano()))
})
}
func TestRealisClient_StartMaintenance(t *testing.T) {
hosts := []string{"localhost"}
_, _, err := r.StartMaintenance(hosts...)
if err != nil {
fmt.Printf("error: %+v\n", err.Error())
os.Exit(1)
}
_, err := r.StartMaintenance(hosts...)
assert.NoError(t, err)
// Monitor change to SCHEDULED mode
hostResults, err := monitor.HostMaintenance(
hostResults, err := r.HostMaintenanceMonitor(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_SCHEDULED},
1,
50)
1*time.Second,
50*time.Second)
assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
assert.NoError(t, err)
_, _, err = r.EndMaintenance(hosts...)
_, err = r.EndMaintenance(hosts...)
assert.NoError(t, err)
// Monitor change back to NONE mode
_, err = monitor.HostMaintenance(
_, err = r.HostMaintenanceMonitor(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
5,
10)
5*time.Second,
10*time.Second)
assert.NoError(t, err)
}
func TestRealisClient_DrainHosts(t *testing.T) {
hosts := []string{"localhost"}
_, _, err := r.DrainHosts(hosts...)
if err != nil {
fmt.Printf("error: %+v\n", err.Error())
os.Exit(1)
}
_, err := r.DrainHosts(hosts...)
assert.NoError(t, err)
// Monitor change to DRAINING and DRAINED mode
hostResults, err := monitor.HostMaintenance(
hostResults, err := r.HostMaintenanceMonitor(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1,
50)
1*time.Second,
50*time.Second)
assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
assert.NoError(t, err)
t.Run("TestRealisClient_MonitorNontransitioned", func(t *testing.T) {
// Monitor change to DRAINING and DRAINED mode
hostResults, err := monitor.HostMaintenance(
hostResults, err := r.HostMaintenanceMonitor(
append(hosts, "IMAGINARY_HOST"),
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1,
1)
1*time.Second,
1*time.Second)
// Assert monitor returned an error that was not nil, and also a list of the non-transitioned hosts
assert.Error(t, err)
@ -553,15 +556,15 @@ func TestRealisClient_DrainHosts(t *testing.T) {
})
t.Run("TestRealisClient_EndMaintenance", func(t *testing.T) {
_, _, err := r.EndMaintenance(hosts...)
_, err := r.EndMaintenance(hosts...)
assert.NoError(t, err)
// Monitor change back to NONE mode
_, err = monitor.HostMaintenance(
_, err = r.HostMaintenanceMonitor(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
5,
10)
5*time.Second,
10*time.Second)
assert.NoError(t, err)
})
@ -578,23 +581,23 @@ func TestRealisClient_SLADrainHosts(t *testing.T) {
}
// Monitor change to DRAINING and DRAINED mode
hostResults, err := monitor.HostMaintenance(
hostResults, err := r.HostMaintenanceMonitor(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED, aurora.MaintenanceMode_DRAINING},
1,
50)
1*time.Second,
50*time.Second)
assert.Equal(t, map[string]bool{"localhost": true}, hostResults)
assert.NoError(t, err)
_, _, err = r.EndMaintenance(hosts...)
_, err = r.EndMaintenance(hosts...)
assert.NoError(t, err)
// Monitor change to DRAINING and DRAINED mode
_, err = monitor.HostMaintenance(
_, err = r.HostMaintenanceMonitor(
hosts,
[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
5,
10)
5*time.Second,
10*time.Second)
assert.NoError(t, err)
}
@ -613,11 +616,9 @@ func TestRealisClient_SessionThreadSafety(t *testing.T) {
Disk(10).
InstanceCount(1000) // Impossible amount to go live in any sane machine
resp, err := r.CreateJob(job)
err := r.CreateJob(job)
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
wg := sync.WaitGroup{}
for i := 0; i < 20; i++ {
@ -628,15 +629,13 @@ func TestRealisClient_SessionThreadSafety(t *testing.T) {
defer wg.Done()
// Test Schedule status monitor for terminal state and timing out after 30 seconds
success, err := monitor.ScheduleStatus(job.JobKey(), job.GetInstanceCount(), aurora.LIVE_STATES, 1, 30)
success, err := r.ScheduleStatusMonitor(job.JobKey(), job.GetInstanceCount(), aurora.LIVE_STATES, 1, 30)
assert.False(t, success)
assert.Error(t, err)
resp, err := r.KillJob(job.JobKey())
err = r.KillJob(job.JobKey())
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
}()
}
@ -648,19 +647,15 @@ func TestRealisClient_SetQuota(t *testing.T) {
var cpu = 3.5
var ram int64 = 20480
var disk int64 = 10240
resp, err := r.SetQuota("vagrant", &cpu, &ram, &disk)
err := r.SetQuota("vagrant", &cpu, &ram, &disk)
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
t.Run("TestRealisClient_GetQuota", func(t *testing.T) {
// Test GetQuota based on previously set values
var result *aurora.GetQuotaResult_
resp, err = r.GetQuota("vagrant")
if resp.GetResult_() != nil {
result = resp.GetResult_().GetQuotaResult_
}
quotaResult, err := r.GetQuota("vagrant")
assert.NoError(t, err)
assert.Equal(t, aurora.ResponseCode_OK, resp.ResponseCode)
for res := range result.Quota.GetResources() {
for _, res := range quotaResult.GetQuota().GetResources() {
switch true {
case res.DiskMb != nil:
assert.Equal(t, disk, *res.DiskMb)

View file

@ -17,7 +17,6 @@ package response
import (
"bytes"
"errors"
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
@ -39,18 +38,6 @@ func JobUpdateSummaries(resp *aurora.Response) []*aurora.JobUpdateSummary {
return resp.GetResult_().GetGetJobUpdateSummariesResult_().GetUpdateSummaries()
}
// Deprecated: Replaced by checks done inside of thriftCallHelper
func ResponseCodeCheck(resp *aurora.Response) (*aurora.Response, error) {
if resp == nil {
return resp, errors.New("Response is nil")
}
if resp.GetResponseCode() != aurora.ResponseCode_OK {
return resp, errors.New(CombineMessage(resp))
}
return resp, nil
}
// Based on aurora client: src/main/python/apache/aurora/client/base.py
func CombineMessage(resp *aurora.Response) string {
var buffer bytes.Buffer

View file

@ -17,7 +17,10 @@ package realis
import (
"io"
"math/rand"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"git.apache.org/thrift.git/lib/go/thrift"
@ -116,12 +119,12 @@ func ExponentialBackoff(backoff Backoff, logger Logger, condition ConditionFunc)
type auroraThriftCall func() (resp *aurora.Response, err error)
// Duplicates the functionality of ExponentialBackoff but is specifically targeted towards Thrift calls.
func (r *realisClient) thriftCallWithRetries(thriftCall auroraThriftCall) (*aurora.Response, error) {
func (c *Client) thriftCallWithRetries(thriftCall auroraThriftCall) (*aurora.Response, error) {
var resp *aurora.Response
var clientErr error
var curStep int
backoff := r.config.backoff
backoff := c.config.backoff
duration := backoff.Duration
for curStep = 0; curStep < backoff.Steps; curStep++ {
@ -133,7 +136,7 @@ func (r *realisClient) thriftCallWithRetries(thriftCall auroraThriftCall) (*auro
adjusted = Jitter(duration, backoff.Jitter)
}
r.logger.Printf("A retriable error occurred during thrift call, backing off for %v before retry %v\n", adjusted, curStep)
c.logger.Printf("A retriable error occurred during thrift call, backing off for %v before retry %v\n", adjusted, curStep)
time.Sleep(adjusted)
duration = time.Duration(float64(duration) * backoff.Factor)
@ -143,12 +146,12 @@ func (r *realisClient) thriftCallWithRetries(thriftCall auroraThriftCall) (*auro
// The call is wrapped in an anonymous function so the deferred unlock gets its own
// short-lived scope and still runs if thriftCall panics.
func() {
r.lock.Lock()
defer r.lock.Unlock()
c.lock.Lock()
defer c.lock.Unlock()
resp, clientErr = thriftCall()
r.logger.DebugPrintf("Aurora Thrift Call ended resp: %v clientErr: %v\n", resp, clientErr)
c.logger.DebugPrintf("Aurora Thrift Call ended resp: %v clientErr: %v\n", resp, clientErr)
}()
// Check if our thrift call is returning an error. This is a retriable event as we don't know
@ -156,12 +159,18 @@ func (r *realisClient) thriftCallWithRetries(thriftCall auroraThriftCall) (*auro
if clientErr != nil {
// Print out the error to the user
r.logger.Printf("Client Error: %v\n", clientErr)
c.logger.Printf("Client Error: %v\n", clientErr)
// Determine if error is a temporary URL error by going up the stack
e, ok := clientErr.(thrift.TTransportException)
if ok {
r.logger.DebugPrint("Encountered a transport exception")
c.logger.DebugPrint("Encountered a transport exception")
// TODO(rdelvalle): Figure out a better way to obtain the error code as this is a very brittle solution
// 401 Unauthorized means the wrong username and password were provided
if strings.Contains(e.Error(), strconv.Itoa(http.StatusUnauthorized)) {
return nil, errors.Wrap(clientErr, "wrong username or password provided")
}
e, ok := e.Err().(*url.Error)
if ok {
@ -176,7 +185,7 @@ func (r *realisClient) thriftCallWithRetries(thriftCall auroraThriftCall) (*auro
// In the future, reestablish connection should be able to check if it is actually possible
// to make a thrift call to Aurora. For now, a reconnect should always lead to a retry.
r.ReestablishConn()
c.ReestablishConn()
} else {
@ -195,31 +204,31 @@ func (r *realisClient) thriftCallWithRetries(thriftCall auroraThriftCall) (*auro
// If the response code is transient, continue retrying
case aurora.ResponseCode_ERROR_TRANSIENT:
r.logger.Println("Aurora replied with Transient error code, retrying")
c.logger.Println("Aurora replied with Transient error code, retrying")
continue
// Failure scenarios, these indicate a bad payload or a bad config. Stop retrying.
// Failure scenarios, these indicate a bad payload or a bad clientConfig. Stop retrying.
case aurora.ResponseCode_INVALID_REQUEST,
aurora.ResponseCode_ERROR,
aurora.ResponseCode_AUTH_FAILED,
aurora.ResponseCode_JOB_UPDATING_ERROR:
r.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
c.logger.Printf("Terminal Response Code %v from Aurora, won't retry\n", resp.GetResponseCode().String())
return resp, errors.New(response.CombineMessage(resp))
// The only case that should fall through to here is a WARNING response code.
// It is currently not used as a response in the scheduler so it is unknown how to handle it.
default:
r.logger.DebugPrintf("unhandled response code %v received from Aurora\n", responseCode)
c.logger.DebugPrintf("unhandled response code %v received from Aurora\n", responseCode)
return nil, errors.Errorf("unhandled response code from Aurora %v\n", responseCode.String())
}
}
}
r.logger.DebugPrintf("it took %v retries to complete this operation\n", curStep)
c.logger.DebugPrintf("it took %v retries to complete this operation\n", curStep)
if curStep > 1 {
r.config.logger.Printf("retried this thrift call %d time(s)", curStep)
c.config.logger.Printf("retried this thrift call %d time(s)", curStep)
}
// Provide more information to the user wherever possible.

View file

@ -1,4 +1,4 @@
#!/bin/bash
# Since we run our docker compose setup in bridge mode to be able to run on macOS, we have to launch a Docker container within the bridge network in order to avoid any routing issues.
docker run -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.10.3-stretch go test -v github.com/paypal/gorealis
docker run -t -v $(pwd):/go/src/github.com/paypal/gorealis --network gorealis_aurora_cluster golang:1.11.3-stretch go test -v github.com/paypal/gorealis

367
vendor/github.com/paypal/gorealis/task.go generated vendored Normal file
View file

@ -0,0 +1,367 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"strconv"
"git.apache.org/thrift.git/lib/go/thrift"
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
type ResourceType int
const (
CPU ResourceType = iota
RAM
DISK
)
const (
dedicated = "dedicated"
portPrefix = "org.apache.aurora.port."
)
type AuroraTask struct {
task *aurora.TaskConfig
resources map[ResourceType]*aurora.Resource
portCount int
}
func NewTask() *AuroraTask {
numCpus := &aurora.Resource{}
ramMb := &aurora.Resource{}
diskMb := &aurora.Resource{}
numCpus.NumCpus = new(float64)
ramMb.RamMb = new(int64)
diskMb.DiskMb = new(int64)
resources := make(map[ResourceType]*aurora.Resource)
resources[CPU] = numCpus
resources[RAM] = ramMb
resources[DISK] = diskMb
return &AuroraTask{task: &aurora.TaskConfig{
Job: &aurora.JobKey{},
MesosFetcherUris: make([]*aurora.MesosFetcherURI, 0),
Metadata: make([]*aurora.Metadata, 0),
Constraints: make([]*aurora.Constraint, 0),
// Container is a Union so one container field must be set. Set Mesos by default.
Container: NewMesosContainer().Build(),
Resources: []*aurora.Resource{numCpus, ramMb, diskMb},
},
resources: resources,
portCount: 0}
}
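// Illustrative sketch, not part of the original file: NewTask preallocates the
// CPU/RAM/disk resources and a Mesos container, so callers chain only the
// setters they need. All values below are made up.
func exampleNewTask() *aurora.TaskConfig {
	return NewTask().
		Environment("prod").
		Role("www-data").
		Name("hello_world").
		CPU(1.0).
		RAM(64).
		Disk(128).
		TaskConfig()
}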
// Helper method to convert aurora.TaskConfig to gorealis AuroraTask type
func TaskFromThrift(config *aurora.TaskConfig) *AuroraTask {
newTask := NewTask()
// Pass values using receivers as much as possible
newTask.
Environment(config.Job.Environment).
Role(config.Job.Role).
Name(config.Job.Name).
MaxFailure(config.MaxTaskFailures).
IsService(config.IsService)
if config.Tier != nil {
newTask.Tier(*config.Tier)
}
if config.ExecutorConfig != nil {
newTask.
ExecutorName(config.ExecutorConfig.Name).
ExecutorData(config.ExecutorConfig.Data)
}
// Make a deep copy of the task's container
if config.Container != nil {
if config.Container.Mesos != nil {
mesosContainer := NewMesosContainer()
if config.Container.Mesos.Image != nil {
if config.Container.Mesos.Image.Appc != nil {
mesosContainer.AppcImage(config.Container.Mesos.Image.Appc.Name, config.Container.Mesos.Image.Appc.ImageId)
} else if config.Container.Mesos.Image.Docker != nil {
mesosContainer.DockerImage(config.Container.Mesos.Image.Docker.Name, config.Container.Mesos.Image.Docker.Tag)
}
}
for _, vol := range config.Container.Mesos.Volumes {
mesosContainer.AddVolume(vol.ContainerPath, vol.HostPath, vol.Mode)
}
newTask.Container(mesosContainer)
} else if config.Container.Docker != nil {
dockerContainer := NewDockerContainer()
dockerContainer.Image(config.Container.Docker.Image)
for _, param := range config.Container.Docker.Parameters {
dockerContainer.AddParameter(param.Name, param.Value)
}
newTask.Container(dockerContainer)
}
}
// Copy resources over: ports are appended as new entries, while CPU, RAM, and disk go through their setters
for _, resource := range config.Resources {
// NamedPort entries are appended verbatim; CPU, RAM, and disk update the resources preallocated by NewTask
if resource != nil {
if resource.NamedPort != nil {
newTask.task.Resources = append(newTask.task.Resources, &aurora.Resource{NamedPort: thrift.StringPtr(*resource.NamedPort)})
newTask.portCount++
}
if resource.RamMb != nil {
newTask.RAM(*resource.RamMb)
}
if resource.NumCpus != nil {
newTask.CPU(*resource.NumCpus)
}
if resource.DiskMb != nil {
newTask.Disk(*resource.DiskMb)
}
}
}
// Copy constraints
for _, constraint := range config.Constraints {
if constraint != nil && constraint.Constraint != nil {
newConstraint := aurora.Constraint{Name: constraint.Name}
taskConstraint := constraint.Constraint
if taskConstraint.Limit != nil {
newConstraint.Constraint = &aurora.TaskConstraint{Limit: &aurora.LimitConstraint{Limit: taskConstraint.Limit.Limit}}
newTask.task.Constraints = append(newTask.task.Constraints, &newConstraint)
} else if taskConstraint.Value != nil {
values := make([]string, 0)
for _, val := range taskConstraint.Value.Values {
values = append(values, val)
}
newConstraint.Constraint = &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{Negated: taskConstraint.Value.Negated, Values: values}}
newTask.task.Constraints = append(newTask.task.Constraints, &newConstraint)
}
}
}
// Copy labels
for _, label := range config.Metadata {
newTask.task.Metadata = append(newTask.task.Metadata, &aurora.Metadata{Key: label.Key, Value: label.Value})
}
// Copy Mesos fetcher URIs
for _, uri := range config.MesosFetcherUris {
newTask.task.MesosFetcherUris = append(
newTask.task.MesosFetcherUris,
&aurora.MesosFetcherURI{Value: uri.Value, Extract: thrift.BoolPtr(*uri.Extract), Cache: thrift.BoolPtr(*uri.Cache)})
}
return newTask
}
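// Illustrative sketch, not part of the original file: because TaskFromThrift
// rebuilds every nested field through the receivers above, it doubles as a deep
// copy (Clone at the bottom of this file relies on exactly that).
func exampleDeepCopy(src *AuroraTask) *AuroraTask {
	copied := TaskFromThrift(src.TaskConfig())
	copied.CPU(4) // mutating the copy does not touch src: its resources were reallocated
	return copied
}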
// Set AuroraTask Key environment.
func (t *AuroraTask) Environment(env string) *AuroraTask {
t.task.Job.Environment = env
return t
}
// Set AuroraTask Key Role.
func (t *AuroraTask) Role(role string) *AuroraTask {
t.task.Job.Role = role
return t
}
// Set AuroraTask Key Name.
func (t *AuroraTask) Name(name string) *AuroraTask {
t.task.Job.Name = name
return t
}
// Set the name of the executor the task will be configured to use.
func (t *AuroraTask) ExecutorName(name string) *AuroraTask {
if t.task.ExecutorConfig == nil {
t.task.ExecutorConfig = aurora.NewExecutorConfig()
}
t.task.ExecutorConfig.Name = name
return t
}
// Set the executor data; it will be serialized and included as part of the entire task sent to the scheduler.
func (t *AuroraTask) ExecutorData(data string) *AuroraTask {
if t.task.ExecutorConfig == nil {
t.task.ExecutorConfig = aurora.NewExecutorConfig()
}
t.task.ExecutorConfig.Data = data
return t
}
func (t *AuroraTask) CPU(cpus float64) *AuroraTask {
*t.resources[CPU].NumCpus = cpus
return t
}
func (t *AuroraTask) RAM(ram int64) *AuroraTask {
*t.resources[RAM].RamMb = ram
return t
}
func (t *AuroraTask) Disk(disk int64) *AuroraTask {
*t.resources[DISK].DiskMb = disk
return t
}
func (t *AuroraTask) Tier(tier string) *AuroraTask {
t.task.Tier = &tier
return t
}
// How many failures to tolerate before giving up.
func (t *AuroraTask) MaxFailure(maxFail int32) *AuroraTask {
t.task.MaxTaskFailures = maxFail
return t
}
// Restart the job's tasks if they fail
func (t *AuroraTask) IsService(isService bool) *AuroraTask {
t.task.IsService = isService
return t
}
// Add a list of URIs with the same extract and cache configuration. Scheduler must have
// --enable_mesos_fetcher flag enabled. Currently there is no duplicate detection.
func (t *AuroraTask) AddURIs(extract bool, cache bool, values ...string) *AuroraTask {
for _, value := range values {
t.task.MesosFetcherUris = append(
t.task.MesosFetcherUris,
&aurora.MesosFetcherURI{Value: value, Extract: &extract, Cache: &cache})
}
return t
}
// Adds a Mesos label to the job. Note that Aurora will add the
// prefix "org.apache.aurora.metadata." to the beginning of each key.
func (t *AuroraTask) AddLabel(key string, value string) *AuroraTask {
t.task.Metadata = append(t.task.Metadata, &aurora.Metadata{Key: key, Value: value})
return t
}
// Add a named port to the job configuration. These are random ports as it's
// not currently possible to request specific ports using Aurora.
func (t *AuroraTask) AddNamedPorts(names ...string) *AuroraTask {
t.portCount += len(names)
for _, name := range names {
t.task.Resources = append(t.task.Resources, &aurora.Resource{NamedPort: &name})
}
return t
}
// Adds a request for a number of ports to the job configuration. The names chosen for these ports
// will be org.apache.aurora.port.X, where X is the current port count for the job configuration
// starting at 0. These are random ports as it's not currently possible to request
// specific ports using Aurora.
func (t *AuroraTask) AddPorts(num int) *AuroraTask {
start := t.portCount
t.portCount += num
for i := start; i < t.portCount; i++ {
portName := portPrefix + strconv.Itoa(i)
t.task.Resources = append(t.task.Resources, &aurora.Resource{NamedPort: &portName})
}
return t
}
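// Illustrative sketch, not part of the original file: portCount persists across
// calls, so chaining AddPorts(2).AddPorts(1) yields three resources named
// org.apache.aurora.port.0, .1, and .2.
func examplePortNames() []string {
	task := NewTask().AddPorts(2).AddPorts(1)
	var names []string
	for _, res := range task.TaskConfig().Resources {
		if res.NamedPort != nil {
			names = append(names, *res.NamedPort)
		}
	}
	return names
}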
// From Aurora Docs:
// Add a Value constraint
// name - Mesos slave attribute that the constraint is matched against.
// If negated = true, treat this as a 'not' - to avoid specific values.
// Values - list of values we look for in attribute name
func (t *AuroraTask) AddValueConstraint(name string, negated bool, values ...string) *AuroraTask {
t.task.Constraints = append(t.task.Constraints,
&aurora.Constraint{
Name: name,
Constraint: &aurora.TaskConstraint{
Value: &aurora.ValueConstraint{
Negated: negated,
Values: values,
},
Limit: nil,
},
})
return t
}
// From Aurora Docs:
// A constraint that specifies the maximum number of active tasks on a host with
// a matching attribute that may be scheduled simultaneously.
func (t *AuroraTask) AddLimitConstraint(name string, limit int32) *AuroraTask {
t.task.Constraints = append(t.task.Constraints,
&aurora.Constraint{
Name: name,
Constraint: &aurora.TaskConstraint{
Value: nil,
Limit: &aurora.LimitConstraint{Limit: limit},
},
})
return t
}
// From Aurora Docs:
// dedicated attribute. Aurora treats this specially, and only allows matching jobs
// to run on these machines, and will only schedule matching jobs on these machines.
// When a job is created, the scheduler requires that the $role component matches
// the role field in the job configuration, and will reject the job creation otherwise.
// A wildcard (*) may be used for the role portion of the dedicated attribute, which
// will allow any owner to elect for a job to run on the host(s).
func (t *AuroraTask) AddDedicatedConstraint(role, name string) *AuroraTask {
t.AddValueConstraint(dedicated, false, role+"/"+name)
return t
}
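// Illustrative sketch, not part of the original file: the call below is
// shorthand for AddValueConstraint("dedicated", false, "www-data/prod"); the
// role component must match the task's role or the scheduler rejects the job.
func exampleDedicated() *AuroraTask {
	return NewTask().
		Role("www-data").
		AddDedicatedConstraint("www-data", "prod")
}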
// Set the container the job configuration's tasks will run in.
func (t *AuroraTask) Container(container Container) *AuroraTask {
t.task.Container = container.Build()
return t
}
func (t *AuroraTask) TaskConfig() *aurora.TaskConfig {
return t.task
}
func (t *AuroraTask) JobKey() aurora.JobKey {
return *t.task.Job
}
func (t *AuroraTask) Clone() *AuroraTask {
newTask := TaskFromThrift(t.task)
return newTask
}

57
vendor/github.com/paypal/gorealis/task_test.go generated vendored Normal file
View file

@ -0,0 +1,57 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis_test
import (
"testing"
realis "github.com/paypal/gorealis"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/stretchr/testify/assert"
)
func TestAuroraTask_Clone(t *testing.T) {
task0 := realis.NewTask().
Environment("development").
Role("ubuntu").
Name("this_is_a_test").
ExecutorName(aurora.AURORA_EXECUTOR_NAME).
ExecutorData("{fake:payload}").
CPU(10).
RAM(643).
Disk(1000).
IsService(true).
AddPorts(10).
Tier("preferred").
MaxFailure(23).
AddURIs(true, true, "testURI").
AddLabel("Test", "Value").
AddNamedPorts("test").
AddValueConstraint("test", false, "testing").
AddLimitConstraint("test_limit", 1).
AddDedicatedConstraint("ubuntu", "name").
Container(realis.NewDockerContainer().AddParameter("hello", "world").Image("testImg"))
task1 := task0.Clone()
assert.EqualValues(t, task0, task1, "Clone does not return the correct deep copy of AuroraTask")
task0.Container(realis.NewMesosContainer().
AppcImage("test", "testing").
AddVolume("test", "test", aurora.Mode_RW))
task2 := task0.Clone()
assert.EqualValues(t, task0, task2, "Clone does not return the correct deep copy of AuroraTask")
}

View file

@ -1,154 +0,0 @@
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package realis
import (
"github.com/paypal/gorealis/gen-go/apache/aurora"
)
// Structure to collect all information required to create job update
type UpdateJob struct {
Job // SetInstanceCount for job is hidden, access via full qualifier
req *aurora.JobUpdateRequest
}
// Create a default UpdateJob object.
func NewDefaultUpdateJob(config *aurora.TaskConfig) *UpdateJob {
req := aurora.NewJobUpdateRequest()
req.TaskConfig = config
req.Settings = NewUpdateSettings()
job := NewJob().(*AuroraJob)
job.jobConfig.TaskConfig = config
// Rebuild resource map from TaskConfig
for ptr := range config.Resources {
if ptr.NumCpus != nil {
job.resources["cpu"].NumCpus = ptr.NumCpus
continue // Guard against Union violations that Go won't enforce
}
if ptr.RamMb != nil {
job.resources["ram"].RamMb = ptr.RamMb
continue
}
if ptr.DiskMb != nil {
job.resources["disk"].DiskMb = ptr.DiskMb
continue
}
}
// Mirrors defaults set by Pystachio
req.Settings.UpdateOnlyTheseInstances = make(map[*aurora.Range]bool)
req.Settings.UpdateGroupSize = 1
req.Settings.WaitForBatchCompletion = false
req.Settings.MinWaitInInstanceRunningMs = 45000
req.Settings.MaxPerInstanceFailures = 0
req.Settings.MaxFailedInstances = 0
req.Settings.RollbackOnFailure = true
//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
return &UpdateJob{Job: job, req: req}
}
func NewUpdateJob(config *aurora.TaskConfig, settings *aurora.JobUpdateSettings) *UpdateJob {
req := aurora.NewJobUpdateRequest()
req.TaskConfig = config
req.Settings = settings
job := NewJob().(*AuroraJob)
job.jobConfig.TaskConfig = config
// Rebuild resource map from TaskConfig
for ptr := range config.Resources {
if ptr.NumCpus != nil {
job.resources["cpu"].NumCpus = ptr.NumCpus
continue // Guard against Union violations that Go won't enforce
}
if ptr.RamMb != nil {
job.resources["ram"].RamMb = ptr.RamMb
continue
}
if ptr.DiskMb != nil {
job.resources["disk"].DiskMb = ptr.DiskMb
continue
}
}
//TODO(rdelvalle): Deep copy job struct to avoid unexpected behavior
return &UpdateJob{Job: job, req: req}
}
// Set instance count the job will have after the update.
func (u *UpdateJob) InstanceCount(inst int32) *UpdateJob {
u.req.InstanceCount = inst
return u
}
// Max number of instances being updated at any given moment.
func (u *UpdateJob) BatchSize(size int32) *UpdateJob {
u.req.Settings.UpdateGroupSize = size
return u
}
// Minimum number of seconds a shard must remain in RUNNING state before considered a success.
func (u *UpdateJob) WatchTime(ms int32) *UpdateJob {
u.req.Settings.MinWaitInInstanceRunningMs = ms
return u
}
// Wait for all instances in a group to be done before moving on.
func (u *UpdateJob) WaitForBatchCompletion(batchWait bool) *UpdateJob {
u.req.Settings.WaitForBatchCompletion = batchWait
return u
}
// Max number of instance failures to tolerate before marking instance as FAILED.
func (u *UpdateJob) MaxPerInstanceFailures(inst int32) *UpdateJob {
u.req.Settings.MaxPerInstanceFailures = inst
return u
}
// Max number of FAILED instances to tolerate before terminating the update.
func (u *UpdateJob) MaxFailedInstances(inst int32) *UpdateJob {
u.req.Settings.MaxFailedInstances = inst
return u
}
// When False, prevents auto rollback of a failed update.
func (u *UpdateJob) RollbackOnFail(rollback bool) *UpdateJob {
u.req.Settings.RollbackOnFailure = rollback
return u
}
func NewUpdateSettings() *aurora.JobUpdateSettings {
us := new(aurora.JobUpdateSettings)
// Mirrors defaults set by Pystachio
us.UpdateOnlyTheseInstances = make(map[*aurora.Range]bool)
us.UpdateGroupSize = 1
us.WaitForBatchCompletion = false
us.MinWaitInInstanceRunningMs = 45000
us.MaxPerInstanceFailures = 0
us.MaxFailedInstances = 0
us.RollbackOnFailure = true
return us
}

75
vendor/github.com/paypal/gorealis/util.go generated vendored Normal file
View file

@ -0,0 +1,75 @@
package realis
import (
"net/url"
"strings"
"github.com/paypal/gorealis/gen-go/apache/aurora"
"github.com/pkg/errors"
)
var ActiveStates = make(map[aurora.ScheduleStatus]bool)
var SlaveAssignedStates = make(map[aurora.ScheduleStatus]bool)
var LiveStates = make(map[aurora.ScheduleStatus]bool)
var TerminalStates = make(map[aurora.ScheduleStatus]bool)
var ActiveJobUpdateStates = make(map[aurora.JobUpdateStatus]bool)
var AwaitingPulseJobUpdateStates = make(map[aurora.JobUpdateStatus]bool)
func init() {
for _, status := range aurora.ACTIVE_STATES {
ActiveStates[status] = true
}
for _, status := range aurora.SLAVE_ASSIGNED_STATES {
SlaveAssignedStates[status] = true
}
for _, status := range aurora.LIVE_STATES {
LiveStates[status] = true
}
for _, status := range aurora.TERMINAL_STATES {
TerminalStates[status] = true
}
for _, status := range aurora.ACTIVE_JOB_UPDATE_STATES {
ActiveJobUpdateStates[status] = true
}
for _, status := range aurora.AWAITNG_PULSE_JOB_UPDATE_STATES {
AwaitingPulseJobUpdateStates[status] = true
}
}
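// Illustrative sketch, not part of the original file: the maps built in init
// turn the generated thrift state slices into sets for O(1) membership checks.
func isTerminal(status aurora.ScheduleStatus) bool {
	return TerminalStates[status]
}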
func validateAndPopulateAuroraURL(urlStr string) (string, error) {
// If no protocol defined, assume http
if !strings.Contains(urlStr, "://") {
urlStr = "http://" + urlStr
}
u, err := url.Parse(urlStr)
if err != nil {
return "", errors.Wrap(err, "error parsing url")
}
// If no path provided assume /api
if u.Path == "" {
u.Path = "/api"
}
// If no port provided, assume default 8081
if u.Port() == "" {
u.Host = u.Host + ":8081"
}
if !(u.Scheme == "http" || u.Scheme == "https") {
return "", errors.Errorf("only protocols http and https are supported %v\n", u.Scheme)
}
if u.Path != "/api" {
return "", errors.Errorf("expected /api path %v\n", u.Path)
}
return u.String(), nil
}
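// Illustrative behavior of the rules above (all inputs are made-up examples):
//
//   "192.168.33.7"               -> "http://192.168.33.7:8081/api"
//   "192.168.33.7:8081/api"      -> "http://192.168.33.7:8081/api"
//   "https://aurora.example.com" -> "https://aurora.example.com:8081/api"
//   "ftp://192.168.33.7"         -> error: only http and https are supported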

View file

@ -20,7 +20,7 @@ import (
"testing"
"time"
"github.com/paypal/gorealis"
realis "github.com/paypal/gorealis"
"github.com/stretchr/testify/assert"
)