Misc. fixes for tests (#16)

* Bumping up CI to go1.17 and enabling CI for PRs.
* Adding go.sum now that issues seem to have gone away.
* Bump up aurora to 0.25.0 and mesos to 1.9.0.
* Fixing Mac tests. Adding extra time for killing thermos jobs.
* Reduce the thermos overhead for unit tests.

Co-authored-by: lenhattan86 <lenhattan86@users.noreply.github.com>
This commit is contained in:
  parent fe664178ce
  commit 907430768c

8 changed files with 53 additions and 37 deletions
.github/workflows/main.yml (vendored): 10 changes
@@ -1,6 +1,12 @@
 name: CI

-on: [push]
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master

 jobs:
   build:
@@ -12,7 +18,7 @@ jobs:
     - name: Setup Go for use with actions
       uses: actions/setup-go@v2
       with:
-        go-version: 1.15
+        go-version: 1.17
     - name: Install goimports
       run: go get golang.org/x/tools/cmd/goimports
     - name: Set env with list of directories in repo containin go code
.gitignore (vendored): 3 changes
@@ -41,6 +41,3 @@ _testmain.go
 # Example client build
 examples/client
 examples/jsonClient
-
-# Use checksum database
-go.sum
@@ -14,7 +14,7 @@ services:
       ipv4_address: 192.168.33.2

   master:
-    image: rdelvalle/mesos-master:1.6.2
+    image: quay.io/aurorascheduler/mesos-master:1.9.0
     restart: on-failure
     ports:
       - "5050:5050"
@@ -32,7 +32,7 @@ services:
       - zk

   agent-one:
-    image: rdelvalle/mesos-agent:1.6.2
+    image: quay.io/aurorascheduler/mesos-agent:1.9.0
     pid: host
     restart: on-failure
     ports:
@@ -56,7 +56,7 @@ services:
       - zk

   aurora-one:
-    image: rdelvalle/aurora:0.22.0
+    image: quay.io/aurorascheduler/scheduler:0.25.0
     pid: host
     ports:
       - "8081:8081"
@@ -70,6 +70,7 @@ services:
         -shiro_realm_modules=INI_AUTHNZ
         -shiro_ini_path=/etc/aurora/security.ini
         -min_required_instances_for_sla_check=1
+        -thermos_executor_cpu=0.09
     volumes:
       - ./.aurora-config:/etc/aurora
     networks:
go.sum (new file): 18 additions
@@ -0,0 +1,18 @@
+github.com/apache/thrift v0.14.0 h1:vqZ2DP42i8th2OsgCcYZkirtbzvpZEFx53LiWDJXIAs=
+github.com/apache/thrift v0.14.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a h1:EYL2xz/Zdo0hyqdZMXR4lmT2O11jDLTPCEqIe/FR6W4=
+github.com/samuel/go-zookeeper v0.0.0-20171117190445-471cd4e61d7a/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.0 h1:DMOzIV76tmoDNE9pX6RSN0aDtCYeCg5VueieJaAo1uw=
+github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -77,7 +77,7 @@ func (j *JobUpdate) BatchSize(size int32) *JobUpdate {

 // Minimum number of seconds a shard must remain in RUNNING state before considered a success.
 func (j *JobUpdate) WatchTime(timeout time.Duration) *JobUpdate {
-	j.request.Settings.MinWaitInInstanceRunningMs = int32(timeout.Seconds() * 1000)
+	j.request.Settings.MinWaitInInstanceRunningMs = int32(timeout.Milliseconds())
 	return j
 }

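For context on the WatchTime change above: time.Duration.Milliseconds() yields the integer millisecond count directly, while the old expression round-trips through float64 seconds. A minimal standalone sketch (not part of the diff) showing the two conversions agree for typical whole-second timeouts:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	timeout := 90 * time.Second

	// Old conversion: float64 seconds scaled back up to milliseconds.
	oldMs := int32(timeout.Seconds() * 1000)

	// New conversion: integer milliseconds taken straight from the Duration.
	newMs := int32(timeout.Milliseconds())

	fmt.Println(oldMs, newMs) // both print 90000
}
```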
@@ -245,7 +245,7 @@ func (c *Client) MonitorHostMaintenance(hosts []string,
 		}
 	}

-// AutoPaused monitor is a special monitor for auto pause enabled batch updates. This monitor ensures that the update
+// MonitorAutoPausedUpdate is a special monitor for auto pause enabled batch updates. This monitor ensures that the update
 // being monitored is capable of auto pausing and has auto pausing enabled. After verifying this information,
 // the monitor watches for the job to enter the ROLL_FORWARD_PAUSED state and calculates the current batch
 // the update is in using information from the update configuration.
@@ -294,8 +294,9 @@ func (c *Client) MonitorAutoPausedUpdate(key aurora.JobUpdateKey, interval, time
 		return -1, err
 	}

-	// Summary 0 is assumed to exist because MonitorJobUpdateQuery will return an error if there is Summaries
-	if summary[0].State.Status != aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED {
+	// Summary 0 is assumed to exist because MonitorJobUpdateQuery will return an error if there is no summaries
+	if !(summary[0].State.Status == aurora.JobUpdateStatus_ROLL_FORWARD_PAUSED ||
+		summary[0].State.Status == aurora.JobUpdateStatus_ROLLED_FORWARD) {
 		return -1, errors.Errorf("update is in a terminal state %v", summary[0].State.Status)
 	}

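The widened condition above lets the monitor treat ROLLED_FORWARD (the final batch finishing on its own) as success instead of a terminal-state error. Below is a hedged sketch of how a caller might drive an auto-paused update; the helper name, the gen-go import path, and the (int, error) return shape of MonitorAutoPausedUpdate are assumptions, while the client methods themselves appear in the diff:

```go
package example

import (
	"fmt"
	"time"

	realis "github.com/aurora-scheduler/gorealis/v2"
	aurora "github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora" // assumed gen-go path
)

// resumeThroughBatches waits at each auto-pause point and resumes the update,
// skipping the final resume since the last batch rolls forward on its own.
func resumeThroughBatches(r *realis.Client, key aurora.JobUpdateKey, batches int) error {
	for i := 0; i < batches; i++ {
		// Assumed to return (int, error), matching its use in the e2e test.
		step, err := r.MonitorAutoPausedUpdate(key, 5*time.Second, 5*time.Minute)
		if err != nil {
			// The update may already be in a terminal state; abort as a best effort.
			_ = r.AbortJobUpdate(key, "monitor timed out")
			return err
		}
		if step != i {
			return fmt.Errorf("paused on unexpected batch: got %d, want %d", step, i)
		}
		if i != batches-1 {
			if err := r.ResumeJobUpdate(key, "resuming next batch"); err != nil {
				return err
			}
		}
	}
	return nil
}
```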
@@ -94,7 +94,7 @@ func TestBadCredentials(t *testing.T) {
 	job := realis.NewJob().
 		Environment("prod").
 		Role("vagrant").
-		Name("create_thermos_job_test").
+		Name("create_thermos_job_bad_creds_test").
 		ThermosExecutor(thermosExec).
 		CPU(.5).
 		RAM(64).
@@ -209,7 +209,6 @@ func TestValidAuroraURL(t *testing.T) {
 }

 func TestRealisClient_ReestablishConn(t *testing.T) {
-
 	// Test that we're able to tear down the old connection and create a new one.
 	err := r.ReestablishConn()

@@ -220,11 +219,9 @@ func TestGetCACerts(t *testing.T) {
 	certs, err := realis.GetCerts("./examples/certs")
 	assert.NoError(t, err)
 	assert.Equal(t, len(certs.Subjects()), 2)
-
 }

 func TestRealisClient_CreateJob_Thermos(t *testing.T) {
-
 	role := "vagrant"
 	job := realis.NewJob().
 		Environment("prod").
@@ -251,7 +248,7 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {

 		// Fetch all Jobs
 		result, err := r.GetJobs(role)
-		fmt.Printf("GetJobs length: %+v \n", len(result.Configs))
+		fmt.Println("GetJobs length: ", len(result.Configs))
 		assert.Len(t, result.Configs, 1)
 		assert.NoError(t, err)

@@ -272,7 +269,7 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {
 		err := r.KillJob(job.JobKey())
 		assert.NoError(t, err)

-		success, err := r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 60*time.Second)
+		success, err := r.MonitorInstances(job.JobKey(), 0, 1*time.Second, 90*time.Second)
 		assert.True(t, success)
 		assert.NoError(t, err)
 	})
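The 60s to 90s bump above gives Thermos more time to tear tasks down on the slower Mac/Docker-bridge setup. A small sketch of the kill-and-wait pattern the test relies on; the wrapper function and import paths are assumptions, while KillJob and MonitorInstances are the client calls shown in the diff:

```go
package example

import (
	"fmt"
	"time"

	realis "github.com/aurora-scheduler/gorealis/v2"
	aurora "github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora" // assumed gen-go path
)

// killAndWait kills a job and then polls until no instances remain, allowing
// up to 90 seconds for Thermos to finish tearing the tasks down.
func killAndWait(r *realis.Client, key aurora.JobKey) error {
	if err := r.KillJob(key); err != nil {
		return err
	}

	// Second argument is the instance count to wait for, as in the test above.
	ok, err := r.MonitorInstances(key, 0, 1*time.Second, 90*time.Second)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("instances of %v did not terminate in time", key)
	}
	return nil
}
```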
@@ -280,7 +277,6 @@ func TestRealisClient_CreateJob_Thermos(t *testing.T) {

 // Test configuring an executor that doesn't exist for CreateJob API
 func TestRealisClient_CreateJob_ExecutorDoesNotExist(t *testing.T) {
-
 	// Create a single job
 	job := realis.NewJob().
 		Environment("prod").
@@ -299,7 +295,6 @@ func TestRealisClient_CreateJob_ExecutorDoesNotExist(t *testing.T) {

 // Test configuring an executor that doesn't exist for CreateJob API
 func TestRealisClient_GetPendingReason(t *testing.T) {
-
 	env := "prod"
 	role := "vagrant"
 	name := "pending_reason_test"
@@ -333,7 +328,6 @@ func TestRealisClient_GetPendingReason(t *testing.T) {
 }

 func TestRealisClient_CreateService_WithPulse_Thermos(t *testing.T) {
-
 	fmt.Println("Creating service")
 	role := "vagrant"
 	job := realis.NewJobUpdate().
@@ -813,18 +807,15 @@ func TestRealisClient_BatchAwareAutoPause(t *testing.T) {
 	job := realis.NewJob().
 		Environment("prod").
 		Role("vagrant").
-		Name("BatchAwareAutoPauseTest").
+		Name("batch_aware_auto_pause_test").
 		ThermosExecutor(thermosExec).
 		CPU(.01).
 		RAM(4).
 		Disk(10).
 		InstanceCount(6).
-		IsService(true).
-		Production(false).
-		Tier("preemptible").
-		Priority(0)
+		IsService(true)

-	updateGroups := []int32{1, 2, 3}
+	updateGroups := []int32{1, 3}
 	strategy := realis.JobUpdateFromAuroraTask(job.AuroraTask()).
 		VariableBatchStrategy(true, updateGroups...).
 		InstanceCount(6).
@@ -837,22 +828,28 @@ func TestRealisClient_BatchAwareAutoPause(t *testing.T) {
 	key := *result.GetKey()

 	for i := range updateGroups {
-		curStep, mErr := r.MonitorAutoPausedUpdate(key, time.Second*5, time.Second*240)
+		curStep, mErr := r.MonitorAutoPausedUpdate(key, time.Second*5, time.Minute*5)
 		if mErr != nil {
+			fmt.Println(mErr)
 			// Update may already be in a terminal state so don't check for error
-			assert.NoError(t, r.AbortJobUpdate(key, "Monitor timed out."))
+			_ = r.AbortJobUpdate(key, "Monitor timed out.")
 		}

 		assert.Equal(t, i, curStep)
-		require.NoError(t, r.ResumeJobUpdate(key, "auto resuming test"))
+
+		if i != len(updateGroups)-1 {
+			require.NoError(t, err)
+			require.NoError(t, r.ResumeJobUpdate(key, "auto resuming test"))
+		}
 	}
+	assert.NoError(t, r.AbortJobUpdate(key, ""))
 	assert.NoError(t, r.KillJob(strategy.JobKey()))
 }

 func TestRealisClient_GetJobSummary(t *testing.T) {
 	role := "vagrant"
 	env := "prod"
-	name := "GetJobSummaryJob"
+	name := "test_get_job_summary"
 	// Create a single job
 	job := realis.NewJob().
 		Environment(env).
@@ -863,14 +860,10 @@ func TestRealisClient_GetJobSummary(t *testing.T) {
 		RAM(4).
 		Disk(10).
 		InstanceCount(3).
-		WatchTime(20 * time.Second).
 		IsService(true).
 		Production(false).
 		Tier("preemptible").
-		Priority(0).
-		BatchSize(2)
+		Priority(0)

-	result, err := r.CreateService(job)
-
 	err := r.CreateJob(job)
 	assert.NoError(t, err)
@@ -1,4 +1,4 @@
 #!/bin/bash

 # Since we run our docker compose setup in bridge mode to be able to run on MacOS, we have to launch a Docker container within the bridge network in order to avoid any routing issues.
-docker run --rm -t -v $(pwd):/go/src/github.com/aurora-scheduler/gorealis --network gorealis_aurora_cluster golang:1.13-stretch go test -v github.com/aurora-scheduler/gorealis $@
+docker run --rm -t -w /gorealis -v $GOPATH/pkg:/go/pkg -v $(pwd):/gorealis --network gorealis_aurora_cluster golang:1.17-buster go test -v github.com/aurora-scheduler/gorealis/v2 $@