Refactoring project to be more similar to other Go projects.
parent bbccabcd27
commit d7db155d88
10 changed files with 153 additions and 192 deletions
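The refactor's net effect: helpers that previously lived in package cmd (toJSON, getLoggingLevels, unmarshalJob, maintenanceMonitorPrint, and the monitorCmdConfig struct) move into an importable package internal, gaining exported names in the process. A rough sketch of the resulting surface, reconstructed from the hunks below with stub bodies, not the real implementations (UnmarshalJob and the Job types move the same way; see the deletions in the first file):

package internal

import (
	"time"

	"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
)

// log is injected from package cmd so both packages share one logger.
var log *logrus.Logger

// Logger hands the CLI's configured logrus instance to this package.
func Logger(l *logrus.Logger) { log = l }

// MonitorCmdConfig (was cmd.monitorCmdConfig); the exported fields are what
// allow the composite literals in cmd/start.go, cmd/stop.go, and cmd/monitor.go.
type MonitorCmdConfig struct {
	Cmd                             *cobra.Command
	MonitorInterval, MonitorTimeout time.Duration
	StatusList                      []string
}

func ToJSON(v interface{}) string { return "" } // stub; was cmd.toJSON
func GetLoggingLevels() string    { return "" } // stub; was cmd.getLoggingLevels

// MaintenanceMonitorPrint gains an explicit toJson parameter, since the
// package can no longer see cmd's package-level flag variable.
func MaintenanceMonitorPrint(hostResult map[string]bool, desiredStates []aurora.MaintenanceMode, toJson bool) {}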
@@ -15,13 +15,10 @@ package cmd

import (
-	"os"
	"strings"

-	yaml "gopkg.in/yaml.v2"
-
+	"github.com/aurora-scheduler/australis/internal"
	realis "github.com/aurora-scheduler/gorealis/v2"
-	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

@@ -36,101 +33,9 @@ var createCmd = &cobra.Command{
	Args: cobra.ExactArgs(1),
}

-type URI struct {
-	URI     string `yaml:"uri"`
-	Extract bool   `yaml:"extract"`
-	Cache   bool   `yaml:"cache"`
-}
-
-type Executor struct {
-	Name string `yaml:"name"`
-	Data string `yaml:"data"`
-}
-
-type ThermosProcess struct {
-	Name string `yaml:"name"`
-	Cmd  string `yaml:"cmd"`
-}
-
-type DockerContainer struct {
-	Name string `yaml:"name"`
-	Tag  string `yaml:"tag"`
-}
-
-type Container struct {
-	Docker *DockerContainer `yaml:"docker"`
-}
-
-type Job struct {
-	Environment string            `yaml:"environment"`
-	Role        string            `yaml:"role"`
-	Name        string            `yaml:"name"`
-	CPU         float64           `yaml:"cpu"`
-	RAM         int64             `yaml:"ram"`
-	Disk        int64             `yaml:"disk"`
-	Executor    Executor          `yaml:"executor"`
-	Instances   int32             `yaml:"instances"`
-	URIs        []URI             `yaml:"uris"`
-	Metadata    map[string]string `yaml:"labels"`
-	Service     bool              `yaml:"service"`
-	Thermos     []ThermosProcess  `yaml:",flow,omitempty"`
-	Container   *Container        `yaml:"container,omitempty"`
-}
-
-func (j *Job) Validate() bool {
-	if j.Name == "" {
-		return false
-	}
-
-	if j.Role == "" {
-		return false
-	}
-
-	if j.Environment == "" {
-		return false
-	}
-
-	if j.Instances <= 0 {
-		return false
-	}
-
-	if j.CPU <= 0.0 {
-		return false
-	}
-
-	if j.RAM <= 0 {
-		return false
-	}
-
-	if j.Disk <= 0 {
-		return false
-	}
-
-	return true
-}
-
-func unmarshalJob(filename string) (Job, error) {
-
-	job := Job{}
-
-	if jobsFile, err := os.Open(filename); err != nil {
-		return job, errors.Wrap(err, "unable to read the job config file")
-	} else {
-		if err := yaml.NewDecoder(jobsFile).Decode(&job); err != nil {
-			return job, errors.Wrap(err, "unable to parse job config file")
-		}
-
-		if !job.Validate() {
-			return job, errors.New("invalid job config")
-		}
-	}
-
-	return job, nil
-}
-
func createJob(cmd *cobra.Command, args []string) {

-	job, err := unmarshalJob(args[0])
+	job, err := internal.UnmarshalJob(args[0])

	if err != nil {
		log.Fatalln(err)
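On the cmd side only the call site changes; the YAML decoding and validation deleted above now sit behind internal.UnmarshalJob. A minimal sketch of the new createJob shape (log is cmd's package-level logrus instance, as declared in the root command file):

package cmd

import (
	"github.com/aurora-scheduler/australis/internal"
	"github.com/spf13/cobra"
)

// Sketch of createJob after the refactor; job submission itself is elided.
func createJob(cmd *cobra.Command, args []string) {
	job, err := internal.UnmarshalJob(args[0])
	if err != nil {
		// UnmarshalJob wraps open/parse failures and rejects configs
		// that fail the (moved) Validate checks.
		log.Fatalln(err)
	}
	_ = job
}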
cmd/fetch.go (21 changes)
@@ -17,6 +17,7 @@ package cmd

import (
	"fmt"

+	"github.com/aurora-scheduler/australis/internal"
	realis "github.com/aurora-scheduler/gorealis/v2"
	"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
	"github.com/spf13/cobra"

@@ -95,8 +96,8 @@ var taskStatusCmd = &cobra.Command{

var leaderCmd = &cobra.Command{
	Use: "leader [zkNode0, zkNode1, ...zkNodeN]",
-	PersistentPreRun:  func(cmd *cobra.Command, args []string) {}, //We don't need a realis client for this cmd
-	PersistentPostRun: func(cmd *cobra.Command, args []string) {}, //We don't need a realis client for this cmd
+	PersistentPreRun:  func(cmd *cobra.Command, args []string) {}, // We don't need a realis client for this cmd
+	PersistentPostRun: func(cmd *cobra.Command, args []string) {}, // We don't need a realis client for this cmd
	PreRun: setConfig,
	Args:   cobra.MinimumNArgs(1),
	Short:  "Fetch current Aurora leader given Zookeeper nodes. ",

@@ -138,11 +139,11 @@ func fetchTasksConfig(cmd *cobra.Command, args []string) {

	tasks, err := client.GetTasksWithoutConfigs(taskQuery)
	if err != nil {
-		log.Fatalf("error: %+v\n", err)
+		log.Fatalf("error: %+v", err)
	}

	if toJson {
-		fmt.Println(toJSON(tasks))
+		fmt.Println(internal.ToJSON(tasks))
	} else {
		for _, t := range tasks {
			fmt.Println(t)

@@ -164,7 +165,7 @@ func fetchTasksStatus(cmd *cobra.Command, args []string) {
	if *role == "" {
		role = nil
	}
-	//TODO: Add filtering down by status
+	// TODO(rdelvalle): Add filtering down by status
	taskQuery := &aurora.TaskQuery{
		Environment: env,
		Role:        role,

@@ -173,11 +174,11 @@ func fetchTasksStatus(cmd *cobra.Command, args []string) {

	tasks, err := client.GetTaskStatus(taskQuery)
	if err != nil {
-		log.Fatalf("error: %+v\n", err)
+		log.Fatalf("error: %+v", err)
	}

	if toJson {
-		fmt.Println(toJSON(tasks))
+		fmt.Println(internal.ToJSON(tasks))
	} else {
		for _, t := range tasks {
			fmt.Println(t)

@@ -193,7 +194,7 @@ func fetchHostStatus(cmd *cobra.Command, args []string) {
	}

	if toJson {
-		fmt.Println(toJSON(result.Statuses))
+		fmt.Println(internal.ToJSON(result.Statuses))
	} else {
		for _, k := range result.GetStatuses() {
			fmt.Printf("Result: %s:%s\n", k.Host, k.Mode)

@@ -233,7 +234,7 @@ func fetchJobs(cmd *cobra.Command, args []string) {
	result, err := client.GetJobs(*role)

	if err != nil {
-		log.Fatalf("error: %+v\n", err)
+		log.Fatalf("error: %+v", err)
	}

	if toJson {

@@ -243,7 +244,7 @@ func fetchJobs(cmd *cobra.Command, args []string) {
		configSlice = append(configSlice, config)
	}

-		fmt.Println(toJSON(configSlice))
+		fmt.Println(internal.ToJSON(configSlice))
	} else {
		for jobConfig := range result.GetConfigs() {
			fmt.Println(jobConfig)
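A pattern worth noting across these hunks: the trailing \n is dropped from every log.Fatalf format string. logrus terminates each entry with its own newline, so the explicit \n was double-spacing the output. For example (Errorf stands in for Fatalf here so both lines actually run):

package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	err := errors.New("scheduler unreachable")

	log.Errorf("error: %+v\n", err) // entry followed by a stray blank line
	log.Errorf("error: %+v", err)   // cleanly terminated single entry
}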
@@ -69,7 +69,7 @@ func backup(cmd *cobra.Command, args []string) {
	fmt.Println("Forcing scheduler to write a Backup of latest Snapshot to file system")
	err := client.PerformBackup()
	if err != nil {
-		log.Fatalf("error: %+v\n", err)
+		log.Fatalf("error: %+v", err)
	} else {
		log.Println("Backup started successfully")
	}

@@ -112,7 +112,7 @@ func explicitRecon(cmd *cobra.Command, args []string) {
	// Get batch size from args and convert it to the right format
	batchInt, err := strconv.Atoi(args[0])
	if err != nil {
-		log.Fatalf("error: %+v\n", err)
+		log.Fatalf("error: %v", err)
	}

	batchInt32 := int32(batchInt)

@@ -123,7 +123,7 @@ func explicitRecon(cmd *cobra.Command, args []string) {

	err := client.ForceExplicitTaskReconciliation(batchSize)
	if err != nil {
-		log.Fatalf("error: %+v\n", err.Error())
+		log.Fatalf("error: %v", err)
	} else {
		fmt.Println("Explicit reconciliation started successfully")
	}

@@ -142,7 +142,7 @@ func implicitRecon(cmd *cobra.Command, args []string) {
	log.Println("Forcing scheduler to perform an implicit reconciliation with Mesos")
	err := client.ForceImplicitTaskReconciliation()
	if err != nil {
-		log.Fatalf("error: %+v\n", err)
+		log.Fatalf("error: %+v", err)
	} else {
		fmt.Println("Implicit reconciliation started successfully")
	}
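The explicitRecon hunk also swaps err.Error() for the error value itself (and relaxes %+v to %v). With github.com/pkg/errors, which this codebase imports, that distinction is real: %+v on the error value prints the wrapped cause plus a stack trace, while err.Error() flattens everything to a message string first. A small illustration:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func main() {
	err := errors.Wrap(fmt.Errorf("connection refused"), "unable to reach scheduler")

	fmt.Printf("%+v\n", err)         // message, cause, and stack trace
	fmt.Printf("%+v\n", err.Error()) // only the flattened message string
}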
@@ -18,6 +18,7 @@ import (
	"strings"
	"time"

+	"github.com/aurora-scheduler/australis/internal"
	"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
	"github.com/spf13/cobra"
)

@@ -25,12 +26,12 @@ import (
func init() {
	rootCmd.AddCommand(monitorCmd)

-	monitorCmd.AddCommand(monitorHostCmd.cmd)
+	monitorCmd.AddCommand(monitorHostCmd.Cmd)

-	monitorHostCmd.cmd.Run = monitorHost
-	monitorHostCmd.cmd.Flags().DurationVar(&monitorHostCmd.monitorInterval, "interval", time.Second*5, "Interval at which to poll scheduler.")
-	monitorHostCmd.cmd.Flags().DurationVar(&monitorHostCmd.monitorTimeout, "timeout", time.Minute*10, "Time after which the monitor will stop polling and throw an error.")
-	monitorHostCmd.cmd.Flags().StringSliceVar(&monitorHostCmd.statusList, "statuses", []string{aurora.MaintenanceMode_DRAINED.String()}, "List of acceptable statuses for a host to be in. (case-insensitive) [NONE, SCHEDULED, DRAINED, DRAINING]")
+	monitorHostCmd.Cmd.Run = monitorHost
+	monitorHostCmd.Cmd.Flags().DurationVar(&monitorHostCmd.MonitorInterval, "interval", time.Second*5, "Interval at which to poll scheduler.")
+	monitorHostCmd.Cmd.Flags().DurationVar(&monitorHostCmd.MonitorTimeout, "timeout", time.Minute*10, "Time after which the monitor will stop polling and throw an error.")
+	monitorHostCmd.Cmd.Flags().StringSliceVar(&monitorHostCmd.StatusList, "statuses", []string{aurora.MaintenanceMode_DRAINED.String()}, "List of acceptable statuses for a host to be in. (case-insensitive) [NONE, SCHEDULED, DRAINED, DRAINING]")
}

var monitorCmd = &cobra.Command{

@@ -38,20 +39,20 @@ var monitorCmd = &cobra.Command{
	Short: "Watch for a specific state change",
}

-var monitorHostCmd = monitorCmdConfig{
-	cmd: &cobra.Command{
+var monitorHostCmd = internal.MonitorCmdConfig{
+	Cmd: &cobra.Command{
		Use:   "hosts",
		Short: "Watch a host maintenance status until it enters one of the desired statuses.",
		Long: `Provide a list of hosts to monitor for desired statuses. Statuses may be passed using the --statuses
flag with a list of comma separated statuses. Statuses include [NONE, SCHEDULED, DRAINED, DRAINING]`,
	},
-	statusList: make([]string, 0),
+	StatusList: make([]string, 0),
}

func monitorHost(cmd *cobra.Command, args []string) {
	maintenanceModes := make([]aurora.MaintenanceMode, 0)

-	for _, status := range monitorHostCmd.statusList {
+	for _, status := range monitorHostCmd.StatusList {
		mode, err := aurora.MaintenanceModeFromString(strings.ToUpper(status))
		if err != nil {
			log.Fatal(err)

@@ -60,10 +61,10 @@ func monitorHost(cmd *cobra.Command, args []string) {
		maintenanceModes = append(maintenanceModes, mode)
	}

-	log.Infof("Monitoring for %v at %v intervals", monitorHostCmd.monitorTimeout, monitorHostCmd.monitorInterval)
-	hostResult, err := client.MonitorHostMaintenance(args, maintenanceModes, monitorHostCmd.monitorInterval, monitorHostCmd.monitorTimeout)
+	log.Infof("Monitoring for %v at %v intervals", monitorHostCmd.MonitorTimeout, monitorHostCmd.MonitorInterval)
+	hostResult, err := client.MonitorHostMaintenance(args, maintenanceModes, monitorHostCmd.MonitorInterval, monitorHostCmd.MonitorTimeout)

-	maintenanceMonitorPrint(hostResult, maintenanceModes)
+	internal.MaintenanceMonitorPrint(hostResult, maintenanceModes, toJson)

	if err != nil {
		log.Fatal(err)
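The lowercase-to-uppercase field renames here are forced by Go's visibility rules, not style: once the config struct moved into package internal, package cmd could no longer read or assign unexported fields such as cmd and statusList. The cross-package literal only compiles with the exported names, as in this trimmed version of the declaration above:

package cmd

import (
	"github.com/aurora-scheduler/australis/internal"
	"github.com/spf13/cobra"
)

// Legal only because Cmd and StatusList are exported; with the old
// lowercase fields this literal would fail to compile from package cmd.
var monitorHostCmd = internal.MonitorCmdConfig{
	Cmd: &cobra.Command{
		Use:   "hosts",
		Short: "Watch a host maintenance status until it enters one of the desired statuses.",
	},
	StatusList: make([]string, 0),
}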
@@ -18,6 +18,7 @@ import (
	"strings"
	"time"

+	"github.com/aurora-scheduler/australis/internal"
	"github.com/spf13/viper"

	realis "github.com/aurora-scheduler/gorealis/v2"

@@ -47,7 +48,7 @@ var message = new(string)
var updateID string
var log = logrus.New()

-const australisVer = "v0.1.1"
+const australisVer = "v0.22.0"

var forceDrainTimeout time.Duration

@@ -65,7 +66,7 @@ func init() {
	rootCmd.PersistentFlags().BoolVarP(&skipCertVerification, "skipCertVerification", "i", false, "Skip CA certificate hostname verification.")
	rootCmd.PersistentFlags().StringVar(&configFile, "config", "/etc/aurora/australis.yml", "Config file to use.")
	rootCmd.PersistentFlags().BoolVar(&toJson, "toJSON", false, "Print output in JSON format.")
-	rootCmd.PersistentFlags().StringVarP(&logLevel, "logLevel", "l", "info", "Set logging level ["+getLoggingLevels()+"].")
+	rootCmd.PersistentFlags().StringVarP(&logLevel, "logLevel", "l", "info", "Set logging level ["+internal.GetLoggingLevels()+"].")
}

var rootCmd = &cobra.Command{

@@ -91,10 +92,11 @@ func setConfig(cmd *cobra.Command, args []string) {
	lvl, err := logrus.ParseLevel(logLevel)

	if err != nil {
-		log.Fatalf("Log level %v is not valid\n", logLevel)
+		log.Fatalf("Log level %v is not valid", logLevel)
	}

	log.SetLevel(lvl)
+	internal.Logger(log)
}

func connect(cmd *cobra.Command, args []string) {
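The new internal.Logger(log) call in setConfig is what keeps logging coherent across the split: cmd constructs and configures the single logrus instance, then injects it so internal's helpers emit through the same sink at the same level. Roughly, as a standalone sketch of the hand-off:

package main

import (
	"github.com/aurora-scheduler/australis/internal"
	"github.com/sirupsen/logrus"
)

func main() {
	log := logrus.New()
	lvl, err := logrus.ParseLevel("debug") // stands in for the logLevel flag value
	if err != nil {
		log.Fatalf("Log level %v is not valid", "debug")
	}
	log.SetLevel(lvl)
	internal.Logger(log) // internal's helpers now log through this instance
}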
cmd/start.go (103 changes)
@@ -21,6 +21,7 @@ import (
	"os"
	"time"

+	"github.com/aurora-scheduler/australis/internal"
	"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
	"github.com/spf13/cobra"
)

@@ -34,46 +35,52 @@ func init() {
	rootCmd.AddCommand(startCmd)

	// Sub-commands
-	startCmd.AddCommand(startDrainCmd.cmd)
-	startDrainCmd.cmd.Run = drain
+	startCmd.AddCommand(startDrainCmd.Cmd)
+	startDrainCmd.Cmd.Run = drain

	// Maintenance specific flags
-	startDrainCmd.cmd.Flags().DurationVar(&startDrainCmd.monitorInterval, "interval", time.Second*5, "Interval at which to poll scheduler.")
-	startDrainCmd.cmd.Flags().DurationVar(&startDrainCmd.monitorTimeout, "timeout", time.Minute*10, "Time after which the monitor will stop polling and throw an error.")
-	startDrainCmd.cmd.Flags().StringVar(&fromJsonFile, jsonFileFlag, "", "JSON file to read list of agents from.")
-	startDrainCmd.cmd.Flags().BoolVar(&fromJson, jsonFlag, false, "Read JSON list of agents from the STDIN.")
+	startDrainCmd.Cmd.Flags().DurationVar(&startDrainCmd.MonitorInterval, "interval", time.Second*5, "Interval at which to poll scheduler.")
+	startDrainCmd.Cmd.Flags().DurationVar(&startDrainCmd.MonitorTimeout, "timeout", time.Minute*10, "Time after which the monitor will stop polling and throw an error.")
+	startDrainCmd.Cmd.Flags().StringVar(&fromJsonFile, jsonFileFlag, "", "JSON file to read list of agents from.")
+	startDrainCmd.Cmd.Flags().BoolVar(&fromJson, jsonFlag, false, "Read JSON list of agents from the STDIN.")

	/* SLA Aware commands */
-	startCmd.AddCommand(startSLADrainCmd.cmd)
-	startSLADrainCmd.cmd.Run = slaDrain
+	startCmd.AddCommand(startSLADrainCmd.Cmd)
+	startSLADrainCmd.Cmd.Run = slaDrain

	// SLA Maintenance specific flags
-	startSLADrainCmd.cmd.Flags().Int64Var(&count, countFlag, 5, "Instances count that should be running to meet SLA.")
-	startSLADrainCmd.cmd.Flags().Float64Var(&percent, percentageFlag, 80.0, "Percentage of instances that should be running to meet SLA.")
-	startSLADrainCmd.cmd.Flags().DurationVar(&duration, "duration", time.Minute*1, "Minimum time duration a task needs to be `RUNNING` to be treated as active.")
-	startSLADrainCmd.cmd.Flags().DurationVar(&forceDrainTimeout, "sla-limit", time.Minute*60, "Time limit after which SLA-Aware drain sheds SLA Awareness.")
-	startSLADrainCmd.cmd.Flags().DurationVar(&startSLADrainCmd.monitorInterval, "interval", time.Second*10, "Interval at which to poll scheduler.")
-	startSLADrainCmd.cmd.Flags().DurationVar(&startSLADrainCmd.monitorTimeout, "timeout", time.Minute*20, "Time after which the monitor will stop polling and throw an error.")
-	startSLADrainCmd.cmd.Flags().StringVar(&fromJsonFile, jsonFileFlag, "", "JSON file to read list of agents from.")
-	startSLADrainCmd.cmd.Flags().BoolVar(&fromJson, jsonFlag, false, "Read JSON list of agents from the STDIN.")
+	startSLADrainCmd.Cmd.Flags().Int64Var(&count, countFlag, 5, "Instances count that should be running to meet SLA.")
+	startSLADrainCmd.Cmd.Flags().Float64Var(&percent, percentageFlag, 80.0, "Percentage of instances that should be running to meet SLA.")
+	startSLADrainCmd.Cmd.Flags().DurationVar(&duration, "duration", time.Minute*1, "Minimum time duration a task needs to be `RUNNING` to be treated as active.")
+	startSLADrainCmd.Cmd.Flags().DurationVar(&forceDrainTimeout, "sla-limit", time.Minute*60, "Time limit after which SLA-Aware drain sheds SLA Awareness.")
+	startSLADrainCmd.Cmd.Flags().DurationVar(&startSLADrainCmd.MonitorInterval, "interval", time.Second*10, "Interval at which to poll scheduler.")
+	startSLADrainCmd.Cmd.Flags().DurationVar(&startSLADrainCmd.MonitorTimeout, "timeout", time.Minute*20, "Time after which the monitor will stop polling and throw an error.")
+	startSLADrainCmd.Cmd.Flags().StringVar(&fromJsonFile, jsonFileFlag, "", "JSON file to read list of agents from.")
+	startSLADrainCmd.Cmd.Flags().BoolVar(&fromJson, jsonFlag, false, "Read JSON list of agents from the STDIN.")

-	startCmd.AddCommand(startMaintenanceCmd.cmd)
-	startMaintenanceCmd.cmd.Run = maintenance
+	startCmd.AddCommand(startMaintenanceCmd.Cmd)
+	startMaintenanceCmd.Cmd.Run = maintenance

	// SLA Maintenance specific flags
-	startMaintenanceCmd.cmd.Flags().DurationVar(&startMaintenanceCmd.monitorInterval, "interval", time.Second*5, "Interval at which to poll scheduler.")
-	startMaintenanceCmd.cmd.Flags().DurationVar(&startMaintenanceCmd.monitorTimeout, "timeout", time.Minute*10, "Time after which the monitor will stop polling and throw an error.")
-	startMaintenanceCmd.cmd.Flags().StringVar(&fromJsonFile, jsonFileFlag, "", "JSON file to read list of agents from.")
-	startMaintenanceCmd.cmd.Flags().BoolVar(&fromJson, jsonFlag, false, "Read JSON list of agents from the STDIN.")
+	startMaintenanceCmd.Cmd.Flags().DurationVar(&startMaintenanceCmd.MonitorInterval, "interval", time.Second*5, "Interval at which to poll scheduler.")
+	startMaintenanceCmd.Cmd.Flags().DurationVar(&startMaintenanceCmd.MonitorTimeout, "timeout", time.Minute*10, "Time after which the monitor will stop polling and throw an error.")
+	startMaintenanceCmd.Cmd.Flags().StringVar(&fromJsonFile, jsonFileFlag, "", "JSON file to read list of agents from.")
+	startMaintenanceCmd.Cmd.Flags().BoolVar(&fromJson, jsonFlag, false, "Read JSON list of agents from the STDIN.")

+	// Start update command
+	startCmd.AddCommand(startUpdateCmd.Cmd)
+	startUpdateCmd.Cmd.Run = update
+	startUpdateCmd.Cmd.Flags().DurationVar(&startUpdateCmd.MonitorInterval, "interval", time.Second*5, "Interval at which to poll scheduler.")
+	startUpdateCmd.Cmd.Flags().DurationVar(&startUpdateCmd.MonitorTimeout, "timeout", time.Minute*10, "Time after which the monitor will stop polling and throw an error.")
}

var startCmd = &cobra.Command{
	Use: "start",
-	Short: "Start a service, maintenance on a host (DRAIN), a snapshot, or a backup.",
+	Short: "Start a service, maintenance on a host (DRAIN), a snapshot, an update, or a backup.",
}

-var startDrainCmd = monitorCmdConfig{
-	cmd: &cobra.Command{
+var startDrainCmd = internal.MonitorCmdConfig{
+	Cmd: &cobra.Command{
		Use:   "drain [space separated host list or use JSON flags]",
		Short: "Place a list of space separated Mesos Agents into draining mode.",
		Long: `Adds a Mesos Agent to Aurora's Drain list. Agents in this list

@@ -84,8 +91,8 @@ expects a space separated list of hosts to place into maintenance mode.`,
	},
}

-var startSLADrainCmd = monitorCmdConfig{
-	cmd: &cobra.Command{
+var startSLADrainCmd = internal.MonitorCmdConfig{
+	Cmd: &cobra.Command{
		Use:   "sla-drain [space separated host list or use JSON flags]",
		Short: "Place a list of space separated Mesos Agents into maintenance mode using SLA aware strategies.",
		Long: `Adds a Mesos Agent to Aurora's Drain list. Agents in this list

@@ -100,8 +107,8 @@ when a Job does not have a defined SLA policy.`,
	},
}

-var startMaintenanceCmd = monitorCmdConfig{
-	cmd: &cobra.Command{
+var startMaintenanceCmd = internal.MonitorCmdConfig{
+	Cmd: &cobra.Command{
		Use:   "maintenance [space separated host list or use JSON flags]",
		Short: "Place a list of space separated Mesos Agents into maintenance mode.",
		Long: `Places Mesos Agent into Maintenance mode. Agents in this list

@@ -111,6 +118,16 @@ expects a space separated list of hosts to place into maintenance mode.`,
	},
}

+var startUpdateCmd = internal.MonitorCmdConfig{
+	Cmd: &cobra.Command{
+		Use:   "update [update config]",
+		Short: "Start an update on an Aurora long running service.",
+		Long: `Starts the update process on an Aurora long running service. If no such service exists, the update mechanism
+will act as a deployment, creating all new instances based on the requirements in the update configuration.`,
+		Args: cobra.ExactArgs(1),
+	},
+}

func argsValidateJSONFlags(cmd *cobra.Command, args []string) error {
	if cmd.Flags().Changed(jsonFlag) && cmd.Flags().Changed(jsonFileFlag) {
		return errors.New("only json file or json stdin must be set")

@@ -162,15 +179,15 @@ func drain(cmd *cobra.Command, args []string) {

	log.Debugln(result)

-	log.Infof("Monitoring for %v at %v intervals", monitorHostCmd.monitorTimeout, monitorHostCmd.monitorInterval)
+	log.Infof("Monitoring for %v at %v intervals", monitorHostCmd.MonitorTimeout, monitorHostCmd.MonitorInterval)
	// Monitor change to DRAINING and DRAINED mode
	hostResult, err := client.MonitorHostMaintenance(
		hosts,
		[]aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED},
-		startDrainCmd.monitorInterval,
-		startDrainCmd.monitorTimeout)
+		startDrainCmd.MonitorInterval,
+		startDrainCmd.MonitorTimeout)

-	maintenanceMonitorPrint(hostResult, []aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED})
+	internal.MaintenanceMonitorPrint(hostResult, []aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED}, toJson)

	if err != nil {
		log.Fatalln(err)

@@ -180,7 +197,7 @@ func drain(cmd *cobra.Command, args []string) {
func slaDrainHosts(policy *aurora.SlaPolicy, interval, timeout time.Duration, hosts ...string) {
	result, err := client.SLADrainHosts(policy, int64(forceDrainTimeout.Seconds()), hosts...)
	if err != nil {
-		log.Fatalf("error: %+v\n", err)
+		log.Fatalf("error: %+v", err)
	}

	log.Debugln(result)

@@ -193,7 +210,7 @@ func slaDrainHosts(policy *aurora.SlaPolicy, interval, timeout time.Duration, ho
		interval,
		timeout)

-	maintenanceMonitorPrint(hostResult, []aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED})
+	internal.MaintenanceMonitorPrint(hostResult, []aurora.MaintenanceMode{aurora.MaintenanceMode_DRAINED}, toJson)

	if err != nil {
		log.Fatalf("error: %+v", err)

@@ -224,7 +241,7 @@ func slaDrain(cmd *cobra.Command, args []string) {
	}

	log.Infoln("Hosts affected: ", args)
-	slaDrainHosts(policy, startDrainCmd.monitorInterval, startDrainCmd.monitorTimeout, hosts...)
+	slaDrainHosts(policy, startDrainCmd.MonitorInterval, startDrainCmd.MonitorTimeout, hosts...)
}

func maintenance(cmd *cobra.Command, args []string) {

@@ -239,18 +256,22 @@ func maintenance(cmd *cobra.Command, args []string) {

	log.Debugln(result)

-	log.Infof("Monitoring for %v at %v intervals", monitorHostCmd.monitorTimeout, monitorHostCmd.monitorInterval)
+	log.Infof("Monitoring for %v at %v intervals", monitorHostCmd.MonitorTimeout, monitorHostCmd.MonitorInterval)

	// Monitor change to DRAINING and DRAINED mode
	hostResult, err := client.MonitorHostMaintenance(
		hosts,
		[]aurora.MaintenanceMode{aurora.MaintenanceMode_SCHEDULED},
-		startMaintenanceCmd.monitorInterval,
-		startMaintenanceCmd.monitorTimeout)
+		startMaintenanceCmd.MonitorInterval,
+		startMaintenanceCmd.MonitorTimeout)

-	maintenanceMonitorPrint(hostResult, []aurora.MaintenanceMode{aurora.MaintenanceMode_SCHEDULED})
+	internal.MaintenanceMonitorPrint(hostResult, []aurora.MaintenanceMode{aurora.MaintenanceMode_SCHEDULED}, toJson)

	if err != nil {
-		log.Fatalln("error: %+v", err)
+		log.Fatalf("error: %+v", err)
	}
}

+func update(cmd *cobra.Command, args []string) {
+
+}
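Two side notes on this file. First, the Fatalln("error: %+v", err) to Fatalf fix is a genuine bug fix: Fatalln does not interpret format verbs, so the %+v was printed literally. Second, slaDrain (mostly elided above) assembles an aurora.SlaPolicy from the count/percentage flags before handing it to slaDrainHosts. As a hedged sketch only, using the generated thrift types from gorealis/v2/gen-go (field names follow Aurora's api.thrift; verify against the generated code before relying on them):

package cmd

import "github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"

// Hypothetical policy: 80% of instances RUNNING, counting a task as
// active once it has been RUNNING for 60 seconds.
func examplePercentagePolicy() *aurora.SlaPolicy {
	return &aurora.SlaPolicy{
		PercentageSlaPolicy: &aurora.PercentageSlaPolicy{
			Percentage:   80.0,
			DurationSecs: 60,
		},
	}
}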
cmd/stop.go (23 changes)
@@ -17,20 +17,19 @@ package cmd

import (
	"time"

+	"github.com/aurora-scheduler/australis/internal"
	"github.com/aurora-scheduler/gorealis/v2/gen-go/apache/aurora"
	"github.com/spf13/cobra"
)

-var stopMaintenanceConfig = monitorCmdConfig{}
-
func init() {
	rootCmd.AddCommand(stopCmd)

	// Stop subcommands
-	stopCmd.AddCommand(stopMaintCmd.cmd)
-	stopMaintCmd.cmd.Run = endMaintenance
-	stopMaintCmd.cmd.Flags().DurationVar(&stopMaintenanceConfig.monitorInterval, "interval", time.Second*5, "Interval at which to poll scheduler.")
-	stopMaintCmd.cmd.Flags().DurationVar(&stopMaintenanceConfig.monitorTimeout, "timeout", time.Minute*1, "Time after which the monitor will stop polling and throw an error.")
+	stopCmd.AddCommand(stopMaintCmd.Cmd)
+	stopMaintCmd.Cmd.Run = endMaintenance
+	stopMaintCmd.Cmd.Flags().DurationVar(&stopMaintCmd.MonitorInterval, "interval", time.Second*5, "Interval at which to poll scheduler.")
+	stopMaintCmd.Cmd.Flags().DurationVar(&stopMaintCmd.MonitorTimeout, "timeout", time.Minute*1, "Time after which the monitor will stop polling and throw an error.")

	// Stop update

@@ -46,8 +45,8 @@ var stopCmd = &cobra.Command{
	Short: "Stop a service or maintenance on a host (DRAIN).",
}

-var stopMaintCmd = monitorCmdConfig{
-	cmd: &cobra.Command{
+var stopMaintCmd = internal.MonitorCmdConfig{
+	Cmd: &cobra.Command{
		Use:   "drain [space separated host list]",
		Short: "Stop maintenance on a host (move to NONE).",
		Long:  `Transition a list of hosts currently in a maintenance status out of it.`,

@@ -75,13 +74,13 @@ func endMaintenance(cmd *cobra.Command, args []string) {
	hostResult, err := client.MonitorHostMaintenance(
		args,
		[]aurora.MaintenanceMode{aurora.MaintenanceMode_NONE},
-		stopMaintenanceConfig.monitorInterval,
-		stopMaintenanceConfig.monitorTimeout)
+		stopMaintCmd.MonitorInterval,
+		stopMaintCmd.MonitorTimeout)

-	maintenanceMonitorPrint(hostResult, []aurora.MaintenanceMode{aurora.MaintenanceMode_NONE})
+	internal.MaintenanceMonitorPrint(hostResult, []aurora.MaintenanceMode{aurora.MaintenanceMode_NONE}, toJson)

	if err != nil {
-		log.Fatalln("error: %+v", err)
+		log.Fatalf("error: %+v", err)
	}
}
@@ -1,3 +1,17 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package internal

import (
@@ -27,15 +27,21 @@ import (
	"github.com/spf13/cobra"
	yaml "gopkg.in/yaml.v2"
)

type MonitorCmdConfig struct {
-	cmd                             *cobra.Command
-	monitorInterval, monitorTimeout time.Duration
-	statusList                      []string
+	Cmd                             *cobra.Command
+	MonitorInterval, MonitorTimeout time.Duration
+	StatusList                      []string
}

-func toJSON(v interface{}) string {
+var log *logrus.Logger
+
+// Logger sets the logger available to the internal package
+func Logger(l *logrus.Logger) {
+	log = l
+}
+
+// ToJSON converts an interface to a JSON formatted string
+func ToJSON(v interface{}) string {
	output, err := json.Marshal(v)

	if err != nil {

@@ -45,8 +51,7 @@ func toJSON(v interface{}) string {
	return string(output)
}

-func getLoggingLevels() string {
-
+func GetLoggingLevels() string {
	var buffer bytes.Buffer

	for _, level := range logrus.AllLevels {

@@ -57,16 +62,15 @@ func getLoggingLevels() string {
	buffer.Truncate(buffer.Len() - 1)

	return buffer.String()
-
}

-func maintenanceMonitorPrint(hostResult map[string]bool, desiredStates []aurora.MaintenanceMode) {
+func MaintenanceMonitorPrint(hostResult map[string]bool, desiredStates []aurora.MaintenanceMode, toJson bool) {
	if len(hostResult) > 0 {
		// Create anonymous struct for JSON formatting
		output := struct {
-			DesiredStates   []string `json:desired_states`
-			Transitioned    []string `json:transitioned`
-			NonTransitioned []string `json:non-transitioned`
+			DesiredStates   []string `json:"desired_states"`
+			Transitioned    []string `json:"transitioned"`
+			NonTransitioned []string `json:"non-transitioned"`
		}{
			make([]string, 0),
			make([]string, 0),

@@ -86,7 +90,7 @@ func maintenanceMonitorPrint(hostResult map[string]bool, desiredStates []aurora.
	}

	if toJson {
-		fmt.Println(toJSON(output))
+		fmt.Println(ToJSON(output))
	} else {
		fmt.Printf("Entered %v status: %v\n", output.DesiredStates, output.Transitioned)
		fmt.Printf("Did not enter %v status: %v\n", output.DesiredStates, output.NonTransitioned)
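The quoting fix to the anonymous struct's tags is a behavior fix, not cosmetics: Go struct tags follow the key:"value" convention, and an unquoted form such as json:desired_states is invisible to reflect.StructTag.Get, so encoding/json falls back to the Go field names (go vet's structtag check flags exactly this). Demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

type quoted struct {
	DesiredStates []string `json:"desired_states"` // well-formed tag
}

type unquoted struct {
	DesiredStates []string `json:desired_states` // malformed: value not quoted
}

func main() {
	q, _ := json.Marshal(quoted{DesiredStates: []string{"DRAINED"}})
	u, _ := json.Marshal(unquoted{DesiredStates: []string{"DRAINED"}})
	fmt.Println(string(q)) // {"desired_states":["DRAINED"]}
	fmt.Println(string(u)) // {"DesiredStates":["DRAINED"]} (tag ignored)
}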
@@ -1,3 +1,17 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package internal

import (