* Bumped up version to 1.21.1
* Moved admin functions to a new file. They are still part of the same pointer receiver type.
* Removed dead code and fixed some comments to add a space between the slashes and the comment text.
* Added setup and teardown to the run-tests script. It sets up a pod, runs all tests, and then tears down the pod.
* Added `--rm` to the Mac run-tests script.
* Removed the cookie jar from the transport layer as it's not needed.
* Changed all error messages to start with a lowercase letter and reworded some to be more descriptive.
* Added an argument that allows the retry mechanism to stop when a timeout has been encountered. This is useful for mutating API calls. Only StartUpdate and CreateService have stop-at-timeout enabled by default.
* Added two tests for when a call goes through despite the client timing out: one with a good payload, one with a bad payload.
* Updated the changelog with information about the error type returned.
* Added a test for duplicate metadata.
* Refactored the JobUpdateStatus monitor to use a new monitor called JobUpdateQuery. The update monitor will now continue even if it does not find an update to monitor. It has also been optimized to reduce the size of payloads returned by the scheduler as much as possible, by using the GetJobUpdateSummaries API instead of JobUpdateDetails and by including the statuses we're searching for as part of the query.
* Added documentation on how to handle a timeout on an API request; a sketch of the pattern follows this list.
* Optimized GetInstanceIds to create a copy of the JobKey being passed in, in order to avoid unexpected behavior. Instead of setting every field separately, a JobKey array is now created.
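
The timeout-handling item above deserves a concrete illustration. The sketch below shows the intended pattern: a timed-out mutating call is ambiguous, not failed, so the caller should reconcile with the scheduler before retrying. It assumes the paypal/gorealis import path, a `realis.Realis` client, a `realis.UpdateJob`, and an `IsTimeout` helper matching the error type mentioned in the changelog; treat the exact names and signatures as assumptions and check the godoc.

```go
package example

import (
	"log"

	realis "github.com/paypal/gorealis"
)

// startUpdateSafely sketches how to treat a client-side timeout on a
// mutating call. The Realis interface, UpdateJob type, StartJobUpdate
// signature, and IsTimeout helper are assumptions for illustration.
func startUpdateSafely(client realis.Realis, update *realis.UpdateJob) error {
	_, err := client.StartJobUpdate(update, "")
	if err == nil {
		return nil
	}

	if realis.IsTimeout(err) {
		// The scheduler may have accepted the update even though the client
		// timed out. Reconcile first (e.g. query the job's active update
		// summaries) instead of blindly retrying, or a duplicate update may
		// be created.
		log.Println("timed out: scheduler state unknown, reconcile before retrying")
		return err
	}

	// Any other error means the retry mechanism was already exhausted, so
	// the call can safely be treated as failed.
	return err
}
```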
/**
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package realis

import (
	"encoding/json"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/samuel/go-zookeeper/zk"
)
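
// Endpoint is a host and port pair at which a service instance can be reached.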
type Endpoint struct {
	Host string `json:"host"`
	Port int    `json:"port"`
}
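
// ServiceInstance mirrors the JSON service record that the Aurora scheduler
// writes to Zookeeper for the leading scheduler.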
type ServiceInstance struct {
	Service             Endpoint            `json:"serviceEndpoint"`
	AdditionalEndpoints map[string]Endpoint `json:"additionalEndpoints"`
	Status              string              `json:"status"`
}
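
// zkConfig holds the Zookeeper connection settings assembled from ZKOpt options.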
type zkConfig struct {
	endpoints []string
	path      string
	backoff   Backoff
	timeout   time.Duration
	logger    Logger
}
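
// ZKOpt is a functional option that mutates a zkConfig.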
type ZKOpt func(z *zkConfig)
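
// ZKEndpoints sets the Zookeeper endpoints to connect to.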
func ZKEndpoints(endpoints ...string) ZKOpt {
	return func(z *zkConfig) {
		z.endpoints = endpoints
	}
}
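
// ZKPath sets the Zookeeper path under which the Aurora leader is registered.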
func ZKPath(path string) ZKOpt {
	return func(z *zkConfig) {
		z.path = path
	}
}
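
// ZKBackoff sets the backoff policy used when retrying Zookeeper operations.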
func ZKBackoff(b Backoff) ZKOpt {
	return func(z *zkConfig) {
		z.backoff = b
	}
}
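
// ZKTimeout sets the Zookeeper session timeout.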
func ZKTimeout(d time.Duration) ZKOpt {
	return func(z *zkConfig) {
		z.timeout = d
	}
}
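
// ZKLogger sets the logger used while discovering the leader.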
func ZKLogger(l Logger) ZKOpt {
	return func(z *zkConfig) {
		z.logger = l
	}
}

// LeaderFromZK retrieves the current Aurora leader from ZK.
func LeaderFromZK(cluster Cluster) (string, error) {
	return LeaderFromZKOpts(ZKEndpoints(strings.Split(cluster.ZK, ",")...), ZKPath(cluster.SchedZKPath))
}

// LeaderFromZKOpts retrieves the current Aurora leader from ZK with a custom configuration.
func LeaderFromZKOpts(options ...ZKOpt) (string, error) {
	var leaderURL string

	// Load the default Zookeeper configuration, then override values with those provided by the caller.
	config := &zkConfig{backoff: defaultBackoff, timeout: time.Second * 10, logger: NoopLogger{}}
	for _, opt := range options {
		opt(config)
	}

	if len(config.endpoints) == 0 {
		return "", errors.New("no Zookeeper endpoints supplied")
	}

	if config.path == "" {
		return "", errors.New("no Zookeeper path supplied")
	}

	// Create a closure that allows us to use the ExponentialBackoff function.
	retryErr := ExponentialBackoff(config.backoff, config.logger, func() (bool, error) {
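
		// Dial a fresh connection on every attempt so a dead session from a
		// previous attempt is never reused.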
		c, _, err := zk.Connect(config.endpoints, config.timeout, func(c *zk.Conn) { c.SetLogger(config.logger) })
		if err != nil {
			return false, NewTemporaryError(errors.Wrap(err, "failed to connect to Zookeeper"))
		}

		defer c.Close()

		// Fetch the children of the given ZK path.
		children, _, _, err := c.ChildrenW(config.path)
		if err != nil {

			// Sentinel error check as there is no other way to check.
			if err == zk.ErrInvalidPath {
				return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", config.path)
			}

			return false, NewTemporaryError(errors.Wrapf(err, "path %s doesn't exist on Zookeeper", config.path))
		}

		// Search for the leader among all the children of the given path.
		serviceInst := new(ServiceInstance)
		for _, child := range children {

			// Only the leader will start with member_
			if strings.HasPrefix(child, "member_") {

				childPath := config.path + "/" + child
				data, _, err := c.Get(childPath)
				if err != nil {
					if err == zk.ErrInvalidPath {
						return false, errors.Wrapf(err, "path %s is an invalid Zookeeper path", childPath)
					}

					return false, NewTemporaryError(errors.Wrap(err, "unable to fetch contents of leader"))
				}

				err = json.Unmarshal([]byte(data), serviceInst)
				if err != nil {
					return false, NewTemporaryError(errors.Wrap(err, "unable to unmarshal contents of leader"))
				}

				// Should only be one endpoint.
				// This should never be encountered as it would indicate Aurora
				// writing bad info into Zookeeper but is kept here as a safety net.
				if len(serviceInst.AdditionalEndpoints) > 1 {
					return false, NewTemporaryError(errors.New("ambiguous endpoints in json blob, Aurora wrote bad info to ZK"))
				}

				var scheme, host, port string
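
				// Pull out the scheme, host, and port of the single advertised
				// endpoint; the check above guarantees at most one entry.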
				for k, v := range serviceInst.AdditionalEndpoints {
					scheme = k
					host = v.Host
					port = strconv.Itoa(v.Port)
				}

				leaderURL = scheme + "://" + host + ":" + port
				return true, nil
			}
		}

		// Leader data might not be available yet, try to fetch again.
		return false, NewTemporaryError(errors.New("no leader found"))
	})

	if retryErr != nil {
		config.logger.Printf("failed to determine leader after %v attempts", config.backoff.Steps)
		return "", retryErr
	}

	return leaderURL, nil
}