Code cleanup, added ability to attach a logger, added CreateService API

* Code cleanup: deleted multiple functions that had become stale. Removed the cluster example since manually creating a Cluster object is no longer necessary.

* Cleaned up the ZK connection code by using the ExponentialBackoff helper. Added an end-to-end test verifying that we retrieve the host correctly from ZK. Changed the clusters test to be an outside test package.
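The retry loop that previously lived inside LeaderFromZK now goes through the shared backoff helper (see the zk.go diff below). A minimal sketch of what such a helper can look like follows; the Backoff fields Duration, Factor, Jitter, and Steps appear in the diff, but the retry-on-error semantics and everything else in this sketch are assumptions, not the library's confirmed implementation.

package realis

import (
    "math/rand"
    "time"

    "github.com/pkg/errors"
)

// Backoff holds the retry policy. The field names mirror the defaultBackoff
// usage visible in the zk.go diff below; the rest of this file is a sketch.
type Backoff struct {
    Duration time.Duration // initial sleep between attempts
    Factor   float64       // multiplier applied to the sleep after each failed attempt
    Jitter   float64       // random jitter fraction; 0.0 disables jitter
    Steps    int           // maximum number of attempts
}

// Jitter returns a duration in the range [d, d+maxFactor*d).
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
    if maxFactor <= 0.0 {
        return duration
    }
    return duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
}

// ExponentialBackoff retries fn until it reports success or the step budget
// runs out, sleeping (with optional jitter) between attempts. Treating an
// error from fn as retryable is an assumption of this sketch.
func ExponentialBackoff(backoff Backoff, fn func() (bool, error)) error {
    var lastErr error
    duration := backoff.Duration

    for i := 0; i < backoff.Steps; i++ {
        if i != 0 {
            adjusted := duration
            if backoff.Jitter > 0.0 {
                adjusted = Jitter(duration, backoff.Jitter)
            }
            time.Sleep(adjusted)
            duration = time.Duration(float64(duration) * backoff.Factor)
        }

        done, err := fn()
        if done && err == nil {
            return nil
        }
        lastErr = err
    }

    if lastErr != nil {
        return errors.Wrapf(lastErr, "still failing after %v attempts", backoff.Steps)
    }
    return errors.Errorf("timed out after %v attempts", backoff.Steps)
}

In the diff below, the closure passed to this helper returns (true, nil) once a leader URL has been extracted, which is what ends the retry loop early.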

* Added a LeaderFromZKURL test to the end-to-end tests.
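Roughly the kind of call such an end-to-end test exercises is sketched below; the Cluster fields ZK and SchedZKPath come from the zk.go diff further down, while the Name field, the example values, and the import path are placeholders.

package main

import (
    "fmt"

    realis "github.com/rdelval/gorealis"
)

func main() {
    // ZK and SchedZKPath are the fields LeaderFromZK reads (see the zk.go
    // diff below); Name, the addresses, and the import path are placeholders.
    cluster := realis.Cluster{
        Name:        "devcluster",
        ZK:          "192.168.33.7:2181",
        SchedZKPath: "/aurora/scheduler",
    }

    // Returns the scheme://host:port URL of the current Aurora leader,
    // retrying internally with exponential backoff.
    leaderURL, err := realis.LeaderFromZK(cluster)
    if err != nil {
        fmt.Println("unable to determine leader:", err)
        return
    }

    fmt.Println("Aurora leader found at", leaderURL)
}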

* Added a logger to realisConfig so that users can attach their own loggers to the client. Logger is an interface that mirrors the most popular logging libraries; only Print, Println, and Printf are needed to satisfy the realis.Logger type. The example in the client uses the standard library's log package.
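A small sketch of what satisfying the interface looks like, assuming it requires exactly the three methods named above; the interface shape is described in this commit message, but the snippet itself is illustrative.

package main

import (
    "log"
    "os"
)

// Logger mirrors the three-method interface described above: any type that
// provides Print, Println, and Printf can be attached to the client.
type Logger interface {
    Print(v ...interface{})
    Println(v ...interface{})
    Printf(format string, v ...interface{})
}

// The standard library's *log.Logger already satisfies it.
var _ Logger = (*log.Logger)(nil)

func main() {
    l := log.New(os.Stdout, "realis: ", log.Ltime|log.Lshortfile)
    l.Println("this logger could be handed to the client via realisConfig")
}

When no logger is attached, the client falls back to a no-op implementation (see the next point and the NoopLogger type in the zk.go diff below).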

* Moved most fmt.Print* calls so they are redirected to the user-provided logger. The logger defaults to a no-op logger.

* Added CreateService to the realis interface. It uses the StartJobUpdate API to create services instead of the createJobs API.
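A rough sketch of the caller's side is below. The CreateService/StartJobUpdate relationship comes from this commit message; the client constructor, option names, job builder methods, and the exact CreateService signature shown here are assumptions for illustration only.

package main

import (
    "fmt"

    realis "github.com/rdelval/gorealis"
    "github.com/rdelval/gorealis/gen-go/apache/aurora"
)

func main() {
    // Client construction and option names are assumed for illustration.
    r, err := realis.NewRealisClient(realis.SchedulerUrl("http://localhost:8081"))
    if err != nil {
        fmt.Println(err)
        return
    }
    defer r.Close()

    // Job builder calls are assumed as well; only the idea of creating a
    // service through a job update comes from the commit message.
    job := realis.NewJob().
        Environment("prod").
        Role("vagrant").
        Name("hello_world_from_gorealis").
        CPU(0.25).
        RAM(64).
        Disk(100).
        InstanceCount(2)

    // Settings for the rolling update that brings the service up.
    settings := aurora.NewJobUpdateSettings()
    settings.UpdateGroupSize = 1

    // CreateService goes through the StartJobUpdate API instead of createJobs,
    // so the scheduler records the new service as a job update.
    resp, result, err := r.CreateService(job, settings)
    if err != nil {
        fmt.Println("unable to create service:", err)
        return
    }

    fmt.Println(resp.GetResponseCode(), result)
}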

* Bumped the version number inside the client in anticipation of a new release.

Authored by Renan DelValle on 2017-11-30 12:02:50 -08:00, committed by GitHub (commit e614e04f27, parent 72b746e431)
8 changed files with 292 additions and 298 deletions

zk.go (127 changed lines)

@@ -36,89 +36,70 @@ type ServiceInstance struct {
     Status string `json:"status"`
 }
+type NoopLogger struct{}
+func (NoopLogger) Printf(format string, a ...interface{}) {
+}
 // Retrieves current Aurora leader from ZK.
 func LeaderFromZK(cluster Cluster) (string, error) {
-    var err error
     var zkurl string
-    duration := defaultBackoff.Duration
-    for step := 0; step < defaultBackoff.Steps; step++ {
+    retryErr := ExponentialBackoff(defaultBackoff, func() (bool, error) {
-        // Attempt to find leader
-        zkurl, err = leaderFromZK(cluster)
-        if err == nil {
-            return zkurl, err
+        endpoints := strings.Split(cluster.ZK, ",")
+        //TODO (rdelvalle): When enabling debugging, change logger here
+        c, _, err := zk.Connect(endpoints, time.Second*10, func(c *zk.Conn) { c.SetLogger(NoopLogger{}) })
+        if err != nil {
+            return false, errors.Wrap(err, "Failed to connect to Zookeeper at "+cluster.ZK)
         }
-        // Backoff if we failed to determine leader
-        adjusted := duration
-        if defaultBackoff.Jitter > 0.0 {
-            adjusted = Jitter(duration, defaultBackoff.Jitter)
+        defer c.Close()
+        // Open up descriptor for the ZK path given
+        children, _, _, err := c.ChildrenW(cluster.SchedZKPath)
+        if err != nil {
+            return false, errors.Wrapf(err, "Path %s doesn't exist on Zookeeper ", cluster.SchedZKPath)
         }
-        fmt.Printf("Error determining Aurora leader: %v; retrying in %v\n", err, adjusted)
-        time.Sleep(adjusted)
-        duration = time.Duration(float64(duration) * defaultBackoff.Factor)
+        // Search for the leader through all the children in the given path
+        serviceInst := new(ServiceInstance)
+        for _, child := range children {
+            // Only the leader will start with member_
+            if strings.HasPrefix(child, "member_") {
+                data, _, err := c.Get(cluster.SchedZKPath + "/" + child)
+                if err != nil {
+                    return false, errors.Wrap(err, "Error fetching contents of leader")
+                }
+                err = json.Unmarshal([]byte(data), serviceInst)
+                if err != nil {
+                    return false, errors.Wrap(err, "Unable to unmarshall contents of leader")
+                }
+                // Should only be one endpoint
+                if len(serviceInst.AdditionalEndpoints) > 1 {
+                    fmt.Errorf("Ambiguous end points schemes")
+                }
+                var scheme, host, port string
+                for k, v := range serviceInst.AdditionalEndpoints {
+                    scheme = k
+                    host = v.Host
+                    port = strconv.Itoa(v.Port)
+                }
+                zkurl = scheme + "://" + host + ":" + port
+                return true, nil
+            }
+        }
+        return false, errors.New("No leader found")
+    })
+    if retryErr != nil {
+        return "", errors.Wrapf(retryErr, "Failed to determine leader after %v attempts", defaultBackoff.Steps)
     }
-    return "", errors.Wrapf(err, "Failed to determine leader after %v attempts", defaultBackoff.Steps)
-}
-func leaderFromZK(cluster Cluster) (string, error) {
-    endpoints := strings.Split(cluster.ZK, ",")
-    //TODO (rdelvalle): When enabling debugging, change logger here
-    c, _, err := zk.Connect(endpoints, time.Second*10, func(c *zk.Conn) { c.SetLogger(NoopLogger{}) })
-    if err != nil {
-        return "", errors.Wrap(err, "Failed to connect to Zookeeper at "+cluster.ZK)
-    }
-    defer c.Close()
-    children, _, _, err := c.ChildrenW(cluster.SchedZKPath)
-    if err != nil {
-        return "", errors.Wrapf(err, "Path %s doesn't exist on Zookeeper ", cluster.SchedZKPath)
-    }
-    serviceInst := new(ServiceInstance)
-    for _, child := range children {
-        // Only the leader will start with member_
-        if strings.HasPrefix(child, "member_") {
-            data, _, err := c.Get(cluster.SchedZKPath + "/" + child)
-            if err != nil {
-                return "", errors.Wrap(err, "Error fetching contents of leader")
-            }
-            err = json.Unmarshal([]byte(data), serviceInst)
-            if err != nil {
-                return "", errors.Wrap(err, "Unable to unmarshall contents of leader")
-            }
-            // Should only be one endpoint
-            if len(serviceInst.AdditionalEndpoints) > 1 {
-                fmt.Errorf("Ambiguous end points schemes")
-            }
-            var scheme, host, port string
-            for k, v := range serviceInst.AdditionalEndpoints {
-                scheme = k
-                host = v.Host
-                port = strconv.Itoa(v.Port)
-            }
-            return scheme + "://" + host + ":" + port, nil
-        }
-    }
-    return "", errors.New("No leader found")
+    return zkurl, nil
 }