Check in vendor folder for ease of use with go get.

This commit is contained in:
Renan DelValle 2018-10-23 23:32:59 -07:00
parent 7a1251853b
commit cdb4b5a1d0
No known key found for this signature in database
GPG key ID: C240AD6D6F443EC9
3554 changed files with 1270116 additions and 0 deletions

1
vendor/github.com/samuel/go-zookeeper/.gitignore generated vendored Normal file

@@ -0,0 +1 @@
.DS_Store

31
vendor/github.com/samuel/go-zookeeper/.travis.yml generated vendored Normal file

@@ -0,0 +1,31 @@
language: go
go:
- 1.9
jdk:
- oraclejdk9
sudo: false
branches:
only:
- master
before_install:
- wget http://apache.cs.utah.edu/zookeeper/zookeeper-3.4.10/zookeeper-3.4.10.tar.gz
- tar -zxvf zookeeper*tar.gz
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover
script:
- jdk_switcher use oraclejdk9
- go build ./...
- go fmt ./...
- go vet ./...
- go test -i -race ./...
- go test -race -covermode atomic -coverprofile=profile.cov ./zk
- goveralls -coverprofile=profile.cov -service=travis-ci
env:
global:
secure: Coha3DDcXmsekrHCZlKvRAc+pMBaQU1QS/3++3YCCUXVDBWgVsC1ZIc9df4RLdZ/ncGd86eoRq/S+zyn1XbnqK5+ePqwJoUnJ59BE8ZyHLWI9ajVn3fND1MTduu/ksGsS79+IYbdVI5wgjSgjD3Ktp6Y5uPl+BPosjYBGdNcHS4=

25
vendor/github.com/samuel/go-zookeeper/LICENSE generated vendored Normal file

@@ -0,0 +1,25 @@
Copyright (c) 2013, Samuel Stauffer <samuel@descolada.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

11
vendor/github.com/samuel/go-zookeeper/README.md generated vendored Normal file

@@ -0,0 +1,11 @@
Native Go Zookeeper Client Library
===================================
[![GoDoc](https://godoc.org/github.com/samuel/go-zookeeper?status.svg)](https://godoc.org/github.com/samuel/go-zookeeper)
[![Build Status](https://travis-ci.org/samuel/go-zookeeper.png)](https://travis-ci.org/samuel/go-zookeeper)
[![Coverage Status](https://coveralls.io/repos/github/samuel/go-zookeeper/badge.svg?branch=master)](https://coveralls.io/github/samuel/go-zookeeper?branch=master)
License
-------
3-clause BSD. See LICENSE file.


@@ -0,0 +1,22 @@
package main
import (
"fmt"
"time"
"github.com/samuel/go-zookeeper/zk"
)
func main() {
c, _, err := zk.Connect([]string{"127.0.0.1"}, time.Second*10)
if err != nil {
panic(err)
}
children, stat, ch, err := c.ChildrenW("/")
if err != nil {
panic(err)
}
fmt.Printf("%+v %+v\n", children, stat)
e := <-ch
fmt.Printf("%+v\n", e)
}


@@ -0,0 +1,314 @@
package zk
import (
"sync"
"testing"
"time"
)
type logWriter struct {
t *testing.T
p string
}
func (lw logWriter) Write(b []byte) (int, error) {
lw.t.Logf("%s%s", lw.p, string(b))
return len(b), nil
}
func TestBasicCluster(t *testing.T) {
ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk1, err := ts.Connect(0)
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk1.Close()
zk2, err := ts.Connect(1)
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk2.Close()
time.Sleep(time.Second * 5)
if _, err := zk1.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create failed on node 1: %+v", err)
}
if by, _, err := zk2.Get("/gozk-test"); err != nil {
t.Fatalf("Get failed on node 2: %+v", err)
} else if string(by) != "foo-cluster" {
t.Fatal("Wrong data for node 2")
}
}
// If the current leader dies, then the session is reestablished with the new one.
func TestClientClusterFailover(t *testing.T) {
tc, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer tc.Stop()
zk, evCh, err := tc.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
sl := NewStateLogger(evCh)
hasSessionEvent1 := sl.NewWatcher(sessionStateMatcher(StateHasSession)).Wait(8 * time.Second)
if hasSessionEvent1 == nil {
t.Fatalf("Failed to connect and get session")
}
if _, err := zk.Create("/gozk-test", []byte("foo-cluster"), 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create failed on node 1: %+v", err)
}
hasSessionWatcher2 := sl.NewWatcher(sessionStateMatcher(StateHasSession))
// Kill the current leader
tc.StopServer(hasSessionEvent1.Server)
// Wait for the session to be reconnected with the new leader.
if hasSessionWatcher2.Wait(8*time.Second) == nil {
t.Fatalf("Failover failed")
}
if by, _, err := zk.Get("/gozk-test"); err != nil {
t.Fatalf("Get failed on node 2: %+v", err)
} else if string(by) != "foo-cluster" {
t.Fatal("Wrong data for node 2")
}
}
// If a ZooKeeper cluster loses quorum then a session is reconnected as soon
// as the quorum is restored.
func TestNoQuorum(t *testing.T) {
tc, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer tc.Stop()
zk, evCh, err := tc.ConnectAllTimeout(4 * time.Second)
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
sl := NewStateLogger(evCh)
// Wait for initial session to be established
hasSessionEvent1 := sl.NewWatcher(sessionStateMatcher(StateHasSession)).Wait(8 * time.Second)
if hasSessionEvent1 == nil {
t.Fatalf("Failed to connect and get session")
}
initialSessionID := zk.sessionID
DefaultLogger.Printf(" Session established: id=%d, timeout=%d", zk.sessionID, zk.sessionTimeoutMs)
// Kill the ZooKeeper leader and wait for the session to reconnect.
DefaultLogger.Printf(" Kill the leader")
disconnectWatcher1 := sl.NewWatcher(sessionStateMatcher(StateDisconnected))
hasSessionWatcher2 := sl.NewWatcher(sessionStateMatcher(StateHasSession))
tc.StopServer(hasSessionEvent1.Server)
disconnectedEvent1 := disconnectWatcher1.Wait(8 * time.Second)
if disconnectedEvent1 == nil {
t.Fatalf("Failover failed, missed StateDisconnected event")
}
if disconnectedEvent1.Server != hasSessionEvent1.Server {
t.Fatalf("Unexpected StateDisconnected event, expected=%s, actual=%s",
hasSessionEvent1.Server, disconnectedEvent1.Server)
}
hasSessionEvent2 := hasSessionWatcher2.Wait(8 * time.Second)
if hasSessionEvent2 == nil {
t.Fatalf("Failover failed, missed StateHasSession event")
}
// Kill the ZooKeeper leader leaving the cluster without quorum.
DefaultLogger.Printf(" Kill the leader")
disconnectWatcher2 := sl.NewWatcher(sessionStateMatcher(StateDisconnected))
tc.StopServer(hasSessionEvent2.Server)
disconnectedEvent2 := disconnectWatcher2.Wait(8 * time.Second)
if disconnectedEvent2 == nil {
t.Fatalf("Failover failed, missed StateDisconnected event")
}
if disconnectedEvent2.Server != hasSessionEvent2.Server {
t.Fatalf("Unexpected StateDisconnected event, expected=%s, actual=%s",
hasSessionEvent2.Server, disconnectedEvent2.Server)
}
// Make sure that we keep retrying connecting to the only remaining
// ZooKeeper server, but the attempts are being dropped because there is
// no quorum.
DefaultLogger.Printf(" Retrying no luck...")
var firstDisconnect *Event
begin := time.Now()
for time.Now().Sub(begin) < 6*time.Second {
disconnectedEvent := sl.NewWatcher(sessionStateMatcher(StateDisconnected)).Wait(4 * time.Second)
if disconnectedEvent == nil {
t.Fatalf("Disconnected event expected")
}
if firstDisconnect == nil {
firstDisconnect = disconnectedEvent
continue
}
if disconnectedEvent.Server != firstDisconnect.Server {
t.Fatalf("Disconnect from wrong server: expected=%s, actual=%s",
firstDisconnect.Server, disconnectedEvent.Server)
}
}
// Start a ZooKeeper node to restore quorum.
hasSessionWatcher3 := sl.NewWatcher(sessionStateMatcher(StateHasSession))
tc.StartServer(hasSessionEvent1.Server)
// Make sure that session is reconnected with the same ID.
hasSessionEvent3 := hasSessionWatcher3.Wait(8 * time.Second)
if hasSessionEvent3 == nil {
t.Fatalf("Session has not been reconnected")
}
if zk.sessionID != initialSessionID {
t.Fatalf("Wrong session ID: expected=%d, actual=%d", initialSessionID, zk.sessionID)
}
// Make sure that the session is not dropped soon after reconnect
e := sl.NewWatcher(sessionStateMatcher(StateDisconnected)).Wait(6 * time.Second)
if e != nil {
t.Fatalf("Unexpected disconnect")
}
}
func TestWaitForClose(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, err := ts.Connect(0)
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
timeout := time.After(30 * time.Second)
CONNECTED:
for {
select {
case ev := <-zk.eventChan:
if ev.State == StateConnected {
break CONNECTED
}
case <-timeout:
zk.Close()
t.Fatal("Timeout")
}
}
zk.Close()
for {
select {
case _, ok := <-zk.eventChan:
if !ok {
return
}
case <-timeout:
t.Fatal("Timeout")
}
}
}
func TestBadSession(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
zk.conn.Close()
time.Sleep(time.Millisecond * 100)
if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
}
type EventLogger struct {
events []Event
watchers []*EventWatcher
lock sync.Mutex
wg sync.WaitGroup
}
func NewStateLogger(eventCh <-chan Event) *EventLogger {
el := &EventLogger{}
el.wg.Add(1)
go func() {
defer el.wg.Done()
for event := range eventCh {
el.lock.Lock()
for _, sw := range el.watchers {
if !sw.triggered && sw.matcher(event) {
sw.triggered = true
sw.matchCh <- event
}
}
DefaultLogger.Printf(" event received: %v\n", event)
el.events = append(el.events, event)
el.lock.Unlock()
}
}()
return el
}
func (el *EventLogger) NewWatcher(matcher func(Event) bool) *EventWatcher {
ew := &EventWatcher{matcher: matcher, matchCh: make(chan Event, 1)}
el.lock.Lock()
el.watchers = append(el.watchers, ew)
el.lock.Unlock()
return ew
}
func (el *EventLogger) Events() []Event {
el.lock.Lock()
transitions := make([]Event, len(el.events))
copy(transitions, el.events)
el.lock.Unlock()
return transitions
}
func (el *EventLogger) Wait4Stop() {
el.wg.Wait()
}
type EventWatcher struct {
matcher func(Event) bool
matchCh chan Event
triggered bool
}
func (ew *EventWatcher) Wait(timeout time.Duration) *Event {
select {
case event := <-ew.matchCh:
return &event
case <-time.After(timeout):
return nil
}
}
func sessionStateMatcher(s State) func(Event) bool {
return func(e Event) bool {
return e.Type == EventSession && e.State == s
}
}

1193
vendor/github.com/samuel/go-zookeeper/zk/conn.go generated vendored Normal file

File diff suppressed because it is too large

240
vendor/github.com/samuel/go-zookeeper/zk/constants.go generated vendored Normal file

@@ -0,0 +1,240 @@
package zk
import (
"errors"
)
const (
protocolVersion = 0
DefaultPort = 2181
)
const (
opNotify = 0
opCreate = 1
opDelete = 2
opExists = 3
opGetData = 4
opSetData = 5
opGetAcl = 6
opSetAcl = 7
opGetChildren = 8
opSync = 9
opPing = 11
opGetChildren2 = 12
opCheck = 13
opMulti = 14
opClose = -11
opSetAuth = 100
opSetWatches = 101
opError = -1
// Not in protocol, used internally
opWatcherEvent = -2
)
const (
EventNodeCreated EventType = 1
EventNodeDeleted EventType = 2
EventNodeDataChanged EventType = 3
EventNodeChildrenChanged EventType = 4
EventSession EventType = -1
EventNotWatching EventType = -2
)
var (
eventNames = map[EventType]string{
EventNodeCreated: "EventNodeCreated",
EventNodeDeleted: "EventNodeDeleted",
EventNodeDataChanged: "EventNodeDataChanged",
EventNodeChildrenChanged: "EventNodeChildrenChanged",
EventSession: "EventSession",
EventNotWatching: "EventNotWatching",
}
)
const (
StateUnknown State = -1
StateDisconnected State = 0
StateConnecting State = 1
StateAuthFailed State = 4
StateConnectedReadOnly State = 5
StateSaslAuthenticated State = 6
StateExpired State = -112
StateConnected = State(100)
StateHasSession = State(101)
)
const (
FlagEphemeral = 1
FlagSequence = 2
)
var (
stateNames = map[State]string{
StateUnknown: "StateUnknown",
StateDisconnected: "StateDisconnected",
StateConnectedReadOnly: "StateConnectedReadOnly",
StateSaslAuthenticated: "StateSaslAuthenticated",
StateExpired: "StateExpired",
StateAuthFailed: "StateAuthFailed",
StateConnecting: "StateConnecting",
StateConnected: "StateConnected",
StateHasSession: "StateHasSession",
}
)
type State int32
func (s State) String() string {
if name := stateNames[s]; name != "" {
return name
}
return "Unknown"
}
type ErrCode int32
var (
ErrConnectionClosed = errors.New("zk: connection closed")
ErrUnknown = errors.New("zk: unknown error")
ErrAPIError = errors.New("zk: api error")
ErrNoNode = errors.New("zk: node does not exist")
ErrNoAuth = errors.New("zk: not authenticated")
ErrBadVersion = errors.New("zk: version conflict")
ErrNoChildrenForEphemerals = errors.New("zk: ephemeral nodes may not have children")
ErrNodeExists = errors.New("zk: node already exists")
ErrNotEmpty = errors.New("zk: node has children")
ErrSessionExpired = errors.New("zk: session has been expired by the server")
ErrInvalidACL = errors.New("zk: invalid ACL specified")
ErrAuthFailed = errors.New("zk: client authentication failed")
ErrClosing = errors.New("zk: zookeeper is closing")
ErrNothing = errors.New("zk: no server responses to process")
ErrSessionMoved = errors.New("zk: session moved to another server, so operation is ignored")
// ErrInvalidCallback = errors.New("zk: invalid callback specified")
errCodeToError = map[ErrCode]error{
0: nil,
errAPIError: ErrAPIError,
errNoNode: ErrNoNode,
errNoAuth: ErrNoAuth,
errBadVersion: ErrBadVersion,
errNoChildrenForEphemerals: ErrNoChildrenForEphemerals,
errNodeExists: ErrNodeExists,
errNotEmpty: ErrNotEmpty,
errSessionExpired: ErrSessionExpired,
// errInvalidCallback: ErrInvalidCallback,
errInvalidAcl: ErrInvalidACL,
errAuthFailed: ErrAuthFailed,
errClosing: ErrClosing,
errNothing: ErrNothing,
errSessionMoved: ErrSessionMoved,
}
)
func (e ErrCode) toError() error {
if err, ok := errCodeToError[e]; ok {
return err
}
return ErrUnknown
}
const (
errOk = 0
// System and server-side errors
errSystemError = -1
errRuntimeInconsistency = -2
errDataInconsistency = -3
errConnectionLoss = -4
errMarshallingError = -5
errUnimplemented = -6
errOperationTimeout = -7
errBadArguments = -8
errInvalidState = -9
// API errors
errAPIError ErrCode = -100
errNoNode ErrCode = -101 // *
errNoAuth ErrCode = -102
errBadVersion ErrCode = -103 // *
errNoChildrenForEphemerals ErrCode = -108
errNodeExists ErrCode = -110 // *
errNotEmpty ErrCode = -111
errSessionExpired ErrCode = -112
errInvalidCallback ErrCode = -113
errInvalidAcl ErrCode = -114
errAuthFailed ErrCode = -115
errClosing ErrCode = -116
errNothing ErrCode = -117
errSessionMoved ErrCode = -118
)
// Constants for ACL permissions
const (
PermRead = 1 << iota
PermWrite
PermCreate
PermDelete
PermAdmin
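// PermAll is the bitwise OR of all five permissions above (0x1f).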
PermAll = 0x1f
)
var (
emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
opNames = map[int32]string{
opNotify: "notify",
opCreate: "create",
opDelete: "delete",
opExists: "exists",
opGetData: "getData",
opSetData: "setData",
opGetAcl: "getACL",
opSetAcl: "setACL",
opGetChildren: "getChildren",
opSync: "sync",
opPing: "ping",
opGetChildren2: "getChildren2",
opCheck: "check",
opMulti: "multi",
opClose: "close",
opSetAuth: "setAuth",
opSetWatches: "setWatches",
opWatcherEvent: "watcherEvent",
}
)
type EventType int32
func (t EventType) String() string {
if name := eventNames[t]; name != "" {
return name
}
return "Unknown"
}
// Mode is used to build custom server modes (leader|follower|standalone).
type Mode uint8
func (m Mode) String() string {
if name := modeNames[m]; name != "" {
return name
}
return "unknown"
}
const (
ModeUnknown Mode = iota
ModeLeader
ModeFollower
ModeStandalone
)
var (
modeNames = map[Mode]string{
ModeLeader: "leader",
ModeFollower: "follower",
ModeStandalone: "standalone",
}
)


@@ -0,0 +1,24 @@
package zk
import (
"fmt"
"testing"
)
func TestModeString(t *testing.T) {
if fmt.Sprintf("%v", ModeUnknown) != "unknown" {
t.Errorf("unknown value should be 'unknown'")
}
if fmt.Sprintf("%v", ModeLeader) != "leader" {
t.Errorf("leader value should be 'leader'")
}
if fmt.Sprintf("%v", ModeFollower) != "follower" {
t.Errorf("follower value should be 'follower'")
}
if fmt.Sprintf("%v", ModeStandalone) != "standalone" {
t.Errorf("standlone value should be 'standalone'")
}
}


@@ -0,0 +1,88 @@
package zk
import (
"fmt"
"net"
"sync"
)
// DNSHostProvider is the default HostProvider. It currently matches
// the Java StaticHostProvider, resolving hosts from DNS once during
// the call to Init. It could be easily extended to re-query DNS
// periodically or if there is trouble connecting.
type DNSHostProvider struct {
mu sync.Mutex // Protects everything, so we can add asynchronous updates later.
servers []string
curr int
last int
lookupHost func(string) ([]string, error) // Override of net.LookupHost, for testing.
}
// Init is called first, with the servers specified in the connection
// string. It uses DNS to look up addresses for each server, then
// shuffles them all together.
func (hp *DNSHostProvider) Init(servers []string) error {
hp.mu.Lock()
defer hp.mu.Unlock()
lookupHost := hp.lookupHost
if lookupHost == nil {
lookupHost = net.LookupHost
}
found := []string{}
for _, server := range servers {
host, port, err := net.SplitHostPort(server)
if err != nil {
return err
}
addrs, err := lookupHost(host)
if err != nil {
return err
}
for _, addr := range addrs {
found = append(found, net.JoinHostPort(addr, port))
}
}
if len(found) == 0 {
return fmt.Errorf("No hosts found for addresses %q", servers)
}
// Randomize the order of the servers to avoid creating hotspots
stringShuffle(found)
hp.servers = found
hp.curr = -1
hp.last = -1
return nil
}
// Len returns the number of servers available
func (hp *DNSHostProvider) Len() int {
hp.mu.Lock()
defer hp.mu.Unlock()
return len(hp.servers)
}
// Next returns the next server to connect to. retryStart will be true
// if we've looped through all known servers without Connected() being
// called.
func (hp *DNSHostProvider) Next() (server string, retryStart bool) {
hp.mu.Lock()
defer hp.mu.Unlock()
hp.curr = (hp.curr + 1) % len(hp.servers)
retryStart = hp.curr == hp.last
if hp.last == -1 {
hp.last = 0
}
return hp.servers[hp.curr], retryStart
}
// Connected notifies the HostProvider of a successful connection.
func (hp *DNSHostProvider) Connected() {
hp.mu.Lock()
defer hp.mu.Unlock()
hp.last = hp.curr
}
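Since the Next/Connected contract is easy to misread, here is a minimal sketch of driving the provider by hand rather than through Connect; the hostname is a placeholder and the code assumes DNS resolves it:
package main
import (
"fmt"
"github.com/samuel/go-zookeeper/zk"
)
func main() {
hp := &zk.DNSHostProvider{}
// Init resolves every host once and shuffles the results.
if err := hp.Init([]string{"zk.example.com:2181"}); err != nil { // placeholder host
panic(err)
}
// Next cycles through the resolved addresses; retryStart becomes true
// once every server since the last Connected() call has been tried.
for i := 0; i < hp.Len()+1; i++ {
server, retryStart := hp.Next()
fmt.Printf("try %s (retryStart=%v)\n", server, retryStart)
}
hp.Connected() // mark the most recently returned server as good
}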


@@ -0,0 +1,224 @@
package zk
import (
"fmt"
"log"
"testing"
"time"
)
// localhostLookupHost is a test replacement for net.LookupHost that
// always returns 127.0.0.1
func localhostLookupHost(host string) ([]string, error) {
return []string{"127.0.0.1"}, nil
}
// TestDNSHostProviderCreate is just like TestCreate, but with an
// overridden HostProvider that ignores the provided hostname.
func TestDNSHostProviderCreate(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
port := ts.Servers[0].Port
server := fmt.Sprintf("foo.example.com:%d", port)
hostProvider := &DNSHostProvider{lookupHost: localhostLookupHost}
zk, _, err := Connect([]string{server}, time.Second*15, WithHostProvider(hostProvider))
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
path := "/gozk-test"
if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create returned error: %+v", err)
} else if p != path {
t.Fatalf("Create returned different path '%s' != '%s'", p, path)
}
if data, stat, err := zk.Get(path); err != nil {
t.Fatalf("Get returned error: %+v", err)
} else if stat == nil {
t.Fatal("Get returned nil stat")
} else if len(data) < 4 {
t.Fatal("Get returned wrong size data")
}
}
// localHostPortsFacade wraps a HostProvider, remapping the
// address/port combinations it returns to "localhost:$PORT" where
// $PORT is chosen from the provided ports.
type localHostPortsFacade struct {
inner HostProvider // The wrapped HostProvider
ports []int // The provided list of ports
nextPort int // The next port to use
mapped map[string]string // Already-mapped address/port combinations
}
func newLocalHostPortsFacade(inner HostProvider, ports []int) *localHostPortsFacade {
return &localHostPortsFacade{
inner: inner,
ports: ports,
mapped: make(map[string]string),
}
}
func (lhpf *localHostPortsFacade) Len() int { return lhpf.inner.Len() }
func (lhpf *localHostPortsFacade) Connected() { lhpf.inner.Connected() }
func (lhpf *localHostPortsFacade) Init(servers []string) error { return lhpf.inner.Init(servers) }
func (lhpf *localHostPortsFacade) Next() (string, bool) {
server, retryStart := lhpf.inner.Next()
// If we've already set up a mapping for that server, just return it.
if localMapping := lhpf.mapped[server]; localMapping != "" {
return localMapping, retryStart
}
if lhpf.nextPort == len(lhpf.ports) {
log.Fatalf("localHostPortsFacade out of ports to assign to %q; current config: %q", server, lhpf.mapped)
}
localMapping := fmt.Sprintf("localhost:%d", lhpf.ports[lhpf.nextPort])
lhpf.mapped[server] = localMapping
lhpf.nextPort++
return localMapping, retryStart
}
var _ HostProvider = &localHostPortsFacade{}
// TestDNSHostProviderReconnect tests that the zk.Conn correctly
// reconnects when the Zookeeper instance it's connected to
// restarts. It wraps the DNSHostProvider in a lightweight facade that
// remaps addresses to localhost:$PORT combinations corresponding to
// the test ZooKeeper instances.
func TestDNSHostProviderReconnect(t *testing.T) {
ts, err := StartTestCluster(3, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
innerHp := &DNSHostProvider{lookupHost: func(host string) ([]string, error) {
return []string{"192.0.2.1", "192.0.2.2", "192.0.2.3"}, nil
}}
ports := make([]int, 0, len(ts.Servers))
for _, server := range ts.Servers {
ports = append(ports, server.Port)
}
hp := newLocalHostPortsFacade(innerHp, ports)
zk, _, err := Connect([]string{"foo.example.com:12345"}, time.Second, WithHostProvider(hp))
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
path := "/gozk-test"
// Initial operation to force connection.
if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
// Figure out which server we're connected to.
currentServer := zk.Server()
t.Logf("Connected to %q. Finding test server index…", currentServer)
serverIndex := -1
for i, server := range ts.Servers {
server := fmt.Sprintf("localhost:%d", server.Port)
t.Logf("…trying %q", server)
if currentServer == server {
serverIndex = i
t.Logf("…found at index %d", i)
break
}
}
if serverIndex == -1 {
t.Fatalf("Cannot determine test server index.")
}
// Restart the connected server.
ts.Servers[serverIndex].Srv.Stop()
ts.Servers[serverIndex].Srv.Start()
// Continue with the basic TestCreate tests.
if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create returned error: %+v", err)
} else if p != path {
t.Fatalf("Create returned different path '%s' != '%s'", p, path)
}
if data, stat, err := zk.Get(path); err != nil {
t.Fatalf("Get returned error: %+v", err)
} else if stat == nil {
t.Fatal("Get returned nil stat")
} else if len(data) < 4 {
t.Fatal("Get returned wrong size data")
}
if zk.Server() == currentServer {
t.Errorf("Still connected to %q after restart.", currentServer)
}
}
// TestDNSHostProviderRetryStart tests the `retryStart` functionality
// of DNSHostProvider.
// It's also probably the clearest visual explanation of exactly how
// it works.
func TestDNSHostProviderRetryStart(t *testing.T) {
t.Parallel()
hp := &DNSHostProvider{lookupHost: func(host string) ([]string, error) {
return []string{"192.0.2.1", "192.0.2.2", "192.0.2.3"}, nil
}}
if err := hp.Init([]string{"foo.example.com:12345"}); err != nil {
t.Fatal(err)
}
testdata := []struct {
retryStartWant bool
callConnected bool
}{
// Repeated failures.
{false, false},
{false, false},
{false, false},
{true, false},
{false, false},
{false, false},
{true, true},
// One success offsets things.
{false, false},
{false, true},
{false, true},
// Repeated successes.
{false, true},
{false, true},
{false, true},
{false, true},
{false, true},
// And some more failures.
{false, false},
{false, false},
{true, false}, // Looped back to last known good server: all alternates failed.
{false, false},
}
for i, td := range testdata {
_, retryStartGot := hp.Next()
if retryStartGot != td.retryStartWant {
t.Errorf("%d: retryStart=%v; want %v", i, retryStartGot, td.retryStartWant)
}
if td.callConnected {
hp.Connected()
}
}
}

266
vendor/github.com/samuel/go-zookeeper/zk/flw.go generated vendored Normal file

@@ -0,0 +1,266 @@
package zk
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"net"
"regexp"
"strconv"
"strings"
"time"
)
// FLWSrvr is a FourLetterWord helper function. In particular, this function pulls the srvr output
// from the zookeeper instances and parses the output. A slice of *ServerStats structs is returned
// as well as a boolean value to indicate whether this function processed successfully.
//
// If the boolean value is false there was a problem. If the *ServerStats slice is empty or nil,
// then the error happened before we started to obtain 'srvr' values. Otherwise, one of the
// servers had an issue and the "Error" value in the struct should be inspected to determine
// which server had the issue.
func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) {
// different parts of the regular expression that are required to parse the srvr output
const (
zrVer = `^Zookeeper version: ([A-Za-z0-9\.\-]+), built on (\d\d/\d\d/\d\d\d\d \d\d:\d\d [A-Za-z0-9:\+\-]+)`
zrLat = `^Latency min/avg/max: (\d+)/(\d+)/(\d+)`
zrNet = `^Received: (\d+).*\n^Sent: (\d+).*\n^Connections: (\d+).*\n^Outstanding: (\d+)`
zrState = `^Zxid: (0x[A-Za-z0-9]+).*\n^Mode: (\w+).*\n^Node count: (\d+)`
)
// build the regex from the pieces above
re, err := regexp.Compile(fmt.Sprintf(`(?m:\A%v.*\n%v.*\n%v.*\n%v)`, zrVer, zrLat, zrNet, zrState))
if err != nil {
return nil, false
}
imOk := true
servers = FormatServers(servers)
ss := make([]*ServerStats, len(servers))
for i := range ss {
response, err := fourLetterWord(servers[i], "srvr", timeout)
if err != nil {
ss[i] = &ServerStats{Error: err}
imOk = false
continue
}
matches := re.FindAllStringSubmatch(string(response), -1)
if matches == nil {
err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)")
ss[i] = &ServerStats{Error: err}
imOk = false
continue
}
match := matches[0][1:]
// determine current server
var srvrMode Mode
switch match[10] {
case "leader":
srvrMode = ModeLeader
case "follower":
srvrMode = ModeFollower
case "standalone":
srvrMode = ModeStandalone
default:
srvrMode = ModeUnknown
}
buildTime, err := time.Parse("01/02/2006 15:04 MST", match[1])
if err != nil {
ss[i] = &ServerStats{Error: err}
imOk = false
continue
}
parsedInt, err := strconv.ParseInt(match[9], 0, 64)
if err != nil {
ss[i] = &ServerStats{Error: err}
imOk = false
continue
}
// the ZxID value is an int64 with two int32s packed inside
// the high int32 is the epoch (i.e., number of leader elections)
// the low int32 is the counter
epoch := int32(parsedInt >> 32)
counter := int32(parsedInt & 0xFFFFFFFF)
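// e.g. the zxid 0x110a7a8f37 unpacks to epoch 0x11 = 17 and counter 0x0a7a8f37 = 175804215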
// within the regex above, these values must be numerical
// so we can avoid useless checking of the error return value
minLatency, _ := strconv.ParseInt(match[2], 0, 64)
avgLatency, _ := strconv.ParseInt(match[3], 0, 64)
maxLatency, _ := strconv.ParseInt(match[4], 0, 64)
recv, _ := strconv.ParseInt(match[5], 0, 64)
sent, _ := strconv.ParseInt(match[6], 0, 64)
cons, _ := strconv.ParseInt(match[7], 0, 64)
outs, _ := strconv.ParseInt(match[8], 0, 64)
ncnt, _ := strconv.ParseInt(match[11], 0, 64)
ss[i] = &ServerStats{
Sent: sent,
Received: recv,
NodeCount: ncnt,
MinLatency: minLatency,
AvgLatency: avgLatency,
MaxLatency: maxLatency,
Connections: cons,
Outstanding: outs,
Epoch: epoch,
Counter: counter,
BuildTime: buildTime,
Mode: srvrMode,
Version: match[0],
}
}
return ss, imOk
}
// FLWRuok is a FourLetterWord helper function. In particular, this function
// pulls the ruok output from each server.
func FLWRuok(servers []string, timeout time.Duration) []bool {
servers = FormatServers(servers)
oks := make([]bool, len(servers))
for i := range oks {
response, err := fourLetterWord(servers[i], "ruok", timeout)
if err != nil {
continue
}
if bytes.Equal(response[:4], []byte("imok")) {
oks[i] = true
}
}
return oks
}
// FLWCons is a FourLetterWord helper function. In particular, this function
// pulls the cons output from each server.
//
// As with FLWSrvr, the boolean value indicates whether one of the requests had
// an issue. The Clients struct has an Error value that can be checked.
func FLWCons(servers []string, timeout time.Duration) ([]*ServerClients, bool) {
const (
zrAddr = `^ /((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):(?:\d+))\[\d+\]`
zrPac = `\(queued=(\d+),recved=(\d+),sent=(\d+),sid=(0x[A-Za-z0-9]+),lop=(\w+),est=(\d+),to=(\d+),`
zrSesh = `lcxid=(0x[A-Za-z0-9]+),lzxid=(0x[A-Za-z0-9]+),lresp=(\d+),llat=(\d+),minlat=(\d+),avglat=(\d+),maxlat=(\d+)\)`
)
re, err := regexp.Compile(fmt.Sprintf("%v%v%v", zrAddr, zrPac, zrSesh))
if err != nil {
return nil, false
}
servers = FormatServers(servers)
sc := make([]*ServerClients, len(servers))
imOk := true
for i := range sc {
response, err := fourLetterWord(servers[i], "cons", timeout)
if err != nil {
sc[i] = &ServerClients{Error: err}
imOk = false
continue
}
scan := bufio.NewScanner(bytes.NewReader(response))
var clients []*ServerClient
for scan.Scan() {
line := scan.Bytes()
if len(line) == 0 {
continue
}
m := re.FindAllStringSubmatch(string(line), -1)
if m == nil {
err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)")
sc[i] = &ServerClients{Error: err}
imOk = false
continue
}
match := m[0][1:]
queued, _ := strconv.ParseInt(match[1], 0, 64)
recvd, _ := strconv.ParseInt(match[2], 0, 64)
sent, _ := strconv.ParseInt(match[3], 0, 64)
sid, _ := strconv.ParseInt(match[4], 0, 64)
est, _ := strconv.ParseInt(match[6], 0, 64)
timeout, _ := strconv.ParseInt(match[7], 0, 32)
lcxid, _ := parseInt64(match[8])
lzxid, _ := parseInt64(match[9])
lresp, _ := strconv.ParseInt(match[10], 0, 64)
llat, _ := strconv.ParseInt(match[11], 0, 32)
minlat, _ := strconv.ParseInt(match[12], 0, 32)
avglat, _ := strconv.ParseInt(match[13], 0, 32)
maxlat, _ := strconv.ParseInt(match[14], 0, 32)
clients = append(clients, &ServerClient{
Queued: queued,
Received: recvd,
Sent: sent,
SessionID: sid,
Lcxid: int64(lcxid),
Lzxid: int64(lzxid),
Timeout: int32(timeout),
LastLatency: int32(llat),
MinLatency: int32(minlat),
AvgLatency: int32(avglat),
MaxLatency: int32(maxlat),
Established: time.Unix(est, 0),
LastResponse: time.Unix(lresp, 0),
Addr: match[0],
LastOperation: match[5],
})
}
sc[i] = &ServerClients{Clients: clients}
}
return sc, imOk
}
// parseInt64 is similar to strconv.ParseInt, but it also handles hex values that represent negative numbers
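// For example, "0xffffffffffffffff" parses to -1 instead of failing with a range error.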
func parseInt64(s string) (int64, error) {
if strings.HasPrefix(s, "0x") {
i, err := strconv.ParseUint(s, 0, 64)
return int64(i), err
}
return strconv.ParseInt(s, 0, 64)
}
func fourLetterWord(server, command string, timeout time.Duration) ([]byte, error) {
conn, err := net.DialTimeout("tcp", server, timeout)
if err != nil {
return nil, err
}
// the zookeeper server should automatically close this socket
// once the command has been processed, but better safe than sorry
defer conn.Close()
conn.SetWriteDeadline(time.Now().Add(timeout))
_, err = conn.Write([]byte(command))
if err != nil {
return nil, err
}
conn.SetReadDeadline(time.Now().Add(timeout))
return ioutil.ReadAll(conn)
}
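As a usage note, here is a minimal sketch of calling the FLW helpers against a single server; the 127.0.0.1:2181 address is an assumption:
package main
import (
"fmt"
"time"
"github.com/samuel/go-zookeeper/zk"
)
func main() {
servers := []string{"127.0.0.1:2181"} // assumed local ZooKeeper
// FLWRuok returns one bool per server, true when it answered "imok".
fmt.Println("ruok:", zk.FLWRuok(servers, time.Second))
// FLWSrvr returns parsed stats per server; ok is false if any request failed.
stats, ok := zk.FLWSrvr(servers, time.Second)
if !ok {
fmt.Println("srvr error:", stats[0].Error)
return
}
fmt.Printf("mode=%v epoch=%d counter=%d\n", stats[0].Mode, stats[0].Epoch, stats[0].Counter)
}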

330
vendor/github.com/samuel/go-zookeeper/zk/flw_test.go generated vendored Normal file

@@ -0,0 +1,330 @@
package zk
import (
"net"
"testing"
"time"
)
var (
zkSrvrOut = `Zookeeper version: 3.4.6-1569965, built on 02/20/2014 09:09 GMT
Latency min/avg/max: 0/1/10
Received: 4207
Sent: 4220
Connections: 81
Outstanding: 1
Zxid: 0x110a7a8f37
Mode: leader
Node count: 306
`
zkConsOut = ` /10.42.45.231:45361[1](queued=0,recved=9435,sent=9457,sid=0x94c2989e04716b5,lop=PING,est=1427238717217,to=20001,lcxid=0x55120915,lzxid=0xffffffffffffffff,lresp=1427259255908,llat=0,minlat=0,avglat=1,maxlat=17)
/10.55.33.98:34342[1](queued=0,recved=9338,sent=9350,sid=0x94c2989e0471731,lop=PING,est=1427238849319,to=20001,lcxid=0x55120944,lzxid=0xffffffffffffffff,lresp=1427259252294,llat=0,minlat=0,avglat=1,maxlat=18)
/10.44.145.114:46556[1](queued=0,recved=109253,sent=109617,sid=0x94c2989e0471709,lop=DELE,est=1427238791305,to=20001,lcxid=0x55139618,lzxid=0x110a7b187d,lresp=1427259257423,llat=2,minlat=0,avglat=1,maxlat=23)
`
)
func TestFLWRuok(t *testing.T) {
t.Parallel()
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer l.Close()
go tcpServer(l, "")
oks := FLWRuok([]string{l.Addr().String()}, time.Second*10)
if len(oks) == 0 {
t.Errorf("no values returned")
}
if !oks[0] {
t.Errorf("instance should be marked as OK")
}
//
// Confirm that it also returns false for dead instances
//
l, err = net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer l.Close()
go tcpServer(l, "dead")
oks = FLWRuok([]string{l.Addr().String()}, time.Second*10)
if len(oks) == 0 {
t.Errorf("no values returned")
}
if oks[0] {
t.Errorf("instance should be marked as not OK")
}
}
func TestFLWSrvr(t *testing.T) {
t.Parallel()
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer l.Close()
go tcpServer(l, "")
statsSlice, ok := FLWSrvr([]string{l.Addr().String()}, time.Second*10)
if !ok {
t.Errorf("failure indicated on 'srvr' parsing")
}
if len(statsSlice) == 0 {
t.Errorf("no *ServerStats instances returned")
}
stats := statsSlice[0]
if stats.Error != nil {
t.Fatalf("error seen in stats: %v", err.Error())
}
if stats.Sent != 4220 {
t.Errorf("Sent != 4220")
}
if stats.Received != 4207 {
t.Errorf("Received != 4207")
}
if stats.NodeCount != 306 {
t.Errorf("NodeCount != 306")
}
if stats.MinLatency != 0 {
t.Errorf("MinLatency != 0")
}
if stats.AvgLatency != 1 {
t.Errorf("AvgLatency != 1")
}
if stats.MaxLatency != 10 {
t.Errorf("MaxLatency != 10")
}
if stats.Connections != 81 {
t.Errorf("Connection != 81")
}
if stats.Outstanding != 1 {
t.Errorf("Outstanding != 1")
}
if stats.Epoch != 17 {
t.Errorf("Epoch != 17")
}
if stats.Counter != 175804215 {
t.Errorf("Counter != 175804215")
}
if stats.Mode != ModeLeader {
t.Errorf("Mode != ModeLeader")
}
if stats.Version != "3.4.6-1569965" {
t.Errorf("Version expected: 3.4.6-1569965")
}
}
func TestFLWCons(t *testing.T) {
t.Parallel()
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
defer l.Close()
go tcpServer(l, "")
clients, ok := FLWCons([]string{l.Addr().String()}, time.Second*10)
if !ok {
t.Errorf("failure indicated on 'cons' parsing")
}
if len(clients) == 0 {
t.Errorf("no *ServerClients instances returned")
}
results := []*ServerClient{
{
Queued: 0,
Received: 9435,
Sent: 9457,
SessionID: 669956116721374901,
LastOperation: "PING",
Established: time.Unix(1427238717217, 0),
Timeout: 20001,
Lcxid: 1427245333,
Lzxid: -1,
LastResponse: time.Unix(1427259255908, 0),
LastLatency: 0,
MinLatency: 0,
AvgLatency: 1,
MaxLatency: 17,
Addr: "10.42.45.231:45361",
},
{
Queued: 0,
Received: 9338,
Sent: 9350,
SessionID: 669956116721375025,
LastOperation: "PING",
Established: time.Unix(1427238849319, 0),
Timeout: 20001,
Lcxid: 1427245380,
Lzxid: -1,
LastResponse: time.Unix(1427259252294, 0),
LastLatency: 0,
MinLatency: 0,
AvgLatency: 1,
MaxLatency: 18,
Addr: "10.55.33.98:34342",
},
{
Queued: 0,
Received: 109253,
Sent: 109617,
SessionID: 669956116721374985,
LastOperation: "DELE",
Established: time.Unix(1427238791305, 0),
Timeout: 20001,
Lcxid: 1427346968,
Lzxid: 73190283389,
LastResponse: time.Unix(1427259257423, 0),
LastLatency: 2,
MinLatency: 0,
AvgLatency: 1,
MaxLatency: 23,
Addr: "10.44.145.114:46556",
},
}
for _, z := range clients {
if z.Error != nil {
t.Errorf("error seen: %v", err.Error())
}
for i, v := range z.Clients {
c := results[i]
if v.Error != nil {
t.Errorf("client error seen: %v", err.Error())
}
if v.Queued != c.Queued {
t.Errorf("Queued value mismatch (%d/%d)", v.Queued, c.Queued)
}
if v.Received != c.Received {
t.Errorf("Received value mismatch (%d/%d)", v.Received, c.Received)
}
if v.Sent != c.Sent {
t.Errorf("Sent value mismatch (%d/%d)", v.Sent, c.Sent)
}
if v.SessionID != c.SessionID {
t.Errorf("SessionID value mismatch (%d/%d)", v.SessionID, c.SessionID)
}
if v.LastOperation != c.LastOperation {
t.Errorf("LastOperation value mismatch ('%v'/'%v')", v.LastOperation, c.LastOperation)
}
if v.Timeout != c.Timeout {
t.Errorf("Timeout value mismatch (%d/%d)", v.Timeout, c.Timeout)
}
if v.Lcxid != c.Lcxid {
t.Errorf("Lcxid value mismatch (%d/%d)", v.Lcxid, c.Lcxid)
}
if v.Lzxid != c.Lzxid {
t.Errorf("Lzxid value mismatch (%d/%d)", v.Lzxid, c.Lzxid)
}
if v.LastLatency != c.LastLatency {
t.Errorf("LastLatency value mismatch (%d/%d)", v.LastLatency, c.LastLatency)
}
if v.MinLatency != c.MinLatency {
t.Errorf("MinLatency value mismatch (%d/%d)", v.MinLatency, c.MinLatency)
}
if v.AvgLatency != c.AvgLatency {
t.Errorf("AvgLatency value mismatch (%d/%d)", v.AvgLatency, c.AvgLatency)
}
if v.MaxLatency != c.MaxLatency {
t.Errorf("MaxLatency value mismatch (%d/%d)", v.MaxLatency, c.MaxLatency)
}
if v.Addr != c.Addr {
t.Errorf("Addr value mismatch ('%v'/'%v')", v.Addr, c.Addr)
}
if !c.Established.Equal(v.Established) {
t.Errorf("Established value mismatch (%v/%v)", c.Established, v.Established)
}
if !c.LastResponse.Equal(v.LastResponse) {
t.Errorf("Established value mismatch (%v/%v)", c.LastResponse, v.LastResponse)
}
}
}
}
func tcpServer(listener net.Listener, thing string) {
for {
conn, err := listener.Accept()
if err != nil {
return
}
go connHandler(conn, thing)
}
}
func connHandler(conn net.Conn, thing string) {
defer conn.Close()
data := make([]byte, 4)
_, err := conn.Read(data)
if err != nil {
return
}
switch string(data) {
case "ruok":
switch thing {
case "dead":
return
default:
conn.Write([]byte("imok"))
}
case "srvr":
switch thing {
case "dead":
return
default:
conn.Write([]byte(zkSrvrOut))
}
case "cons":
switch thing {
case "dead":
return
default:
conn.Write([]byte(zkConsOut))
}
default:
conn.Write([]byte("This ZooKeeper instance is not currently serving requests."))
}
}

150
vendor/github.com/samuel/go-zookeeper/zk/lock.go generated vendored Normal file

@@ -0,0 +1,150 @@
package zk
import (
"errors"
"fmt"
"strconv"
"strings"
)
var (
// ErrDeadlock is returned by Lock when trying to lock twice without unlocking first
ErrDeadlock = errors.New("zk: trying to acquire a lock twice")
// ErrNotLocked is returned by Unlock when trying to release a lock that has not first been acquired.
ErrNotLocked = errors.New("zk: not locked")
)
// Lock is a mutual exclusion lock.
type Lock struct {
c *Conn
path string
acl []ACL
lockPath string
seq int
}
// NewLock creates a new lock instance using the provided connection, path, and acl.
// The path must be a node that is only used by this lock. A lock instance starts
// unlocked until Lock() is called.
func NewLock(c *Conn, path string, acl []ACL) *Lock {
return &Lock{
c: c,
path: path,
acl: acl,
}
}
func parseSeq(path string) (int, error) {
parts := strings.Split(path, "-")
return strconv.Atoi(parts[len(parts)-1])
}
// Lock attempts to acquire the lock. It will wait to return until the lock
// is acquired or an error occurs. If this instance already has the lock
// then ErrDeadlock is returned.
func (l *Lock) Lock() error {
if l.lockPath != "" {
return ErrDeadlock
}
prefix := fmt.Sprintf("%s/lock-", l.path)
path := ""
var err error
for i := 0; i < 3; i++ {
path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl)
if err == ErrNoNode {
// Create parent node.
parts := strings.Split(l.path, "/")
pth := ""
for _, p := range parts[1:] {
var exists bool
pth += "/" + p
exists, _, err = l.c.Exists(pth)
if err != nil {
return err
}
if exists {
continue
}
_, err = l.c.Create(pth, []byte{}, 0, l.acl)
if err != nil && err != ErrNodeExists {
return err
}
}
} else if err == nil {
break
} else {
return err
}
}
if err != nil {
return err
}
seq, err := parseSeq(path)
if err != nil {
return err
}
for {
children, _, err := l.c.Children(l.path)
if err != nil {
return err
}
lowestSeq := seq
prevSeq := -1
prevSeqPath := ""
for _, p := range children {
s, err := parseSeq(p)
if err != nil {
return err
}
if s < lowestSeq {
lowestSeq = s
}
if s < seq && s > prevSeq {
prevSeq = s
prevSeqPath = p
}
}
if seq == lowestSeq {
// Acquired the lock
break
}
// Wait on the node next in line for the lock
_, _, ch, err := l.c.GetW(l.path + "/" + prevSeqPath)
if err != nil && err != ErrNoNode {
return err
} else if err != nil && err == ErrNoNode {
// try again
continue
}
ev := <-ch
if ev.Err != nil {
return ev.Err
}
}
l.seq = seq
l.lockPath = path
return nil
}
// Unlock releases an acquired lock. If the lock is not currently acquired by
// this Lock instance then ErrNotLocked is returned.
func (l *Lock) Unlock() error {
if l.lockPath == "" {
return ErrNotLocked
}
if err := l.c.Delete(l.lockPath, -1); err != nil {
return err
}
l.lockPath = ""
l.seq = 0
return nil
}
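For reference, a minimal sketch of taking and releasing the lock; it assumes a local server at 127.0.0.1:2181, and /my-lock is an arbitrary example path:
package main
import (
"fmt"
"time"
"github.com/samuel/go-zookeeper/zk"
)
func main() {
c, _, err := zk.Connect([]string{"127.0.0.1:2181"}, time.Second*10) // assumed local server
if err != nil {
panic(err)
}
defer c.Close()
// Each Lock() creates a protected ephemeral sequential child of /my-lock;
// the session holding the lowest sequence number owns the lock.
l := zk.NewLock(c, "/my-lock", zk.WorldACL(zk.PermAll))
if err := l.Lock(); err != nil {
panic(err)
}
fmt.Println("lock acquired")
if err := l.Unlock(); err != nil {
panic(err)
}
}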

94
vendor/github.com/samuel/go-zookeeper/zk/lock_test.go generated vendored Normal file

@@ -0,0 +1,94 @@
package zk
import (
"testing"
"time"
)
func TestLock(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
acls := WorldACL(PermAll)
l := NewLock(zk, "/test", acls)
if err := l.Lock(); err != nil {
t.Fatal(err)
}
if err := l.Unlock(); err != nil {
t.Fatal(err)
}
val := make(chan int, 3)
if err := l.Lock(); err != nil {
t.Fatal(err)
}
l2 := NewLock(zk, "/test", acls)
go func() {
if err := l2.Lock(); err != nil {
t.Error(err) // t.Fatal must not be called from a non-test goroutine
return
}
val <- 2
if err := l2.Unlock(); err != nil {
t.Error(err)
return
}
val <- 3
}()
time.Sleep(time.Millisecond * 100)
val <- 1
if err := l.Unlock(); err != nil {
t.Fatal(err)
}
if x := <-val; x != 1 {
t.Fatalf("Expected 1 instead of %d", x)
}
if x := <-val; x != 2 {
t.Fatalf("Expected 2 instead of %d", x)
}
if x := <-val; x != 3 {
t.Fatalf("Expected 3 instead of %d", x)
}
}
// This tests creating a lock with a path that's more than 1 node deep (e.g. "/test-multi-level/lock"),
// when a part of that path already exists (i.e. "/test-multi-level" node already exists).
func TestMultiLevelLock(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
acls := WorldACL(PermAll)
path := "/test-multi-level"
if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create returned error: %+v", err)
} else if p != path {
t.Fatalf("Create returned different path '%s' != '%s'", p, path)
}
l := NewLock(zk, "/test-multi-level/lock", acls)
defer zk.Delete("/test-multi-level", -1) // Clean up what we've created for this test
defer zk.Delete("/test-multi-level/lock", -1)
if err := l.Lock(); err != nil {
t.Fatal(err)
}
if err := l.Unlock(); err != nil {
t.Fatal(err)
}
}

216
vendor/github.com/samuel/go-zookeeper/zk/server_help.go generated vendored Normal file

@@ -0,0 +1,216 @@
package zk
import (
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"strings"
"time"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
type TestServer struct {
Port int
Path string
Srv *Server
}
type TestCluster struct {
Path string
Servers []TestServer
}
func StartTestCluster(size int, stdout, stderr io.Writer) (*TestCluster, error) {
tmpPath, err := ioutil.TempDir("", "gozk")
if err != nil {
return nil, err
}
success := false
startPort := int(rand.Int31n(6000) + 10000)
cluster := &TestCluster{Path: tmpPath}
defer func() {
if !success {
cluster.Stop()
}
}()
for serverN := 0; serverN < size; serverN++ {
srvPath := filepath.Join(tmpPath, fmt.Sprintf("srv%d", serverN))
if err := os.Mkdir(srvPath, 0700); err != nil {
return nil, err
}
port := startPort + serverN*3
cfg := ServerConfig{
ClientPort: port,
DataDir: srvPath,
}
for i := 0; i < size; i++ {
cfg.Servers = append(cfg.Servers, ServerConfigServer{
ID: i + 1,
Host: "127.0.0.1",
PeerPort: startPort + i*3 + 1,
LeaderElectionPort: startPort + i*3 + 2,
})
}
cfgPath := filepath.Join(srvPath, "zoo.cfg")
fi, err := os.Create(cfgPath)
if err != nil {
return nil, err
}
err = cfg.Marshall(fi)
fi.Close()
if err != nil {
return nil, err
}
fi, err = os.Create(filepath.Join(srvPath, "myid"))
if err != nil {
return nil, err
}
_, err = fmt.Fprintf(fi, "%d\n", serverN+1)
fi.Close()
if err != nil {
return nil, err
}
srv := &Server{
ConfigPath: cfgPath,
Stdout: stdout,
Stderr: stderr,
}
if err := srv.Start(); err != nil {
return nil, err
}
cluster.Servers = append(cluster.Servers, TestServer{
Path: srvPath,
Port: cfg.ClientPort,
Srv: srv,
})
}
if err := cluster.waitForStart(10, time.Second); err != nil {
return nil, err
}
success = true
return cluster, nil
}
func (tc *TestCluster) Connect(idx int) (*Conn, error) {
zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", tc.Servers[idx].Port)}, time.Second*15)
return zk, err
}
func (tc *TestCluster) ConnectAll() (*Conn, <-chan Event, error) {
return tc.ConnectAllTimeout(time.Second * 15)
}
func (tc *TestCluster) ConnectAllTimeout(sessionTimeout time.Duration) (*Conn, <-chan Event, error) {
return tc.ConnectWithOptions(sessionTimeout)
}
func (tc *TestCluster) ConnectWithOptions(sessionTimeout time.Duration, options ...connOption) (*Conn, <-chan Event, error) {
hosts := make([]string, len(tc.Servers))
for i, srv := range tc.Servers {
hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port)
}
zk, ch, err := Connect(hosts, sessionTimeout, options...)
return zk, ch, err
}
func (tc *TestCluster) Stop() error {
for _, srv := range tc.Servers {
srv.Srv.Stop()
}
defer os.RemoveAll(tc.Path)
return tc.waitForStop(5, time.Second)
}
// waitForStart blocks until the cluster is up
func (tc *TestCluster) waitForStart(maxRetry int, interval time.Duration) error {
// verify that the servers are up with SRVR
serverAddrs := make([]string, len(tc.Servers))
for i, s := range tc.Servers {
serverAddrs[i] = fmt.Sprintf("127.0.0.1:%d", s.Port)
}
for i := 0; i < maxRetry; i++ {
_, ok := FLWSrvr(serverAddrs, time.Second)
if ok {
return nil
}
time.Sleep(interval)
}
return fmt.Errorf("unable to verify health of servers")
}
// waitForStop blocks until the cluster is down
func (tc *TestCluster) waitForStop(maxRetry int, interval time.Duration) error {
// verify that the servers are down with RUOK
serverAddrs := make([]string, len(tc.Servers))
for i, s := range tc.Servers {
serverAddrs[i] = fmt.Sprintf("127.0.0.1:%d", s.Port)
}
var success bool
for i := 0; i < maxRetry && !success; i++ {
success = true
for _, ok := range FLWRuok(serverAddrs, time.Second) {
if ok {
success = false
}
}
if !success {
time.Sleep(interval)
}
}
if !success {
return fmt.Errorf("unable to verify servers are down")
}
return nil
}
func (tc *TestCluster) StartServer(server string) {
for _, s := range tc.Servers {
if strings.HasSuffix(server, fmt.Sprintf(":%d", s.Port)) {
s.Srv.Start()
return
}
}
panic(fmt.Sprintf("Unknown server: %s", server))
}
func (tc *TestCluster) StopServer(server string) {
for _, s := range tc.Servers {
if strings.HasSuffix(server, fmt.Sprintf(":%d", s.Port)) {
s.Srv.Stop()
return
}
}
panic(fmt.Sprintf("Unknown server: %s", server))
}
func (tc *TestCluster) StartAllServers() error {
for _, s := range tc.Servers {
if err := s.Srv.Start(); err != nil {
return fmt.Errorf(
"Failed to start server listening on port `%d` : %+v", s.Port, err)
}
}
return nil
}
func (tc *TestCluster) StopAllServers() error {
for _, s := range tc.Servers {
if err := s.Srv.Stop(); err != nil {
return fmt.Errorf(
"Failed to stop server listening on port `%d` : %+v", s.Port, err)
}
}
return nil
}

136
vendor/github.com/samuel/go-zookeeper/zk/server_java.go generated vendored Normal file

@@ -0,0 +1,136 @@
package zk
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
)
type ErrMissingServerConfigField string
func (e ErrMissingServerConfigField) Error() string {
return fmt.Sprintf("zk: missing server config field '%s'", string(e))
}
const (
DefaultServerTickTime = 2000
DefaultServerInitLimit = 10
DefaultServerSyncLimit = 5
DefaultServerAutoPurgeSnapRetainCount = 3
DefaultPeerPort = 2888
DefaultLeaderElectionPort = 3888
)
type ServerConfigServer struct {
ID int
Host string
PeerPort int
LeaderElectionPort int
}
type ServerConfig struct {
TickTime int // Number of milliseconds of each tick
InitLimit int // Number of ticks that the initial synchronization phase can take
SyncLimit int // Number of ticks that can pass between sending a request and getting an acknowledgement
DataDir string // Directory where the snapshot is stored
ClientPort int // Port at which clients will connect
AutoPurgeSnapRetainCount int // Number of snapshots to retain in dataDir
AutoPurgePurgeInterval int // Purge task interval in hours (0 to disable auto purge)
Servers []ServerConfigServer
}
func (sc ServerConfig) Marshall(w io.Writer) error {
if sc.DataDir == "" {
return ErrMissingServerConfigField("dataDir")
}
fmt.Fprintf(w, "dataDir=%s\n", sc.DataDir)
if sc.TickTime <= 0 {
sc.TickTime = DefaultServerTickTime
}
fmt.Fprintf(w, "tickTime=%d\n", sc.TickTime)
if sc.InitLimit <= 0 {
sc.InitLimit = DefaultServerInitLimit
}
fmt.Fprintf(w, "initLimit=%d\n", sc.InitLimit)
if sc.SyncLimit <= 0 {
sc.SyncLimit = DefaultServerSyncLimit
}
fmt.Fprintf(w, "syncLimit=%d\n", sc.SyncLimit)
if sc.ClientPort <= 0 {
sc.ClientPort = DefaultPort
}
fmt.Fprintf(w, "clientPort=%d\n", sc.ClientPort)
if sc.AutoPurgePurgeInterval > 0 {
if sc.AutoPurgeSnapRetainCount <= 0 {
sc.AutoPurgeSnapRetainCount = DefaultServerAutoPurgeSnapRetainCount
}
fmt.Fprintf(w, "autopurge.snapRetainCount=%d\n", sc.AutoPurgeSnapRetainCount)
fmt.Fprintf(w, "autopurge.purgeInterval=%d\n", sc.AutoPurgePurgeInterval)
}
if len(sc.Servers) > 0 {
for _, srv := range sc.Servers {
if srv.PeerPort <= 0 {
srv.PeerPort = DefaultPeerPort
}
if srv.LeaderElectionPort <= 0 {
srv.LeaderElectionPort = DefaultLeaderElectionPort
}
fmt.Fprintf(w, "server.%d=%s:%d:%d\n", srv.ID, srv.Host, srv.PeerPort, srv.LeaderElectionPort)
}
}
return nil
}
var jarSearchPaths = []string{
"zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
"../zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
"/usr/share/java/zookeeper-*.jar",
"/usr/local/zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar",
"/usr/local/Cellar/zookeeper/*/libexec/contrib/fatjar/zookeeper-*-fatjar.jar",
}
func findZookeeperFatJar() string {
var paths []string
zkPath := os.Getenv("ZOOKEEPER_PATH")
if zkPath == "" {
paths = jarSearchPaths
} else {
paths = []string{filepath.Join(zkPath, "contrib/fatjar/zookeeper-*-fatjar.jar")}
}
for _, path := range paths {
matches, _ := filepath.Glob(path)
// TODO: could sort by version and pick latest
if len(matches) > 0 {
return matches[0]
}
}
return ""
}
type Server struct {
JarPath string
ConfigPath string
Stdout, Stderr io.Writer
cmd *exec.Cmd
}
func (srv *Server) Start() error {
if srv.JarPath == "" {
srv.JarPath = findZookeeperFatJar()
if srv.JarPath == "" {
return fmt.Errorf("zk: unable to find server jar")
}
}
srv.cmd = exec.Command("java", "-jar", srv.JarPath, "server", srv.ConfigPath)
srv.cmd.Stdout = srv.Stdout
srv.cmd.Stderr = srv.Stderr
return srv.cmd.Start()
}
func (srv *Server) Stop() error {
srv.cmd.Process.Signal(os.Kill)
return srv.cmd.Wait()
}
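Since Marshall fills in the Default* values for any field left unset, here is a minimal sketch of emitting a single-node zoo.cfg; the DataDir path is an arbitrary example:
package main
import (
"os"
"github.com/samuel/go-zookeeper/zk"
)
func main() {
cfg := zk.ServerConfig{
DataDir: "/tmp/zk-data", // the only required field
ClientPort: 2181,
}
// Writes dataDir, tickTime=2000, initLimit=10, syncLimit=5, clientPort=2181.
if err := cfg.Marshall(os.Stdout); err != nil {
panic(err)
}
}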

609
vendor/github.com/samuel/go-zookeeper/zk/structs.go generated vendored Normal file

@@ -0,0 +1,609 @@
package zk
import (
"encoding/binary"
"errors"
"log"
"reflect"
"runtime"
"time"
)
var (
ErrUnhandledFieldType = errors.New("zk: unhandled field type")
ErrPtrExpected = errors.New("zk: encode/decode expect a non-nil pointer to struct")
ErrShortBuffer = errors.New("zk: buffer too small")
)
type defaultLogger struct{}
func (defaultLogger) Printf(format string, a ...interface{}) {
log.Printf(format, a...)
}
type ACL struct {
Perms int32
Scheme string
ID string
}
type Stat struct {
Czxid int64 // The zxid of the change that caused this znode to be created.
Mzxid int64 // The zxid of the change that last modified this znode.
Ctime int64 // The time in milliseconds from epoch when this znode was created.
Mtime int64 // The time in milliseconds from epoch when this znode was last modified.
Version int32 // The number of changes to the data of this znode.
Cversion int32 // The number of changes to the children of this znode.
Aversion int32 // The number of changes to the ACL of this znode.
EphemeralOwner int64 // The session id of the owner of this znode if the znode is an ephemeral node. If it is not an ephemeral node, it will be zero.
DataLength int32 // The length of the data field of this znode.
NumChildren int32 // The number of children of this znode.
Pzxid int64 // last modified children
}
// ServerClient is the information for a single Zookeeper client and its session.
// This is used to parse/extract the output of the `cons` command.
type ServerClient struct {
Queued int64
Received int64
Sent int64
SessionID int64
Lcxid int64
Lzxid int64
Timeout int32
LastLatency int32
MinLatency int32
AvgLatency int32
MaxLatency int32
Established time.Time
LastResponse time.Time
Addr string
LastOperation string // maybe?
Error error
}
// ServerClients is a struct for the FLWCons() function. It's used to provide
// the list of Clients.
//
// This is needed because FLWCons() takes multiple servers.
type ServerClients struct {
Clients []*ServerClient
Error error
}
// ServerStats is the information pulled from the Zookeeper `stat` command.
type ServerStats struct {
Sent int64
Received int64
NodeCount int64
MinLatency int64
AvgLatency int64
MaxLatency int64
Connections int64
Outstanding int64
Epoch int32
Counter int32
BuildTime time.Time
Mode Mode
Version string
Error error
}
type requestHeader struct {
Xid int32
Opcode int32
}
type responseHeader struct {
Xid int32
Zxid int64
Err ErrCode
}
type multiHeader struct {
Type int32
Done bool
Err ErrCode
}
type auth struct {
Type int32
Scheme string
Auth []byte
}
// Generic request structs
type pathRequest struct {
Path string
}
type PathVersionRequest struct {
Path string
Version int32
}
type pathWatchRequest struct {
Path string
Watch bool
}
type pathResponse struct {
Path string
}
type statResponse struct {
Stat Stat
}
//
type CheckVersionRequest PathVersionRequest
type closeRequest struct{}
type closeResponse struct{}
type connectRequest struct {
ProtocolVersion int32
LastZxidSeen int64
TimeOut int32
SessionID int64
Passwd []byte
}
type connectResponse struct {
ProtocolVersion int32
TimeOut int32
SessionID int64
Passwd []byte
}
type CreateRequest struct {
Path string
Data []byte
Acl []ACL
Flags int32
}
type createResponse pathResponse
type DeleteRequest PathVersionRequest
type deleteResponse struct{}
type errorResponse struct {
Err int32
}
type existsRequest pathWatchRequest
type existsResponse statResponse
type getAclRequest pathRequest
type getAclResponse struct {
Acl []ACL
Stat Stat
}
type getChildrenRequest pathRequest
type getChildrenResponse struct {
Children []string
}
type getChildren2Request pathWatchRequest
type getChildren2Response struct {
Children []string
Stat Stat
}
type getDataRequest pathWatchRequest
type getDataResponse struct {
Data []byte
Stat Stat
}
type getMaxChildrenRequest pathRequest
type getMaxChildrenResponse struct {
Max int32
}
type getSaslRequest struct {
Token []byte
}
type pingRequest struct{}
type pingResponse struct{}
type setAclRequest struct {
Path string
Acl []ACL
Version int32
}
type setAclResponse statResponse
type SetDataRequest struct {
Path string
Data []byte
Version int32
}
type setDataResponse statResponse
type setMaxChildren struct {
Path string
Max int32
}
type setSaslRequest struct {
Token string
}
type setSaslResponse struct {
Token string
}
type setWatchesRequest struct {
RelativeZxid int64
DataWatches []string
ExistWatches []string
ChildWatches []string
}
type setWatchesResponse struct{}
type syncRequest pathRequest
type syncResponse pathResponse
type setAuthRequest auth
type setAuthResponse struct{}
type multiRequestOp struct {
Header multiHeader
Op interface{}
}
type multiRequest struct {
Ops []multiRequestOp
DoneHeader multiHeader
}
type multiResponseOp struct {
Header multiHeader
String string
Stat *Stat
Err ErrCode
}
type multiResponse struct {
Ops []multiResponseOp
DoneHeader multiHeader
}
func (r *multiRequest) Encode(buf []byte) (int, error) {
total := 0
for _, op := range r.Ops {
op.Header.Done = false
n, err := encodePacketValue(buf[total:], reflect.ValueOf(op))
if err != nil {
return total, err
}
total += n
}
r.DoneHeader.Done = true
n, err := encodePacketValue(buf[total:], reflect.ValueOf(r.DoneHeader))
if err != nil {
return total, err
}
total += n
return total, nil
}
func (r *multiRequest) Decode(buf []byte) (int, error) {
r.Ops = make([]multiRequestOp, 0)
r.DoneHeader = multiHeader{-1, true, -1}
total := 0
for {
header := &multiHeader{}
n, err := decodePacketValue(buf[total:], reflect.ValueOf(header))
if err != nil {
return total, err
}
total += n
if header.Done {
r.DoneHeader = *header
break
}
req := requestStructForOp(header.Type)
if req == nil {
return total, ErrAPIError
}
n, err = decodePacketValue(buf[total:], reflect.ValueOf(req))
if err != nil {
return total, err
}
total += n
r.Ops = append(r.Ops, multiRequestOp{*header, req})
}
return total, nil
}
func (r *multiResponse) Decode(buf []byte) (int, error) {
var multiErr error
r.Ops = make([]multiResponseOp, 0)
r.DoneHeader = multiHeader{-1, true, -1}
total := 0
for {
header := &multiHeader{}
n, err := decodePacketValue(buf[total:], reflect.ValueOf(header))
if err != nil {
return total, err
}
total += n
if header.Done {
r.DoneHeader = *header
break
}
res := multiResponseOp{Header: *header}
var w reflect.Value
switch header.Type {
default:
return total, ErrAPIError
case opError:
w = reflect.ValueOf(&res.Err)
case opCreate:
w = reflect.ValueOf(&res.String)
case opSetData:
res.Stat = new(Stat)
w = reflect.ValueOf(res.Stat)
case opCheck, opDelete:
}
if w.IsValid() {
n, err := decodePacketValue(buf[total:], w)
if err != nil {
return total, err
}
total += n
}
r.Ops = append(r.Ops, res)
if multiErr == nil && res.Err != errOk {
// Use the first error as the error returned from Multi().
multiErr = res.Err.toError()
}
}
return total, multiErr
}
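// A minimal sketch of the multi framing round trip (the 1KB buffer size is an
// arbitrary choice for illustration): Encode writes each op as a multiHeader
// followed by its body and appends a final header with Done set, which Decode
// uses as the terminator.
//
//	req := &multiRequest{Ops: []multiRequestOp{
//		{multiHeader{opCheck, false, -1}, &CheckVersionRequest{"/", -1}},
//	}}
//	buf := make([]byte, 1024)
//	n, _ := encodePacket(buf, req)
//	decoded := &multiRequest{}
//	_, _ = decodePacket(buf[:n], decoded)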
type watcherEvent struct {
Type EventType
State State
Path string
}
type decoder interface {
Decode(buf []byte) (int, error)
}
type encoder interface {
Encode(buf []byte) (int, error)
}
func decodePacket(buf []byte, st interface{}) (n int, err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" {
err = ErrShortBuffer
} else {
panic(r)
}
}
}()
v := reflect.ValueOf(st)
if v.Kind() != reflect.Ptr || v.IsNil() {
return 0, ErrPtrExpected
}
return decodePacketValue(buf, v)
}
func decodePacketValue(buf []byte, v reflect.Value) (int, error) {
rv := v
kind := v.Kind()
if kind == reflect.Ptr {
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
v = v.Elem()
kind = v.Kind()
}
n := 0
switch kind {
default:
return n, ErrUnhandledFieldType
case reflect.Struct:
if de, ok := rv.Interface().(decoder); ok {
return de.Decode(buf)
} else if de, ok := v.Interface().(decoder); ok {
return de.Decode(buf)
} else {
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
n2, err := decodePacketValue(buf[n:], field)
n += n2
if err != nil {
return n, err
}
}
}
case reflect.Bool:
v.SetBool(buf[n] != 0)
n++
case reflect.Int32:
v.SetInt(int64(binary.BigEndian.Uint32(buf[n : n+4])))
n += 4
case reflect.Int64:
v.SetInt(int64(binary.BigEndian.Uint64(buf[n : n+8])))
n += 8
case reflect.String:
ln := int(binary.BigEndian.Uint32(buf[n : n+4]))
v.SetString(string(buf[n+4 : n+4+ln]))
n += 4 + ln
case reflect.Slice:
switch v.Type().Elem().Kind() {
default:
count := int(binary.BigEndian.Uint32(buf[n : n+4]))
n += 4
values := reflect.MakeSlice(v.Type(), count, count)
v.Set(values)
for i := 0; i < count; i++ {
n2, err := decodePacketValue(buf[n:], values.Index(i))
n += n2
if err != nil {
return n, err
}
}
case reflect.Uint8:
ln := int(int32(binary.BigEndian.Uint32(buf[n : n+4])))
if ln < 0 {
n += 4
v.SetBytes(nil)
} else {
bytes := make([]byte, ln)
copy(bytes, buf[n+4:n+4+ln])
v.SetBytes(bytes)
n += 4 + ln
}
}
}
return n, nil
}
func encodePacket(buf []byte, st interface{}) (n int, err error) {
defer func() {
if r := recover(); r != nil {
if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" {
err = ErrShortBuffer
} else {
panic(r)
}
}
}()
v := reflect.ValueOf(st)
if v.Kind() != reflect.Ptr || v.IsNil() {
return 0, ErrPtrExpected
}
return encodePacketValue(buf, v)
}
func encodePacketValue(buf []byte, v reflect.Value) (int, error) {
rv := v
for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
v = v.Elem()
}
n := 0
switch v.Kind() {
default:
return n, ErrUnhandledFieldType
case reflect.Struct:
if en, ok := rv.Interface().(encoder); ok {
return en.Encode(buf)
} else if en, ok := v.Interface().(encoder); ok {
return en.Encode(buf)
} else {
for i := 0; i < v.NumField(); i++ {
field := v.Field(i)
n2, err := encodePacketValue(buf[n:], field)
n += n2
if err != nil {
return n, err
}
}
}
case reflect.Bool:
if v.Bool() {
buf[n] = 1
} else {
buf[n] = 0
}
n++
case reflect.Int32:
binary.BigEndian.PutUint32(buf[n:n+4], uint32(v.Int()))
n += 4
case reflect.Int64:
binary.BigEndian.PutUint64(buf[n:n+8], uint64(v.Int()))
n += 8
case reflect.String:
str := v.String()
binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(str)))
copy(buf[n+4:n+4+len(str)], []byte(str))
n += 4 + len(str)
case reflect.Slice:
switch v.Type().Elem().Kind() {
default:
count := v.Len()
startN := n
n += 4
for i := 0; i < count; i++ {
n2, err := encodePacketValue(buf[n:], v.Index(i))
n += n2
if err != nil {
return n, err
}
}
binary.BigEndian.PutUint32(buf[startN:startN+4], uint32(count))
case reflect.Uint8:
if v.IsNil() {
binary.BigEndian.PutUint32(buf[n:n+4], uint32(0xffffffff))
n += 4
} else {
bytes := v.Bytes()
binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(bytes)))
copy(buf[n+4:n+4+len(bytes)], bytes)
n += 4 + len(bytes)
}
}
}
return n, nil
}
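// For illustration, the resulting wire format is big-endian with
// length-prefixed strings and byte slices; encoding
// pathWatchRequest{Path: "/a", Watch: true} produces 7 bytes:
//
//	00 00 00 02  int32 length of "/a"
//	2f 61        the path bytes
//	01           bool true as a single byte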
func requestStructForOp(op int32) interface{} {
switch op {
case opClose:
return &closeRequest{}
case opCreate:
return &CreateRequest{}
case opDelete:
return &DeleteRequest{}
case opExists:
return &existsRequest{}
case opGetAcl:
return &getAclRequest{}
case opGetChildren:
return &getChildrenRequest{}
case opGetChildren2:
return &getChildren2Request{}
case opGetData:
return &getDataRequest{}
case opPing:
return &pingRequest{}
case opSetAcl:
return &setAclRequest{}
case opSetData:
return &SetDataRequest{}
case opSetWatches:
return &setWatchesRequest{}
case opSync:
return &syncRequest{}
case opSetAuth:
return &setAuthRequest{}
case opCheck:
return &CheckVersionRequest{}
case opMulti:
return &multiRequest{}
}
return nil
}

83
vendor/github.com/samuel/go-zookeeper/zk/structs_test.go generated vendored Normal file
View file

@ -0,0 +1,83 @@
package zk
import (
"reflect"
"testing"
)
func TestEncodeDecodePacket(t *testing.T) {
t.Parallel()
encodeDecodeTest(t, &requestHeader{-2, 5})
encodeDecodeTest(t, &connectResponse{1, 2, 3, nil})
encodeDecodeTest(t, &connectResponse{1, 2, 3, []byte{4, 5, 6}})
encodeDecodeTest(t, &getAclResponse{[]ACL{{12, "s", "anyone"}}, Stat{}})
encodeDecodeTest(t, &getChildrenResponse{[]string{"foo", "bar"}})
encodeDecodeTest(t, &pathWatchRequest{"path", true})
encodeDecodeTest(t, &pathWatchRequest{"path", false})
encodeDecodeTest(t, &CheckVersionRequest{"/", -1})
encodeDecodeTest(t, &multiRequest{Ops: []multiRequestOp{{multiHeader{opCheck, false, -1}, &CheckVersionRequest{"/", -1}}}})
}
func TestRequestStructForOp(t *testing.T) {
for op, name := range opNames {
if op != opNotify && op != opWatcherEvent {
if s := requestStructForOp(op); s == nil {
t.Errorf("No struct for op %s", name)
}
}
}
}
func encodeDecodeTest(t *testing.T, r interface{}) {
buf := make([]byte, 1024)
n, err := encodePacket(buf, r)
if err != nil {
t.Errorf("encodePacket returned non-nil error %+v\n", err)
return
}
t.Logf("%+v %x", r, buf[:n])
r2 := reflect.New(reflect.ValueOf(r).Elem().Type()).Interface()
n2, err := decodePacket(buf[:n], r2)
if err != nil {
t.Errorf("decodePacket returned non-nil error %+v\n", err)
return
}
if n != n2 {
t.Errorf("sizes don't match: %d != %d", n, n2)
return
}
if !reflect.DeepEqual(r, r2) {
t.Errorf("results don't match: %+v != %+v", r, r2)
return
}
}
func TestEncodeShortBuffer(t *testing.T) {
t.Parallel()
_, err := encodePacket([]byte{}, &requestHeader{1, 2})
if err != ErrShortBuffer {
t.Errorf("encodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err)
return
}
}
func TestDecodeShortBuffer(t *testing.T) {
t.Parallel()
_, err := decodePacket([]byte{}, &responseHeader{})
if err != ErrShortBuffer {
t.Errorf("decodePacket should return ErrShortBuffer on a short buffer instead of '%+v'", err)
return
}
}
func BenchmarkEncode(b *testing.B) {
buf := make([]byte, 4096)
st := &connectRequest{Passwd: []byte("1234567890")}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
if _, err := encodePacket(buf, st); err != nil {
b.Fatal(err)
}
}
}

136
vendor/github.com/samuel/go-zookeeper/zk/throttle_test.go generated vendored Normal file
View file

@ -0,0 +1,136 @@
/*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Vendored from go4.org/net/throttle
package zk
import (
"fmt"
"net"
"sync"
"time"
)
const unitSize = 1400 // read/write chunk size. ~MTU size.
type Rate struct {
KBps int // or 0, to not rate-limit bandwidth
Latency time.Duration
}
// byteTime returns the time required for n bytes.
func (r Rate) byteTime(n int) time.Duration {
if r.KBps == 0 {
return 0
}
// Multiply inside the float expression; converting the sub-second quotient
// to a Duration first would truncate it to zero.
return time.Duration(float64(n) / 1024 / float64(r.KBps) * float64(time.Second))
}
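// Worked example with the fix above: Rate{KBps: 100}.byteTime(1400) is
// 1400/1024/100 s ≈ 13.67ms, i.e. roughly 100KB/s when writing in
// 1400-byte chunks.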
type Listener struct {
net.Listener
Down Rate // server Writes to Client
Up Rate // server Reads from client
}
func (ln *Listener) Accept() (net.Conn, error) {
c, err := ln.Listener.Accept()
time.Sleep(ln.Up.Latency)
if err != nil {
return nil, err
}
tc := &conn{Conn: c, Down: ln.Down, Up: ln.Up}
tc.start()
return tc, nil
}
type nErr struct {
n int
err error
}
type writeReq struct {
writeAt time.Time
p []byte
resc chan nErr
}
type conn struct {
net.Conn
Down Rate // for reads
Up Rate // for writes
wchan chan writeReq
closeOnce sync.Once
closeErr error
}
func (c *conn) start() {
c.wchan = make(chan writeReq, 1024)
go c.writeLoop()
}
func (c *conn) writeLoop() {
for req := range c.wchan {
time.Sleep(time.Until(req.writeAt))
var res nErr
for len(req.p) > 0 && res.err == nil {
writep := req.p
if len(writep) > unitSize {
writep = writep[:unitSize]
}
n, err := c.Conn.Write(writep)
time.Sleep(c.Up.byteTime(len(writep)))
res.n += n
res.err = err
req.p = req.p[n:]
}
req.resc <- res
}
}
func (c *conn) Close() error {
c.closeOnce.Do(func() {
err := c.Conn.Close()
close(c.wchan)
c.closeErr = err
})
return c.closeErr
}
func (c *conn) Write(p []byte) (n int, err error) {
defer func() {
if e := recover(); e != nil {
n = 0
err = fmt.Errorf("%v", e) // report the recovered panic value, not the nil named return
return
}
}()
resc := make(chan nErr, 1)
c.wchan <- writeReq{time.Now().Add(c.Up.Latency), p, resc}
res := <-resc
return res.n, res.err
}
func (c *conn) Read(p []byte) (n int, err error) {
const max = 1024
if len(p) > max {
p = p[:max]
}
n, err = c.Conn.Read(p)
time.Sleep(c.Down.byteTime(n))
return
}
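// A minimal usage sketch (address and rates are illustrative): wrap any
// net.Listener so that accepted connections are rate-limited.
//
//	ln, _ := net.Listen("tcp", "127.0.0.1:0")
//	tln := &Listener{
//		Listener: ln,
//		Up:       Rate{KBps: 64, Latency: 10 * time.Millisecond},
//		Down:     Rate{KBps: 64, Latency: 10 * time.Millisecond},
//	}
//	c, _ := tln.Accept() // reads and writes on c are now throttled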

116
vendor/github.com/samuel/go-zookeeper/zk/util.go generated vendored Normal file
View file

@ -0,0 +1,116 @@
package zk
import (
"crypto/sha1"
"encoding/base64"
"fmt"
"math/rand"
"strconv"
"strings"
"unicode/utf8"
)
// AuthACL produces an ACL list containing a single ACL which uses the
// provided permissions, with the scheme "auth", and ID "", which is used
// by ZooKeeper to represent any authenticated user.
func AuthACL(perms int32) []ACL {
return []ACL{{perms, "auth", ""}}
}
// WorldACL produces an ACL list containing a single ACL which uses the
// provided permissions, with the scheme "world", and ID "anyone", which
// is used by ZooKeeper to represent any user at all.
func WorldACL(perms int32) []ACL {
return []ACL{{perms, "world", "anyone"}}
}
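// DigestACL produces an ACL list containing a single ACL which uses the
// provided permissions, with the scheme "digest", and an ID of the form
// "user:base64(sha1(user:password))", matching ZooKeeper's digest scheme.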
func DigestACL(perms int32, user, password string) []ACL {
userPass := []byte(fmt.Sprintf("%s:%s", user, password))
h := sha1.New()
if n, err := h.Write(userPass); err != nil || n != len(userPass) {
panic("SHA1 failed")
}
digest := base64.StdEncoding.EncodeToString(h.Sum(nil))
return []ACL{{perms, "digest", fmt.Sprintf("%s:%s", user, digest)}}
}
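// A minimal sketch of combining these helpers with a connection (conn is an
// assumed *Conn; error handling elided — see zk_test.go for full usage):
//
//	acl := DigestACL(PermAll, "user", "password")
//	_, err := conn.Create("/secret", []byte("data"), 0, acl)
//	// the session must also authenticate before it can read the node:
//	err = conn.AddAuth("digest", []byte("user:password"))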
// FormatServers takes a slice of addresses, and makes sure they are in a format
// that resembles <addr>:<port>. If the server has no port provided, the
// DefaultPort constant is added to the end.
func FormatServers(servers []string) []string {
for i := range servers {
if !strings.Contains(servers[i], ":") {
servers[i] = servers[i] + ":" + strconv.Itoa(DefaultPort)
}
}
return servers
}
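// For illustration (DefaultPort is 2181): FormatServers rewrites the input
// slice in place and returns it.
//
//	FormatServers([]string{"10.0.0.1", "10.0.0.2:2888"})
//	// => []string{"10.0.0.1:2181", "10.0.0.2:2888"}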
// stringShuffle performs a Fisher-Yates shuffle on a slice of strings
func stringShuffle(s []string) {
for i := len(s) - 1; i > 0; i-- {
j := rand.Intn(i + 1)
s[i], s[j] = s[j], s[i]
}
}
// validatePath will make sure a path is valid before sending the request
func validatePath(path string, isSequential bool) error {
if path == "" {
return ErrInvalidPath
}
if path[0] != '/' {
return ErrInvalidPath
}
n := len(path)
if n == 1 {
// path is just the root
return nil
}
if !isSequential && path[n-1] == '/' {
return ErrInvalidPath
}
// Start at rune 1 since we already know that the first character is
// a '/'.
for i, w := 1, 0; i < n; i += w {
r, width := utf8.DecodeRuneInString(path[i:])
switch {
case r == '\u0000':
return ErrInvalidPath
case r == '/':
last, _ := utf8.DecodeLastRuneInString(path[:i])
if last == '/' {
return ErrInvalidPath
}
case r == '.':
last, lastWidth := utf8.DecodeLastRuneInString(path[:i])
// Check for double dot
if last == '.' {
last, _ = utf8.DecodeLastRuneInString(path[:i-lastWidth])
}
if last == '/' {
if i+1 == n {
return ErrInvalidPath
}
next, _ := utf8.DecodeRuneInString(path[i+w:])
if next == '/' {
return ErrInvalidPath
}
}
case r >= '\u0000' && r <= '\u001f',
r >= '\u007f' && r <= '\u009f',
r >= '\uf000' && r <= '\uf8ff',
r >= '\ufff0' && r < '\uffff':
return ErrInvalidPath
}
w = width
}
return nil
}

53
vendor/github.com/samuel/go-zookeeper/zk/util_test.go generated vendored Normal file
View file

@ -0,0 +1,53 @@
package zk
import "testing"
func TestFormatServers(t *testing.T) {
t.Parallel()
servers := []string{"127.0.0.1:2181", "127.0.0.42", "127.0.42.1:8811"}
r := []string{"127.0.0.1:2181", "127.0.0.42:2181", "127.0.42.1:8811"}
for i, s := range FormatServers(servers) {
if s != r[i] {
t.Errorf("%v should equal %v", s, r[i])
}
}
}
func TestValidatePath(t *testing.T) {
tt := []struct {
path string
seq bool
valid bool
}{
{"/this is / a valid/path", false, true},
{"/", false, true},
{"", false, false},
{"not/valid", false, false},
{"/ends/with/slash/", false, false},
{"/sequential/", true, true},
{"/test\u0000", false, false},
{"/double//slash", false, false},
{"/single/./period", false, false},
{"/double/../period", false, false},
{"/double/..ok/period", false, true},
{"/double/alsook../period", false, true},
{"/double/period/at/end/..", false, false},
{"/name/with.period", false, true},
{"/test\u0001", false, false},
{"/test\u001f", false, false},
{"/test\u0020", false, true}, // first allowable
{"/test\u007e", false, true}, // last valid ascii
{"/test\u007f", false, false},
{"/test\u009f", false, false},
{"/test\uf8ff", false, false},
{"/test\uffef", false, true},
{"/test\ufff0", false, false},
}
for _, tc := range tt {
err := validatePath(tc.path, tc.seq)
if (err != nil) == tc.valid {
t.Errorf("failed to validate path %q", tc.path)
}
}
}

939
vendor/github.com/samuel/go-zookeeper/zk/zk_test.go generated vendored Normal file
View file

@ -0,0 +1,939 @@
package zk
import (
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"net"
"reflect"
"regexp"
"sort"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
)
func TestStateChanges(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
callbackChan := make(chan Event)
f := func(event Event) {
callbackChan <- event
}
zk, eventChan, err := ts.ConnectWithOptions(15*time.Second, WithEventCallback(f))
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
verifyEventOrder := func(c <-chan Event, expectedStates []State, source string) {
for _, state := range expectedStates {
for {
event, ok := <-c
if !ok {
t.Fatalf("unexpected channel close for %s", source)
}
if event.Type != EventSession {
continue
}
if event.State != state {
t.Fatalf("mismatched state order from %s, expected %v, received %v", source, state, event.State)
}
break
}
}
}
states := []State{StateConnecting, StateConnected, StateHasSession}
verifyEventOrder(callbackChan, states, "callback")
verifyEventOrder(eventChan, states, "event channel")
zk.Close()
verifyEventOrder(callbackChan, []State{StateDisconnected}, "callback")
verifyEventOrder(eventChan, []State{StateDisconnected}, "event channel")
}
func TestCreate(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
path := "/gozk-test"
if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create returned error: %+v", err)
} else if p != path {
t.Fatalf("Create returned different path '%s' != '%s'", p, path)
}
if data, stat, err := zk.Get(path); err != nil {
t.Fatalf("Get returned error: %+v", err)
} else if stat == nil {
t.Fatal("Get returned nil stat")
} else if len(data) < 4 {
t.Fatal("Get returned wrong size data")
}
}
func TestMulti(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
path := "/gozk-test"
if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
ops := []interface{}{
&CreateRequest{Path: path, Data: []byte{1, 2, 3, 4}, Acl: WorldACL(PermAll)},
&SetDataRequest{Path: path, Data: []byte{1, 2, 3, 4}, Version: -1},
}
if res, err := zk.Multi(ops...); err != nil {
t.Fatalf("Multi returned error: %+v", err)
} else if len(res) != 2 {
t.Fatalf("Expected 2 responses got %d", len(res))
} else {
t.Logf("%+v", res)
}
if data, stat, err := zk.Get(path); err != nil {
t.Fatalf("Get returned error: %+v", err)
} else if stat == nil {
t.Fatal("Get returned nil stat")
} else if len(data) < 4 {
t.Fatal("Get returned wrong size data")
}
}
func TestIfAuthdataSurvivesReconnect(t *testing.T) {
// This test case ensures authentication data is resubmitted after
// reconnect.
testNode := "/auth-testnode"
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
acl := DigestACL(PermAll, "userfoo", "passbar")
_, err = zk.Create(testNode, []byte("Some very secret content"), 0, acl)
if err != nil && err != ErrNodeExists {
t.Fatalf("Failed to create test node : %+v", err)
}
_, _, err = zk.Get(testNode)
if err == nil || err != ErrNoAuth {
var msg string
if err == nil {
msg = "Fetching data without auth should have resulted in an error"
} else {
msg = fmt.Sprintf("Expecting ErrNoAuth, got `%+v` instead", err)
}
t.Fatal(msg)
}
zk.AddAuth("digest", []byte("userfoo:passbar"))
_, _, err = zk.Get(testNode)
if err != nil {
t.Fatalf("Fetching data with auth failed: %+v", err)
}
ts.StopAllServers()
ts.StartAllServers()
_, _, err = zk.Get(testNode)
if err != nil {
t.Fatalf("Fetching data after reconnect failed: %+v", err)
}
}
func TestMultiFailures(t *testing.T) {
// This test case ensures that we return the errors associated with each
// operation in the event a call to Multi() fails.
const firstPath = "/gozk-test-first"
const secondPath = "/gozk-test-second"
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
// Ensure firstPath doesn't exist and secondPath does. This will cause the
// 2nd operation in the Multi() to fail.
if err := zk.Delete(firstPath, -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
if _, err := zk.Create(secondPath, nil /* data */, 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create returned error: %+v", err)
}
ops := []interface{}{
&CreateRequest{Path: firstPath, Data: []byte{1, 2}, Acl: WorldACL(PermAll)},
&CreateRequest{Path: secondPath, Data: []byte{3, 4}, Acl: WorldACL(PermAll)},
}
res, err := zk.Multi(ops...)
if err != ErrNodeExists {
t.Fatalf("Multi() didn't return correct error: %+v", err)
}
if len(res) != 2 {
t.Fatalf("Expected 2 responses received %d", len(res))
}
if res[0].Error != nil {
t.Fatalf("First operation returned an unexpected error %+v", res[0].Error)
}
if res[1].Error != ErrNodeExists {
t.Fatalf("Second operation returned incorrect error %+v", res[1].Error)
}
if _, _, err := zk.Get(firstPath); err != ErrNoNode {
t.Fatalf("Node %s was incorrectly created: %+v", firstPath, err)
}
}
func TestGetSetACL(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
if err := zk.AddAuth("digest", []byte("blah")); err != nil {
t.Fatalf("AddAuth returned error %+v", err)
}
path := "/gozk-test"
if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
if path, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create returned error: %+v", err)
} else if path != "/gozk-test" {
t.Fatalf("Create returned different path '%s' != '/gozk-test'", path)
}
expected := WorldACL(PermAll)
if acl, stat, err := zk.GetACL(path); err != nil {
t.Fatalf("GetACL returned error %+v", err)
} else if stat == nil {
t.Fatalf("GetACL returned nil Stat")
} else if len(acl) != 1 || expected[0] != acl[0] {
t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl)
}
expected = []ACL{{PermAll, "ip", "127.0.0.1"}}
if stat, err := zk.SetACL(path, expected, -1); err != nil {
t.Fatalf("SetACL returned error %+v", err)
} else if stat == nil {
t.Fatalf("SetACL returned nil Stat")
}
if acl, stat, err := zk.GetACL(path); err != nil {
t.Fatalf("GetACL returned error %+v", err)
} else if stat == nil {
t.Fatalf("GetACL returned nil Stat")
} else if len(acl) != 1 || expected[0] != acl[0] {
t.Fatalf("GetACL mismatch expected %+v instead of %+v", expected, acl)
}
}
func TestAuth(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
path := "/gozk-digest-test"
if err := zk.Delete(path, -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
acl := DigestACL(PermAll, "user", "password")
if p, err := zk.Create(path, []byte{1, 2, 3, 4}, 0, acl); err != nil {
t.Fatalf("Create returned error: %+v", err)
} else if p != path {
t.Fatalf("Create returned different path '%s' != '%s'", p, path)
}
if a, stat, err := zk.GetACL(path); err != nil {
t.Fatalf("GetACL returned error %+v", err)
} else if stat == nil {
t.Fatalf("GetACL returned nil Stat")
} else if len(a) != 1 || acl[0] != a[0] {
t.Fatalf("GetACL mismatch expected %+v instead of %+v", acl, a)
}
if _, _, err := zk.Get(path); err != ErrNoAuth {
t.Fatalf("Get returned error %+v instead of ErrNoAuth", err)
}
if err := zk.AddAuth("digest", []byte("user:password")); err != nil {
t.Fatalf("AddAuth returned error %+v", err)
}
if data, stat, err := zk.Get(path); err != nil {
t.Fatalf("Get returned error %+v", err)
} else if stat == nil {
t.Fatalf("Get returned nil Stat")
} else if len(data) != 4 {
t.Fatalf("Get returned wrong data length")
}
}
func TestChildren(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
deleteNode := func(node string) {
if err := zk.Delete(node, -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
}
deleteNode("/gozk-test-big")
if path, err := zk.Create("/gozk-test-big", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create returned error: %+v", err)
} else if path != "/gozk-test-big" {
t.Fatalf("Create returned different path '%s' != '/gozk-test-big'", path)
}
rb := make([]byte, 1000)
hb := make([]byte, 2000)
prefix := []byte("/gozk-test-big/")
for i := 0; i < 10000; i++ {
_, err := rand.Read(rb)
if err != nil {
t.Fatal("Cannot create random znode name")
}
hex.Encode(hb, rb)
expect := string(append(prefix, hb...))
if path, err := zk.Create(expect, []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create returned error: %+v", err)
} else if path != expect {
t.Fatalf("Create returned different path '%s' != '%s'", path, expect)
}
defer deleteNode(string(expect))
}
children, _, err := zk.Children("/gozk-test-big")
if err != nil {
t.Fatalf("Children returned error: %+v", err)
} else if len(children) != 10000 {
t.Fatal("Children returned wrong number of nodes")
}
}
func TestChildWatch(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
children, stat, childCh, err := zk.ChildrenW("/")
if err != nil {
t.Fatalf("Children returned error: %+v", err)
} else if stat == nil {
t.Fatal("Children returned nil stat")
} else if len(children) < 1 {
t.Fatal("Children should return at least 1 child")
}
if path, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create returned error: %+v", err)
} else if path != "/gozk-test" {
t.Fatalf("Create returned different path '%s' != '/gozk-test'", path)
}
select {
case ev := <-childCh:
if ev.Err != nil {
t.Fatalf("Child watcher error %+v", ev.Err)
}
if ev.Path != "/" {
t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
}
case <-time.After(time.Second * 2):
t.Fatal("Child watcher timed out")
}
// Delete of the watched node should trigger the watch
children, stat, childCh, err = zk.ChildrenW("/gozk-test")
if err != nil {
t.Fatalf("Children returned error: %+v", err)
} else if stat == nil {
t.Fatal("Children returned nil stat")
} else if len(children) != 0 {
t.Fatal("Children should return 0 children")
}
if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
select {
case ev := <-childCh:
if ev.Err != nil {
t.Fatalf("Child watcher error %+v", ev.Err)
}
if ev.Path != "/gozk-test" {
t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
}
case <-time.After(time.Second * 2):
t.Fatal("Child watcher timed out")
}
}
func TestSetWatchers(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
zk.reconnectLatch = make(chan struct{})
zk.setWatchLimit = 1024 // break up the set-watch step into 1KB requests
var setWatchReqs atomic.Value
zk.setWatchCallback = func(reqs []*setWatchesRequest) {
setWatchReqs.Store(reqs)
}
zk2, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk2.Close()
if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
testPaths := map[string]<-chan Event{}
defer func() {
// clean up all of the test paths we create
for p := range testPaths {
zk2.Delete(p, -1)
}
}()
// we create lots of paths to watch, to make sure a "set watches" request
// sent on reconnect will be too big and must be split across multiple packets
for i := 0; i < 1000; i++ {
testPath, err := zk.Create(fmt.Sprintf("/gozk-test-%d", i), []byte{}, 0, WorldACL(PermAll))
if err != nil {
t.Fatalf("Create returned: %+v", err)
}
testPaths[testPath] = nil
_, _, testEvCh, err := zk.GetW(testPath)
if err != nil {
t.Fatalf("GetW returned: %+v", err)
}
testPaths[testPath] = testEvCh
}
children, stat, childCh, err := zk.ChildrenW("/")
if err != nil {
t.Fatalf("Children returned error: %+v", err)
} else if stat == nil {
t.Fatal("Children returned nil stat")
} else if len(children) < 1 {
t.Fatal("Children should return at least 1 child")
}
// Simulate network error by brutally closing the network connection.
zk.conn.Close()
for p := range testPaths {
if err := zk2.Delete(p, -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
}
if path, err := zk2.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
t.Fatalf("Create returned error: %+v", err)
} else if path != "/gozk-test" {
t.Fatalf("Create returned different path '%s' != '/gozk-test'", path)
}
time.Sleep(100 * time.Millisecond)
// zk should still be waiting to reconnect, so none of the watches should have been triggered
for p, ch := range testPaths {
select {
case <-ch:
t.Fatalf("GetW watcher for %q should not have triggered yet", p)
default:
}
}
select {
case <-childCh:
t.Fatalf("ChildrenW watcher should not have triggered yet")
default:
}
// now we let the reconnect occur and make sure it resets watches
close(zk.reconnectLatch)
for p, ch := range testPaths {
select {
case ev := <-ch:
if ev.Err != nil {
t.Fatalf("GetW watcher error %+v", ev.Err)
}
if ev.Path != p {
t.Fatalf("GetW watcher wrong path %s instead of %s", ev.Path, p)
}
case <-time.After(2 * time.Second):
t.Fatal("GetW watcher timed out")
}
}
select {
case ev := <-childCh:
if ev.Err != nil {
t.Fatalf("Child watcher error %+v", ev.Err)
}
if ev.Path != "/" {
t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
}
case <-time.After(2 * time.Second):
t.Fatal("Child watcher timed out")
}
// Yay! All watches fired correctly. Now we also inspect the actual set-watch request objects
// to ensure they didn't exceed the configured packet size limit.
buf := make([]byte, bufferSize)
totalWatches := 0
actualReqs := setWatchReqs.Load().([]*setWatchesRequest)
if len(actualReqs) < 12 {
// sanity check: we should have generated *at least* 12 requests to reset watches
t.Fatalf("too few setWatchesRequest messages: %d", len(actualReqs))
}
for _, r := range actualReqs {
totalWatches += len(r.ChildWatches) + len(r.DataWatches) + len(r.ExistWatches)
n, err := encodePacket(buf, r)
if err != nil {
t.Fatalf("encodePacket failed: %v! request:\n%+v", err, r)
} else if n > 1024 {
t.Fatalf("setWatchesRequest exceeded allowed size (%d > 1024)! request:\n%+v", n, r)
}
}
if totalWatches != len(testPaths)+1 {
t.Fatalf("setWatchesRequests did not include all expected watches; expecting %d, got %d", len(testPaths)+1, totalWatches)
}
}
func TestExpiringWatch(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
zk, _, err := ts.ConnectAll()
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
if err := zk.Delete("/gozk-test", -1); err != nil && err != ErrNoNode {
t.Fatalf("Delete returned error: %+v", err)
}
children, stat, childCh, err := zk.ChildrenW("/")
if err != nil {
t.Fatalf("Children returned error: %+v", err)
} else if stat == nil {
t.Fatal("Children returned nil stat")
} else if len(children) < 1 {
t.Fatal("Children should return at least 1 child")
}
zk.sessionID = 99999
zk.conn.Close()
select {
case ev := <-childCh:
if ev.Err != ErrSessionExpired {
t.Fatalf("Child watcher error %+v instead of expected ErrSessionExpired", ev.Err)
}
if ev.Path != "/" {
t.Fatalf("Child watcher wrong path %s instead of %s", ev.Path, "/")
}
case <-time.After(2 * time.Second):
t.Fatal("Child watcher timed out")
}
}
func TestRequestFail(t *testing.T) {
// If connecting fails to all servers in the list then pending requests
// should be errored out so they don't hang forever.
zk, _, err := Connect([]string{"127.0.0.1:32444"}, time.Second*15)
if err != nil {
t.Fatal(err)
}
defer zk.Close()
ch := make(chan error)
go func() {
_, _, err := zk.Get("/blah")
ch <- err
}()
select {
case err := <-ch:
if err == nil {
t.Fatal("Expected non-nil error on failed request due to connection failure")
}
case <-time.After(time.Second * 2):
t.Fatal("Get hung when connection could not be made")
}
}
func TestSlowServer(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
realAddr := fmt.Sprintf("127.0.0.1:%d", ts.Servers[0].Port)
proxyAddr, stopCh, err := startSlowProxy(t,
Rate{}, Rate{},
realAddr, func(ln *Listener) {
if ln.Up.Latency == 0 {
ln.Up.Latency = time.Millisecond * 2000
ln.Down.Latency = time.Millisecond * 2000
} else {
ln.Up.Latency = 0
ln.Down.Latency = 0
}
})
if err != nil {
t.Fatal(err)
}
defer close(stopCh)
zk, _, err := Connect([]string{proxyAddr}, time.Millisecond*500)
if err != nil {
t.Fatal(err)
}
defer zk.Close()
_, _, wch, err := zk.ChildrenW("/")
if err != nil {
t.Fatal(err)
}
// Force a reconnect to get a throttled connection
zk.conn.Close()
time.Sleep(time.Millisecond * 100)
if err := zk.Delete("/gozk-test", -1); err == nil {
t.Fatal("Delete should have failed")
}
// The previous request should have timed out causing the server to be disconnected and reconnected
if _, err := zk.Create("/gozk-test", []byte{1, 2, 3, 4}, 0, WorldACL(PermAll)); err != nil {
t.Fatal(err)
}
// Make sure event is still returned because the session should not have been affected
select {
case ev := <-wch:
t.Logf("Received event: %+v", ev)
case <-time.After(time.Second):
t.Fatal("Expected to receive a watch event")
}
}
func startSlowProxy(t *testing.T, up, down Rate, upstream string, adj func(ln *Listener)) (string, chan bool, error) {
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return "", nil, err
}
tln := &Listener{
Listener: ln,
Up: up,
Down: down,
}
stopCh := make(chan bool)
go func() {
<-stopCh
tln.Close()
}()
go func() {
for {
cn, err := tln.Accept()
if err != nil {
if !strings.Contains(err.Error(), "use of closed network connection") {
t.Fatalf("Accept failed: %s", err.Error())
}
return
}
if adj != nil {
adj(tln)
}
go func(cn net.Conn) {
defer cn.Close()
upcn, err := net.Dial("tcp", upstream)
if err != nil {
t.Log(err)
return
}
// This will leave hanging goroutines until stopCh is closed
// but it doesn't matter in the context of running tests.
go func() {
<-stopCh
upcn.Close()
}()
go func() {
if _, err := io.Copy(upcn, cn); err != nil {
if !strings.Contains(err.Error(), "use of closed network connection") {
// log.Printf("Upstream write failed: %s", err.Error())
}
}
}()
if _, err := io.Copy(cn, upcn); err != nil {
if !strings.Contains(err.Error(), "use of closed network connection") {
// log.Printf("Upstream read failed: %s", err.Error())
}
}
}(cn)
}
}()
return ln.Addr().String(), stopCh, nil
}
func TestMaxBufferSize(t *testing.T) {
ts, err := StartTestCluster(1, nil, logWriter{t: t, p: "[ZKERR] "})
if err != nil {
t.Fatal(err)
}
defer ts.Stop()
// no buffer size
zk, _, err := ts.ConnectWithOptions(15 * time.Second)
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zk.Close()
var l testLogger
// 1k buffer size, logs to custom test logger
zkLimited, _, err := ts.ConnectWithOptions(15*time.Second, WithMaxBufferSize(1024), func(conn *Conn) {
conn.SetLogger(&l)
})
if err != nil {
t.Fatalf("Connect returned error: %+v", err)
}
defer zkLimited.Close()
// With small node with small number of children
data := []byte{101, 102, 103, 103}
_, err = zk.Create("/foo", data, 0, WorldACL(PermAll))
if err != nil {
t.Fatalf("Create returned error: %+v", err)
}
var children []string
for i := 0; i < 4; i++ {
childName, err := zk.Create("/foo/child", nil, FlagEphemeral|FlagSequence, WorldACL(PermAll))
if err != nil {
t.Fatalf("Create returned error: %+v", err)
}
children = append(children, childName[len("/foo/"):]) // strip parent prefix from name
}
sort.Strings(children)
// Limited client works fine
resultData, _, err := zkLimited.Get("/foo")
if err != nil {
t.Fatalf("Get returned error: %+v", err)
}
if !reflect.DeepEqual(resultData, data) {
t.Fatalf("Get returned unexpected data; expecting %+v, got %+v", data, resultData)
}
resultChildren, _, err := zkLimited.Children("/foo")
if err != nil {
t.Fatalf("Children returned error: %+v", err)
}
sort.Strings(resultChildren)
if !reflect.DeepEqual(resultChildren, children) {
t.Fatalf("Children returned unexpected names; expecting %+v, got %+v", children, resultChildren)
}
// With large node though...
data = make([]byte, 1024)
for i := 0; i < 1024; i++ {
data[i] = byte(i)
}
_, err = zk.Create("/bar", data, 0, WorldACL(PermAll))
if err != nil {
t.Fatalf("Create returned error: %+v", err)
}
_, _, err = zkLimited.Get("/bar")
// NB: Sadly, without actually de-serializing the too-large response packet, we can't send the
// right error to the corresponding outstanding request. So the request just sees ErrConnectionClosed
// while the log will see the actual reason the connection was closed.
expectErr(t, err, ErrConnectionClosed)
expectLogMessage(t, &l, "received packet from server with length .*, which exceeds max buffer size 1024")
// Or with large number of children...
totalLen := 0
children = nil
for totalLen < 1024 {
childName, err := zk.Create("/bar/child", nil, FlagEphemeral|FlagSequence, WorldACL(PermAll))
if err != nil {
t.Fatalf("Create returned error: %+v", err)
}
n := childName[len("/bar/"):] // strip parent prefix from name
children = append(children, n)
totalLen += len(n)
}
sort.Strings(children)
_, _, err = zkLimited.Children("/bar")
expectErr(t, err, ErrConnectionClosed)
expectLogMessage(t, &l, "received packet from server with length .*, which exceeds max buffer size 1024")
// Other client (without buffer size limit) can successfully query the node and its children, of course
resultData, _, err = zk.Get("/bar")
if err != nil {
t.Fatalf("Get returned error: %+v", err)
}
if !reflect.DeepEqual(resultData, data) {
t.Fatalf("Get returned unexpected data; expecting %+v, got %+v", data, resultData)
}
resultChildren, _, err = zk.Children("/bar")
if err != nil {
t.Fatalf("Children returned error: %+v", err)
}
sort.Strings(resultChildren)
if !reflect.DeepEqual(resultChildren, children) {
t.Fatalf("Children returned unexpected names; expecting %+v, got %+v", children, resultChildren)
}
}
func expectErr(t *testing.T, err error, expected error) {
if err == nil {
t.Fatal("expected a non-nil error for a response that is too large")
}
if err != expected {
t.Fatalf("returned wrong error; expecting %+v, got %+v", expected, err)
}
}
func expectLogMessage(t *testing.T, logger *testLogger, pattern string) {
re := regexp.MustCompile(pattern)
events := logger.Reset()
if len(events) == 0 {
t.Fatalf("Failed to log error; expecting message that matches pattern: %s", pattern)
}
var found []string
for _, e := range events {
if re.Match([]byte(e)) {
found = append(found, e)
}
}
if len(found) == 0 {
t.Fatalf("Failed to log error; expecting message that matches pattern: %s", pattern)
} else if len(found) > 1 {
t.Fatalf("Logged error redundantly %d times:\n%+v", len(found), found)
}
}
type testLogger struct {
mu sync.Mutex
events []string
}
func (l *testLogger) Printf(msgFormat string, args ...interface{}) {
msg := fmt.Sprintf(msgFormat, args...)
fmt.Println(msg)
l.mu.Lock()
defer l.mu.Unlock()
l.events = append(l.events, msg)
}
func (l *testLogger) Reset() []string {
l.mu.Lock()
defer l.mu.Unlock()
ret := l.events
l.events = nil
return ret
}