Moving from govendor to dep, updated dependencies (#48)
* Moving from govendor to dep.
* Making the pull request template more friendly.
* Fixing awkward space in PR template.
* goimports run on the whole project using `goimports -w $(find . -type f -name '*.go' -not -path "./vendor/*" -not -path "./gen-go/*")` (source of command: https://gist.github.com/bgentry/fd1ffef7dbde01857f66)
parent 9631aa3aab
commit 8d445c1c77
2186 changed files with 400410 additions and 352 deletions
vendor/github.com/samuel/go-zookeeper/zk/conn.go (vendored, generated): 209 changed lines
@@ -85,6 +85,7 @@ type Conn struct {
     pingInterval   time.Duration
     recvTimeout    time.Duration
     connectTimeout time.Duration
+    maxBufferSize  int

     creds   []authCreds
     credsMu sync.Mutex // protects server
@@ -97,9 +98,12 @@ type Conn struct {
     closeChan      chan struct{} // channel to tell send loop stop

     // Debug (used by unit tests)
-    reconnectDelay time.Duration
+    reconnectLatch   chan struct{}
+    setWatchLimit    int
+    setWatchCallback func([]*setWatchesRequest)

-    logger Logger
+    logger  Logger
+    logInfo bool // true if information messages are logged; false if only errors are logged

     buf []byte
 }
@@ -197,10 +201,8 @@ func Connect(servers []string, sessionTimeout time.Duration, options ...connOpti
         watchers:       make(map[watchPathType][]chan Event),
         passwd:         emptyPassword,
         logger:         DefaultLogger,
+        logInfo:        true, // default is true for backwards compatibility
         buf:            make([]byte, bufferSize),
-
-        // Debug
-        reconnectDelay: 0,
     }

     // Set provided options.
@@ -237,6 +239,21 @@ func WithHostProvider(hostProvider HostProvider) connOption {
     }
 }

+// WithLogger returns a connection option specifying a non-default Logger
+func WithLogger(logger Logger) connOption {
+    return func(c *Conn) {
+        c.logger = logger
+    }
+}
+
+// WithLogInfo returns a connection option specifying whether or not information messages
+// should be logged.
+func WithLogInfo(logInfo bool) connOption {
+    return func(c *Conn) {
+        c.logInfo = logInfo
+    }
+}
+
 // EventCallback is a function that is called when an Event occurs.
 type EventCallback func(Event)
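For context, here is a minimal usage sketch of the two options added above. The import path follows the vendored package location; the server address, session timeout, and the use of a standard-library *log.Logger are illustrative assumptions, not part of this diff.

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/samuel/go-zookeeper/zk" // assumed import path, matching the vendored package
)

func main() {
	// Route client messages to a custom logger. *log.Logger satisfies the
	// package's Logger interface because it provides a Printf method.
	logger := log.New(os.Stderr, "zk: ", log.LstdFlags)

	conn, _, err := zk.Connect([]string{"127.0.0.1:2181"}, 5*time.Second,
		zk.WithLogger(logger),
		zk.WithLogInfo(false), // log only errors, not informational messages
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```

Any value with a Printf(string, ...interface{}) method can be passed to WithLogger, which is why a plain *log.Logger works in this sketch.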
@@ -249,6 +266,46 @@ func WithEventCallback(cb EventCallback) connOption {
     }
 }

+// WithMaxBufferSize sets the maximum buffer size used to read and decode
+// packets received from the Zookeeper server. The standard Zookeeper client for
+// Java defaults to a limit of 1mb. For backwards compatibility, this Go client
+// defaults to unbounded unless overridden via this option. A value that is zero
+// or negative indicates that no limit is enforced.
+//
+// This is meant to prevent resource exhaustion in the face of potentially
+// malicious data in ZK. It should generally match the server setting (which
+// also defaults to 1mb) so that clients and servers agree on the limits for
+// things like the size of data in an individual znode and the total size of a
+// transaction.
+//
+// For production systems, this should be set to a reasonable value (ideally
+// that matches the server configuration). For ops tooling, it is handy to use a
+// much larger limit, in order to do things like clean-up problematic state in
+// the ZK tree. For example, if a single znode has a huge number of children, it
+// is possible for the response to a "list children" operation to exceed this
+// buffer size and cause errors in clients. The only way to subsequently clean
+// up the tree (by removing superfluous children) is to use a client configured
+// with a larger buffer size that can successfully query for all of the child
+// names and then remove them. (Note there are other tools that can list all of
+// the child names without an increased buffer size in the client, but they work
+// by inspecting the servers' transaction logs to enumerate children instead of
+// sending an online request to a server.)
+func WithMaxBufferSize(maxBufferSize int) connOption {
+    return func(c *Conn) {
+        c.maxBufferSize = maxBufferSize
+    }
+}
+
+// WithMaxConnBufferSize sets the maximum buffer size used to send and encode
+// packets to the Zookeeper server. The standard Zookeeper client for Java defaults
+// to a limit of 1mb. This option should be used for non-standard server setups
+// where a znode is bigger than the default 1mb.
+func WithMaxConnBufferSize(maxBufferSize int) connOption {
+    return func(c *Conn) {
+        c.buf = make([]byte, maxBufferSize)
+    }
+}
+
 func (c *Conn) Close() {
     close(c.shouldQuit)
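A hedged sketch of combining the two buffer-size options above: the 1mb receive cap mirrors the server default mentioned in the doc comment, while the 4mb send buffer and the single local server address are illustrative assumptions for a non-standard setup.

```go
package main

import (
	"log"
	"time"

	"github.com/samuel/go-zookeeper/zk" // assumed import path, matching the vendored package
)

// connectBounded is a sketch: it caps incoming packets at 1mb (the usual
// server-side default) and enlarges the send buffer for a hypothetical
// cluster that has been configured to accept larger znodes.
func connectBounded(servers []string) (*zk.Conn, error) {
	conn, _, err := zk.Connect(servers, 5*time.Second,
		zk.WithMaxBufferSize(1<<20),     // refuse packets from the server larger than 1mb
		zk.WithMaxConnBufferSize(4<<20), // allow encoding requests up to 4mb (non-standard servers only)
	)
	if err != nil {
		return nil, err
	}
	return conn, nil
}

func main() {
	if _, err := connectBounded([]string{"127.0.0.1:2181"}); err != nil {
		log.Fatal(err)
	}
}
```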
@@ -321,7 +378,9 @@ func (c *Conn) connect() error {
         if err == nil {
             c.conn = zkConn
             c.setState(StateConnected)
-            c.logger.Printf("Connected to %s", c.Server())
+            if c.logInfo {
+                c.logger.Printf("Connected to %s", c.Server())
+            }
             return nil
         }
@@ -335,8 +394,10 @@ func (c *Conn) resendZkAuth(reauthReadyChan chan struct{}) {

     defer close(reauthReadyChan)

-    c.logger.Printf("Re-submitting `%d` credentials after reconnect",
-        len(c.creds))
+    if c.logInfo {
+        c.logger.Printf("Re-submitting `%d` credentials after reconnect",
+            len(c.creds))
+    }

     for _, cred := range c.creds {
         resChan, err := c.sendRequest(
@@ -404,7 +465,9 @@ func (c *Conn) loop() {
             c.logger.Printf("Authentication failed: %s", err)
             c.conn.Close()
         case err == nil:
-            c.logger.Printf("Authenticated: id=%d, timeout=%d", c.SessionID(), c.sessionTimeoutMs)
+            if c.logInfo {
+                c.logger.Printf("Authenticated: id=%d, timeout=%d", c.SessionID(), c.sessionTimeoutMs)
+            }
             c.hostProvider.Connected()        // mark success
             c.closeChan = make(chan struct{}) // channel to tell send loop stop
             reauthChan := make(chan struct{}) // channel to tell send loop that authdata has been resubmitted
@@ -414,7 +477,9 @@ func (c *Conn) loop() {
             go func() {
                 <-reauthChan
                 err := c.sendLoop()
-                c.logger.Printf("Send loop terminated: err=%v", err)
+                if err != nil || c.logInfo {
+                    c.logger.Printf("Send loop terminated: err=%v", err)
+                }
                 c.conn.Close() // causes recv loop to EOF/exit
                 wg.Done()
             }()
@@ -422,7 +487,9 @@ func (c *Conn) loop() {
             wg.Add(1)
             go func() {
                 err := c.recvLoop(c.conn)
-                c.logger.Printf("Recv loop terminated: err=%v", err)
+                if err != io.EOF || c.logInfo {
+                    c.logger.Printf("Recv loop terminated: err=%v", err)
+                }
                 if err == nil {
                     panic("zk: recvLoop should never return nil error")
                 }
@@ -450,11 +517,11 @@ func (c *Conn) loop() {
             }
             c.flushRequests(err)

-            if c.reconnectDelay > 0 {
+            if c.reconnectLatch != nil {
                 select {
                 case <-c.shouldQuit:
                     return
-                case <-time.After(c.reconnectDelay):
+                case <-c.reconnectLatch:
                 }
             }
         }
@@ -506,17 +573,41 @@ func (c *Conn) sendSetWatches() {
         return
     }

-    req := &setWatchesRequest{
-        RelativeZxid: c.lastZxid,
-        DataWatches:  make([]string, 0),
-        ExistWatches: make([]string, 0),
-        ChildWatches: make([]string, 0),
+    // NB: A ZK server, by default, rejects packets >1mb. So, if we have too
+    // many watches to reset, we need to break this up into multiple packets
+    // to avoid hitting that limit. Mirroring the Java client behavior: we are
+    // conservative in that we limit requests to 128kb (since the server limit
+    // is actually configurable and could conceivably be configured smaller
+    // than the default of 1mb).
+    limit := 128 * 1024
+    if c.setWatchLimit > 0 {
+        limit = c.setWatchLimit
     }
+
+    var reqs []*setWatchesRequest
+    var req *setWatchesRequest
+    var sizeSoFar int
+
     n := 0
     for pathType, watchers := range c.watchers {
         if len(watchers) == 0 {
             continue
         }
+        addlLen := 4 + len(pathType.path)
+        if req == nil || sizeSoFar+addlLen > limit {
+            if req != nil {
+                // add to set of requests that we'll send
+                reqs = append(reqs, req)
+            }
+            sizeSoFar = 28 // fixed overhead of a set-watches packet
+            req = &setWatchesRequest{
+                RelativeZxid: c.lastZxid,
+                DataWatches:  make([]string, 0),
+                ExistWatches: make([]string, 0),
+                ChildWatches: make([]string, 0),
+            }
+        }
+        sizeSoFar += addlLen
         switch pathType.wType {
         case watchTypeData:
             req.DataWatches = append(req.DataWatches, pathType.path)
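The size estimate used by the new chunking code (a fixed 28-byte packet overhead, plus 4 bytes and the path bytes per watched path) can be shown on its own. The helper below is not part of the diff; it is a sketch of the same batching rule applied to plain path strings.

```go
package main

import "fmt"

// batchPaths groups paths so that the estimated encoded size of each batch
// (28 bytes of fixed overhead, plus 4 + len(path) per entry) stays under limit.
// Illustrative only; the real code builds setWatchesRequest values instead.
func batchPaths(paths []string, limit int) [][]string {
	var batches [][]string
	var current []string
	sizeSoFar := 28 // fixed overhead of a set-watches packet
	for _, p := range paths {
		addlLen := 4 + len(p)
		if len(current) > 0 && sizeSoFar+addlLen > limit {
			batches = append(batches, current)
			current = nil
			sizeSoFar = 28
		}
		current = append(current, p)
		sizeSoFar += addlLen
	}
	if len(current) > 0 {
		batches = append(batches, current)
	}
	return batches
}

func main() {
	paths := []string{"/a", "/some/longer/path", "/b"}
	fmt.Println(batchPaths(paths, 128*1024)) // 128kb default limit from the diff
}
```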
@@ -530,12 +621,26 @@ func (c *Conn) sendSetWatches() {
     if n == 0 {
         return
     }
+    if req != nil { // don't forget any trailing packet we were building
+        reqs = append(reqs, req)
+    }

+    if c.setWatchCallback != nil {
+        c.setWatchCallback(reqs)
+    }
+
     go func() {
         res := &setWatchesResponse{}
-        _, err := c.request(opSetWatches, req, res, nil)
-        if err != nil {
-            c.logger.Printf("Failed to set previous watches: %s", err.Error())
+        // TODO: Pipeline these so queue all of them up before waiting on any
+        // response. That will require some investigation to make sure there
+        // aren't failure modes where a blocking write to the channel of requests
+        // could hang indefinitely and cause this goroutine to leak...
+        for _, req := range reqs {
+            _, err := c.request(opSetWatches, req, res, nil)
+            if err != nil {
+                c.logger.Printf("Failed to set previous watches: %s", err.Error())
+                break
+            }
         }
     }()
 }
@@ -676,7 +781,11 @@ func (c *Conn) sendLoop() error {
 }

 func (c *Conn) recvLoop(conn net.Conn) error {
-    buf := make([]byte, bufferSize)
+    sz := bufferSize
+    if c.maxBufferSize > 0 && sz > c.maxBufferSize {
+        sz = c.maxBufferSize
+    }
+    buf := make([]byte, sz)
     for {
         // package length
         conn.SetReadDeadline(time.Now().Add(c.recvTimeout))
@@ -687,6 +796,9 @@ func (c *Conn) recvLoop(conn net.Conn) error {

         blen := int(binary.BigEndian.Uint32(buf[:4]))
         if cap(buf) < blen {
+            if c.maxBufferSize > 0 && blen > c.maxBufferSize {
+                return fmt.Errorf("received packet from server with length %d, which exceeds max buffer size %d", blen, c.maxBufferSize)
+            }
             buf = make([]byte, blen)
         }
@@ -831,12 +943,20 @@ func (c *Conn) AddAuth(scheme string, auth []byte) error {
 }

 func (c *Conn) Children(path string) ([]string, *Stat, error) {
+    if err := validatePath(path, false); err != nil {
+        return nil, nil, err
+    }
+
     res := &getChildren2Response{}
     _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil)
     return res.Children, &res.Stat, err
 }

 func (c *Conn) ChildrenW(path string) ([]string, *Stat, <-chan Event, error) {
+    if err := validatePath(path, false); err != nil {
+        return nil, nil, nil, err
+    }
+
     var ech <-chan Event
     res := &getChildren2Response{}
     _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
@@ -851,6 +971,10 @@ func (c *Conn) ChildrenW(path string) ([]string, *Stat, <-chan Event, error) {
 }

 func (c *Conn) Get(path string) ([]byte, *Stat, error) {
+    if err := validatePath(path, false); err != nil {
+        return nil, nil, err
+    }
+
     res := &getDataResponse{}
     _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil)
     return res.Data, &res.Stat, err
@@ -858,6 +982,10 @@ func (c *Conn) Get(path string) ([]byte, *Stat, error) {

 // GetW returns the contents of a znode and sets a watch
 func (c *Conn) GetW(path string) ([]byte, *Stat, <-chan Event, error) {
+    if err := validatePath(path, false); err != nil {
+        return nil, nil, nil, err
+    }
+
     var ech <-chan Event
     res := &getDataResponse{}
     _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
@@ -872,15 +1000,20 @@ func (c *Conn) GetW(path string) ([]byte, *Stat, <-chan Event, error) {
 }

 func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) {
-    if path == "" {
-        return nil, ErrInvalidPath
+    if err := validatePath(path, false); err != nil {
+        return nil, err
     }
+
     res := &setDataResponse{}
     _, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil)
     return &res.Stat, err
 }

 func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) {
+    if err := validatePath(path, flags&FlagSequence == FlagSequence); err != nil {
+        return "", err
+    }
+
     res := &createResponse{}
     _, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil)
     return res.Path, err
@@ -891,6 +1024,10 @@ func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string,
 // ephemeral node still exists. Therefore, on reconnect we need to check if a node
 // with a GUID generated on create exists.
 func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) {
+    if err := validatePath(path, true); err != nil {
+        return "", err
+    }
+
     var guid [16]byte
     _, err := io.ReadFull(rand.Reader, guid[:16])
     if err != nil {
@@ -932,11 +1069,19 @@ func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl
 }

 func (c *Conn) Delete(path string, version int32) error {
+    if err := validatePath(path, false); err != nil {
+        return err
+    }
+
     _, err := c.request(opDelete, &DeleteRequest{path, version}, &deleteResponse{}, nil)
     return err
 }

 func (c *Conn) Exists(path string) (bool, *Stat, error) {
+    if err := validatePath(path, false); err != nil {
+        return false, nil, err
+    }
+
     res := &existsResponse{}
     _, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil)
     exists := true
@@ -948,6 +1093,10 @@ func (c *Conn) Exists(path string) (bool, *Stat, error) {
 }

 func (c *Conn) ExistsW(path string) (bool, *Stat, <-chan Event, error) {
+    if err := validatePath(path, false); err != nil {
+        return false, nil, nil, err
+    }
+
     var ech <-chan Event
     res := &existsResponse{}
     _, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) {
@@ -969,17 +1118,29 @@ func (c *Conn) ExistsW(path string) (bool, *Stat, <-chan Event, error) {
 }

 func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) {
+    if err := validatePath(path, false); err != nil {
+        return nil, nil, err
+    }
+
     res := &getAclResponse{}
     _, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil)
     return res.Acl, &res.Stat, err
 }
 func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) {
+    if err := validatePath(path, false); err != nil {
+        return nil, err
+    }
+
     res := &setAclResponse{}
     _, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil)
     return &res.Stat, err
 }

 func (c *Conn) Sync(path string) (string, error) {
+    if err := validatePath(path, false); err != nil {
+        return "", err
+    }
+
     res := &syncResponse{}
     _, err := c.request(opSync, &syncRequest{Path: path}, res, nil)
     return res.Path, err