column            type           range
repo              stringlengths  5..67
path              stringlengths  4..218
func_name         stringlengths  0..151
original_string   stringlengths  52..373k
language          stringclasses  6 values
code              stringlengths  52..373k
code_tokens       listlengths    10..512
docstring         stringlengths  3..47.2k
docstring_tokens  listlengths    3..234
sha               stringlengths  40..40
url               stringlengths  85..339
partition         stringclasses  3 values
docker/swarmkit
manager/state/raft/util.go
Register
func Register(server *grpc.Server, node *Node) {
    api.RegisterRaftServer(server, node)
    api.RegisterRaftMembershipServer(server, node)
}
go
[ "func", "Register", "(", "server", "*", "grpc", ".", "Server", ",", "node", "*", "Node", ")", "{", "api", ".", "RegisterRaftServer", "(", "server", ",", "node", ")", "\n", "api", ".", "RegisterRaftMembershipServer", "(", "server", ",", "node", ")", "\n",...
// Register registers the node raft server
[ "Register", "registers", "the", "node", "raft", "server" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/util.go#L37-L40
train
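The Register record above is a one-liner that wires both Raft services onto a gRPC server. A minimal caller sketch; the serveRaft helper and its listener are hypothetical, and the *raft.Node is assumed to come from the manager's existing setup:

package example

import (
    "net"

    "google.golang.org/grpc"

    "github.com/docker/swarmkit/manager/state/raft"
)

// serveRaft (hypothetical) registers the node's Raft and RaftMembership
// services on a fresh gRPC server and serves until the listener closes.
func serveRaft(node *raft.Node, lis net.Listener) error {
    server := grpc.NewServer()
    raft.Register(server, node)
    return server.Serve(lis)
}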
docker/swarmkit
manager/state/raft/util.go
WaitForLeader
func WaitForLeader(ctx context.Context, n *Node) error {
    _, err := n.Leader()
    if err == nil {
        return nil
    }
    ticker := time.NewTicker(50 * time.Millisecond)
    defer ticker.Stop()
    for err != nil {
        select {
        case <-ticker.C:
        case <-ctx.Done():
            return ctx.Err()
        }
        _, err = n.Leader()
    }
    return nil
}
go
[ "func", "WaitForLeader", "(", "ctx", "context", ".", "Context", ",", "n", "*", "Node", ")", "error", "{", "_", ",", "err", ":=", "n", ".", "Leader", "(", ")", "\n", "if", "err", "==", "nil", "{", "return", "nil", "\n", "}", "\n", "ticker", ":=", ...
// WaitForLeader waits until the node observes some leader in the cluster. It returns an
// error if ctx was cancelled before a leader appeared.
[ "WaitForLeader", "waits", "until", "node", "observe", "some", "leader", "in", "cluster", ".", "It", "returns", "error", "if", "ctx", "was", "cancelled", "before", "leader", "appeared", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/util.go#L44-L60
train
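Since WaitForLeader polls every 50ms until ctx is done, callers typically bound it with a deadline. A small sketch under that assumption; the waitBriefly wrapper is hypothetical:

package example

import (
    "context"
    "time"

    "github.com/docker/swarmkit/manager/state/raft"
)

// waitBriefly (hypothetical) bounds WaitForLeader with a five-second
// deadline so a leaderless cluster cannot block the caller forever.
func waitBriefly(n *raft.Node) error {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    return raft.WaitForLeader(ctx, n)
}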
docker/swarmkit
manager/state/raft/util.go
WaitForCluster
func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err error) {
    watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), api.EventCreateCluster{})
    defer cancel()

    var clusters []*api.Cluster
    n.MemoryStore().View(func(readTx store.ReadTx) {
        clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
    })
    if err != nil {
        return nil, err
    }
    if len(clusters) == 1 {
        cluster = clusters[0]
    } else {
        select {
        case e := <-watch:
            cluster = e.(api.EventCreateCluster).Cluster
        case <-ctx.Done():
            return nil, ctx.Err()
        }
    }
    return cluster, nil
}
go
[ "func", "WaitForCluster", "(", "ctx", "context", ".", "Context", ",", "n", "*", "Node", ")", "(", "cluster", "*", "api", ".", "Cluster", ",", "err", "error", ")", "{", "watch", ",", "cancel", ":=", "state", ".", "Watch", "(", "n", ".", "MemoryStore",...
// WaitForCluster waits until the node observes that the cluster-wide config is
// committed to raft. This ensures that we can see and serve information
// related to the cluster.
[ "WaitForCluster", "waits", "until", "node", "observes", "that", "the", "cluster", "wide", "config", "is", "committed", "to", "raft", ".", "This", "ensures", "that", "we", "can", "see", "and", "serve", "informations", "related", "to", "the", "cluster", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/util.go#L65-L90
train
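WaitForCluster composes naturally with WaitForLeader during manager startup: a leader has to exist before the cluster object can be committed. A hypothetical sequencing sketch:

package example

import (
    "context"

    "github.com/docker/swarmkit/api"
    "github.com/docker/swarmkit/manager/state/raft"
)

// awaitReady (hypothetical) waits for a leader, then for the committed
// cluster-wide config, before the caller starts serving cluster state.
func awaitReady(ctx context.Context, n *raft.Node) (*api.Cluster, error) {
    if err := raft.WaitForLeader(ctx, n); err != nil {
        return nil, err
    }
    return raft.WaitForCluster(ctx, n)
}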
docker/swarmkit
manager/dispatcher/nodes.go
checkSessionID
func (rn *registeredNode) checkSessionID(sessionID string) error {
    rn.mu.Lock()
    defer rn.mu.Unlock()

    // Before each message send, we need to check that the node's sessionID
    // hasn't changed. If it has, we will close the stream and make the node
    // re-register.
    if sessionID == "" || rn.SessionID != sessionID {
        return status.Errorf(codes.InvalidArgument, ErrSessionInvalid.Error())
    }
    return nil
}
go
[ "func", "(", "rn", "*", "registeredNode", ")", "checkSessionID", "(", "sessionID", "string", ")", "error", "{", "rn", ".", "mu", ".", "Lock", "(", ")", "\n", "defer", "rn", ".", "mu", ".", "Unlock", "(", ")", "\n", "if", "sessionID", "==", "\"\"", ...
// checkSessionID determines if the SessionID has changed and returns the
// appropriate gRPC error code.
//
// This may not belong here in the future.
[ "checkSessionID", "determines", "if", "the", "SessionID", "has", "changed", "and", "returns", "the", "appropriate", "GRPC", "error", "code", ".", "This", "may", "not", "belong", "here", "in", "the", "future", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/nodes.go#L30-L42
train
docker/swarmkit
manager/dispatcher/nodes.go
CheckRateLimit
func (s *nodeStore) CheckRateLimit(id string) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    if existRn, ok := s.nodes[id]; ok {
        if time.Since(existRn.Registered) > s.rateLimitPeriod {
            existRn.Attempts = 0
        }
        existRn.Attempts++
        if existRn.Attempts > rateLimitCount {
            return status.Errorf(codes.Unavailable, "node %s exceeded rate limit count of registrations", id)
        }
        existRn.Registered = time.Now()
    }
    return nil
}
go
[ "func", "(", "s", "*", "nodeStore", ")", "CheckRateLimit", "(", "id", "string", ")", "error", "{", "s", ".", "mu", ".", "Lock", "(", ")", "\n", "defer", "s", ".", "mu", ".", "Unlock", "(", ")", "\n", "if", "existRn", ",", "ok", ":=", "s", ".", ...
// CheckRateLimit returns an error if the node with the specified id is not yet
// allowed to re-register.
[ "CheckRateLimit", "returns", "error", "if", "node", "with", "specified", "id", "is", "allowed", "to", "re", "-", "register", "again", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/nodes.go#L90-L104
train
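The bookkeeping in CheckRateLimit (reset the counter once the window since the last registration elapses, then count attempts against a limit) generalizes beyond the dispatcher. A self-contained sketch of the same pattern; every name here is hypothetical:

package example

import (
    "errors"
    "time"
)

// windowCounter mirrors CheckRateLimit's bookkeeping: attempts accumulate
// until the window since the last accepted event elapses, then reset.
type windowCounter struct {
    attempts int
    last     time.Time
    window   time.Duration
    limit    int
}

func (w *windowCounter) allow() error {
    if time.Since(w.last) > w.window {
        w.attempts = 0
    }
    w.attempts++
    if w.attempts > w.limit {
        return errors.New("rate limit exceeded")
    }
    w.last = time.Now()
    return nil
}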
docker/swarmkit
manager/dispatcher/nodes.go
Add
func (s *nodeStore) Add(n *api.Node, expireFunc func()) *registeredNode {
    s.mu.Lock()
    defer s.mu.Unlock()
    var attempts int
    var registered time.Time
    if existRn, ok := s.nodes[n.ID]; ok {
        attempts = existRn.Attempts
        registered = existRn.Registered
        existRn.Heartbeat.Stop()
        delete(s.nodes, n.ID)
    }
    if registered.IsZero() {
        registered = time.Now()
    }
    rn := &registeredNode{
        SessionID:  identity.NewID(), // session ID is local to the dispatcher.
        Node:       n,
        Registered: registered,
        Attempts:   attempts,
        Disconnect: make(chan struct{}),
    }
    s.nodes[n.ID] = rn
    rn.Heartbeat = heartbeat.New(s.periodChooser.Choose()*s.gracePeriodMultiplierNormal, expireFunc)
    return rn
}
go
[ "func", "(", "s", "*", "nodeStore", ")", "Add", "(", "n", "*", "api", ".", "Node", ",", "expireFunc", "func", "(", ")", ")", "*", "registeredNode", "{", "s", ".", "mu", ".", "Lock", "(", ")", "\n", "defer", "s", ".", "mu", ".", "Unlock", "(", ...
// Add adds a new node and returns it; it replaces an existing node without notification.
[ "Add", "adds", "new", "node", "and", "returns", "it", "it", "replaces", "existing", "without", "notification", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/nodes.go#L107-L131
train
docker/swarmkit
manager/dispatcher/nodes.go
Clean
func (s *nodeStore) Clean() {
    s.mu.Lock()
    for _, rn := range s.nodes {
        rn.Heartbeat.Stop()
    }
    s.nodes = make(map[string]*registeredNode)
    s.mu.Unlock()
}
go
[ "func", "(", "s", "*", "nodeStore", ")", "Clean", "(", ")", "{", "s", ".", "mu", ".", "Lock", "(", ")", "\n", "for", "_", ",", "rn", ":=", "range", "s", ".", "nodes", "{", "rn", ".", "Heartbeat", ".", "Stop", "(", ")", "\n", "}", "\n", "s",...
// Clean removes all nodes and stops their heartbeats.
// It's equivalent to invalidating all sessions.
[ "Clean", "removes", "all", "nodes", "and", "stops", "their", "heartbeats", ".", "It", "s", "equivalent", "to", "invalidate", "all", "sessions", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/nodes.go#L190-L197
train
docker/swarmkit
agent/exec/dockerapi/executor.go
Controller
func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
    // Get the node description from the executor field
    e.mutex.Lock()
    nodeDescription := e.node
    e.mutex.Unlock()

    ctlr, err := newController(e.client, nodeDescription, t, secrets.Restrict(e.secrets, t))
    if err != nil {
        return nil, err
    }
    return ctlr, nil
}
go
[ "func", "(", "e", "*", "executor", ")", "Controller", "(", "t", "*", "api", ".", "Task", ")", "(", "exec", ".", "Controller", ",", "error", ")", "{", "e", ".", "mutex", ".", "Lock", "(", ")", "\n", "nodeDescription", ":=", "e", ".", "node", "\n",...
// Controller returns a docker container controller.
[ "Controller", "returns", "a", "docker", "container", "controller", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/agent/exec/dockerapi/executor.go#L131-L142
train
docker/swarmkit
watch/queue/queue.go
NewLimitQueue
func NewLimitQueue(dst events.Sink, limit uint64) *LimitQueue {
    eq := LimitQueue{
        dst:    dst,
        events: list.New(),
        limit:  limit,
        full:   make(chan struct{}),
    }
    eq.cond = sync.NewCond(&eq.mu)
    go eq.run()
    return &eq
}
go
[ "func", "NewLimitQueue", "(", "dst", "events", ".", "Sink", ",", "limit", "uint64", ")", "*", "LimitQueue", "{", "eq", ":=", "LimitQueue", "{", "dst", ":", "dst", ",", "events", ":", "list", ".", "New", "(", ")", ",", "limit", ":", "limit", ",", "f...
// NewLimitQueue returns a queue that forwards events to the provided Sink dst.
[ "NewLimitQueue", "returns", "a", "queue", "to", "the", "provided", "Sink", "dst", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/watch/queue/queue.go#L33-L44
train
docker/swarmkit
watch/queue/queue.go
Write
func (eq *LimitQueue) Write(event events.Event) error {
    eq.mu.Lock()
    defer eq.mu.Unlock()

    if eq.closed {
        return events.ErrSinkClosed
    }

    if eq.limit > 0 && uint64(eq.events.Len()) >= eq.limit {
        // If the limit has been reached, don't write the event to the queue,
        // and close the Full channel. This notifies listeners that the queue
        // is now full, but the sink is still permitted to consume events. It's
        // the responsibility of the listener to decide whether they want to
        // live with dropped events or whether they want to Close() the
        // LimitQueue
        if !eq.fullClosed {
            eq.fullClosed = true
            close(eq.full)
        }
        return ErrQueueFull
    }

    eq.events.PushBack(event)
    eq.cond.Signal() // signal waiters

    return nil
}
go
[ "func", "(", "eq", "*", "LimitQueue", ")", "Write", "(", "event", "events", ".", "Event", ")", "error", "{", "eq", ".", "mu", ".", "Lock", "(", ")", "\n", "defer", "eq", ".", "mu", ".", "Unlock", "(", ")", "\n", "if", "eq", ".", "closed", "{", ...
// Write accepts an event into the queue, only failing if the queue has
// been closed or has reached its size limit.
[ "Write", "accepts", "the", "events", "into", "the", "queue", "only", "failing", "if", "the", "queue", "has", "been", "closed", "or", "has", "reached", "its", "size", "limit", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/watch/queue/queue.go#L48-L74
train
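Write returns ErrQueueFull rather than blocking, so producers choose between dropping events and closing the queue. A usage sketch assuming a trivial sink; printSink and produce are hypothetical, while NewLimitQueue, Write, and ErrQueueFull come from the records above:

package example

import (
    "fmt"

    events "github.com/docker/go-events"
    "github.com/docker/swarmkit/watch/queue"
)

// printSink (hypothetical) just logs each event it receives.
type printSink struct{}

func (printSink) Write(e events.Event) error { fmt.Println("event:", e); return nil }
func (printSink) Close() error               { return nil }

// produce drops events on the floor once the queue reports it is full.
func produce(n int) {
    q := queue.NewLimitQueue(printSink{}, 2)
    for i := 0; i < n; i++ {
        if err := q.Write(i); err == queue.ErrQueueFull {
            fmt.Println("dropped event", i)
        }
    }
}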
docker/swarmkit
watch/queue/queue.go
Len
func (eq *LimitQueue) Len() int {
    eq.mu.Lock()
    defer eq.mu.Unlock()
    return eq.events.Len()
}
go
[ "func", "(", "eq", "*", "LimitQueue", ")", "Len", "(", ")", "int", "{", "eq", ".", "mu", ".", "Lock", "(", ")", "\n", "defer", "eq", ".", "mu", ".", "Unlock", "(", ")", "\n", "return", "eq", ".", "events", ".", "Len", "(", ")", "\n", "}" ]
// Len returns the number of items that are currently stored in the queue and
// not consumed by its sink.
[ "Len", "returns", "the", "number", "of", "items", "that", "are", "currently", "stored", "in", "the", "queue", "and", "not", "consumed", "by", "its", "sink", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/watch/queue/queue.go#L125-L129
train
docker/swarmkit
ca/reconciler.go
IssuerFromAPIRootCA
func IssuerFromAPIRootCA(rootCA *api.RootCA) (*IssuerInfo, error) {
    wantedIssuer := rootCA.CACert
    if rootCA.RootRotation != nil {
        wantedIssuer = rootCA.RootRotation.CACert
    }
    issuerCerts, err := helpers.ParseCertificatesPEM(wantedIssuer)
    if err != nil {
        return nil, errors.Wrap(err, "invalid certificate in cluster root CA object")
    }
    if len(issuerCerts) == 0 {
        return nil, errors.New("invalid certificate in cluster root CA object")
    }
    return &IssuerInfo{
        Subject:   issuerCerts[0].RawSubject,
        PublicKey: issuerCerts[0].RawSubjectPublicKeyInfo,
    }, nil
}
go
[ "func", "IssuerFromAPIRootCA", "(", "rootCA", "*", "api", ".", "RootCA", ")", "(", "*", "IssuerInfo", ",", "error", ")", "{", "wantedIssuer", ":=", "rootCA", ".", "CACert", "\n", "if", "rootCA", ".", "RootRotation", "!=", "nil", "{", "wantedIssuer", "=", ...
// IssuerFromAPIRootCA returns the desired issuer given an API root CA object
[ "IssuerFromAPIRootCA", "returns", "the", "desired", "issuer", "given", "an", "API", "root", "CA", "object" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/ca/reconciler.go#L50-L66
train
docker/swarmkit
ca/reconciler.go
finishRootRotation
func (r *rootRotationReconciler) finishRootRotation(tx store.Tx, expectedRootCA *api.RootCA) error {
    cluster := store.GetCluster(tx, r.clusterID)
    if cluster == nil {
        return fmt.Errorf("unable to get cluster %s", r.clusterID)
    }

    // If the RootCA object has changed (because another root rotation was started or because some other node
    // had finished the root rotation), we cannot finish the root rotation that we were working on.
    if !equality.RootCAEqualStable(expectedRootCA, &cluster.RootCA) {
        return errRootRotationChanged
    }

    var signerCert []byte
    if len(cluster.RootCA.RootRotation.CAKey) > 0 {
        signerCert = cluster.RootCA.RootRotation.CACert
    }
    // we don't actually have to parse out the default node expiration from the cluster - we are just using
    // the ca.RootCA object to generate new tokens and the digest
    updatedRootCA, err := NewRootCA(cluster.RootCA.RootRotation.CACert, signerCert, cluster.RootCA.RootRotation.CAKey,
        DefaultNodeCertExpiration, nil)
    if err != nil {
        return errors.Wrap(err, "invalid cluster root rotation object")
    }
    cluster.RootCA = api.RootCA{
        CACert:     cluster.RootCA.RootRotation.CACert,
        CAKey:      cluster.RootCA.RootRotation.CAKey,
        CACertHash: updatedRootCA.Digest.String(),
        JoinTokens: api.JoinTokens{
            Worker:  GenerateJoinToken(&updatedRootCA, cluster.FIPS),
            Manager: GenerateJoinToken(&updatedRootCA, cluster.FIPS),
        },
        LastForcedRotation: cluster.RootCA.LastForcedRotation,
    }
    return store.UpdateCluster(tx, cluster)
}
go
[ "func", "(", "r", "*", "rootRotationReconciler", ")", "finishRootRotation", "(", "tx", "store", ".", "Tx", ",", "expectedRootCA", "*", "api", ".", "RootCA", ")", "error", "{", "cluster", ":=", "store", ".", "GetCluster", "(", "tx", ",", "r", ".", "cluste...
// This function assumes that the expected root CA has root rotation. This is intended to be used by
// `reconcileNodeRootsAndCerts`, which uses the root CA from the `lastSeenClusterRootCA`, and checks
// that it has a root rotation before calling this function.
[ "This", "function", "assumes", "that", "the", "expected", "root", "CA", "has", "root", "rotation", ".", "This", "is", "intended", "to", "be", "used", "by", "reconcileNodeRootsAndCerts", "which", "uses", "the", "root", "CA", "from", "the", "lastSeenClusterRootCA"...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/ca/reconciler.go#L204-L238
train
docker/swarmkit
agent/dependency.go
NewDependencyManager
func NewDependencyManager() exec.DependencyManager {
    return &dependencyManager{
        secrets: secrets.NewManager(),
        configs: configs.NewManager(),
    }
}
go
[ "func", "NewDependencyManager", "(", ")", "exec", ".", "DependencyManager", "{", "return", "&", "dependencyManager", "{", "secrets", ":", "secrets", ".", "NewManager", "(", ")", ",", "configs", ":", "configs", ".", "NewManager", "(", ")", ",", "}", "\n", "...
// NewDependencyManager creates a dependency manager object that wraps
// objects which provide access to various dependency types.
[ "NewDependencyManager", "creates", "a", "dependency", "manager", "object", "that", "wraps", "objects", "which", "provide", "access", "to", "various", "dependency", "types", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/agent/dependency.go#L17-L22
train
docker/swarmkit
agent/dependency.go
Restrict
func Restrict(dependencies exec.DependencyManager, t *api.Task) exec.DependencyGetter {
    return &dependencyGetter{
        secrets: secrets.Restrict(dependencies.Secrets(), t),
        configs: configs.Restrict(dependencies.Configs(), t),
    }
}
go
[ "func", "Restrict", "(", "dependencies", "exec", ".", "DependencyManager", ",", "t", "*", "api", ".", "Task", ")", "exec", ".", "DependencyGetter", "{", "return", "&", "dependencyGetter", "{", "secrets", ":", "secrets", ".", "Restrict", "(", "dependencies", ...
// Restrict provides getters that only allow access to the dependencies
// referenced by the task.
[ "Restrict", "provides", "getters", "that", "only", "allows", "access", "to", "the", "dependencies", "referenced", "by", "the", "task", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/agent/dependency.go#L47-L52
train
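NewDependencyManager and Restrict pair up: the manager holds everything the agent knows about, and Restrict narrows it to one task's view. A hypothetical wiring sketch; in practice the manager would live on the agent rather than being created per task:

package example

import (
    "github.com/docker/swarmkit/agent"
    "github.com/docker/swarmkit/agent/exec"
    "github.com/docker/swarmkit/api"
)

// scopedGetter (hypothetical) hands a controller only the secrets and
// configs that the given task actually references.
func scopedGetter(t *api.Task) exec.DependencyGetter {
    deps := agent.NewDependencyManager()
    return agent.Restrict(deps, t)
}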
docker/swarmkit
manager/scheduler/scheduler.go
New
func New(store *store.MemoryStore) *Scheduler {
    return &Scheduler{
        store:                   store,
        unassignedTasks:         make(map[string]*api.Task),
        pendingPreassignedTasks: make(map[string]*api.Task),
        preassignedTasks:        make(map[string]struct{}),
        allTasks:                make(map[string]*api.Task),
        stopChan:                make(chan struct{}),
        doneChan:                make(chan struct{}),
        pipeline:                NewPipeline(),
    }
}
go
[ "func", "New", "(", "store", "*", "store", ".", "MemoryStore", ")", "*", "Scheduler", "{", "return", "&", "Scheduler", "{", "store", ":", "store", ",", "unassignedTasks", ":", "make", "(", "map", "[", "string", "]", "*", "api", ".", "Task", ")", ",",...
// New creates a new scheduler.
[ "New", "creates", "a", "new", "scheduler", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/scheduler.go#L50-L61
train
docker/swarmkit
manager/scheduler/scheduler.go
Run
func (s *Scheduler) Run(ctx context.Context) error {
    defer close(s.doneChan)

    updates, cancel, err := store.ViewAndWatch(s.store, s.setupTasksList)
    if err != nil {
        log.G(ctx).WithError(err).Errorf("snapshot store update failed")
        return err
    }
    defer cancel()

    // Validate resource for tasks from preassigned tasks
    // do this before other tasks because preassigned tasks like
    // global service should start before other tasks
    s.processPreassignedTasks(ctx)

    // Queue all unassigned tasks before processing changes.
    s.tick(ctx)

    const (
        // commitDebounceGap is the amount of time to wait between
        // commit events to debounce them.
        commitDebounceGap = 50 * time.Millisecond
        // maxLatency is a time limit on the debouncing.
        maxLatency = time.Second
    )

    var (
        debouncingStarted     time.Time
        commitDebounceTimer   *time.Timer
        commitDebounceTimeout <-chan time.Time
    )

    tickRequired := false

    schedule := func() {
        if len(s.pendingPreassignedTasks) > 0 {
            s.processPreassignedTasks(ctx)
        }
        if tickRequired {
            s.tick(ctx)
            tickRequired = false
        }
    }

    // Watch for changes.
    for {
        select {
        case event := <-updates:
            switch v := event.(type) {
            case api.EventCreateTask:
                if s.createTask(ctx, v.Task) {
                    tickRequired = true
                }
            case api.EventUpdateTask:
                if s.updateTask(ctx, v.Task) {
                    tickRequired = true
                }
            case api.EventDeleteTask:
                if s.deleteTask(v.Task) {
                    // deleting tasks may free up node resource, pending tasks should be re-evaluated.
                    tickRequired = true
                }
            case api.EventCreateNode:
                s.createOrUpdateNode(v.Node)
                tickRequired = true
            case api.EventUpdateNode:
                s.createOrUpdateNode(v.Node)
                tickRequired = true
            case api.EventDeleteNode:
                s.nodeSet.remove(v.Node.ID)
            case state.EventCommit:
                if commitDebounceTimer != nil {
                    if time.Since(debouncingStarted) > maxLatency {
                        commitDebounceTimer.Stop()
                        commitDebounceTimer = nil
                        commitDebounceTimeout = nil
                        schedule()
                    } else {
                        commitDebounceTimer.Reset(commitDebounceGap)
                    }
                } else {
                    commitDebounceTimer = time.NewTimer(commitDebounceGap)
                    commitDebounceTimeout = commitDebounceTimer.C
                    debouncingStarted = time.Now()
                }
            }
        case <-commitDebounceTimeout:
            schedule()
            commitDebounceTimer = nil
            commitDebounceTimeout = nil
        case <-s.stopChan:
            return nil
        }
    }
}
go
[ "func", "(", "s", "*", "Scheduler", ")", "Run", "(", "ctx", "context", ".", "Context", ")", "error", "{", "defer", "close", "(", "s", ".", "doneChan", ")", "\n", "updates", ",", "cancel", ",", "err", ":=", "store", ".", "ViewAndWatch", "(", "s", "....
// Run is the scheduler event loop.
[ "Run", "is", "the", "scheduler", "event", "loop", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/scheduler.go#L105-L198
train
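Run blocks inside the event loop until the scheduler is stopped, so callers start it on its own goroutine. A lifecycle sketch; the runScheduler helper is hypothetical, and shutdown is assumed to go through a Stop method defined elsewhere in the scheduler package (the stopChan/doneChan fields above imply it):

package example

import (
    "context"

    "github.com/docker/swarmkit/manager/scheduler"
    "github.com/docker/swarmkit/manager/state/store"
)

// runScheduler (hypothetical) starts the scheduler event loop in the
// background and returns the scheduler for later shutdown.
func runScheduler(ctx context.Context, ms *store.MemoryStore) *scheduler.Scheduler {
    s := scheduler.New(ms)
    go func() {
        _ = s.Run(ctx) // returns once the scheduler is stopped
    }()
    return s
}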
docker/swarmkit
manager/scheduler/scheduler.go
enqueue
func (s *Scheduler) enqueue(t *api.Task) {
    s.unassignedTasks[t.ID] = t
}
go
[ "func", "(", "s", "*", "Scheduler", ")", "enqueue", "(", "t", "*", "api", ".", "Task", ")", "{", "s", ".", "unassignedTasks", "[", "t", ".", "ID", "]", "=", "t", "\n", "}" ]
// enqueue queues a task for scheduling.
[ "enqueue", "queues", "a", "task", "for", "scheduling", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/scheduler.go#L207-L209
train
docker/swarmkit
manager/scheduler/scheduler.go
tick
func (s *Scheduler) tick(ctx context.Context) {
    type commonSpecKey struct {
        serviceID   string
        specVersion api.Version
    }
    tasksByCommonSpec := make(map[commonSpecKey]map[string]*api.Task)
    var oneOffTasks []*api.Task
    schedulingDecisions := make(map[string]schedulingDecision, len(s.unassignedTasks))

    for taskID, t := range s.unassignedTasks {
        if t == nil || t.NodeID != "" {
            // task deleted or already assigned
            delete(s.unassignedTasks, taskID)
            continue
        }

        // Group tasks with common specs
        if t.SpecVersion != nil {
            taskGroupKey := commonSpecKey{
                serviceID:   t.ServiceID,
                specVersion: *t.SpecVersion,
            }

            if tasksByCommonSpec[taskGroupKey] == nil {
                tasksByCommonSpec[taskGroupKey] = make(map[string]*api.Task)
            }
            tasksByCommonSpec[taskGroupKey][taskID] = t
        } else {
            // This task doesn't have a spec version. We have to
            // schedule it as a one-off.
            oneOffTasks = append(oneOffTasks, t)
        }
        delete(s.unassignedTasks, taskID)
    }

    for _, taskGroup := range tasksByCommonSpec {
        s.scheduleTaskGroup(ctx, taskGroup, schedulingDecisions)
    }
    for _, t := range oneOffTasks {
        s.scheduleTaskGroup(ctx, map[string]*api.Task{t.ID: t}, schedulingDecisions)
    }

    _, failed := s.applySchedulingDecisions(ctx, schedulingDecisions)
    for _, decision := range failed {
        s.allTasks[decision.old.ID] = decision.old

        nodeInfo, err := s.nodeSet.nodeInfo(decision.new.NodeID)
        if err == nil && nodeInfo.removeTask(decision.new) {
            s.nodeSet.updateNode(nodeInfo)
        }

        // enqueue task for next scheduling attempt
        s.enqueue(decision.old)
    }
}
go
[ "func", "(", "s", "*", "Scheduler", ")", "tick", "(", "ctx", "context", ".", "Context", ")", "{", "type", "commonSpecKey", "struct", "{", "serviceID", "string", "\n", "specVersion", "api", ".", "Version", "\n", "}", "\n", "tasksByCommonSpec", ":=", "make",...
// tick attempts to schedule the queue.
[ "tick", "attempts", "to", "schedule", "the", "queue", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/scheduler.go#L376-L430
train
docker/swarmkit
manager/scheduler/scheduler.go
taskFitNode
func (s *Scheduler) taskFitNode(ctx context.Context, t *api.Task, nodeID string) *api.Task {
    nodeInfo, err := s.nodeSet.nodeInfo(nodeID)
    if err != nil {
        // node does not exist in set (it may have been deleted)
        return nil
    }
    newT := *t
    s.pipeline.SetTask(t)
    if !s.pipeline.Process(&nodeInfo) {
        // this node cannot accommodate this task
        newT.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
        newT.Status.Err = s.pipeline.Explain()
        s.allTasks[t.ID] = &newT

        return &newT
    }
    newT.Status = api.TaskStatus{
        State:     api.TaskStateAssigned,
        Timestamp: ptypes.MustTimestampProto(time.Now()),
        Message:   "scheduler confirmed task can run on preassigned node",
    }
    s.allTasks[t.ID] = &newT

    if nodeInfo.addTask(&newT) {
        s.nodeSet.updateNode(nodeInfo)
    }
    return &newT
}
go
[ "func", "(", "s", "*", "Scheduler", ")", "taskFitNode", "(", "ctx", "context", ".", "Context", ",", "t", "*", "api", ".", "Task", ",", "nodeID", "string", ")", "*", "api", ".", "Task", "{", "nodeInfo", ",", "err", ":=", "s", ".", "nodeSet", ".", ...
// taskFitNode checks if a node has enough resources to accommodate a task.
[ "taskFitNode", "checks", "if", "a", "node", "has", "enough", "resources", "to", "accommodate", "a", "task", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/scheduler.go#L502-L529
train
docker/swarmkit
manager/scheduler/scheduler.go
scheduleTaskGroup
func (s *Scheduler) scheduleTaskGroup(ctx context.Context, taskGroup map[string]*api.Task, schedulingDecisions map[string]schedulingDecision) {
    // Pick a task at random from taskGroup to use for constraint
    // evaluation. It doesn't matter which one we pick because all the
    // tasks in the group are equal in terms of the fields the constraint
    // filters consider.
    var t *api.Task
    for _, t = range taskGroup {
        break
    }

    s.pipeline.SetTask(t)

    now := time.Now()
    nodeLess := func(a *NodeInfo, b *NodeInfo) bool {
        // If either node has at least maxFailures recent failures,
        // that's the deciding factor.
        recentFailuresA := a.countRecentFailures(now, t)
        recentFailuresB := b.countRecentFailures(now, t)

        if recentFailuresA >= maxFailures || recentFailuresB >= maxFailures {
            if recentFailuresA > recentFailuresB {
                return false
            }
            if recentFailuresB > recentFailuresA {
                return true
            }
        }

        tasksByServiceA := a.ActiveTasksCountByService[t.ServiceID]
        tasksByServiceB := b.ActiveTasksCountByService[t.ServiceID]

        if tasksByServiceA < tasksByServiceB {
            return true
        }
        if tasksByServiceA > tasksByServiceB {
            return false
        }

        // Total number of tasks breaks ties.
        return a.ActiveTasksCount < b.ActiveTasksCount
    }

    var prefs []*api.PlacementPreference
    if t.Spec.Placement != nil {
        prefs = t.Spec.Placement.Preferences
    }

    tree := s.nodeSet.tree(t.ServiceID, prefs, len(taskGroup), s.pipeline.Process, nodeLess)

    s.scheduleNTasksOnSubtree(ctx, len(taskGroup), taskGroup, &tree, schedulingDecisions, nodeLess)
    if len(taskGroup) != 0 {
        s.noSuitableNode(ctx, taskGroup, schedulingDecisions)
    }
}
go
[ "func", "(", "s", "*", "Scheduler", ")", "scheduleTaskGroup", "(", "ctx", "context", ".", "Context", ",", "taskGroup", "map", "[", "string", "]", "*", "api", ".", "Task", ",", "schedulingDecisions", "map", "[", "string", "]", "schedulingDecision", ")", "{"...
// scheduleTaskGroup schedules a batch of tasks that are part of the same
// service and share the same version of the spec.
[ "scheduleTaskGroup", "schedules", "a", "batch", "of", "tasks", "that", "are", "part", "of", "the", "same", "service", "and", "share", "the", "same", "version", "of", "the", "spec", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/scheduler.go#L533-L587
train
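The nodeLess comparator above is a least-loaded ordering with a tie-breaker. The same shape, extracted as a self-contained sort and omitting the recent-failure clause; all names here are hypothetical:

package example

import "sort"

// nodeLoad (hypothetical) stands in for NodeInfo's per-service and total
// active task counts.
type nodeLoad struct {
    serviceTasks int
    totalTasks   int
}

// orderNodes sorts candidates the way nodeLess does: fewer tasks of this
// service first, with total task count breaking ties.
func orderNodes(nodes []nodeLoad) {
    sort.Slice(nodes, func(i, j int) bool {
        if nodes[i].serviceTasks != nodes[j].serviceTasks {
            return nodes[i].serviceTasks < nodes[j].serviceTasks
        }
        return nodes[i].totalTasks < nodes[j].totalTasks
    })
}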
docker/swarmkit
manager/state/raft/storage/snapwrap.go
NewSnapFactory
func NewSnapFactory(encrypter encryption.Encrypter, decrypter encryption.Decrypter) SnapFactory {
    return snapCryptor{
        encrypter: encrypter,
        decrypter: decrypter,
    }
}
go
[ "func", "NewSnapFactory", "(", "encrypter", "encryption", ".", "Encrypter", ",", "decrypter", "encryption", ".", "Decrypter", ")", "SnapFactory", "{", "return", "snapCryptor", "{", "encrypter", ":", "encrypter", ",", "decrypter", ":", "decrypter", ",", "}", "\n"...
// NewSnapFactory returns a new object that can read from and write to encrypted
// snapshots on disk
[ "NewSnapFactory", "returns", "a", "new", "object", "that", "can", "read", "from", "and", "write", "to", "encrypted", "snapshots", "on", "disk" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/storage/snapwrap.go#L81-L86
train
docker/swarmkit
manager/state/raft/storage/snapwrap.go
New
func (sc snapCryptor) New(dirpath string) Snapshotter {
    return &wrappedSnap{
        Snapshotter: snap.New(dirpath),
        encrypter:   sc.encrypter,
        decrypter:   sc.decrypter,
    }
}
go
[ "func", "(", "sc", "snapCryptor", ")", "New", "(", "dirpath", "string", ")", "Snapshotter", "{", "return", "&", "wrappedSnap", "{", "Snapshotter", ":", "snap", ".", "New", "(", "dirpath", ")", ",", "encrypter", ":", "sc", ".", "encrypter", ",", "decrypte...
// New returns a new Snapshotter with the given encrypter and decrypter
[ "NewSnapshotter", "returns", "a", "new", "Snapshotter", "with", "the", "given", "encrypters", "and", "decrypters" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/storage/snapwrap.go#L89-L95
train
docker/swarmkit
manager/state/raft/storage/snapwrap.go
MigrateSnapshot
func MigrateSnapshot(oldDir, newDir string, oldFactory, newFactory SnapFactory) error {
    // use temporary snapshot directory so initialization appears atomic
    oldSnapshotter := oldFactory.New(oldDir)
    snapshot, err := oldSnapshotter.Load()
    switch err {
    case snap.ErrNoSnapshot: // if there's no snapshot, the migration succeeded
        return nil
    case nil:
        break
    default:
        return err
    }

    tmpdirpath := filepath.Clean(newDir) + ".tmp"
    if fileutil.Exist(tmpdirpath) {
        if err := os.RemoveAll(tmpdirpath); err != nil {
            return errors.Wrap(err, "could not remove temporary snapshot directory")
        }
    }
    if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
        return errors.Wrap(err, "could not create temporary snapshot directory")
    }
    tmpSnapshotter := newFactory.New(tmpdirpath)

    // write the new snapshot to the temporary location
    if err = tmpSnapshotter.SaveSnap(*snapshot); err != nil {
        return err
    }

    return os.Rename(tmpdirpath, newDir)
}
go
[ "func", "MigrateSnapshot", "(", "oldDir", ",", "newDir", "string", ",", "oldFactory", ",", "newFactory", "SnapFactory", ")", "error", "{", "oldSnapshotter", ":=", "oldFactory", ".", "New", "(", "oldDir", ")", "\n", "snapshot", ",", "err", ":=", "oldSnapshotter...
// MigrateSnapshot reads the latest existing snapshot from one directory, encoded one way, and writes
// it to a new directory, encoded a different way
[ "MigrateSnapshot", "reads", "the", "latest", "existing", "snapshot", "from", "one", "directory", "encoded", "one", "way", "and", "writes", "it", "to", "a", "new", "directory", "encoded", "a", "different", "way" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/storage/snapwrap.go#L108-L138
train
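MigrateSnapshot plus two SnapFactory values gives a one-call re-encoding path. A hedged sketch of encrypting previously plaintext snapshots; encryption.NoopCrypter is an assumption about the encryption package, and the reencode helper is hypothetical:

package example

import (
    "github.com/docker/swarmkit/manager/encryption"
    "github.com/docker/swarmkit/manager/state/raft/storage"
)

// reencode (hypothetical) migrates the latest snapshot from a plaintext
// directory into one written with the supplied encrypter/decrypter.
func reencode(oldDir, newDir string, enc encryption.Encrypter, dec encryption.Decrypter) error {
    oldFactory := storage.NewSnapFactory(encryption.NoopCrypter, encryption.NoopCrypter) // NoopCrypter assumed
    newFactory := storage.NewSnapFactory(enc, dec)
    return storage.MigrateSnapshot(oldDir, newDir, oldFactory, newFactory)
}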
docker/swarmkit
manager/orchestrator/replicated/tasks.go
handleTaskChange
func (r *Orchestrator) handleTaskChange(ctx context.Context, t *api.Task) {
    // If we already set the desired state past TaskStateRunning, there is no
    // further action necessary.
    if t.DesiredState > api.TaskStateRunning {
        return
    }

    var (
        n       *api.Node
        service *api.Service
    )
    r.store.View(func(tx store.ReadTx) {
        if t.NodeID != "" {
            n = store.GetNode(tx, t.NodeID)
        }
        if t.ServiceID != "" {
            service = store.GetService(tx, t.ServiceID)
        }
    })

    if !orchestrator.IsReplicatedService(service) {
        return
    }

    if t.Status.State > api.TaskStateRunning ||
        (t.NodeID != "" && orchestrator.InvalidNode(n)) {
        r.restartTasks[t.ID] = struct{}{}
    }
}
go
[ "func", "(", "r", "*", "Orchestrator", ")", "handleTaskChange", "(", "ctx", "context", ".", "Context", ",", "t", "*", "api", ".", "Task", ")", "{", "if", "t", ".", "DesiredState", ">", "api", ".", "TaskStateRunning", "{", "return", "\n", "}", "\n", "...
// handleTaskChange defines what the orchestrator does when a task is updated by the agent.
[ "handleTaskChange", "defines", "what", "orchestrator", "does", "when", "a", "task", "is", "updated", "by", "agent", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/replicated/tasks.go#L119-L147
train
docker/swarmkit
manager/orchestrator/replicated/tasks.go
FixTask
func (r *Orchestrator) FixTask(ctx context.Context, batch *store.Batch, t *api.Task) {
    // If we already set the desired state past TaskStateRunning, there is no
    // further action necessary.
    if t.DesiredState > api.TaskStateRunning {
        return
    }

    var (
        n       *api.Node
        service *api.Service
    )
    batch.Update(func(tx store.Tx) error {
        if t.NodeID != "" {
            n = store.GetNode(tx, t.NodeID)
        }
        if t.ServiceID != "" {
            service = store.GetService(tx, t.ServiceID)
        }
        return nil
    })

    if !orchestrator.IsReplicatedService(service) {
        return
    }

    if t.Status.State > api.TaskStateRunning ||
        (t.NodeID != "" && orchestrator.InvalidNode(n)) {
        r.restartTasks[t.ID] = struct{}{}
        return
    }
}
go
[ "func", "(", "r", "*", "Orchestrator", ")", "FixTask", "(", "ctx", "context", ".", "Context", ",", "batch", "*", "store", ".", "Batch", ",", "t", "*", "api", ".", "Task", ")", "{", "if", "t", ".", "DesiredState", ">", "api", ".", "TaskStateRunning", ...
// FixTask validates a task with the current cluster settings, and takes
// action to make it conformant. It's called at orchestrator initialization.
[ "FixTask", "validates", "a", "task", "with", "the", "current", "cluster", "settings", "and", "takes", "action", "to", "make", "it", "conformant", ".", "it", "s", "called", "at", "orchestrator", "initialization", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/replicated/tasks.go#L151-L181
train
docker/swarmkit
manager/controlapi/ca_rotation.go
newRootRotationObject
func newRootRotationObject(ctx context.Context, securityConfig *ca.SecurityConfig, apiRootCA *api.RootCA, newCARootCA ca.RootCA, extCAs []*api.ExternalCA, version uint64) (*api.RootCA, error) {
    var (
        rootCert, rootKey, crossSignedCert []byte
        newRootHasSigner                   bool
        err                                error
    )

    rootCert = newCARootCA.Certs
    if s, err := newCARootCA.Signer(); err == nil {
        rootCert, rootKey = s.Cert, s.Key
        newRootHasSigner = true
    }

    // we have to sign with the original signer, not whatever is in the SecurityConfig's RootCA (which may have an intermediate signer, if
    // a root rotation is already in progress)
    switch {
    case hasSigningKey(apiRootCA):
        var oldRootCA ca.RootCA
        oldRootCA, err = ca.NewRootCA(apiRootCA.CACert, apiRootCA.CACert, apiRootCA.CAKey, ca.DefaultNodeCertExpiration, nil)
        if err == nil {
            crossSignedCert, err = oldRootCA.CrossSignCACertificate(rootCert)
        }
    case !newRootHasSigner: // the original CA and the new CA both require external CAs
        return nil, status.Errorf(codes.InvalidArgument, "rotating from one external CA to a different external CA is not supported")
    default:
        // We need the same credentials but to connect to the original URLs (in case we are in the middle of a root rotation already)
        var urls []string
        for _, c := range extCAs {
            if c.Protocol == api.ExternalCA_CAProtocolCFSSL {
                urls = append(urls, c.URL)
            }
        }
        if len(urls) == 0 {
            return nil, status.Errorf(codes.InvalidArgument,
                "must provide an external CA for the current external root CA to generate a cross-signed certificate")
        }
        rootPool := x509.NewCertPool()
        rootPool.AppendCertsFromPEM(apiRootCA.CACert)

        externalCAConfig := ca.NewExternalCATLSConfig(securityConfig.ClientTLSCreds.Config().Certificates, rootPool)
        externalCA := ca.NewExternalCA(nil, externalCAConfig, urls...)
        crossSignedCert, err = externalCA.CrossSignRootCA(ctx, newCARootCA)
    }

    if err != nil {
        log.G(ctx).WithError(err).Error("unable to generate a cross-signed certificate for root rotation")
        return nil, status.Errorf(codes.Internal, "unable to generate a cross-signed certificate for root rotation")
    }

    copied := apiRootCA.Copy()
    copied.RootRotation = &api.RootRotation{
        CACert:            rootCert,
        CAKey:             rootKey,
        CrossSignedCACert: ca.NormalizePEMs(crossSignedCert),
    }
    copied.LastForcedRotation = version
    return copied, nil
}
go
[ "func", "newRootRotationObject", "(", "ctx", "context", ".", "Context", ",", "securityConfig", "*", "ca", ".", "SecurityConfig", ",", "apiRootCA", "*", "api", ".", "RootCA", ",", "newCARootCA", "ca", ".", "RootCA", ",", "extCAs", "[", "]", "*", "api", ".",...
// Creates a cross-signed intermediate and new api.RootRotation object.
// This function assumes that the root cert and key and the external CAs have already been validated.
[ "Creates", "a", "cross", "-", "signed", "intermediate", "and", "new", "api", ".", "RootRotation", "object", ".", "This", "function", "assumes", "that", "the", "root", "cert", "and", "key", "and", "the", "external", "CAs", "have", "already", "been", "validate...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/ca_rotation.go#L39-L96
train
docker/swarmkit
manager/controlapi/ca_rotation.go
validateExternalCAURL
func validateExternalCAURL(dialer *net.Dialer, tlsOpts *tls.Config, caURL string) error {
    parsed, err := url.Parse(caURL)
    if err != nil {
        return err
    }
    if parsed.Scheme != "https" {
        return errors.New("invalid HTTP scheme")
    }
    host, port, err := net.SplitHostPort(parsed.Host)
    if err != nil {
        // It either has no port or is otherwise invalid (e.g. too many colons). If it's otherwise invalid the dialer
        // will error later, so just assume it's no port and set the port to the default HTTPS port.
        host = parsed.Host
        port = "443"
    }

    conn, err := tls.DialWithDialer(dialer, "tcp", net.JoinHostPort(host, port), tlsOpts)
    if conn != nil {
        conn.Close()
    }
    return err
}
go
[ "func", "validateExternalCAURL", "(", "dialer", "*", "net", ".", "Dialer", ",", "tlsOpts", "*", "tls", ".", "Config", ",", "caURL", "string", ")", "error", "{", "parsed", ",", "err", ":=", "url", ".", "Parse", "(", "caURL", ")", "\n", "if", "err", "!...
// Checks that a CA URL is connectable using the credentials we have and that its server certificate is signed by the
// root CA that we expect. This uses a TCP dialer rather than an HTTP client; because we have custom TLS configuration,
// if we wanted to use an HTTP client we'd have to create a new transport for every connection. The docs specify that
// Transports cache connections for future re-use, which could cause many open connections.
[ "Checks", "that", "a", "CA", "URL", "is", "connectable", "using", "the", "credentials", "we", "have", "and", "that", "its", "server", "certificate", "is", "signed", "by", "the", "root", "CA", "that", "we", "expect", ".", "This", "uses", "a", "TCP", "dial...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/ca_rotation.go#L102-L123
train
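As a quick usage sketch (in-package, since validateExternalCAURL is unexported; the wrapper name, CA cert bytes, and URL below are hypothetical), a caller builds a cert pool and a short-timeout dialer, mirroring what validateHasAtLeastOneExternalCA does in the next record:

package controlapi

import (
	"crypto/tls"
	"crypto/x509"
	"net"
	"time"
)

// checkCAReachable is a hypothetical helper showing the call shape.
func checkCAReachable(caCertPEM []byte) error {
	// Trust only the expected root CA when checking the server certificate.
	pool := x509.NewCertPool()
	pool.AppendCertsFromPEM(caCertPEM)
	dialer := net.Dialer{Timeout: 5 * time.Second}
	opts := tls.Config{RootCAs: pool}
	// The URL has no port, so the helper falls back to the default HTTPS port 443.
	return validateExternalCAURL(&dialer, &opts, "https://ca.example.com")
}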
docker/swarmkit
manager/controlapi/ca_rotation.go
validateHasAtLeastOneExternalCA
func validateHasAtLeastOneExternalCA(ctx context.Context, externalCAs map[string][]*api.ExternalCA, securityConfig *ca.SecurityConfig, wantedCert []byte, desc string) ([]*api.ExternalCA, error) { specific, ok := externalCAs[string(wantedCert)] if ok { pool := x509.NewCertPool() pool.AppendCertsFromPEM(wantedCert) dialer := net.Dialer{Timeout: 5 * time.Second} opts := tls.Config{ RootCAs: pool, Certificates: securityConfig.ClientTLSCreds.Config().Certificates, } for i, ca := range specific { if ca.Protocol == api.ExternalCA_CAProtocolCFSSL { if err := validateExternalCAURL(&dialer, &opts, ca.URL); err != nil { log.G(ctx).WithError(err).Warnf("external CA # %d is unreachable or invalid", i+1) } else { return specific, nil } } } } return nil, status.Errorf(codes.InvalidArgument, "there must be at least one valid, reachable external CA corresponding to the %s CA certificate", desc) }
go
func validateHasAtLeastOneExternalCA(ctx context.Context, externalCAs map[string][]*api.ExternalCA, securityConfig *ca.SecurityConfig, wantedCert []byte, desc string) ([]*api.ExternalCA, error) { specific, ok := externalCAs[string(wantedCert)] if ok { pool := x509.NewCertPool() pool.AppendCertsFromPEM(wantedCert) dialer := net.Dialer{Timeout: 5 * time.Second} opts := tls.Config{ RootCAs: pool, Certificates: securityConfig.ClientTLSCreds.Config().Certificates, } for i, ca := range specific { if ca.Protocol == api.ExternalCA_CAProtocolCFSSL { if err := validateExternalCAURL(&dialer, &opts, ca.URL); err != nil { log.G(ctx).WithError(err).Warnf("external CA # %d is unreachable or invalid", i+1) } else { return specific, nil } } } } return nil, status.Errorf(codes.InvalidArgument, "there must be at least one valid, reachable external CA corresponding to the %s CA certificate", desc) }
[ "func", "validateHasAtLeastOneExternalCA", "(", "ctx", "context", ".", "Context", ",", "externalCAs", "map", "[", "string", "]", "[", "]", "*", "api", ".", "ExternalCA", ",", "securityConfig", "*", "ca", ".", "SecurityConfig", ",", "wantedCert", "[", "]", "b...
// Validates that there is at least 1 reachable, valid external CA for the given CA certificate. Returns the matching external CAs if there is, or an InvalidArgument error otherwise. // Requires that the wanted cert is already normalized.
[ "Validates", "that", "there", "is", "at", "least", "1", "reachable", "valid", "external", "CA", "for", "the", "given", "CA", "certificate", ".", "Returns", "the", "matching", "external", "CAs", "if", "there", "is", "or", "an", "InvalidArgument", "error", "otherwise", ".", "Requires", "that", "the", "wanted", "cert", ...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/ca_rotation.go#L127-L149
train
docker/swarmkit
integration/cluster.go
Stop
func (c *testCluster) Stop() error { c.cancel() for _, n := range c.nodes { if err := n.Stop(); err != nil { return err } } c.wg.Wait() close(c.errs) for err := range c.errs { if err != nil { return err } } return nil }
go
func (c *testCluster) Stop() error { c.cancel() for _, n := range c.nodes { if err := n.Stop(); err != nil { return err } } c.wg.Wait() close(c.errs) for err := range c.errs { if err != nil { return err } } return nil }
[ "func", "(", "c", "*", "testCluster", ")", "Stop", "(", ")", "error", "{", "c", ".", "cancel", "(", ")", "\n", "for", "_", ",", "n", ":=", "range", "c", ".", "nodes", "{", "if", "err", ":=", "n", ".", "Stop", "(", ")", ";", "err", "!=", "ni...
// Stop makes a best effort to stop all nodes and close connections to them.
[ "Stop", "makes", "a", "best", "effort", "to", "stop", "all", "nodes", "and", "close", "connections", "to", "them", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/cluster.go#L43-L58
train
docker/swarmkit
integration/cluster.go
RandomManager
func (c *testCluster) RandomManager() *testNode { var managers []*testNode for _, n := range c.nodes { if n.IsManager() { managers = append(managers, n) } } idx := rand.Intn(len(managers)) return managers[idx] }
go
func (c *testCluster) RandomManager() *testNode { var managers []*testNode for _, n := range c.nodes { if n.IsManager() { managers = append(managers, n) } } idx := rand.Intn(len(managers)) return managers[idx] }
[ "func", "(", "c", "*", "testCluster", ")", "RandomManager", "(", ")", "*", "testNode", "{", "var", "managers", "[", "]", "*", "testNode", "\n", "for", "_", ",", "n", ":=", "range", "c", ".", "nodes", "{", "if", "n", ".", "IsManager", "(", ")", "{...
// RandomManager chooses a random manager from the cluster.
[ "RandomManager", "chooses", "a", "random", "manager", "from", "the", "cluster", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/cluster.go#L61-L70
train
docker/swarmkit
integration/cluster.go
AddManager
func (c *testCluster) AddManager(lateBind bool, rootCA *ca.RootCA) error { // first node var n *testNode if len(c.nodes) == 0 { node, err := newTestNode("", "", lateBind, c.fips) if err != nil { return err } // generate TLS certs for this manager for bootstrapping, else the node will generate its own CA if rootCA != nil { if err := generateCerts(node.stateDir, rootCA, identity.NewID(), ca.ManagerRole, identity.NewID(), true); err != nil { return err } } n = node } else { lateBind = false joinAddr, err := c.RandomManager().node.RemoteAPIAddr() if err != nil { return err } clusterInfo, err := c.GetClusterInfo() if err != nil { return err } node, err := newTestNode(joinAddr, clusterInfo.RootCA.JoinTokens.Manager, false, c.fips) if err != nil { return err } n = node } if err := c.AddNode(n); err != nil { return err } if lateBind { // Verify that the control API works if _, err := c.GetClusterInfo(); err != nil { return err } return n.node.BindRemote(context.Background(), "127.0.0.1:0", "") } return nil }
go
func (c *testCluster) AddManager(lateBind bool, rootCA *ca.RootCA) error { // first node var n *testNode if len(c.nodes) == 0 { node, err := newTestNode("", "", lateBind, c.fips) if err != nil { return err } // generate TLS certs for this manager for bootstrapping, else the node will generate its own CA if rootCA != nil { if err := generateCerts(node.stateDir, rootCA, identity.NewID(), ca.ManagerRole, identity.NewID(), true); err != nil { return err } } n = node } else { lateBind = false joinAddr, err := c.RandomManager().node.RemoteAPIAddr() if err != nil { return err } clusterInfo, err := c.GetClusterInfo() if err != nil { return err } node, err := newTestNode(joinAddr, clusterInfo.RootCA.JoinTokens.Manager, false, c.fips) if err != nil { return err } n = node } if err := c.AddNode(n); err != nil { return err } if lateBind { // Verify that the control API works if _, err := c.GetClusterInfo(); err != nil { return err } return n.node.BindRemote(context.Background(), "127.0.0.1:0", "") } return nil }
[ "func", "(", "c", "*", "testCluster", ")", "AddManager", "(", "lateBind", "bool", ",", "rootCA", "*", "ca", ".", "RootCA", ")", "error", "{", "var", "n", "*", "testNode", "\n", "if", "len", "(", "c", ".", "nodes", ")", "==", "0", "{", "node", ","...
// AddManager adds a node with the Manager role. The node will function as both // an agent and a manager. If lateBind is set, the manager is started before a // remote API port is bound. If rootCA is set, the manager is bootstrapped using // said root CA. These settings only apply to the first manager.
[ "AddManager", "adds", "a", "node", "with", "the", "Manager", "role", ".", "The", "node", "will", "function", "as", "both", "an", "agent", "and", "a", "manager", ".", "If", "lateBind", "is", "set", "the", "manager", "is", "started", "before", "a", "remote...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/cluster.go#L76-L121
train
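A bootstrap sketch under stated assumptions: newTestCluster is a hypothetical constructor (the real one may take a context and a FIPS flag); only AddManager and Stop are taken from the records here. Three managers give a raft quorum that tolerates one failure:

// bootstrapCluster is a hypothetical helper around this test harness.
func bootstrapCluster() (*testCluster, error) {
	c := newTestCluster() // hypothetical constructor
	// The first manager bootstraps its own CA (rootCA == nil) and binds immediately.
	if err := c.AddManager(false, nil); err != nil {
		c.Stop()
		return nil, err
	}
	// Later managers join through a random existing manager's remote API address.
	for i := 0; i < 2; i++ {
		if err := c.AddManager(false, nil); err != nil {
			c.Stop()
			return nil, err
		}
	}
	return c, nil
}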
docker/swarmkit
integration/cluster.go
AddNode
func (c *testCluster) AddNode(n *testNode) error { c.counter++ if err := c.runNode(n, c.counter); err != nil { c.counter-- return err } c.nodes[n.node.NodeID()] = n c.nodesOrder[n.node.NodeID()] = c.counter return nil }
go
func (c *testCluster) AddNode(n *testNode) error { c.counter++ if err := c.runNode(n, c.counter); err != nil { c.counter-- return err } c.nodes[n.node.NodeID()] = n c.nodesOrder[n.node.NodeID()] = c.counter return nil }
[ "func", "(", "c", "*", "testCluster", ")", "AddNode", "(", "n", "*", "testNode", ")", "error", "{", "c", ".", "counter", "++", "\n", "if", "err", ":=", "c", ".", "runNode", "(", "n", ",", "c", ".", "counter", ")", ";", "err", "!=", "nil", "{", ...
// AddNode adds a new node to the cluster
[ "AddNode", "adds", "a", "new", "node", "to", "the", "cluster" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/cluster.go#L145-L154
train
docker/swarmkit
integration/cluster.go
CreateService
func (c *testCluster) CreateService(name string, instances int) (string, error) { spec := &api.ServiceSpec{ Annotations: api.Annotations{Name: name}, Mode: &api.ServiceSpec_Replicated{ Replicated: &api.ReplicatedService{ Replicas: uint64(instances), }, }, Task: api.TaskSpec{ Runtime: &api.TaskSpec_Container{ Container: &api.ContainerSpec{Image: "alpine", Command: []string{"sh"}}, }, }, } resp, err := c.api.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) if err != nil { return "", err } return resp.Service.ID, nil }
go
func (c *testCluster) CreateService(name string, instances int) (string, error) { spec := &api.ServiceSpec{ Annotations: api.Annotations{Name: name}, Mode: &api.ServiceSpec_Replicated{ Replicated: &api.ReplicatedService{ Replicas: uint64(instances), }, }, Task: api.TaskSpec{ Runtime: &api.TaskSpec_Container{ Container: &api.ContainerSpec{Image: "alpine", Command: []string{"sh"}}, }, }, } resp, err := c.api.CreateService(context.Background(), &api.CreateServiceRequest{Spec: spec}) if err != nil { return "", err } return resp.Service.ID, nil }
[ "func", "(", "c", "*", "testCluster", ")", "CreateService", "(", "name", "string", ",", "instances", "int", ")", "(", "string", ",", "error", ")", "{", "spec", ":=", "&", "api", ".", "ServiceSpec", "{", "Annotations", ":", "api", ".", "Annotations", "{...
// CreateService creates a dummy service.
[ "CreateService", "creates", "a", "dummy", "service", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/cluster.go#L196-L216
train
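Continuing that sketch, creating a three-replica service returns the service ID for later control-API calls (c is the testCluster from the bootstrap sketch above):

// The service runs the alpine image with command "sh", as hard-coded above.
serviceID, err := c.CreateService("test-service", 3)
if err != nil {
	return err
}
_ = serviceID // keep for follow-up calls such as task polling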
docker/swarmkit
integration/cluster.go
Leader
func (c *testCluster) Leader() (*testNode, error) { resp, err := c.api.ListNodes(context.Background(), &api.ListNodesRequest{ Filters: &api.ListNodesRequest_Filters{ Roles: []api.NodeRole{api.NodeRoleManager}, }, }) if err != nil { return nil, err } for _, n := range resp.Nodes { if n.ManagerStatus.Leader { tn, ok := c.nodes[n.ID] if !ok { return nil, fmt.Errorf("leader id is %s, but it isn't found in test cluster object", n.ID) } return tn, nil } } return nil, fmt.Errorf("cluster leader is not found in api response") }
go
func (c *testCluster) Leader() (*testNode, error) { resp, err := c.api.ListNodes(context.Background(), &api.ListNodesRequest{ Filters: &api.ListNodesRequest_Filters{ Roles: []api.NodeRole{api.NodeRoleManager}, }, }) if err != nil { return nil, err } for _, n := range resp.Nodes { if n.ManagerStatus.Leader { tn, ok := c.nodes[n.ID] if !ok { return nil, fmt.Errorf("leader id is %s, but it isn't found in test cluster object", n.ID) } return tn, nil } } return nil, fmt.Errorf("cluster leader is not found in api response") }
[ "func", "(", "c", "*", "testCluster", ")", "Leader", "(", ")", "(", "*", "testNode", ",", "error", ")", "{", "resp", ",", "err", ":=", "c", ".", "api", ".", "ListNodes", "(", "context", ".", "Background", "(", ")", ",", "&", "api", ".", "ListNode...
// Leader returns the testNode for the cluster leader.
[ "Leader", "returns", "the", "testNode", "for", "the", "cluster", "leader", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/cluster.go#L219-L238
train
docker/swarmkit
integration/cluster.go
RemoveNode
func (c *testCluster) RemoveNode(id string, graceful bool) error { node, ok := c.nodes[id] if !ok { return fmt.Errorf("remove node: node %s not found", id) } // demote before removal if node.IsManager() { if err := c.SetNodeRole(id, api.NodeRoleWorker); err != nil { return fmt.Errorf("demote manager: %v", err) } } if err := node.Stop(); err != nil { return err } delete(c.nodes, id) if graceful { if err := testutils.PollFuncWithTimeout(nil, func() error { resp, err := c.api.GetNode(context.Background(), &api.GetNodeRequest{NodeID: id}) if err != nil { return fmt.Errorf("get node: %v", err) } if resp.Node.Status.State != api.NodeStatus_DOWN { return fmt.Errorf("node %s is still not down", id) } return nil }, opsTimeout); err != nil { return err } } if _, err := c.api.RemoveNode(context.Background(), &api.RemoveNodeRequest{NodeID: id, Force: !graceful}); err != nil { return fmt.Errorf("remove node: %v", err) } return nil }
go
func (c *testCluster) RemoveNode(id string, graceful bool) error { node, ok := c.nodes[id] if !ok { return fmt.Errorf("remove node: node %s not found", id) } // demote before removal if node.IsManager() { if err := c.SetNodeRole(id, api.NodeRoleWorker); err != nil { return fmt.Errorf("demote manager: %v", err) } } if err := node.Stop(); err != nil { return err } delete(c.nodes, id) if graceful { if err := testutils.PollFuncWithTimeout(nil, func() error { resp, err := c.api.GetNode(context.Background(), &api.GetNodeRequest{NodeID: id}) if err != nil { return fmt.Errorf("get node: %v", err) } if resp.Node.Status.State != api.NodeStatus_DOWN { return fmt.Errorf("node %s is still not down", id) } return nil }, opsTimeout); err != nil { return err } } if _, err := c.api.RemoveNode(context.Background(), &api.RemoveNodeRequest{NodeID: id, Force: !graceful}); err != nil { return fmt.Errorf("remove node: %v", err) } return nil }
[ "func", "(", "c", "*", "testCluster", ")", "RemoveNode", "(", "id", "string", ",", "graceful", "bool", ")", "error", "{", "node", ",", "ok", ":=", "c", ".", "nodes", "[", "id", "]", "\n", "if", "!", "ok", "{", "return", "fmt", ".", "Errorf", "(",...
// RemoveNode removes a node entirely. It tries to demote managers.
[ "RemoveNode", "removes", "a", "node", "entirely", ".", "It", "tries", "to", "demote", "managers", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/cluster.go#L241-L275
train
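A removal sketch (workerID is hypothetical): graceful removal demotes a manager first, stops the node, waits for it to report DOWN, and only then calls the control API; forced removal skips the wait and sets Force:

// Graceful: waits for NodeStatus_DOWN before removing the node record.
if err := c.RemoveNode(workerID, true); err != nil {
	return err
}
// Forced (graceful == false): removes immediately with Force: true.
// err := c.RemoveNode(workerID, false)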
docker/swarmkit
integration/cluster.go
SetNodeRole
func (c *testCluster) SetNodeRole(id string, role api.NodeRole) error { node, ok := c.nodes[id] if !ok { return fmt.Errorf("set node role: node %s not found", id) } if node.IsManager() && role == api.NodeRoleManager { return fmt.Errorf("node is already manager") } if !node.IsManager() && role == api.NodeRoleWorker { return fmt.Errorf("node is already worker") } var initialTimeout time.Duration // version might change between get and update, so retry for i := 0; i < 5; i++ { time.Sleep(initialTimeout) initialTimeout += 500 * time.Millisecond resp, err := c.api.GetNode(context.Background(), &api.GetNodeRequest{NodeID: id}) if err != nil { return err } spec := resp.Node.Spec.Copy() spec.DesiredRole = role if _, err := c.api.UpdateNode(context.Background(), &api.UpdateNodeRequest{ NodeID: id, Spec: spec, NodeVersion: &resp.Node.Meta.Version, }); err != nil { // there are possible problems when calling UpdateNode, because the redirecting // node or the leader might want to shut down if testutils.ErrorDesc(err) == "update out of sequence" { continue } return err } if role == api.NodeRoleManager { // wait to become manager return testutils.PollFuncWithTimeout(nil, func() error { if !node.IsManager() { return fmt.Errorf("node is still not a manager") } return nil }, opsTimeout) } // wait to become worker return testutils.PollFuncWithTimeout(nil, func() error { if node.IsManager() { return fmt.Errorf("node is still not a worker") } return nil }, opsTimeout) } return fmt.Errorf("set role %s for node %s, got sequence error 5 times", role, id) }
go
func (c *testCluster) SetNodeRole(id string, role api.NodeRole) error { node, ok := c.nodes[id] if !ok { return fmt.Errorf("set node role: node %s not found", id) } if node.IsManager() && role == api.NodeRoleManager { return fmt.Errorf("node is already manager") } if !node.IsManager() && role == api.NodeRoleWorker { return fmt.Errorf("node is already worker") } var initialTimeout time.Duration // version might change between get and update, so retry for i := 0; i < 5; i++ { time.Sleep(initialTimeout) initialTimeout += 500 * time.Millisecond resp, err := c.api.GetNode(context.Background(), &api.GetNodeRequest{NodeID: id}) if err != nil { return err } spec := resp.Node.Spec.Copy() spec.DesiredRole = role if _, err := c.api.UpdateNode(context.Background(), &api.UpdateNodeRequest{ NodeID: id, Spec: spec, NodeVersion: &resp.Node.Meta.Version, }); err != nil { // there are possible problems when calling UpdateNode, because the redirecting // node or the leader might want to shut down if testutils.ErrorDesc(err) == "update out of sequence" { continue } return err } if role == api.NodeRoleManager { // wait to become manager return testutils.PollFuncWithTimeout(nil, func() error { if !node.IsManager() { return fmt.Errorf("node is still not a manager") } return nil }, opsTimeout) } // wait to become worker return testutils.PollFuncWithTimeout(nil, func() error { if node.IsManager() { return fmt.Errorf("node is still not a worker") } return nil }, opsTimeout) } return fmt.Errorf("set role %s for node %s, got sequence error 5 times", role, id) }
[ "func", "(", "c", "*", "testCluster", ")", "SetNodeRole", "(", "id", "string", ",", "role", "api", ".", "NodeRole", ")", "error", "{", "node", ",", "ok", ":=", "c", ".", "nodes", "[", "id", "]", "\n", "if", "!", "ok", "{", "return", "fmt", ".", ...
// SetNodeRole sets the role for a node through the control API.
[ "SetNodeRole", "sets", "the", "role", "for", "a", "node", "through", "the", "control", "API", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/cluster.go#L278-L331
train
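A promotion sketch (nodeID is hypothetical); SetNodeRole retries "update out of sequence" errors and then polls until the node itself reports the new role:

// Promote a worker to manager, then demote it back to worker.
if err := c.SetNodeRole(nodeID, api.NodeRoleManager); err != nil {
	return err
}
if err := c.SetNodeRole(nodeID, api.NodeRoleWorker); err != nil {
	return err
}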
docker/swarmkit
integration/cluster.go
StartNode
func (c *testCluster) StartNode(id string) error { n, ok := c.nodes[id] if !ok { return fmt.Errorf("start node: node %s not found", id) } if err := c.runNode(n, c.nodesOrder[id]); err != nil { return err } if n.node.NodeID() != id { return fmt.Errorf("restarted node does not have the same ID") } return nil }
go
func (c *testCluster) StartNode(id string) error { n, ok := c.nodes[id] if !ok { return fmt.Errorf("start node: node %s not found", id) } if err := c.runNode(n, c.nodesOrder[id]); err != nil { return err } if n.node.NodeID() != id { return fmt.Errorf("restarted node does not have the same ID") } return nil }
[ "func", "(", "c", "*", "testCluster", ")", "StartNode", "(", "id", "string", ")", "error", "{", "n", ",", "ok", ":=", "c", ".", "nodes", "[", "id", "]", "\n", "if", "!", "ok", "{", "return", "fmt", ".", "Errorf", "(", "\"set node role: node %s not fo...
// Starts a node from a stopped state
[ "Starts", "a", "node", "from", "a", "stopped", "state" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/cluster.go#L334-L346
train
docker/swarmkit
manager/controlapi/secret.go
GetSecret
func (s *Server) GetSecret(ctx context.Context, request *api.GetSecretRequest) (*api.GetSecretResponse, error) { if request.SecretID == "" { return nil, status.Errorf(codes.InvalidArgument, "secret ID must be provided") } var secret *api.Secret s.store.View(func(tx store.ReadTx) { secret = store.GetSecret(tx, request.SecretID) }) if secret == nil { return nil, status.Errorf(codes.NotFound, "secret %s not found", request.SecretID) } secret.Spec.Data = nil // clean the actual secret data so it's never returned return &api.GetSecretResponse{Secret: secret}, nil }
go
func (s *Server) GetSecret(ctx context.Context, request *api.GetSecretRequest) (*api.GetSecretResponse, error) { if request.SecretID == "" { return nil, status.Errorf(codes.InvalidArgument, "secret ID must be provided") } var secret *api.Secret s.store.View(func(tx store.ReadTx) { secret = store.GetSecret(tx, request.SecretID) }) if secret == nil { return nil, status.Errorf(codes.NotFound, "secret %s not found", request.SecretID) } secret.Spec.Data = nil // clean the actual secret data so it's never returned return &api.GetSecretResponse{Secret: secret}, nil }
[ "func", "(", "s", "*", "Server", ")", "GetSecret", "(", "ctx", "context", ".", "Context", ",", "request", "*", "api", ".", "GetSecretRequest", ")", "(", "*", "api", ".", "GetSecretResponse", ",", "error", ")", "{", "if", "request", ".", "SecretID", "==...
// GetSecret returns a `GetSecretResponse` with a `Secret` with the same // id as `GetSecretRequest.SecretID` // - Returns `NotFound` if the Secret with the given id is not found. // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. // - Returns an error if getting fails.
[ "GetSecret", "returns", "a", "GetSecretResponse", "with", "a", "Secret", "with", "the", "same", "id", "as", "GetSecretRequest", ".", "SecretID", "-", "Returns", "NotFound", "if", "the", "Secret", "with", "the", "given", "id", "is", "not", "found", ".", "-", ...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/secret.go#L31-L47
train
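From the client side this endpoint is reached through the generated control client; a minimal sketch, assuming an already-dialed gRPC connection (fetchSecret is our name, not swarmkit's):

package main

import (
	"context"

	"github.com/docker/swarmkit/api"
	"google.golang.org/grpc"
)

func fetchSecret(conn *grpc.ClientConn, id string) (*api.Secret, error) {
	client := api.NewControlClient(conn)
	resp, err := client.GetSecret(context.Background(), &api.GetSecretRequest{SecretID: id})
	if err != nil {
		return nil, err // NotFound for unknown IDs, InvalidArgument for an empty ID
	}
	// Spec.Data is always nil in the response; the server strips it before returning.
	return resp.Secret, nil
}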
docker/swarmkit
manager/controlapi/secret.go
UpdateSecret
func (s *Server) UpdateSecret(ctx context.Context, request *api.UpdateSecretRequest) (*api.UpdateSecretResponse, error) { if request.SecretID == "" || request.SecretVersion == nil { return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) } var secret *api.Secret err := s.store.Update(func(tx store.Tx) error { secret = store.GetSecret(tx, request.SecretID) if secret == nil { return status.Errorf(codes.NotFound, "secret %s not found", request.SecretID) } // Check if the Name is different than the current name, or the secret is non-nil and different // than the current secret if secret.Spec.Annotations.Name != request.Spec.Annotations.Name || (request.Spec.Data != nil && subtle.ConstantTimeCompare(request.Spec.Data, secret.Spec.Data) == 0) { return status.Errorf(codes.InvalidArgument, "only updates to Labels are allowed") } // We only allow updating Labels secret.Meta.Version = *request.SecretVersion secret.Spec.Annotations.Labels = request.Spec.Annotations.Labels return store.UpdateSecret(tx, secret) }) if err != nil { return nil, err } log.G(ctx).WithFields(logrus.Fields{ "secret.ID": request.SecretID, "secret.Name": request.Spec.Annotations.Name, "method": "UpdateSecret", }).Debugf("secret updated") // WARN: we should never return the actual secret data here. We need to redact the private fields first. secret.Spec.Data = nil return &api.UpdateSecretResponse{ Secret: secret, }, nil }
go
func (s *Server) UpdateSecret(ctx context.Context, request *api.UpdateSecretRequest) (*api.UpdateSecretResponse, error) { if request.SecretID == "" || request.SecretVersion == nil { return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) } var secret *api.Secret err := s.store.Update(func(tx store.Tx) error { secret = store.GetSecret(tx, request.SecretID) if secret == nil { return status.Errorf(codes.NotFound, "secret %s not found", request.SecretID) } // Check if the Name is different than the current name, or the secret is non-nil and different // than the current secret if secret.Spec.Annotations.Name != request.Spec.Annotations.Name || (request.Spec.Data != nil && subtle.ConstantTimeCompare(request.Spec.Data, secret.Spec.Data) == 0) { return status.Errorf(codes.InvalidArgument, "only updates to Labels are allowed") } // We only allow updating Labels secret.Meta.Version = *request.SecretVersion secret.Spec.Annotations.Labels = request.Spec.Annotations.Labels return store.UpdateSecret(tx, secret) }) if err != nil { return nil, err } log.G(ctx).WithFields(logrus.Fields{ "secret.ID": request.SecretID, "secret.Name": request.Spec.Annotations.Name, "method": "UpdateSecret", }).Debugf("secret updated") // WARN: we should never return the actual secret data here. We need to redact the private fields first. secret.Spec.Data = nil return &api.UpdateSecretResponse{ Secret: secret, }, nil }
[ "func", "(", "s", "*", "Server", ")", "UpdateSecret", "(", "ctx", "context", ".", "Context", ",", "request", "*", "api", ".", "UpdateSecretRequest", ")", "(", "*", "api", ".", "UpdateSecretResponse", ",", "error", ")", "{", "if", "request", ".", "SecretI...
// UpdateSecret updates a Secret referenced by SecretID with the given SecretSpec. // - Returns `NotFound` if the Secret is not found. // - Returns `InvalidArgument` if the SecretSpec is malformed or anything other than Labels is changed // - Returns an error if the update fails.
[ "UpdateSecret", "updates", "a", "Secret", "referenced", "by", "SecretID", "with", "the", "given", "SecretSpec", ".", "-", "Returns", "NotFound", "if", "the", "Secret", "is", "not", "found", ".", "-", "Returns", "InvalidArgument", "if", "the", "SecretSpec", "is...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/secret.go#L53-L92
train
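Since only labels may change and the stored version must match, updates are a read-modify-write; a sketch reusing the client and a previously fetched secret from the sketch above:

// Any change to Name or Data is rejected with InvalidArgument;
// a stale Meta.Version surfaces as an "update out of sequence" error.
spec := secret.Spec.Copy()
spec.Annotations.Labels = map[string]string{"env": "staging"}
_, err := client.UpdateSecret(context.Background(), &api.UpdateSecretRequest{
	SecretID:      secret.ID,
	SecretVersion: &secret.Meta.Version,
	Spec:          spec,
})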
docker/swarmkit
manager/controlapi/secret.go
ListSecrets
func (s *Server) ListSecrets(ctx context.Context, request *api.ListSecretsRequest) (*api.ListSecretsResponse, error) { var ( secrets []*api.Secret respSecrets []*api.Secret err error byFilters []store.By by store.By labels map[string]string ) // return all secrets that match either any of the names or any of the name prefixes (why would you give both?) if request.Filters != nil { for _, name := range request.Filters.Names { byFilters = append(byFilters, store.ByName(name)) } for _, prefix := range request.Filters.NamePrefixes { byFilters = append(byFilters, store.ByNamePrefix(prefix)) } for _, prefix := range request.Filters.IDPrefixes { byFilters = append(byFilters, store.ByIDPrefix(prefix)) } labels = request.Filters.Labels } switch len(byFilters) { case 0: by = store.All case 1: by = byFilters[0] default: by = store.Or(byFilters...) } s.store.View(func(tx store.ReadTx) { secrets, err = store.FindSecrets(tx, by) }) if err != nil { return nil, err } // strip secret data from the secret, filter by label, and filter out all internal secrets for _, secret := range secrets { if secret.Internal || !filterMatchLabels(secret.Spec.Annotations.Labels, labels) { continue } secret.Spec.Data = nil // clean the actual secret data so it's never returned respSecrets = append(respSecrets, secret) } return &api.ListSecretsResponse{Secrets: respSecrets}, nil }
go
func (s *Server) ListSecrets(ctx context.Context, request *api.ListSecretsRequest) (*api.ListSecretsResponse, error) { var ( secrets []*api.Secret respSecrets []*api.Secret err error byFilters []store.By by store.By labels map[string]string ) // return all secrets that match either any of the names or any of the name prefixes (why would you give both?) if request.Filters != nil { for _, name := range request.Filters.Names { byFilters = append(byFilters, store.ByName(name)) } for _, prefix := range request.Filters.NamePrefixes { byFilters = append(byFilters, store.ByNamePrefix(prefix)) } for _, prefix := range request.Filters.IDPrefixes { byFilters = append(byFilters, store.ByIDPrefix(prefix)) } labels = request.Filters.Labels } switch len(byFilters) { case 0: by = store.All case 1: by = byFilters[0] default: by = store.Or(byFilters...) } s.store.View(func(tx store.ReadTx) { secrets, err = store.FindSecrets(tx, by) }) if err != nil { return nil, err } // strip secret data from the secret, filter by label, and filter out all internal secrets for _, secret := range secrets { if secret.Internal || !filterMatchLabels(secret.Spec.Annotations.Labels, labels) { continue } secret.Spec.Data = nil // clean the actual secret data so it's never returned respSecrets = append(respSecrets, secret) } return &api.ListSecretsResponse{Secrets: respSecrets}, nil }
[ "func", "(", "s", "*", "Server", ")", "ListSecrets", "(", "ctx", "context", ".", "Context", ",", "request", "*", "api", ".", "ListSecretsRequest", ")", "(", "*", "api", ".", "ListSecretsResponse", ",", "error", ")", "{", "var", "(", "secrets", "[", "]"...
// ListSecrets returns a `ListSecretResponse` with a list of all non-internal `Secret`s being // managed, or all secrets matching any name in `ListSecretsRequest.Names`, any // name prefix in `ListSecretsRequest.NamePrefixes`, any id in // `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`. // - Returns an error if listing fails.
[ "ListSecrets", "returns", "a", "ListSecretResponse", "with", "a", "list", "of", "all", "non", "-", "internal", "Secret", "s", "being", "managed", "or", "all", "secrets", "matching", "any", "name", "in", "ListSecretsRequest", ".", "Names", "any", "name", "prefix", "i...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/secret.go#L99-L149
train
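A filter sketch: names, name prefixes, and ID prefixes are OR'd together by the store query, while labels are applied afterwards as an additional match:

resp, err := client.ListSecrets(context.Background(), &api.ListSecretsRequest{
	Filters: &api.ListSecretsRequest_Filters{
		NamePrefixes: []string{"prod-"},                  // OR'd with Names/IDPrefixes if given
		Labels:       map[string]string{"team": "infra"}, // narrows the OR'd result set
	},
})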
docker/swarmkit
manager/controlapi/secret.go
CreateSecret
func (s *Server) CreateSecret(ctx context.Context, request *api.CreateSecretRequest) (*api.CreateSecretResponse, error) { if err := validateSecretSpec(request.Spec); err != nil { return nil, err } if request.Spec.Driver != nil { // Check that the requested driver is valid if _, err := s.dr.NewSecretDriver(request.Spec.Driver); err != nil { return nil, err } } secret := secretFromSecretSpec(request.Spec) // the store will handle name conflicts err := s.store.Update(func(tx store.Tx) error { return store.CreateSecret(tx, secret) }) switch err { case store.ErrNameConflict: return nil, status.Errorf(codes.AlreadyExists, "secret %s already exists", request.Spec.Annotations.Name) case nil: secret.Spec.Data = nil // clean the actual secret data so it's never returned log.G(ctx).WithFields(logrus.Fields{ "secret.Name": request.Spec.Annotations.Name, "method": "CreateSecret", }).Debugf("secret created") return &api.CreateSecretResponse{Secret: secret}, nil default: return nil, err } }
go
func (s *Server) CreateSecret(ctx context.Context, request *api.CreateSecretRequest) (*api.CreateSecretResponse, error) { if err := validateSecretSpec(request.Spec); err != nil { return nil, err } if request.Spec.Driver != nil { // Check that the requested driver is valid if _, err := s.dr.NewSecretDriver(request.Spec.Driver); err != nil { return nil, err } } secret := secretFromSecretSpec(request.Spec) // the store will handle name conflicts err := s.store.Update(func(tx store.Tx) error { return store.CreateSecret(tx, secret) }) switch err { case store.ErrNameConflict: return nil, status.Errorf(codes.AlreadyExists, "secret %s already exists", request.Spec.Annotations.Name) case nil: secret.Spec.Data = nil // clean the actual secret data so it's never returned log.G(ctx).WithFields(logrus.Fields{ "secret.Name": request.Spec.Annotations.Name, "method": "CreateSecret", }).Debugf("secret created") return &api.CreateSecretResponse{Secret: secret}, nil default: return nil, err } }
[ "func", "(", "s", "*", "Server", ")", "CreateSecret", "(", "ctx", "context", ".", "Context", ",", "request", "*", "api", ".", "CreateSecretRequest", ")", "(", "*", "api", ".", "CreateSecretResponse", ",", "error", ")", "{", "if", "err", ":=", "validateSe...
// CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based // on the provided `CreateSecretRequest.SecretSpec`. // - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed, // or if the secret data is too long or contains invalid characters. // - Returns an error if the creation fails.
[ "CreateSecret", "creates", "and", "returns", "a", "CreateSecretResponse", "with", "a", "Secret", "based", "on", "the", "provided", "CreateSecretRequest", ".", "SecretSpec", ".", "-", "Returns", "InvalidArgument", "if", "the", "CreateSecretRequest", ".", "SecretSpec", ...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/secret.go#L156-L186
train
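A creation sketch; the response echoes the Secret with Data stripped, and a duplicate name comes back as AlreadyExists:

resp, err := client.CreateSecret(context.Background(), &api.CreateSecretRequest{
	Spec: &api.SecretSpec{
		Annotations: api.Annotations{Name: "db-password"},
		Data:        []byte("hunter2"), // validated by validateSecretSpec before storage
	},
})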
docker/swarmkit
manager/controlapi/secret.go
RemoveSecret
func (s *Server) RemoveSecret(ctx context.Context, request *api.RemoveSecretRequest) (*api.RemoveSecretResponse, error) { if request.SecretID == "" { return nil, status.Errorf(codes.InvalidArgument, "secret ID must be provided") } err := s.store.Update(func(tx store.Tx) error { // Check if the secret exists secret := store.GetSecret(tx, request.SecretID) if secret == nil { return status.Errorf(codes.NotFound, "could not find secret %s", request.SecretID) } // Check if any services currently reference this secret, return error if so services, err := store.FindServices(tx, store.ByReferencedSecretID(request.SecretID)) if err != nil { return status.Errorf(codes.Internal, "could not find services using secret %s: %v", request.SecretID, err) } if len(services) != 0 { serviceNames := make([]string, 0, len(services)) for _, service := range services { serviceNames = append(serviceNames, service.Spec.Annotations.Name) } secretName := secret.Spec.Annotations.Name serviceNameStr := strings.Join(serviceNames, ", ") serviceStr := "services" if len(serviceNames) == 1 { serviceStr = "service" } return status.Errorf(codes.InvalidArgument, "secret '%s' is in use by the following %s: %v", secretName, serviceStr, serviceNameStr) } return store.DeleteSecret(tx, request.SecretID) }) switch err { case store.ErrNotExist: return nil, status.Errorf(codes.NotFound, "secret %s not found", request.SecretID) case nil: log.G(ctx).WithFields(logrus.Fields{ "secret.ID": request.SecretID, "method": "RemoveSecret", }).Debugf("secret removed") return &api.RemoveSecretResponse{}, nil default: return nil, err } }
go
func (s *Server) RemoveSecret(ctx context.Context, request *api.RemoveSecretRequest) (*api.RemoveSecretResponse, error) { if request.SecretID == "" { return nil, status.Errorf(codes.InvalidArgument, "secret ID must be provided") } err := s.store.Update(func(tx store.Tx) error { // Check if the secret exists secret := store.GetSecret(tx, request.SecretID) if secret == nil { return status.Errorf(codes.NotFound, "could not find secret %s", request.SecretID) } // Check if any services currently reference this secret, return error if so services, err := store.FindServices(tx, store.ByReferencedSecretID(request.SecretID)) if err != nil { return status.Errorf(codes.Internal, "could not find services using secret %s: %v", request.SecretID, err) } if len(services) != 0 { serviceNames := make([]string, 0, len(services)) for _, service := range services { serviceNames = append(serviceNames, service.Spec.Annotations.Name) } secretName := secret.Spec.Annotations.Name serviceNameStr := strings.Join(serviceNames, ", ") serviceStr := "services" if len(serviceNames) == 1 { serviceStr = "service" } return status.Errorf(codes.InvalidArgument, "secret '%s' is in use by the following %s: %v", secretName, serviceStr, serviceNameStr) } return store.DeleteSecret(tx, request.SecretID) }) switch err { case store.ErrNotExist: return nil, status.Errorf(codes.NotFound, "secret %s not found", request.SecretID) case nil: log.G(ctx).WithFields(logrus.Fields{ "secret.ID": request.SecretID, "method": "RemoveSecret", }).Debugf("secret removed") return &api.RemoveSecretResponse{}, nil default: return nil, err } }
[ "func", "(", "s", "*", "Server", ")", "RemoveSecret", "(", "ctx", "context", ".", "Context", ",", "request", "*", "api", ".", "RemoveSecretRequest", ")", "(", "*", "api", ".", "RemoveSecretResponse", ",", "error", ")", "{", "if", "request", ".", "SecretI...
// RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`. // - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty. // - Returns `NotFound` if a secret named `RemoveSecretRequest.ID` is not found. // - Returns `SecretInUse` if the secret is currently in use // - Returns an error if the deletion fails.
[ "RemoveSecret", "removes", "the", "secret", "referenced", "by", "RemoveSecretRequest", ".", "ID", ".", "-", "Returns", "InvalidArgument", "if", "RemoveSecretRequest", ".", "ID", "is", "empty", ".", "-", "Returns", "NotFound", "if", "a", "secret", "named",...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/secret.go#L193-L242
train
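A removal sketch; an in-use secret is reported as InvalidArgument naming the consuming services (this assumes the google.golang.org/grpc/status and codes packages are imported):

_, err := client.RemoveSecret(context.Background(), &api.RemoveSecretRequest{SecretID: id})
if status.Code(err) == codes.InvalidArgument {
	// Still referenced by at least one service; update or remove
	// those services first, then retry the removal.
}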
docker/swarmkit
manager/controlapi/cluster.go
GetCluster
func (s *Server) GetCluster(ctx context.Context, request *api.GetClusterRequest) (*api.GetClusterResponse, error) { if request.ClusterID == "" { return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) } var cluster *api.Cluster s.store.View(func(tx store.ReadTx) { cluster = store.GetCluster(tx, request.ClusterID) }) if cluster == nil { return nil, status.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID) } redactedClusters := redactClusters([]*api.Cluster{cluster}) // WARN: we should never return cluster here. We need to redact the private fields first. return &api.GetClusterResponse{ Cluster: redactedClusters[0], }, nil }
go
func (s *Server) GetCluster(ctx context.Context, request *api.GetClusterRequest) (*api.GetClusterResponse, error) { if request.ClusterID == "" { return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error()) } var cluster *api.Cluster s.store.View(func(tx store.ReadTx) { cluster = store.GetCluster(tx, request.ClusterID) }) if cluster == nil { return nil, status.Errorf(codes.NotFound, "cluster %s not found", request.ClusterID) } redactedClusters := redactClusters([]*api.Cluster{cluster}) // WARN: we should never return cluster here. We need to redact the private fields first. return &api.GetClusterResponse{ Cluster: redactedClusters[0], }, nil }
[ "func", "(", "s", "*", "Server", ")", "GetCluster", "(", "ctx", "context", ".", "Context", ",", "request", "*", "api", ".", "GetClusterRequest", ")", "(", "*", "api", ".", "GetClusterResponse", ",", "error", ")", "{", "if", "request", ".", "ClusterID", ...
// GetCluster returns a Cluster given a ClusterID. // - Returns `InvalidArgument` if ClusterID is not provided. // - Returns `NotFound` if the Cluster is not found.
[ "GetCluster", "returns", "a", "Cluster", "given", "a", "ClusterID", ".", "-", "Returns", "InvalidArgument", "if", "ClusterID", "is", "not", "provided", ".", "-", "Returns", "NotFound", "if", "the", "Cluster", "is", "not", "found", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/cluster.go#L80-L99
train
docker/swarmkit
manager/controlapi/cluster.go
ListClusters
func (s *Server) ListClusters(ctx context.Context, request *api.ListClustersRequest) (*api.ListClustersResponse, error) { var ( clusters []*api.Cluster err error ) s.store.View(func(tx store.ReadTx) { switch { case request.Filters != nil && len(request.Filters.Names) > 0: clusters, err = store.FindClusters(tx, buildFilters(store.ByName, request.Filters.Names)) case request.Filters != nil && len(request.Filters.NamePrefixes) > 0: clusters, err = store.FindClusters(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes)) case request.Filters != nil && len(request.Filters.IDPrefixes) > 0: clusters, err = store.FindClusters(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes)) default: clusters, err = store.FindClusters(tx, store.All) } }) if err != nil { return nil, err } if request.Filters != nil { clusters = filterClusters(clusters, func(e *api.Cluster) bool { return filterContains(e.Spec.Annotations.Name, request.Filters.Names) }, func(e *api.Cluster) bool { return filterContainsPrefix(e.Spec.Annotations.Name, request.Filters.NamePrefixes) }, func(e *api.Cluster) bool { return filterContainsPrefix(e.ID, request.Filters.IDPrefixes) }, func(e *api.Cluster) bool { return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels) }, ) } // WARN: we should never return cluster here. We need to redact the private fields first. return &api.ListClustersResponse{ Clusters: redactClusters(clusters), }, nil }
go
func (s *Server) ListClusters(ctx context.Context, request *api.ListClustersRequest) (*api.ListClustersResponse, error) { var ( clusters []*api.Cluster err error ) s.store.View(func(tx store.ReadTx) { switch { case request.Filters != nil && len(request.Filters.Names) > 0: clusters, err = store.FindClusters(tx, buildFilters(store.ByName, request.Filters.Names)) case request.Filters != nil && len(request.Filters.NamePrefixes) > 0: clusters, err = store.FindClusters(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes)) case request.Filters != nil && len(request.Filters.IDPrefixes) > 0: clusters, err = store.FindClusters(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes)) default: clusters, err = store.FindClusters(tx, store.All) } }) if err != nil { return nil, err } if request.Filters != nil { clusters = filterClusters(clusters, func(e *api.Cluster) bool { return filterContains(e.Spec.Annotations.Name, request.Filters.Names) }, func(e *api.Cluster) bool { return filterContainsPrefix(e.Spec.Annotations.Name, request.Filters.NamePrefixes) }, func(e *api.Cluster) bool { return filterContainsPrefix(e.ID, request.Filters.IDPrefixes) }, func(e *api.Cluster) bool { return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels) }, ) } // WARN: we should never return cluster here. We need to redact the private fields first. return &api.ListClustersResponse{ Clusters: redactClusters(clusters), }, nil }
[ "func", "(", "s", "*", "Server", ")", "ListClusters", "(", "ctx", "context", ".", "Context", ",", "request", "*", "api", ".", "ListClustersRequest", ")", "(", "*", "api", ".", "ListClustersResponse", ",", "error", ")", "{", "var", "(", "clusters", "[", ...
// ListClusters returns a list of all clusters.
[ "ListClusters", "returns", "a", "list", "of", "all", "clusters", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/cluster.go#L206-L248
train
docker/swarmkit
manager/controlapi/cluster.go
redactClusters
func redactClusters(clusters []*api.Cluster) []*api.Cluster { var redactedClusters []*api.Cluster // Only add public fields to the new clusters for _, cluster := range clusters { // Copy all the mandatory fields // Do not copy secret keys redactedSpec := cluster.Spec.Copy() redactedSpec.CAConfig.SigningCAKey = nil // the cert is not a secret, but if API users get the cluster spec and then update, // then because the cert is included but not the key, the user can get update errors // or unintended consequences (such as telling swarm to forget about the key so long // as there is a corresponding external CA) redactedSpec.CAConfig.SigningCACert = nil redactedRootCA := cluster.RootCA.Copy() redactedRootCA.CAKey = nil if r := redactedRootCA.RootRotation; r != nil { r.CAKey = nil } newCluster := &api.Cluster{ ID: cluster.ID, Meta: cluster.Meta, Spec: *redactedSpec, RootCA: *redactedRootCA, BlacklistedCertificates: cluster.BlacklistedCertificates, DefaultAddressPool: cluster.DefaultAddressPool, SubnetSize: cluster.SubnetSize, VXLANUDPPort: cluster.VXLANUDPPort, } if newCluster.DefaultAddressPool == nil { // This is just for CLI display. Set the inbuilt default pool for // user reference. newCluster.DefaultAddressPool = inbuiltDefaultAddressPool newCluster.SubnetSize = inbuiltSubnetSize } if newCluster.VXLANUDPPort == 0 { newCluster.VXLANUDPPort = defaultVXLANPort } redactedClusters = append(redactedClusters, newCluster) } return redactedClusters }
go
func redactClusters(clusters []*api.Cluster) []*api.Cluster { var redactedClusters []*api.Cluster // Only add public fields to the new clusters for _, cluster := range clusters { // Copy all the mandatory fields // Do not copy secret keys redactedSpec := cluster.Spec.Copy() redactedSpec.CAConfig.SigningCAKey = nil // the cert is not a secret, but if API users get the cluster spec and then update, // then because the cert is included but not the key, the user can get update errors // or unintended consequences (such as telling swarm to forget about the key so long // as there is a corresponding external CA) redactedSpec.CAConfig.SigningCACert = nil redactedRootCA := cluster.RootCA.Copy() redactedRootCA.CAKey = nil if r := redactedRootCA.RootRotation; r != nil { r.CAKey = nil } newCluster := &api.Cluster{ ID: cluster.ID, Meta: cluster.Meta, Spec: *redactedSpec, RootCA: *redactedRootCA, BlacklistedCertificates: cluster.BlacklistedCertificates, DefaultAddressPool: cluster.DefaultAddressPool, SubnetSize: cluster.SubnetSize, VXLANUDPPort: cluster.VXLANUDPPort, } if newCluster.DefaultAddressPool == nil { // This is just for CLI display. Set the inbuilt default pool for // user reference. newCluster.DefaultAddressPool = inbuiltDefaultAddressPool newCluster.SubnetSize = inbuiltSubnetSize } if newCluster.VXLANUDPPort == 0 { newCluster.VXLANUDPPort = defaultVXLANPort } redactedClusters = append(redactedClusters, newCluster) } return redactedClusters }
[ "func", "redactClusters", "(", "clusters", "[", "]", "*", "api", ".", "Cluster", ")", "[", "]", "*", "api", ".", "Cluster", "{", "var", "redactedClusters", "[", "]", "*", "api", ".", "Cluster", "\n", "for", "_", ",", "cluster", ":=", "range", "cluste...
// redactClusters is a function that enforces a whitelist of fields that are ok to be // returned in the Cluster object. It should filter out all sensitive information.
[ "redactClusters", "is", "a", "function", "that", "enforces", "a", "whitelist", "of", "fields", "that", "are", "ok", "to", "be", "returned", "in", "the", "Cluster", "object", ".", "It", "should", "filter", "out", "all", "sensitive", "information", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/cluster.go#L252-L294
train
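A minimal in-package check of the redaction invariant (the test name and fixture values are ours, not swarmkit's):

package controlapi

import (
	"testing"

	"github.com/docker/swarmkit/api"
)

func TestRedactClustersStripsKeys(t *testing.T) {
	in := &api.Cluster{ID: "c1"}
	in.Spec.CAConfig.SigningCAKey = []byte("KEY")
	in.Spec.CAConfig.SigningCACert = []byte("CERT")
	in.RootCA.CAKey = []byte("KEY")

	out := redactClusters([]*api.Cluster{in})[0]
	if out.Spec.CAConfig.SigningCAKey != nil || out.Spec.CAConfig.SigningCACert != nil || out.RootCA.CAKey != nil {
		t.Fatal("signing key material leaked through redaction")
	}
}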
docker/swarmkit
manager/scheduler/nodeset.go
nodeInfo
func (ns *nodeSet) nodeInfo(nodeID string) (NodeInfo, error) { node, ok := ns.nodes[nodeID] if ok { return node, nil } return NodeInfo{}, errNodeNotFound }
go
func (ns *nodeSet) nodeInfo(nodeID string) (NodeInfo, error) { node, ok := ns.nodes[nodeID] if ok { return node, nil } return NodeInfo{}, errNodeNotFound }
[ "func", "(", "ns", "*", "nodeSet", ")", "nodeInfo", "(", "nodeID", "string", ")", "(", "NodeInfo", ",", "error", ")", "{", "node", ",", "ok", ":=", "ns", ".", "nodes", "[", "nodeID", "]", "\n", "if", "ok", "{", "return", "node", ",", "nil", "\n",...
// nodeInfo returns the NodeInfo struct for a given node identified by its ID.
[ "nodeInfo", "returns", "the", "NodeInfo", "struct", "for", "a", "given", "node", "identified", "by", "its", "ID", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/nodeset.go#L23-L29
train
docker/swarmkit
manager/scheduler/nodeset.go
addOrUpdateNode
func (ns *nodeSet) addOrUpdateNode(n NodeInfo) { ns.nodes[n.ID] = n }
go
func (ns *nodeSet) addOrUpdateNode(n NodeInfo) { ns.nodes[n.ID] = n }
[ "func", "(", "ns", "*", "nodeSet", ")", "addOrUpdateNode", "(", "n", "NodeInfo", ")", "{", "ns", ".", "nodes", "[", "n", ".", "ID", "]", "=", "n", "\n", "}" ]
// addOrUpdateNode stores the NodeInfo for a given node. It adds the node // to the set if it wasn't already tracked.
[ "addOrUpdateNode", "stores", "the", "NodeInfo", "for", "a", "given", "node", ".", "It", "adds", "the", "node", "to", "the", "set", "if", "it", "wasn", "t", "already", "tracked", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/nodeset.go#L33-L35
train
docker/swarmkit
manager/scheduler/nodeset.go
updateNode
func (ns *nodeSet) updateNode(n NodeInfo) { _, ok := ns.nodes[n.ID] if ok { ns.nodes[n.ID] = n } }
go
func (ns *nodeSet) updateNode(n NodeInfo) { _, ok := ns.nodes[n.ID] if ok { ns.nodes[n.ID] = n } }
[ "func", "(", "ns", "*", "nodeSet", ")", "updateNode", "(", "n", "NodeInfo", ")", "{", "_", ",", "ok", ":=", "ns", ".", "nodes", "[", "n", ".", "ID", "]", "\n", "if", "ok", "{", "ns", ".", "nodes", "[", "n", ".", "ID", "]", "=", "n", "\n", ...
// updateNode replaces the stored NodeInfo for a given node. It ignores the update // if the node isn't already tracked in the set.
[ "updateNode", "replaces", "the", "stored", "NodeInfo", "for", "a", "given", "node", ".", "It", "ignores", "the", "update", "if", "the", "node", "isn", "t", "already", "tracked", "in", "the", "set", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/nodeset.go#L39-L44
train
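The three nodeSet helpers differ only in insertion semantics; an in-package sketch (constructing NodeInfo through an embedded *api.Node field is an assumption about its layout):

ns := nodeSet{nodes: make(map[string]NodeInfo)}

info := NodeInfo{Node: &api.Node{ID: "n1"}} // assumed embedded field name
ns.updateNode(info)      // no-op: "n1" is not tracked yet
ns.addOrUpdateNode(info) // inserts "n1" into the set
ns.updateNode(info)      // now replaces the stored entry

if _, err := ns.nodeInfo("n2"); err != nil {
	// errNodeNotFound: lookups never insert entries
}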
docker/swarmkit
manager/orchestrator/replicated/services.go
setTasksDesiredState
func (r *Orchestrator) setTasksDesiredState(ctx context.Context, batch *store.Batch, slots []orchestrator.Slot, newDesiredState api.TaskState) { for _, slot := range slots { for _, t := range slot { err := batch.Update(func(tx store.Tx) error { // time travel is not allowed. if the current desired state is // above the one we're trying to go to we can't go backwards. // we have nothing to do and we should skip to the next task if t.DesiredState > newDesiredState { // log a warning, though. we shouldn't be trying to rewrite // a state to an earlier state log.G(ctx).Warnf( "cannot update task %v in desired state %v to an earlier desired state %v", t.ID, t.DesiredState, newDesiredState, ) return nil } // update desired state t.DesiredState = newDesiredState return store.UpdateTask(tx, t) }) // log an error if we get one if err != nil { log.G(ctx).WithError(err).Errorf("failed to update task to %v", newDesiredState.String()) } } } }
go
func (r *Orchestrator) setTasksDesiredState(ctx context.Context, batch *store.Batch, slots []orchestrator.Slot, newDesiredState api.TaskState) { for _, slot := range slots { for _, t := range slot { err := batch.Update(func(tx store.Tx) error { // time travel is not allowed. if the current desired state is // above the one we're trying to go to we can't go backwards. // we have nothing to do and we should skip to the next task if t.DesiredState > newDesiredState { // log a warning, though. we shouldn't be trying to rewrite // a state to an earlier state log.G(ctx).Warnf( "cannot update task %v in desired state %v to an earlier desired state %v", t.ID, t.DesiredState, newDesiredState, ) return nil } // update desired state t.DesiredState = newDesiredState return store.UpdateTask(tx, t) }) // log an error if we get one if err != nil { log.G(ctx).WithError(err).Errorf("failed to update task to %v", newDesiredState.String()) } } } }
[ "func", "(", "r", "*", "Orchestrator", ")", "setTasksDesiredState", "(", "ctx", "context", ".", "Context", ",", "batch", "*", "store", ".", "Batch", ",", "slots", "[", "]", "orchestrator", ".", "Slot", ",", "newDesiredState", "api", ".", "TaskState", ")", ...
// setTasksDesiredState sets the desired state for all tasks in the given slots to the // requested state.
[ "setTasksDesiredState", "sets", "the", "desired", "state", "for", "all", "tasks", "in", "the", "given", "slots", "to", "the", "requested", "state", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/replicated/services.go#L213-L241
train
docker/swarmkit
manager/dispatcher/dispatcher.go
DefaultConfig
func DefaultConfig() *Config { return &Config{ HeartbeatPeriod: DefaultHeartBeatPeriod, HeartbeatEpsilon: defaultHeartBeatEpsilon, RateLimitPeriod: defaultRateLimitPeriod, GracePeriodMultiplier: defaultGracePeriodMultiplier, } }
go
func DefaultConfig() *Config { return &Config{ HeartbeatPeriod: DefaultHeartBeatPeriod, HeartbeatEpsilon: defaultHeartBeatEpsilon, RateLimitPeriod: defaultRateLimitPeriod, GracePeriodMultiplier: defaultGracePeriodMultiplier, } }
[ "func", "DefaultConfig", "(", ")", "*", "Config", "{", "return", "&", "Config", "{", "HeartbeatPeriod", ":", "DefaultHeartBeatPeriod", ",", "HeartbeatEpsilon", ":", "defaultHeartBeatEpsilon", ",", "RateLimitPeriod", ":", "defaultRateLimitPeriod", ",", "GracePeriodMultip...
// DefaultConfig returns the default config for the Dispatcher.
[ "DefaultConfig", "returns", "the", "default", "config", "for", "the", "Dispatcher", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/dispatcher.go#L92-L99
train
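A tuning sketch: start from DefaultConfig and override fields before handing the config to Init below (the chosen duration is illustrative only; assumes the time package is imported):

cfg := DefaultConfig()
cfg.HeartbeatPeriod = 10 * time.Second // longer heartbeats tolerate slower agents
// The manager then wires it in via d.Init(cluster, cfg, driverProvider, securityConfig).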
docker/swarmkit
manager/dispatcher/dispatcher.go
Init
func (d *Dispatcher) Init(cluster Cluster, c *Config, dp *drivers.DriverProvider, securityConfig *ca.SecurityConfig) { d.cluster = cluster d.config = c d.securityConfig = securityConfig d.dp = dp d.store = cluster.MemoryStore() d.nodes = newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier, c.RateLimitPeriod) }
go
func (d *Dispatcher) Init(cluster Cluster, c *Config, dp *drivers.DriverProvider, securityConfig *ca.SecurityConfig) { d.cluster = cluster d.config = c d.securityConfig = securityConfig d.dp = dp d.store = cluster.MemoryStore() d.nodes = newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier, c.RateLimitPeriod) }
[ "func", "(", "d", "*", "Dispatcher", ")", "Init", "(", "cluster", "Cluster", ",", "c", "*", "Config", ",", "dp", "*", "drivers", ".", "DriverProvider", ",", "securityConfig", "*", "ca", ".", "SecurityConfig", ")", "{", "d", ".", "cluster", "=", "cluste...
// Init is used to initialize the dispatcher and // is typically called before starting the dispatcher // when a manager becomes a leader. // The dispatcher is a grpc server, and unlike other components, // it can't simply be recreated on becoming a leader. // This function ensures the dispatcher restarts with a clean slate.
[ "Init", "is", "used", "to", "initialize", "the", "dispatcher", "and", "is", "typically", "called", "before", "starting", "the", "dispatcher", "when", "a", "manager", "becomes", "a", "leader", ".", "The", "dispatcher", "is", "a", "grpc", "server", "and", "unl...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/dispatcher.go#L184-L191
train
docker/swarmkit
manager/dispatcher/dispatcher.go
Stop
func (d *Dispatcher) Stop() error { d.mu.Lock() if !d.isRunning() { d.mu.Unlock() return errors.New("dispatcher is already stopped") } log := log.G(d.ctx).WithField("method", "(*Dispatcher).Stop") log.Info("dispatcher stopping") d.cancel() d.mu.Unlock() d.processUpdatesLock.Lock() // when we called d.cancel(), there may be routines, servicing RPC calls to // the (*Dispatcher).Session endpoint, currently waiting at // d.processUpdatesCond.Wait() inside of (*Dispatcher).markNodeReady(). // // these routines are typically woken by a call to // d.processUpdatesCond.Broadcast() at the end of // (*Dispatcher).processUpdates() as part of the main Run loop. However, // when d.cancel() is called, the main Run loop is stopped, and there are // no more opportunities for processUpdates to be called. Any calls to // Session would be stuck waiting on a call to Broadcast that will never // come. // // Further, because the rpcRW write lock cannot be obtained until every RPC // has exited and released its read lock, then Stop would be stuck forever. // // To avoid this case, we acquire the processUpdatesLock (so that no new // waits can start) and then do a Broadcast to wake all of the waiting // routines. Further, if any routines in markNodeReady are waiting to // acquire this lock but have not yet called Wait, those routines will check the // context cancelation, see the context is canceled, and exit before doing // the Wait. // // This call to Broadcast must occur here. If we called Broadcast before // context cancelation, then some new routines could enter the wait. If we // call Broadcast after attempting to acquire the rpcRW lock, we will be // deadlocked. If we do this Broadcast without obtaining this lock (as is // done in the processUpdates method), then it would be possible for that // broadcast to come after the context cancelation check in markNodeReady, // but before the call to Wait. d.processUpdatesCond.Broadcast() d.processUpdatesLock.Unlock() // The active nodes list can be cleaned out only when all // existing RPCs have finished. // RPCs that start after rpcRW.Unlock() should find the context // cancelled and should fail organically. d.rpcRW.Lock() d.nodes.Clean() d.downNodes.Clean() d.rpcRW.Unlock() d.clusterUpdateQueue.Close() // TODO(anshul): This use of Wait() could be unsafe. // According to go's documentation on WaitGroup, // Add() with a positive delta that occurs when the counter is zero // must happen before a Wait(). // As is, dispatcher Stop() can race with Run(). d.wg.Wait() return nil }
go
func (d *Dispatcher) Stop() error { d.mu.Lock() if !d.isRunning() { d.mu.Unlock() return errors.New("dispatcher is already stopped") } log := log.G(d.ctx).WithField("method", "(*Dispatcher).Stop") log.Info("dispatcher stopping") d.cancel() d.mu.Unlock() d.processUpdatesLock.Lock() // when we called d.cancel(), there may be routines, servicing RPC calls to // the (*Dispatcher).Session endpoint, currently waiting at // d.processUpdatesCond.Wait() inside of (*Dispatcher).markNodeReady(). // // these routines are typically woken by a call to // d.processUpdatesCond.Broadcast() at the end of // (*Dispatcher).processUpdates() as part of the main Run loop. However, // when d.cancel() is called, the main Run loop is stopped, and there are // no more opportunities for processUpdates to be called. Any calls to // Session would be stuck waiting on a call to Broadcast that will never // come. // // Further, because the rpcRW write lock cannot be obtained until every RPC // has exited and released its read lock, then Stop would be stuck forever. // // To avoid this case, we acquire the processUpdatesLock (so that no new // waits can start) and then do a Broadcast to wake all of the waiting // routines. Further, if any routines are waiting in markNodeReady to // acquire this lock, but not yet waiting, those routines will check the // context cancelation, see the context is canceled, and exit before doing // the Wait. // // This call to Broadcast must occur here. If we called Broadcast before // context cancelation, then some new routines could enter the wait. If we // call Broadcast after attempting to acquire the rpcRW lock, we will be // deadlocked. If we do this Broadcast without obtaining this lock (as is // done in the processUpdates method), then it would be possible for that // broadcast to come after the context cancelation check in markNodeReady, // but before the call to Wait. d.processUpdatesCond.Broadcast() d.processUpdatesLock.Unlock() // The active nodes list can be cleaned out only when all // existing RPCs have finished. // RPCs that start after rpcRW.Unlock() should find the context // cancelled and should fail organically. d.rpcRW.Lock() d.nodes.Clean() d.downNodes.Clean() d.rpcRW.Unlock() d.clusterUpdateQueue.Close() // TODO(anshul): This use of Wait() could be unsafe. // According to go's documentation on WaitGroup, // Add() with a positive delta that occurs when the counter is zero // must happen before a Wait(). // As is, dispatcher Stop() can race with Run(). d.wg.Wait() return nil }
[ "func", "(", "d", "*", "Dispatcher", ")", "Stop", "(", ")", "error", "{", "d", ".", "mu", ".", "Lock", "(", ")", "\n", "if", "!", "d", ".", "isRunning", "(", ")", "{", "d", ".", "mu", ".", "Unlock", "(", ")", "\n", "return", "errors", ".", ...
// Stop stops the dispatcher and closes all grpc streams.
[ "Stop", "stops", "dispatcher", "and", "closes", "all", "grpc", "streams", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/dispatcher.go#L333-L397
train
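The long comment in Stop above encodes a subtle ordering rule: cancel the context first, then Broadcast while holding the same lock the waiters use, so no goroutine can pass its cancellation check and then block on Wait() after the wake-up has already fired. A minimal, self-contained sketch of that pattern, assuming nothing from swarmkit (all names here are illustrative):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type server struct {
	mu   sync.Mutex
	cond *sync.Cond
}

// wait mimics markNodeReady: check for cancellation while holding the lock,
// then Wait. Because stop broadcasts under the same lock, a waiter can never
// check the context, get preempted, and miss the wake-up.
func (s *server) wait(ctx context.Context) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	select {
	case <-ctx.Done():
		return ctx.Err() // cancelled before we started waiting
	default:
	}
	s.cond.Wait()
	return nil
}

// stop mimics Stop: cancel first, then broadcast while holding the lock.
func (s *server) stop(cancel context.CancelFunc) {
	cancel()
	s.mu.Lock()
	s.cond.Broadcast()
	s.mu.Unlock()
}

func main() {
	s := &server{}
	s.cond = sync.NewCond(&s.mu)
	ctx, cancel := context.WithCancel(context.Background())

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("waiter exited:", s.wait(ctx))
	}()

	time.Sleep(50 * time.Millisecond) // let the waiter block
	s.stop(cancel)
	wg.Wait()
}

Either interleaving terminates: a waiter that reaches Wait before stop is woken by the Broadcast, and one that arrives after sees the cancelled context and returns immediately.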
docker/swarmkit
manager/dispatcher/dispatcher.go
markNodeReady
func (d *Dispatcher) markNodeReady(ctx context.Context, nodeID string, description *api.NodeDescription, addr string) error { d.nodeUpdatesLock.Lock() d.nodeUpdates[nodeID] = nodeUpdate{ status: &api.NodeStatus{ State: api.NodeStatus_READY, Addr: addr, }, description: description, } numUpdates := len(d.nodeUpdates) d.nodeUpdatesLock.Unlock() // Node is marked ready. Remove the node from down nodes if it // is there. d.downNodes.Delete(nodeID) if numUpdates >= maxBatchItems { select { case d.processUpdatesTrigger <- struct{}{}: case <-ctx.Done(): return ctx.Err() } } // Wait until the node update batch happens before unblocking register. d.processUpdatesLock.Lock() defer d.processUpdatesLock.Unlock() select { case <-ctx.Done(): return ctx.Err() default: } d.processUpdatesCond.Wait() return nil }
go
func (d *Dispatcher) markNodeReady(ctx context.Context, nodeID string, description *api.NodeDescription, addr string) error { d.nodeUpdatesLock.Lock() d.nodeUpdates[nodeID] = nodeUpdate{ status: &api.NodeStatus{ State: api.NodeStatus_READY, Addr: addr, }, description: description, } numUpdates := len(d.nodeUpdates) d.nodeUpdatesLock.Unlock() // Node is marked ready. Remove the node from down nodes if it // is there. d.downNodes.Delete(nodeID) if numUpdates >= maxBatchItems { select { case d.processUpdatesTrigger <- struct{}{}: case <-ctx.Done(): return ctx.Err() } } // Wait until the node update batch happens before unblocking register. d.processUpdatesLock.Lock() defer d.processUpdatesLock.Unlock() select { case <-ctx.Done(): return ctx.Err() default: } d.processUpdatesCond.Wait() return nil }
[ "func", "(", "d", "*", "Dispatcher", ")", "markNodeReady", "(", "ctx", "context", ".", "Context", ",", "nodeID", "string", ",", "description", "*", "api", ".", "NodeDescription", ",", "addr", "string", ")", "error", "{", "d", ".", "nodeUpdatesLock", ".", ...
// markNodeReady updates the description of a node, updates its address, and sets its status to READY. // This is used during registration, when a new node description is provided, // and during node updates, when the node description changes.
[ "markNodeReady", "updates", "the", "description", "of", "a", "node", "updates", "its", "address", "and", "sets", "status", "to", "READY", "this", "is", "used", "during", "registration", "when", "a", "new", "node", "description", "is", "provided", "and", "durin...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/dispatcher.go#L489-L526
train
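markNodeReady (and UpdateTaskStatus below) use the same batching idiom: accumulate updates in a map or slice under a mutex, and nudge a trigger channel once the batch reaches maxBatchItems so the processing loop flushes early instead of waiting for its timer. A stripped-down sketch of that idiom; the names, threshold, and timer interval are illustrative:

package main

import (
	"fmt"
	"sync"
	"time"
)

const maxBatchItems = 3

type batcher struct {
	mu      sync.Mutex
	pending []string
	trigger chan struct{} // buffered, size 1
}

func (b *batcher) add(item string) {
	b.mu.Lock()
	b.pending = append(b.pending, item)
	n := len(b.pending)
	b.mu.Unlock()
	if n >= maxBatchItems {
		select {
		case b.trigger <- struct{}{}:
		default: // a flush is already requested
		}
	}
}

func (b *batcher) run(done <-chan struct{}) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-b.trigger: // batch is full, flush early
		case <-ticker.C: // periodic flush of partial batches
		case <-done:
			return
		}
		b.mu.Lock()
		batch := b.pending
		b.pending = nil
		b.mu.Unlock()
		if len(batch) > 0 {
			fmt.Println("flushing", batch)
		}
	}
}

func main() {
	b := &batcher{trigger: make(chan struct{}, 1)}
	done := make(chan struct{})
	go b.run(done)
	for i := 0; i < 7; i++ {
		b.add(fmt.Sprintf("update-%d", i))
	}
	time.Sleep(300 * time.Millisecond)
	close(done)
}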
docker/swarmkit
manager/dispatcher/dispatcher.go
nodeIPFromContext
func nodeIPFromContext(ctx context.Context) (string, error) { nodeInfo, err := ca.RemoteNode(ctx) if err != nil { return "", err } addr, _, err := net.SplitHostPort(nodeInfo.RemoteAddr) if err != nil { return "", errors.Wrap(err, "unable to get ip from addr:port") } return addr, nil }
go
func nodeIPFromContext(ctx context.Context) (string, error) { nodeInfo, err := ca.RemoteNode(ctx) if err != nil { return "", err } addr, _, err := net.SplitHostPort(nodeInfo.RemoteAddr) if err != nil { return "", errors.Wrap(err, "unable to get ip from addr:port") } return addr, nil }
[ "func", "nodeIPFromContext", "(", "ctx", "context", ".", "Context", ")", "(", "string", ",", "error", ")", "{", "nodeInfo", ",", "err", ":=", "ca", ".", "RemoteNode", "(", "ctx", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"\"", ",", "err",...
// nodeIPFromContext gets the node IP from the context of a grpc call
[ "gets", "the", "node", "IP", "from", "the", "context", "of", "a", "grpc", "call" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/dispatcher.go#L529-L539
train
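Once the peer info is extracted, the function above reduces to net.SplitHostPort on the remote "addr:port" string; ca.RemoteNode is swarmkit-specific and omitted from this runnable illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, remote := range []string{"10.0.0.7:2377", "[2001:db8::1]:2377", "no-port"} {
		host, port, err := net.SplitHostPort(remote)
		if err != nil {
			// mirrors the wrapped error in nodeIPFromContext
			fmt.Println("unable to get ip from addr:port:", err)
			continue
		}
		fmt.Printf("host=%s port=%s\n", host, port)
	}
}

SplitHostPort handles bracketed IPv6 literals, which is why the dispatcher can use it directly on gRPC remote addresses.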
docker/swarmkit
manager/dispatcher/dispatcher.go
register
func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, error) { logLocal := log.G(ctx).WithField("method", "(*Dispatcher).register") // prevent register until we're ready to accept it dctx, err := d.isRunningLocked() if err != nil { return "", err } if err := d.nodes.CheckRateLimit(nodeID); err != nil { return "", err } // TODO(stevvooe): Validate node specification. var node *api.Node d.store.View(func(tx store.ReadTx) { node = store.GetNode(tx, nodeID) }) if node == nil { return "", ErrNodeNotFound } addr, err := nodeIPFromContext(ctx) if err != nil { logLocal.WithError(err).Debug("failed to get remote node IP") } if err := d.markNodeReady(dctx, nodeID, description, addr); err != nil { return "", err } expireFunc := func() { log.G(ctx).Debugf("heartbeat expiration for worker %s, setting worker status to NodeStatus_DOWN ", nodeID) if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, "heartbeat failure"); err != nil { log.G(ctx).WithError(err).Errorf("failed deregistering node after heartbeat expiration") } } rn := d.nodes.Add(node, expireFunc) logLocal.Infof("worker %s was successfully registered", nodeID) // NOTE(stevvooe): We need to be a little careful with re-registration. The // current implementation just matches the node id and then gives away the // sessionID. If we ever want to use sessionID as a secret, which we may // want to, this is giving away the keys to the kitchen. // // The right behavior is going to be informed by identity. Basically, each // time a node registers, we invalidate the session and issue a new // session, once identity is proven. This will cause misbehaved agents to // be kicked when multiple connections are made. return rn.SessionID, nil }
go
func (d *Dispatcher) register(ctx context.Context, nodeID string, description *api.NodeDescription) (string, error) { logLocal := log.G(ctx).WithField("method", "(*Dispatcher).register") // prevent register until we're ready to accept it dctx, err := d.isRunningLocked() if err != nil { return "", err } if err := d.nodes.CheckRateLimit(nodeID); err != nil { return "", err } // TODO(stevvooe): Validate node specification. var node *api.Node d.store.View(func(tx store.ReadTx) { node = store.GetNode(tx, nodeID) }) if node == nil { return "", ErrNodeNotFound } addr, err := nodeIPFromContext(ctx) if err != nil { logLocal.WithError(err).Debug("failed to get remote node IP") } if err := d.markNodeReady(dctx, nodeID, description, addr); err != nil { return "", err } expireFunc := func() { log.G(ctx).Debugf("heartbeat expiration for worker %s, setting worker status to NodeStatus_DOWN ", nodeID) if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, "heartbeat failure"); err != nil { log.G(ctx).WithError(err).Errorf("failed deregistering node after heartbeat expiration") } } rn := d.nodes.Add(node, expireFunc) logLocal.Infof("worker %s was successfully registered", nodeID) // NOTE(stevvooe): We need to be a little careful with re-registration. The // current implementation just matches the node id and then gives away the // sessionID. If we ever want to use sessionID as a secret, which we may // want to, this is giving away the keys to the kitchen. // // The right behavior is going to be informed by identity. Basically, each // time a node registers, we invalidate the session and issue a new // session, once identity is proven. This will cause misbehaved agents to // be kicked when multiple connections are made. return rn.SessionID, nil }
[ "func", "(", "d", "*", "Dispatcher", ")", "register", "(", "ctx", "context", ".", "Context", ",", "nodeID", "string", ",", "description", "*", "api", ".", "NodeDescription", ")", "(", "string", ",", "error", ")", "{", "logLocal", ":=", "log", ".", "G",...
// register is used to register a node with a particular dispatcher.
[ "register", "is", "used", "for", "registration", "of", "node", "with", "particular", "dispatcher", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/dispatcher.go#L542-L592
train
docker/swarmkit
manager/dispatcher/dispatcher.go
UpdateTaskStatus
func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStatusRequest) (*api.UpdateTaskStatusResponse, error) { d.rpcRW.RLock() defer d.rpcRW.RUnlock() dctx, err := d.isRunningLocked() if err != nil { return nil, err } nodeInfo, err := ca.RemoteNode(ctx) if err != nil { return nil, err } nodeID := nodeInfo.NodeID fields := logrus.Fields{ "node.id": nodeID, "node.session": r.SessionID, "method": "(*Dispatcher).UpdateTaskStatus", } if nodeInfo.ForwardedBy != nil { fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID } log := log.G(ctx).WithFields(fields) if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil { return nil, err } validTaskUpdates := make([]*api.UpdateTaskStatusRequest_TaskStatusUpdate, 0, len(r.Updates)) // Validate task updates for _, u := range r.Updates { if u.Status == nil { log.WithField("task.id", u.TaskID).Warn("task report has nil status") continue } var t *api.Task d.store.View(func(tx store.ReadTx) { t = store.GetTask(tx, u.TaskID) }) if t == nil { // Task may have been deleted log.WithField("task.id", u.TaskID).Debug("cannot find target task in store") continue } if t.NodeID != nodeID { err := status.Errorf(codes.PermissionDenied, "cannot update a task not assigned this node") log.WithField("task.id", u.TaskID).Error(err) return nil, err } validTaskUpdates = append(validTaskUpdates, u) } d.taskUpdatesLock.Lock() // Enqueue task updates for _, u := range validTaskUpdates { d.taskUpdates[u.TaskID] = u.Status } numUpdates := len(d.taskUpdates) d.taskUpdatesLock.Unlock() if numUpdates >= maxBatchItems { select { case d.processUpdatesTrigger <- struct{}{}: case <-dctx.Done(): } } return nil, nil }
go
func (d *Dispatcher) UpdateTaskStatus(ctx context.Context, r *api.UpdateTaskStatusRequest) (*api.UpdateTaskStatusResponse, error) { d.rpcRW.RLock() defer d.rpcRW.RUnlock() dctx, err := d.isRunningLocked() if err != nil { return nil, err } nodeInfo, err := ca.RemoteNode(ctx) if err != nil { return nil, err } nodeID := nodeInfo.NodeID fields := logrus.Fields{ "node.id": nodeID, "node.session": r.SessionID, "method": "(*Dispatcher).UpdateTaskStatus", } if nodeInfo.ForwardedBy != nil { fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID } log := log.G(ctx).WithFields(fields) if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil { return nil, err } validTaskUpdates := make([]*api.UpdateTaskStatusRequest_TaskStatusUpdate, 0, len(r.Updates)) // Validate task updates for _, u := range r.Updates { if u.Status == nil { log.WithField("task.id", u.TaskID).Warn("task report has nil status") continue } var t *api.Task d.store.View(func(tx store.ReadTx) { t = store.GetTask(tx, u.TaskID) }) if t == nil { // Task may have been deleted log.WithField("task.id", u.TaskID).Debug("cannot find target task in store") continue } if t.NodeID != nodeID { err := status.Errorf(codes.PermissionDenied, "cannot update a task not assigned this node") log.WithField("task.id", u.TaskID).Error(err) return nil, err } validTaskUpdates = append(validTaskUpdates, u) } d.taskUpdatesLock.Lock() // Enqueue task updates for _, u := range validTaskUpdates { d.taskUpdates[u.TaskID] = u.Status } numUpdates := len(d.taskUpdates) d.taskUpdatesLock.Unlock() if numUpdates >= maxBatchItems { select { case d.processUpdatesTrigger <- struct{}{}: case <-dctx.Done(): } } return nil, nil }
[ "func", "(", "d", "*", "Dispatcher", ")", "UpdateTaskStatus", "(", "ctx", "context", ".", "Context", ",", "r", "*", "api", ".", "UpdateTaskStatusRequest", ")", "(", "*", "api", ".", "UpdateTaskStatusResponse", ",", "error", ")", "{", "d", ".", "rpcRW", "...
// UpdateTaskStatus updates the status of a task. A node should send such updates // on every status change of its tasks.
[ "UpdateTaskStatus", "updates", "status", "of", "task", ".", "Node", "should", "send", "such", "updates", "on", "every", "status", "change", "of", "its", "tasks", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/dispatcher.go#L596-L668
train
docker/swarmkit
manager/dispatcher/dispatcher.go
markNodeNotReady
func (d *Dispatcher) markNodeNotReady(id string, state api.NodeStatus_State, message string) error { logLocal := log.G(d.ctx).WithField("method", "(*Dispatcher).markNodeNotReady") dctx, err := d.isRunningLocked() if err != nil { return err } // Node is down. Add it to down nodes so that we can keep // track of tasks assigned to the node. var node *api.Node d.store.View(func(readTx store.ReadTx) { node = store.GetNode(readTx, id) if node == nil { err = fmt.Errorf("could not find node %s while trying to add to down nodes store", id) } }) if err != nil { return err } expireFunc := func() { log.G(dctx).Debugf(`worker timed-out %s in "down" state, moving all tasks to "ORPHANED" state`, id) if err := d.moveTasksToOrphaned(id); err != nil { log.G(dctx).WithError(err).Error(`failed to move all tasks to "ORPHANED" state`) } d.downNodes.Delete(id) } d.downNodes.Add(node, expireFunc) logLocal.Debugf("added node %s to down nodes list", node.ID) status := &api.NodeStatus{ State: state, Message: message, } d.nodeUpdatesLock.Lock() // pluck the description out of nodeUpdates. this protects against a case // where a node is marked ready and a description is added, but then the // node is immediately marked not ready. this preserves that description d.nodeUpdates[id] = nodeUpdate{status: status, description: d.nodeUpdates[id].description} numUpdates := len(d.nodeUpdates) d.nodeUpdatesLock.Unlock() if numUpdates >= maxBatchItems { select { case d.processUpdatesTrigger <- struct{}{}: case <-dctx.Done(): } } if rn := d.nodes.Delete(id); rn == nil { return errors.Errorf("node %s is not found in local storage", id) } logLocal.Debugf("deleted node %s from node store", node.ID) return nil }
go
func (d *Dispatcher) markNodeNotReady(id string, state api.NodeStatus_State, message string) error { logLocal := log.G(d.ctx).WithField("method", "(*Dispatcher).markNodeNotReady") dctx, err := d.isRunningLocked() if err != nil { return err } // Node is down. Add it to down nodes so that we can keep // track of tasks assigned to the node. var node *api.Node d.store.View(func(readTx store.ReadTx) { node = store.GetNode(readTx, id) if node == nil { err = fmt.Errorf("could not find node %s while trying to add to down nodes store", id) } }) if err != nil { return err } expireFunc := func() { log.G(dctx).Debugf(`worker timed-out %s in "down" state, moving all tasks to "ORPHANED" state`, id) if err := d.moveTasksToOrphaned(id); err != nil { log.G(dctx).WithError(err).Error(`failed to move all tasks to "ORPHANED" state`) } d.downNodes.Delete(id) } d.downNodes.Add(node, expireFunc) logLocal.Debugf("added node %s to down nodes list", node.ID) status := &api.NodeStatus{ State: state, Message: message, } d.nodeUpdatesLock.Lock() // pluck the description out of nodeUpdates. this protects against a case // where a node is marked ready and a description is added, but then the // node is immediately marked not ready. this preserves that description d.nodeUpdates[id] = nodeUpdate{status: status, description: d.nodeUpdates[id].description} numUpdates := len(d.nodeUpdates) d.nodeUpdatesLock.Unlock() if numUpdates >= maxBatchItems { select { case d.processUpdatesTrigger <- struct{}{}: case <-dctx.Done(): } } if rn := d.nodes.Delete(id); rn == nil { return errors.Errorf("node %s is not found in local storage", id) } logLocal.Debugf("deleted node %s from node store", node.ID) return nil }
[ "func", "(", "d", "*", "Dispatcher", ")", "markNodeNotReady", "(", "id", "string", ",", "state", "api", ".", "NodeStatus_State", ",", "message", "string", ")", "error", "{", "logLocal", ":=", "log", ".", "G", "(", "d", ".", "ctx", ")", ".", "WithField"...
// markNodeNotReady sets the node state to some state other than READY
[ "markNodeNotReady", "sets", "the", "node", "state", "to", "some", "state", "other", "than", "READY" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/dispatcher.go#L1109-L1168
train
docker/swarmkit
manager/dispatcher/dispatcher.go
Heartbeat
func (d *Dispatcher) Heartbeat(ctx context.Context, r *api.HeartbeatRequest) (*api.HeartbeatResponse, error) { d.rpcRW.RLock() defer d.rpcRW.RUnlock() // TODO(anshul) Explore if it's possible to check context here without locking. if _, err := d.isRunningLocked(); err != nil { return nil, status.Errorf(codes.Aborted, "dispatcher is stopped") } nodeInfo, err := ca.RemoteNode(ctx) if err != nil { return nil, err } period, err := d.nodes.Heartbeat(nodeInfo.NodeID, r.SessionID) log.G(ctx).WithField("method", "(*Dispatcher).Heartbeat").Debugf("received heartbeat from worker %v, expect next heartbeat in %v", nodeInfo, period) return &api.HeartbeatResponse{Period: period}, err }
go
func (d *Dispatcher) Heartbeat(ctx context.Context, r *api.HeartbeatRequest) (*api.HeartbeatResponse, error) { d.rpcRW.RLock() defer d.rpcRW.RUnlock() // TODO(anshul) Explore if it's possible to check context here without locking. if _, err := d.isRunningLocked(); err != nil { return nil, status.Errorf(codes.Aborted, "dispatcher is stopped") } nodeInfo, err := ca.RemoteNode(ctx) if err != nil { return nil, err } period, err := d.nodes.Heartbeat(nodeInfo.NodeID, r.SessionID) log.G(ctx).WithField("method", "(*Dispatcher).Heartbeat").Debugf("received heartbeat from worker %v, expect next heartbeat in %v", nodeInfo, period) return &api.HeartbeatResponse{Period: period}, err }
[ "func", "(", "d", "*", "Dispatcher", ")", "Heartbeat", "(", "ctx", "context", ".", "Context", ",", "r", "*", "api", ".", "HeartbeatRequest", ")", "(", "*", "api", ".", "HeartbeatResponse", ",", "error", ")", "{", "d", ".", "rpcRW", ".", "RLock", "(",...
// Heartbeat is the heartbeat method for nodes. It returns the new TTL in the response. // A node should send its next heartbeat earlier than now + TTL, otherwise it will // be deregistered from the dispatcher and its status will be updated to NodeStatus_DOWN.
[ "Heartbeat", "is", "heartbeat", "method", "for", "nodes", ".", "It", "returns", "new", "TTL", "in", "response", ".", "Node", "should", "send", "new", "heartbeat", "earlier", "than", "now", "+", "TTL", "otherwise", "it", "will", "be", "deregistered", "from", ...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/dispatcher/dispatcher.go#L1173-L1191
train
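From the agent's point of view, the returned Period is a TTL: the next heartbeat must arrive before it elapses or the dispatcher marks the node DOWN. A hedged sketch of a client-side loop against a hypothetical interface (not swarmkit's actual agent code), sending at half the advertised period for safety margin:

package main

import (
	"context"
	"fmt"
	"time"
)

// heartbeater is a hypothetical stand-in for the dispatcher's RPC client.
type heartbeater interface {
	Heartbeat(ctx context.Context) (period time.Duration, err error)
}

// heartbeatLoop sends well before the TTL expires and adopts each new period.
func heartbeatLoop(ctx context.Context, c heartbeater) error {
	period := time.Second // initial guess before the first response
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(period / 2):
		}
		p, err := c.Heartbeat(ctx)
		if err != nil {
			return fmt.Errorf("heartbeat failed, node may be marked DOWN: %w", err)
		}
		period = p // adopt the dispatcher's new TTL
	}
}

type fakeClient struct{}

func (fakeClient) Heartbeat(ctx context.Context) (time.Duration, error) {
	return 200 * time.Millisecond, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(heartbeatLoop(ctx, fakeClient{}))
}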
docker/swarmkit
manager/state/raft/raft.go
NewNode
func NewNode(opts NodeOptions) *Node { cfg := opts.Config if cfg == nil { cfg = DefaultNodeConfig() } if opts.TickInterval == 0 { opts.TickInterval = time.Second } if opts.SendTimeout == 0 { opts.SendTimeout = 2 * time.Second } raftStore := raft.NewMemoryStorage() n := &Node{ cluster: membership.NewCluster(), raftStore: raftStore, opts: opts, Config: &raft.Config{ ElectionTick: cfg.ElectionTick, HeartbeatTick: cfg.HeartbeatTick, Storage: raftStore, MaxSizePerMsg: cfg.MaxSizePerMsg, MaxInflightMsgs: cfg.MaxInflightMsgs, Logger: cfg.Logger, CheckQuorum: cfg.CheckQuorum, }, doneCh: make(chan struct{}), RemovedFromRaft: make(chan struct{}), stopped: make(chan struct{}), leadershipBroadcast: watch.NewQueue(), keyRotator: opts.KeyRotator, } n.memoryStore = store.NewMemoryStore(n) if opts.ClockSource == nil { n.ticker = clock.NewClock().NewTicker(opts.TickInterval) } else { n.ticker = opts.ClockSource.NewTicker(opts.TickInterval) } n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now()) n.wait = newWait() n.cancelFunc = func(n *Node) func() { var cancelOnce sync.Once return func() { cancelOnce.Do(func() { close(n.stopped) }) } }(n) return n }
go
func NewNode(opts NodeOptions) *Node { cfg := opts.Config if cfg == nil { cfg = DefaultNodeConfig() } if opts.TickInterval == 0 { opts.TickInterval = time.Second } if opts.SendTimeout == 0 { opts.SendTimeout = 2 * time.Second } raftStore := raft.NewMemoryStorage() n := &Node{ cluster: membership.NewCluster(), raftStore: raftStore, opts: opts, Config: &raft.Config{ ElectionTick: cfg.ElectionTick, HeartbeatTick: cfg.HeartbeatTick, Storage: raftStore, MaxSizePerMsg: cfg.MaxSizePerMsg, MaxInflightMsgs: cfg.MaxInflightMsgs, Logger: cfg.Logger, CheckQuorum: cfg.CheckQuorum, }, doneCh: make(chan struct{}), RemovedFromRaft: make(chan struct{}), stopped: make(chan struct{}), leadershipBroadcast: watch.NewQueue(), keyRotator: opts.KeyRotator, } n.memoryStore = store.NewMemoryStore(n) if opts.ClockSource == nil { n.ticker = clock.NewClock().NewTicker(opts.TickInterval) } else { n.ticker = opts.ClockSource.NewTicker(opts.TickInterval) } n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now()) n.wait = newWait() n.cancelFunc = func(n *Node) func() { var cancelOnce sync.Once return func() { cancelOnce.Do(func() { close(n.stopped) }) } }(n) return n }
[ "func", "NewNode", "(", "opts", "NodeOptions", ")", "*", "Node", "{", "cfg", ":=", "opts", ".", "Config", "\n", "if", "cfg", "==", "nil", "{", "cfg", "=", "DefaultNodeConfig", "(", ")", "\n", "}", "\n", "if", "opts", ".", "TickInterval", "==", "0", ...
// NewNode generates a new Raft node
[ "NewNode", "generates", "a", "new", "Raft", "node" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L212-L266
train
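NewNode tolerates a zero-valued options struct by substituting defaults: a nil Config falls back to DefaultNodeConfig, and zero TickInterval and SendTimeout get one-second and two-second fallbacks. The defaulting idiom in isolation, with a trimmed-down options struct for illustration:

package main

import (
	"fmt"
	"time"
)

// options is a trimmed-down stand-in for swarmkit's NodeOptions.
type options struct {
	TickInterval time.Duration
	SendTimeout  time.Duration
}

func applyDefaults(opts options) options {
	if opts.TickInterval == 0 {
		opts.TickInterval = time.Second // matches NewNode's fallback
	}
	if opts.SendTimeout == 0 {
		opts.SendTimeout = 2 * time.Second
	}
	return opts
}

func main() {
	fmt.Printf("%+v\n", applyDefaults(options{}))
	fmt.Printf("%+v\n", applyDefaults(options{TickInterval: 100 * time.Millisecond}))
}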
docker/swarmkit
manager/state/raft/raft.go
IsIDRemoved
func (n *Node) IsIDRemoved(id uint64) bool { return n.cluster.IsIDRemoved(id) }
go
func (n *Node) IsIDRemoved(id uint64) bool { return n.cluster.IsIDRemoved(id) }
[ "func", "(", "n", "*", "Node", ")", "IsIDRemoved", "(", "id", "uint64", ")", "bool", "{", "return", "n", ".", "cluster", ".", "IsIDRemoved", "(", "id", ")", "\n", "}" ]
// IsIDRemoved reports if member with id was removed from cluster. // Part of transport.Raft interface.
[ "IsIDRemoved", "reports", "if", "member", "with", "id", "was", "removed", "from", "cluster", ".", "Part", "of", "transport", ".", "Raft", "interface", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L270-L272
train
docker/swarmkit
manager/state/raft/raft.go
NodeRemoved
func (n *Node) NodeRemoved() { n.removeRaftOnce.Do(func() { atomic.StoreUint32(&n.isMember, 0) close(n.RemovedFromRaft) }) }
go
func (n *Node) NodeRemoved() { n.removeRaftOnce.Do(func() { atomic.StoreUint32(&n.isMember, 0) close(n.RemovedFromRaft) }) }
[ "func", "(", "n", "*", "Node", ")", "NodeRemoved", "(", ")", "{", "n", ".", "removeRaftOnce", ".", "Do", "(", "func", "(", ")", "{", "atomic", ".", "StoreUint32", "(", "&", "n", ".", "isMember", ",", "0", ")", "\n", "close", "(", "n", ".", "Rem...
// NodeRemoved signals that the node was removed from the cluster and should stop. // Part of transport.Raft interface.
[ "NodeRemoved", "signals", "that", "node", "was", "removed", "from", "cluster", "and", "should", "stop", ".", "Part", "of", "transport", ".", "Raft", "interface", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L276-L281
train
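The Once-guarded close above is the standard Go idiom for an idempotent broadcast signal: the first call closes the channel, later calls are no-ops, and any number of goroutines can observe the event by receiving from the channel. A standalone version with illustrative names:

package main

import (
	"fmt"
	"sync"
)

type membership struct {
	once    sync.Once
	removed chan struct{}
}

func (m *membership) remove() {
	m.once.Do(func() { close(m.removed) })
}

func main() {
	m := &membership{removed: make(chan struct{})}
	m.remove()
	m.remove() // safe: closing the channel twice would panic without the Once
	<-m.removed
	fmt.Println("removal observed")
}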
docker/swarmkit
manager/state/raft/raft.go
ReportSnapshot
func (n *Node) ReportSnapshot(id uint64, status raft.SnapshotStatus) { n.raftNode.ReportSnapshot(id, status) }
go
func (n *Node) ReportSnapshot(id uint64, status raft.SnapshotStatus) { n.raftNode.ReportSnapshot(id, status) }
[ "func", "(", "n", "*", "Node", ")", "ReportSnapshot", "(", "id", "uint64", ",", "status", "raft", ".", "SnapshotStatus", ")", "{", "n", ".", "raftNode", ".", "ReportSnapshot", "(", "id", ",", "status", ")", "\n", "}" ]
// ReportSnapshot reports snapshot status to underlying raft node. // Part of transport.Raft interface.
[ "ReportSnapshot", "reports", "snapshot", "status", "to", "underlying", "raft", "node", ".", "Part", "of", "transport", ".", "Raft", "interface", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L285-L287
train
docker/swarmkit
manager/state/raft/raft.go
SetAddr
func (n *Node) SetAddr(ctx context.Context, addr string) error { n.addrLock.Lock() defer n.addrLock.Unlock() n.opts.Addr = addr if !n.IsMember() { return nil } newRaftMember := &api.RaftMember{ RaftID: n.Config.ID, NodeID: n.opts.ID, Addr: addr, } if err := n.cluster.UpdateMember(n.Config.ID, newRaftMember); err != nil { return err } // If the raft node is running, submit a configuration change // with the new address. // TODO(aaronl): Currently, this node must be the leader to // submit this configuration change. This works for the initial // use cases (single-node cluster late binding ports, or calling // SetAddr before joining a cluster). In the future, we may want // to support having a follower proactively change its remote // address. leadershipCh, cancelWatch := n.SubscribeLeadership() defer cancelWatch() ctx, cancelCtx := n.WithContext(ctx) defer cancelCtx() isLeader := atomic.LoadUint32(&n.signalledLeadership) == 1 for !isLeader { select { case leadershipChange := <-leadershipCh: if leadershipChange == IsLeader { isLeader = true } case <-ctx.Done(): return ctx.Err() } } return n.updateNodeBlocking(ctx, n.Config.ID, addr) }
go
func (n *Node) SetAddr(ctx context.Context, addr string) error { n.addrLock.Lock() defer n.addrLock.Unlock() n.opts.Addr = addr if !n.IsMember() { return nil } newRaftMember := &api.RaftMember{ RaftID: n.Config.ID, NodeID: n.opts.ID, Addr: addr, } if err := n.cluster.UpdateMember(n.Config.ID, newRaftMember); err != nil { return err } // If the raft node is running, submit a configuration change // with the new address. // TODO(aaronl): Currently, this node must be the leader to // submit this configuration change. This works for the initial // use cases (single-node cluster late binding ports, or calling // SetAddr before joining a cluster). In the future, we may want // to support having a follower proactively change its remote // address. leadershipCh, cancelWatch := n.SubscribeLeadership() defer cancelWatch() ctx, cancelCtx := n.WithContext(ctx) defer cancelCtx() isLeader := atomic.LoadUint32(&n.signalledLeadership) == 1 for !isLeader { select { case leadershipChange := <-leadershipCh: if leadershipChange == IsLeader { isLeader = true } case <-ctx.Done(): return ctx.Err() } } return n.updateNodeBlocking(ctx, n.Config.ID, addr) }
[ "func", "(", "n", "*", "Node", ")", "SetAddr", "(", "ctx", "context", ".", "Context", ",", "addr", "string", ")", "error", "{", "n", ".", "addrLock", ".", "Lock", "(", ")", "\n", "defer", "n", ".", "addrLock", ".", "Unlock", "(", ")", "\n", "n", ...
// SetAddr provides the raft node's address. This can be used in cases where // opts.Addr was not provided to NewNode, for example when a port was not bound // until after the raft node was created.
[ "SetAddr", "provides", "the", "raft", "node", "s", "address", ".", "This", "can", "be", "used", "in", "cases", "where", "opts", ".", "Addr", "was", "not", "provided", "to", "NewNode", "for", "example", "when", "a", "port", "was", "not", "bound", "until",...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L299-L347
train
docker/swarmkit
manager/state/raft/raft.go
WithContext
func (n *Node) WithContext(ctx context.Context) (context.Context, context.CancelFunc) { ctx, cancel := context.WithCancel(ctx) go func() { select { case <-ctx.Done(): case <-n.stopped: cancel() } }() return ctx, cancel }
go
func (n *Node) WithContext(ctx context.Context) (context.Context, context.CancelFunc) { ctx, cancel := context.WithCancel(ctx) go func() { select { case <-ctx.Done(): case <-n.stopped: cancel() } }() return ctx, cancel }
[ "func", "(", "n", "*", "Node", ")", "WithContext", "(", "ctx", "context", ".", "Context", ")", "(", "context", ".", "Context", ",", "context", ".", "CancelFunc", ")", "{", "ctx", ",", "cancel", ":=", "context", ".", "WithCancel", "(", "ctx", ")", "\n...
// WithContext returns a context which is cancelled when the parent context is cancelled // or the node is stopped.
[ "WithContext", "returns", "context", "which", "is", "cancelled", "when", "parent", "context", "cancelled", "or", "node", "is", "stopped", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L351-L362
train
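A self-contained version of this pattern: derive a context that is cancelled either by its parent or by a separate stop channel. The helper goroutine exits as soon as either event fires, so it does not leak once the returned cancel func is called:

package main

import (
	"context"
	"fmt"
)

// withStop derives a context cancelled by its parent or by a stop channel.
func withStop(ctx context.Context, stopped <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		select {
		case <-ctx.Done():
		case <-stopped:
			cancel()
		}
	}()
	return ctx, cancel
}

func main() {
	stopped := make(chan struct{})
	ctx, cancel := withStop(context.Background(), stopped)
	defer cancel()
	close(stopped) // simulate node shutdown
	<-ctx.Done()
	fmt.Println(ctx.Err()) // context canceled
}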
docker/swarmkit
manager/state/raft/raft.go
JoinAndStart
func (n *Node) JoinAndStart(ctx context.Context) (err error) { ctx, cancel := n.WithContext(ctx) defer func() { cancel() if err != nil { n.stopMu.Lock() // to shutdown transport n.cancelFunc() n.stopMu.Unlock() n.done() } else { atomic.StoreUint32(&n.isMember, 1) } }() loadAndStartErr := n.loadAndStart(ctx, n.opts.ForceNewCluster) if loadAndStartErr != nil && loadAndStartErr != storage.ErrNoWAL { return loadAndStartErr } snapshot, err := n.raftStore.Snapshot() // Snapshot never returns an error if err != nil { panic("could not get snapshot of raft store") } n.confState = snapshot.Metadata.ConfState n.appliedIndex = snapshot.Metadata.Index n.snapshotMeta = snapshot.Metadata n.writtenWALIndex, _ = n.raftStore.LastIndex() // lastIndex always returns nil as an error n.addrLock.Lock() defer n.addrLock.Unlock() // override the module field entirely, since etcd/raft is not exactly a submodule n.Config.Logger = log.G(ctx).WithField("module", "raft") // restore from snapshot if loadAndStartErr == nil { if n.opts.JoinAddr != "" && n.opts.ForceJoin { if err := n.joinCluster(ctx); err != nil { return errors.Wrap(err, "failed to rejoin cluster") } } n.campaignWhenAble = true n.initTransport() n.raftNode = raft.RestartNode(n.Config) return nil } if n.opts.JoinAddr == "" { // First member in the cluster, self-assign ID n.Config.ID = uint64(rand.Int63()) + 1 peer, err := n.newRaftLogs(n.opts.ID) if err != nil { return err } n.campaignWhenAble = true n.initTransport() n.raftNode = raft.StartNode(n.Config, []raft.Peer{peer}) return nil } // join to existing cluster if err := n.joinCluster(ctx); err != nil { return err } if _, err := n.newRaftLogs(n.opts.ID); err != nil { return err } n.initTransport() n.raftNode = raft.StartNode(n.Config, nil) return nil }
go
func (n *Node) JoinAndStart(ctx context.Context) (err error) { ctx, cancel := n.WithContext(ctx) defer func() { cancel() if err != nil { n.stopMu.Lock() // to shutdown transport n.cancelFunc() n.stopMu.Unlock() n.done() } else { atomic.StoreUint32(&n.isMember, 1) } }() loadAndStartErr := n.loadAndStart(ctx, n.opts.ForceNewCluster) if loadAndStartErr != nil && loadAndStartErr != storage.ErrNoWAL { return loadAndStartErr } snapshot, err := n.raftStore.Snapshot() // Snapshot never returns an error if err != nil { panic("could not get snapshot of raft store") } n.confState = snapshot.Metadata.ConfState n.appliedIndex = snapshot.Metadata.Index n.snapshotMeta = snapshot.Metadata n.writtenWALIndex, _ = n.raftStore.LastIndex() // lastIndex always returns nil as an error n.addrLock.Lock() defer n.addrLock.Unlock() // override the module field entirely, since etcd/raft is not exactly a submodule n.Config.Logger = log.G(ctx).WithField("module", "raft") // restore from snapshot if loadAndStartErr == nil { if n.opts.JoinAddr != "" && n.opts.ForceJoin { if err := n.joinCluster(ctx); err != nil { return errors.Wrap(err, "failed to rejoin cluster") } } n.campaignWhenAble = true n.initTransport() n.raftNode = raft.RestartNode(n.Config) return nil } if n.opts.JoinAddr == "" { // First member in the cluster, self-assign ID n.Config.ID = uint64(rand.Int63()) + 1 peer, err := n.newRaftLogs(n.opts.ID) if err != nil { return err } n.campaignWhenAble = true n.initTransport() n.raftNode = raft.StartNode(n.Config, []raft.Peer{peer}) return nil } // join to existing cluster if err := n.joinCluster(ctx); err != nil { return err } if _, err := n.newRaftLogs(n.opts.ID); err != nil { return err } n.initTransport() n.raftNode = raft.StartNode(n.Config, nil) return nil }
[ "func", "(", "n", "*", "Node", ")", "JoinAndStart", "(", "ctx", "context", ".", "Context", ")", "(", "err", "error", ")", "{", "ctx", ",", "cancel", ":=", "n", ".", "WithContext", "(", "ctx", ")", "\n", "defer", "func", "(", ")", "{", "cancel", "...
// JoinAndStart joins and starts the raft server
[ "JoinAndStart", "joins", "and", "starts", "the", "raft", "server" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L375-L452
train
docker/swarmkit
manager/state/raft/raft.go
DefaultNodeConfig
func DefaultNodeConfig() *raft.Config { return &raft.Config{ HeartbeatTick: 1, // Recommended value in etcd/raft is 10 x (HeartbeatTick). // Lower values were seen to have caused instability because of // frequent leader elections when running on flakey networks. ElectionTick: 10, MaxSizePerMsg: math.MaxUint16, MaxInflightMsgs: 256, Logger: log.L, CheckQuorum: true, } }
go
func DefaultNodeConfig() *raft.Config { return &raft.Config{ HeartbeatTick: 1, // Recommended value in etcd/raft is 10 x (HeartbeatTick). // Lower values were seen to have caused instability because of // frequent leader elections when running on flakey networks. ElectionTick: 10, MaxSizePerMsg: math.MaxUint16, MaxInflightMsgs: 256, Logger: log.L, CheckQuorum: true, } }
[ "func", "DefaultNodeConfig", "(", ")", "*", "raft", ".", "Config", "{", "return", "&", "raft", ".", "Config", "{", "HeartbeatTick", ":", "1", ",", "ElectionTick", ":", "10", ",", "MaxSizePerMsg", ":", "math", ".", "MaxUint16", ",", "MaxInflightMsgs", ":", ...
// DefaultNodeConfig returns the default config for a // raft node that can be modified and customized
[ "DefaultNodeConfig", "returns", "the", "default", "config", "for", "a", "raft", "node", "that", "can", "be", "modified", "and", "customized" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L482-L494
train
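These etcd/raft ticks only become wall-clock durations when multiplied by the node's TickInterval, which is one second by default in NewNode. A quick computation of the resulting heartbeat interval and election timeout under that assumption:

package main

import (
	"fmt"
	"time"
)

func main() {
	tickInterval := time.Second // NewNode's default when opts.TickInterval is zero
	heartbeatTick, electionTick := 1, 10

	// The leader sends a heartbeat every HeartbeatTick ticks.
	fmt.Println("heartbeat every:", time.Duration(heartbeatTick)*tickInterval) // 1s
	// A follower waits ElectionTick ticks without hearing from the leader
	// before campaigning, so the 10x ratio recommended by etcd/raft yields:
	fmt.Println("election timeout:", time.Duration(electionTick)*tickInterval) // 10s
}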
docker/swarmkit
manager/state/raft/raft.go
DefaultRaftConfig
func DefaultRaftConfig() api.RaftConfig { return api.RaftConfig{ KeepOldSnapshots: 0, SnapshotInterval: 10000, LogEntriesForSlowFollowers: 500, // Recommended value in etcd/raft is 10 x (HeartbeatTick). // Lower values were seen to have caused instability because of // frequent leader elections when running on flakey networks. HeartbeatTick: 1, ElectionTick: 10, } }
go
func DefaultRaftConfig() api.RaftConfig { return api.RaftConfig{ KeepOldSnapshots: 0, SnapshotInterval: 10000, LogEntriesForSlowFollowers: 500, // Recommended value in etcd/raft is 10 x (HeartbeatTick). // Lower values were seen to have caused instability because of // frequent leader elections when running on flakey networks. HeartbeatTick: 1, ElectionTick: 10, } }
[ "func", "DefaultRaftConfig", "(", ")", "api", ".", "RaftConfig", "{", "return", "api", ".", "RaftConfig", "{", "KeepOldSnapshots", ":", "0", ",", "SnapshotInterval", ":", "10000", ",", "LogEntriesForSlowFollowers", ":", "500", ",", "HeartbeatTick", ":", "1", "...
// DefaultRaftConfig returns a default api.RaftConfig.
[ "DefaultRaftConfig", "returns", "a", "default", "api", ".", "RaftConfig", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L497-L508
train
docker/swarmkit
manager/state/raft/raft.go
isLeader
func (n *Node) isLeader() bool { if !n.IsMember() { return false } if n.Status().Lead == n.Config.ID { return true } return false }
go
func (n *Node) isLeader() bool { if !n.IsMember() { return false } if n.Status().Lead == n.Config.ID { return true } return false }
[ "func", "(", "n", "*", "Node", ")", "isLeader", "(", ")", "bool", "{", "if", "!", "n", ".", "IsMember", "(", ")", "{", "return", "false", "\n", "}", "\n", "if", "n", ".", "Status", "(", ")", ".", "Lead", "==", "n", ".", "Config", ".", "ID", ...
// isLeader checks if we are the leader or not, without the protection of a lock
[ "isLeader", "checks", "if", "we", "are", "the", "leader", "or", "not", "without", "the", "protection", "of", "lock" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L862-L871
train
docker/swarmkit
manager/state/raft/raft.go
IsLeader
func (n *Node) IsLeader() bool { n.stopMu.RLock() defer n.stopMu.RUnlock() return n.isLeader() }
go
func (n *Node) IsLeader() bool { n.stopMu.RLock() defer n.stopMu.RUnlock() return n.isLeader() }
[ "func", "(", "n", "*", "Node", ")", "IsLeader", "(", ")", "bool", "{", "n", ".", "stopMu", ".", "RLock", "(", ")", "\n", "defer", "n", ".", "stopMu", ".", "RUnlock", "(", ")", "\n", "return", "n", ".", "isLeader", "(", ")", "\n", "}" ]
// IsLeader checks if we are the leader or not, with the protection of a lock
[ "IsLeader", "checks", "if", "we", "are", "the", "leader", "or", "not", "with", "the", "protection", "of", "lock" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L874-L879
train
docker/swarmkit
manager/state/raft/raft.go
Leader
func (n *Node) Leader() (uint64, error) { n.stopMu.RLock() defer n.stopMu.RUnlock() if !n.IsMember() { return raft.None, ErrNoRaftMember } leader := n.leader() if leader == raft.None { return raft.None, ErrNoClusterLeader } return leader, nil }
go
func (n *Node) Leader() (uint64, error) { n.stopMu.RLock() defer n.stopMu.RUnlock() if !n.IsMember() { return raft.None, ErrNoRaftMember } leader := n.leader() if leader == raft.None { return raft.None, ErrNoClusterLeader } return leader, nil }
[ "func", "(", "n", "*", "Node", ")", "Leader", "(", ")", "(", "uint64", ",", "error", ")", "{", "n", ".", "stopMu", ".", "RLock", "(", ")", "\n", "defer", "n", ".", "stopMu", ".", "RUnlock", "(", ")", "\n", "if", "!", "n", ".", "IsMember", "("...
// Leader returns the id of the leader, with the protection of a lock
[ "Leader", "returns", "the", "id", "of", "the", "leader", "with", "the", "protection", "of", "lock" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L888-L901
train
docker/swarmkit
manager/state/raft/raft.go
checkHealth
func (n *Node) checkHealth(ctx context.Context, addr string, timeout time.Duration) error { conn, err := dial(addr, "tcp", n.opts.TLSCredentials, timeout) if err != nil { return err } defer conn.Close() if timeout != 0 { tctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() ctx = tctx } healthClient := api.NewHealthClient(conn) resp, err := healthClient.Check(ctx, &api.HealthCheckRequest{Service: "Raft"}) if err != nil { return errors.Wrap(err, "could not connect to prospective new cluster member using its advertised address") } if resp.Status != api.HealthCheckResponse_SERVING { return fmt.Errorf("health check returned status %s", resp.Status.String()) } return nil }
go
func (n *Node) checkHealth(ctx context.Context, addr string, timeout time.Duration) error { conn, err := dial(addr, "tcp", n.opts.TLSCredentials, timeout) if err != nil { return err } defer conn.Close() if timeout != 0 { tctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() ctx = tctx } healthClient := api.NewHealthClient(conn) resp, err := healthClient.Check(ctx, &api.HealthCheckRequest{Service: "Raft"}) if err != nil { return errors.Wrap(err, "could not connect to prospective new cluster member using its advertised address") } if resp.Status != api.HealthCheckResponse_SERVING { return fmt.Errorf("health check returned status %s", resp.Status.String()) } return nil }
[ "func", "(", "n", "*", "Node", ")", "checkHealth", "(", "ctx", "context", ".", "Context", ",", "addr", "string", ",", "timeout", "time", ".", "Duration", ")", "error", "{", "conn", ",", "err", ":=", "dial", "(", "addr", ",", "\"tcp\"", ",", "n", "....
// checkHealth tries to contact an aspiring member through its advertised address // and checks if its raft server is running.
[ "checkHealth", "tries", "to", "contact", "an", "aspiring", "member", "through", "its", "advertised", "address", "and", "checks", "if", "its", "raft", "server", "is", "running", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1040-L1064
train
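swarmkit's HealthClient is generated from its own proto, but the call shape mirrors the standard gRPC health-checking protocol. A sketch using the stock grpc_health_v1 package, assuming the google.golang.org/grpc module is available (plaintext credentials purely for brevity; checkHealth above dials with the cluster's TLS credentials and its own dial helper):

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func checkHealth(ctx context.Context, addr string, timeout time.Duration) error {
	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return err
	}
	defer conn.Close()

	if timeout != 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}

	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: "Raft"})
	if err != nil {
		return err
	}
	if resp.Status != healthpb.HealthCheckResponse_SERVING {
		return fmt.Errorf("health check returned status %s", resp.Status)
	}
	return nil
}

func main() {
	err := checkHealth(context.Background(), "127.0.0.1:2377", 2*time.Second)
	fmt.Println("health:", err)
}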
docker/swarmkit
manager/state/raft/raft.go
addMember
func (n *Node) addMember(ctx context.Context, addr string, raftID uint64, nodeID string) error { node := api.RaftMember{ RaftID: raftID, NodeID: nodeID, Addr: addr, } meta, err := node.Marshal() if err != nil { return err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeAddNode, NodeID: raftID, Context: meta, } // Wait for a raft round to process the configuration change return n.configure(ctx, cc) }
go
func (n *Node) addMember(ctx context.Context, addr string, raftID uint64, nodeID string) error { node := api.RaftMember{ RaftID: raftID, NodeID: nodeID, Addr: addr, } meta, err := node.Marshal() if err != nil { return err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeAddNode, NodeID: raftID, Context: meta, } // Wait for a raft round to process the configuration change return n.configure(ctx, cc) }
[ "func", "(", "n", "*", "Node", ")", "addMember", "(", "ctx", "context", ".", "Context", ",", "addr", "string", ",", "raftID", "uint64", ",", "nodeID", "string", ")", "error", "{", "node", ":=", "api", ".", "RaftMember", "{", "RaftID", ":", "raftID", ...
// addMember submits a configuration change to add a new member on the raft cluster.
[ "addMember", "submits", "a", "configuration", "change", "to", "add", "a", "new", "member", "on", "the", "raft", "cluster", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1067-L1087
train
docker/swarmkit
manager/state/raft/raft.go
updateNodeBlocking
func (n *Node) updateNodeBlocking(ctx context.Context, id uint64, addr string) error { m := n.cluster.GetMember(id) if m == nil { return errors.Errorf("member %x is not found for update", id) } node := api.RaftMember{ RaftID: m.RaftID, NodeID: m.NodeID, Addr: addr, } meta, err := node.Marshal() if err != nil { return err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeUpdateNode, NodeID: id, Context: meta, } // Wait for a raft round to process the configuration change return n.configure(ctx, cc) }
go
func (n *Node) updateNodeBlocking(ctx context.Context, id uint64, addr string) error { m := n.cluster.GetMember(id) if m == nil { return errors.Errorf("member %x is not found for update", id) } node := api.RaftMember{ RaftID: m.RaftID, NodeID: m.NodeID, Addr: addr, } meta, err := node.Marshal() if err != nil { return err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeUpdateNode, NodeID: id, Context: meta, } // Wait for a raft round to process the configuration change return n.configure(ctx, cc) }
[ "func", "(", "n", "*", "Node", ")", "updateNodeBlocking", "(", "ctx", "context", ".", "Context", ",", "id", "uint64", ",", "addr", "string", ")", "error", "{", "m", ":=", "n", ".", "cluster", ".", "GetMember", "(", "id", ")", "\n", "if", "m", "==",...
// updateNodeBlocking runs a synchronous job to update the node address across the whole cluster.
[ "updateNodeBlocking", "runs", "synchronous", "job", "to", "update", "node", "address", "in", "whole", "cluster", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1090-L1114
train
docker/swarmkit
manager/state/raft/raft.go
UpdateNode
func (n *Node) UpdateNode(id uint64, addr string) { ctx, cancel := n.WithContext(context.Background()) defer cancel() // spawn updating info in raft in background to unblock transport go func() { if err := n.updateNodeBlocking(ctx, id, addr); err != nil { log.G(ctx).WithFields(logrus.Fields{"raft_id": n.Config.ID, "update_id": id}).WithError(err).Error("failed to update member address in cluster") } }() }
go
func (n *Node) UpdateNode(id uint64, addr string) { ctx, cancel := n.WithContext(context.Background()) defer cancel() // spawn updating info in raft in background to unblock transport go func() { if err := n.updateNodeBlocking(ctx, id, addr); err != nil { log.G(ctx).WithFields(logrus.Fields{"raft_id": n.Config.ID, "update_id": id}).WithError(err).Error("failed to update member address in cluster") } }() }
[ "func", "(", "n", "*", "Node", ")", "UpdateNode", "(", "id", "uint64", ",", "addr", "string", ")", "{", "ctx", ",", "cancel", ":=", "n", ".", "WithContext", "(", "context", ".", "Background", "(", ")", ")", "\n", "defer", "cancel", "(", ")", "\n", ...
// UpdateNode submits a configuration change to change a member's address.
[ "UpdateNode", "submits", "a", "configuration", "change", "to", "change", "a", "member", "s", "address", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1117-L1126
train
docker/swarmkit
manager/state/raft/raft.go
Leave
func (n *Node) Leave(ctx context.Context, req *api.LeaveRequest) (*api.LeaveResponse, error) { if req.Node == nil { return nil, status.Errorf(codes.InvalidArgument, "no node information provided") } nodeInfo, err := ca.RemoteNode(ctx) if err != nil { return nil, err } ctx, cancel := n.WithContext(ctx) defer cancel() fields := logrus.Fields{ "node.id": nodeInfo.NodeID, "method": "(*Node).Leave", "raft_id": fmt.Sprintf("%x", n.Config.ID), } if nodeInfo.ForwardedBy != nil { fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID } log.G(ctx).WithFields(fields).Debug("") if err := n.removeMember(ctx, req.Node.RaftID); err != nil { return nil, err } return &api.LeaveResponse{}, nil }
go
func (n *Node) Leave(ctx context.Context, req *api.LeaveRequest) (*api.LeaveResponse, error) { if req.Node == nil { return nil, status.Errorf(codes.InvalidArgument, "no node information provided") } nodeInfo, err := ca.RemoteNode(ctx) if err != nil { return nil, err } ctx, cancel := n.WithContext(ctx) defer cancel() fields := logrus.Fields{ "node.id": nodeInfo.NodeID, "method": "(*Node).Leave", "raft_id": fmt.Sprintf("%x", n.Config.ID), } if nodeInfo.ForwardedBy != nil { fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID } log.G(ctx).WithFields(fields).Debug("") if err := n.removeMember(ctx, req.Node.RaftID); err != nil { return nil, err } return &api.LeaveResponse{}, nil }
[ "func", "(", "n", "*", "Node", ")", "Leave", "(", "ctx", "context", ".", "Context", ",", "req", "*", "api", ".", "LeaveRequest", ")", "(", "*", "api", ".", "LeaveResponse", ",", "error", ")", "{", "if", "req", ".", "Node", "==", "nil", "{", "retu...
// Leave asks a member of the raft cluster to remove // us from it. This method is called by a member that is // willing to give up its raft membership, and is addressed // to an active member of the raft.
[ "Leave", "asks", "to", "a", "member", "of", "the", "raft", "to", "remove", "us", "from", "the", "raft", "cluster", ".", "This", "method", "is", "called", "from", "a", "member", "who", "is", "willing", "to", "leave", "its", "raft", "membership", "to", "...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1132-L1160
train
docker/swarmkit
manager/state/raft/raft.go
CanRemoveMember
func (n *Node) CanRemoveMember(id uint64) bool { members := n.cluster.Members() nreachable := 0 // reachable managers after removal for _, m := range members { if m.RaftID == id { continue } // Local node from where the remove is issued if m.RaftID == n.Config.ID { nreachable++ continue } if n.transport.Active(m.RaftID) { nreachable++ } } nquorum := (len(members)-1)/2 + 1 return nreachable >= nquorum }
go
func (n *Node) CanRemoveMember(id uint64) bool { members := n.cluster.Members() nreachable := 0 // reachable managers after removal for _, m := range members { if m.RaftID == id { continue } // Local node from where the remove is issued if m.RaftID == n.Config.ID { nreachable++ continue } if n.transport.Active(m.RaftID) { nreachable++ } } nquorum := (len(members)-1)/2 + 1 return nreachable >= nquorum }
[ "func", "(", "n", "*", "Node", ")", "CanRemoveMember", "(", "id", "uint64", ")", "bool", "{", "members", ":=", "n", ".", "cluster", ".", "Members", "(", ")", "\n", "nreachable", ":=", "0", "\n", "for", "_", ",", "m", ":=", "range", "members", "{", ...
// CanRemoveMember checks if a member can be removed from // the context of the current node.
[ "CanRemoveMember", "checks", "if", "a", "member", "can", "be", "removed", "from", "the", "context", "of", "the", "current", "node", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1164-L1187
train
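The quorum arithmetic above is easier to trust with numbers: after removing one member from a cluster of len(members) managers, a majority of the remaining len(members)-1 must still be reachable. A worked example:

package main

import "fmt"

// quorumAfterRemoval mirrors the nquorum line above: a majority of the
// members that remain once one is removed.
func quorumAfterRemoval(members int) int {
	return (members-1)/2 + 1
}

func main() {
	for _, n := range []int{3, 5, 7} {
		fmt.Printf("cluster of %d: need %d reachable managers after removing one\n",
			n, quorumAfterRemoval(n))
	}
	// cluster of 3: need 2 reachable managers after removing one
	// cluster of 5: need 3 reachable managers after removing one
	// cluster of 7: need 4 reachable managers after removing one
}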
docker/swarmkit
manager/state/raft/raft.go
TransferLeadership
func (n *Node) TransferLeadership(ctx context.Context) error { ctx, cancelTransfer := context.WithTimeout(ctx, n.reqTimeout()) defer cancelTransfer() n.stopMu.RLock() defer n.stopMu.RUnlock() if !n.IsMember() { return ErrNoRaftMember } if !n.isLeader() { return ErrLostLeadership } transferee, err := n.transport.LongestActive() if err != nil { return errors.Wrap(err, "failed to get longest-active member") } start := time.Now() n.raftNode.TransferLeadership(ctx, n.Config.ID, transferee) ticker := time.NewTicker(n.opts.TickInterval / 10) defer ticker.Stop() var leader uint64 for { leader = n.leader() if leader != raft.None && leader != n.Config.ID { break } select { case <-ctx.Done(): return ctx.Err() case <-ticker.C: } } log.G(ctx).Infof("raft: transfer leadership %x -> %x finished in %v", n.Config.ID, leader, time.Since(start)) return nil }
go
func (n *Node) TransferLeadership(ctx context.Context) error { ctx, cancelTransfer := context.WithTimeout(ctx, n.reqTimeout()) defer cancelTransfer() n.stopMu.RLock() defer n.stopMu.RUnlock() if !n.IsMember() { return ErrNoRaftMember } if !n.isLeader() { return ErrLostLeadership } transferee, err := n.transport.LongestActive() if err != nil { return errors.Wrap(err, "failed to get longest-active member") } start := time.Now() n.raftNode.TransferLeadership(ctx, n.Config.ID, transferee) ticker := time.NewTicker(n.opts.TickInterval / 10) defer ticker.Stop() var leader uint64 for { leader = n.leader() if leader != raft.None && leader != n.Config.ID { break } select { case <-ctx.Done(): return ctx.Err() case <-ticker.C: } } log.G(ctx).Infof("raft: transfer leadership %x -> %x finished in %v", n.Config.ID, leader, time.Since(start)) return nil }
[ "func", "(", "n", "*", "Node", ")", "TransferLeadership", "(", "ctx", "context", ".", "Context", ")", "error", "{", "ctx", ",", "cancelTransfer", ":=", "context", ".", "WithTimeout", "(", "ctx", ",", "n", ".", "reqTimeout", "(", ")", ")", "\n", "defer"...
// TransferLeadership attempts to transfer leadership to a different node, // and waits for the transfer to happen.
[ "TransferLeadership", "attempts", "to", "transfer", "leadership", "to", "a", "different", "node", "and", "wait", "for", "the", "transfer", "to", "happen", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1219-L1256
train
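The wait at the end of TransferLeadership is a generic poll-until-condition-or-context-done loop driven by a ticker. Extracted into a standalone helper for illustration (names and intervals are not swarmkit's):

package main

import (
	"context"
	"fmt"
	"time"
)

// pollUntil re-checks cond on every tick until it holds or ctx ends.
func pollUntil(ctx context.Context, interval time.Duration, cond func() bool) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for !cond() {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	flips := time.Now().Add(300 * time.Millisecond) // condition becomes true here
	err := pollUntil(ctx, 50*time.Millisecond, func() bool {
		return time.Now().After(flips)
	})
	fmt.Println("done:", err)
}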
docker/swarmkit
manager/state/raft/raft.go
RemoveMember
func (n *Node) RemoveMember(ctx context.Context, id uint64) error { ctx, cancel := n.WithContext(ctx) defer cancel() return n.removeMember(ctx, id) }
go
func (n *Node) RemoveMember(ctx context.Context, id uint64) error { ctx, cancel := n.WithContext(ctx) defer cancel() return n.removeMember(ctx, id) }
[ "func", "(", "n", "*", "Node", ")", "RemoveMember", "(", "ctx", "context", ".", "Context", ",", "id", "uint64", ")", "error", "{", "ctx", ",", "cancel", ":=", "n", ".", "WithContext", "(", "ctx", ")", "\n", "defer", "cancel", "(", ")", "\n", "retur...
// RemoveMember submits a configuration change to remove a member from the raft cluster // after checking that the operation would not result in a loss of quorum.
[ "RemoveMember", "submits", "a", "configuration", "change", "to", "remove", "a", "member", "from", "the", "raft", "cluster", "after", "checking", "if", "the", "operation", "would", "not", "result", "in", "a", "loss", "of", "quorum", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1260-L1264
train
docker/swarmkit
manager/state/raft/raft.go
processRaftMessageLogger
func (n *Node) processRaftMessageLogger(ctx context.Context, msg *api.ProcessRaftMessageRequest) *logrus.Entry { fields := logrus.Fields{ "method": "(*Node).ProcessRaftMessage", } if n.IsMember() { fields["raft_id"] = fmt.Sprintf("%x", n.Config.ID) } if msg != nil && msg.Message != nil { fields["from"] = fmt.Sprintf("%x", msg.Message.From) } return log.G(ctx).WithFields(fields) }
go
func (n *Node) processRaftMessageLogger(ctx context.Context, msg *api.ProcessRaftMessageRequest) *logrus.Entry { fields := logrus.Fields{ "method": "(*Node).ProcessRaftMessage", } if n.IsMember() { fields["raft_id"] = fmt.Sprintf("%x", n.Config.ID) } if msg != nil && msg.Message != nil { fields["from"] = fmt.Sprintf("%x", msg.Message.From) } return log.G(ctx).WithFields(fields) }
[ "func", "(", "n", "*", "Node", ")", "processRaftMessageLogger", "(", "ctx", "context", ".", "Context", ",", "msg", "*", "api", ".", "ProcessRaftMessageRequest", ")", "*", "logrus", ".", "Entry", "{", "fields", ":=", "logrus", ".", "Fields", "{", "\"method\...
// processRaftMessageLogger is used to lazily create a logger for // ProcessRaftMessage. Usually nothing will be logged, so it is useful to avoid // formatting strings and allocating a logger when it won't be used.
[ "processRaftMessageLogger", "is", "used", "to", "lazily", "create", "a", "logger", "for", "ProcessRaftMessage", ".", "Usually", "nothing", "will", "be", "logged", "so", "it", "is", "useful", "to", "avoid", "formatting", "strings", "and", "allocating", "a", "logg...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1269-L1283
train
docker/swarmkit
manager/state/raft/raft.go
StreamRaftMessage
func (n *Node) StreamRaftMessage(stream api.Raft_StreamRaftMessageServer) error { // recvdMsg is the current message received from the stream. // assembledMessage is where the data from recvdMsg is appended to. var recvdMsg, assembledMessage *api.StreamRaftMessageRequest var err error // First message index. var raftMsgIndex uint64 for { recvdMsg, err = stream.Recv() if err == io.EOF { break } else if err != nil { log.G(stream.Context()).WithError(err).Error("error while reading from stream") return err } // Initialize the message to be used for assembling // the raft message. if assembledMessage == nil { // For all message types except raftpb.MsgSnap, // we don't expect more than a single message // on the stream so we'll get an EOF on the next Recv() // and go on to process the received message. assembledMessage = recvdMsg raftMsgIndex = recvdMsg.Message.Index continue } // Verify raft message index. if recvdMsg.Message.Index != raftMsgIndex { errMsg := fmt.Sprintf("Raft message chunk with index %d is different from the previously received raft message index %d", recvdMsg.Message.Index, raftMsgIndex) log.G(stream.Context()).Errorf(errMsg) return status.Errorf(codes.InvalidArgument, "%s", errMsg) } // Verify that multiple messages received on a stream // can only be of type raftpb.MsgSnap. if recvdMsg.Message.Type != raftpb.MsgSnap { errMsg := fmt.Sprintf("Raft message chunk is not of type %d", raftpb.MsgSnap) log.G(stream.Context()).Errorf(errMsg) return status.Errorf(codes.InvalidArgument, "%s", errMsg) } // Append the received snapshot data. assembledMessage.Message.Snapshot.Data = append(assembledMessage.Message.Snapshot.Data, recvdMsg.Message.Snapshot.Data...) } // We should have the complete snapshot. Verify and process. if err == io.EOF { _, err = n.ProcessRaftMessage(stream.Context(), &api.ProcessRaftMessageRequest{Message: assembledMessage.Message}) if err == nil { // Translate the response of ProcessRaftMessage() from // ProcessRaftMessageResponse to StreamRaftMessageResponse if needed. return stream.SendAndClose(&api.StreamRaftMessageResponse{}) } } return err }
go
func (n *Node) StreamRaftMessage(stream api.Raft_StreamRaftMessageServer) error { // recvdMsg is the current message received from the stream. // assembledMessage is where the data from recvdMsg is appended to. var recvdMsg, assembledMessage *api.StreamRaftMessageRequest var err error // First message index. var raftMsgIndex uint64 for { recvdMsg, err = stream.Recv() if err == io.EOF { break } else if err != nil { log.G(stream.Context()).WithError(err).Error("error while reading from stream") return err } // Initialize the message to be used for assembling // the raft message. if assembledMessage == nil { // For all message types except raftpb.MsgSnap, // we don't expect more than a single message // on the stream so we'll get an EOF on the next Recv() // and go on to process the received message. assembledMessage = recvdMsg raftMsgIndex = recvdMsg.Message.Index continue } // Verify raft message index. if recvdMsg.Message.Index != raftMsgIndex { errMsg := fmt.Sprintf("Raft message chunk with index %d is different from the previously received raft message index %d", recvdMsg.Message.Index, raftMsgIndex) log.G(stream.Context()).Errorf(errMsg) return status.Errorf(codes.InvalidArgument, "%s", errMsg) } // Verify that multiple messages received on a stream // can only be of type raftpb.MsgSnap. if recvdMsg.Message.Type != raftpb.MsgSnap { errMsg := fmt.Sprintf("Raft message chunk is not of type %d", raftpb.MsgSnap) log.G(stream.Context()).Errorf(errMsg) return status.Errorf(codes.InvalidArgument, "%s", errMsg) } // Append the received snapshot data. assembledMessage.Message.Snapshot.Data = append(assembledMessage.Message.Snapshot.Data, recvdMsg.Message.Snapshot.Data...) } // We should have the complete snapshot. Verify and process. if err == io.EOF { _, err = n.ProcessRaftMessage(stream.Context(), &api.ProcessRaftMessageRequest{Message: assembledMessage.Message}) if err == nil { // Translate the response of ProcessRaftMessage() from // ProcessRaftMessageResponse to StreamRaftMessageResponse if needed. return stream.SendAndClose(&api.StreamRaftMessageResponse{}) } } return err }
[ "func", "(", "n", "*", "Node", ")", "StreamRaftMessage", "(", "stream", "api", ".", "Raft_StreamRaftMessageServer", ")", "error", "{", "var", "recvdMsg", ",", "assembledMessage", "*", "api", ".", "StreamRaftMessageRequest", "\n", "var", "err", "error", "\n", "...
// StreamRaftMessage is the server endpoint for streaming Raft messages. // It accepts a stream of raft messages to be processed on this raft member, // returning a StreamRaftMessageResponse when processing of the streamed // messages is complete. // It is called from the Raft leader, which uses it to stream messages // to this raft member. // A single stream corresponds to a single raft message, // which may be disassembled and streamed by the sender // as individual messages. Therefore, each of the messages // received by the stream will have the same raft message type and index. // Currently, only messages of type raftpb.MsgSnap can be disassembled, sent // and received on the stream.
[ "StreamRaftMessage", "is", "the", "server", "endpoint", "for", "streaming", "Raft", "messages", ".", "It", "accepts", "a", "stream", "of", "raft", "messages", "to", "be", "processed", "on", "this", "raft", "member", "returning", "a", "StreamRaftMessageResponse", ...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1327-L1389
train
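The sender-side counterpart of the StreamRaftMessage record above is implied but not included in the dataset; the sketch below (hypothetical helper and chunk size; import paths assumed as vendored at this sha) disassembles a MsgSnap payload into fixed-size pieces, each carrying the same Type and Index so the server loop can verify and reassemble them.

package raftexample

import (
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/docker/swarmkit/api"
)

// chunkSnapshotMessage is a hypothetical helper that splits a raft MsgSnap
// into per-chunk StreamRaftMessageRequests. Every chunk copies the original
// message envelope (Type, Index, To, From, ...) and carries only a slice of
// the snapshot data. chunkSize is assumed to be > 0.
func chunkSnapshotMessage(m raftpb.Message, chunkSize int) []*api.StreamRaftMessageRequest {
	var reqs []*api.StreamRaftMessageRequest
	data := m.Snapshot.Data
	// The `len(reqs) == 0` clause guarantees at least one chunk even for an
	// empty snapshot payload.
	for start := 0; start < len(data) || len(reqs) == 0; start += chunkSize {
		end := start + chunkSize
		if end > len(data) {
			end = len(data)
		}
		chunk := m // copy the envelope; Type and Index stay identical
		chunk.Snapshot.Data = data[start:end]
		reqs = append(reqs, &api.StreamRaftMessageRequest{Message: &chunk})
	}
	return reqs
}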
docker/swarmkit
manager/state/raft/raft.go
ProcessRaftMessage
func (n *Node) ProcessRaftMessage(ctx context.Context, msg *api.ProcessRaftMessageRequest) (*api.ProcessRaftMessageResponse, error) { if msg == nil || msg.Message == nil { n.processRaftMessageLogger(ctx, msg).Debug("received empty message") return &api.ProcessRaftMessageResponse{}, nil } // Don't process the message if this comes from // a node in the remove set if n.cluster.IsIDRemoved(msg.Message.From) { n.processRaftMessageLogger(ctx, msg).Debug("received message from removed member") return nil, status.Errorf(codes.NotFound, "%s", membership.ErrMemberRemoved.Error()) } ctx, cancel := n.WithContext(ctx) defer cancel() // TODO(aaronl): Address changes are temporarily disabled. // See https://github.com/docker/docker/issues/30455. // This should be reenabled in the future with additional // safeguards (perhaps storing multiple addresses per node). //if err := n.reportNewAddress(ctx, msg.Message.From); err != nil { // log.G(ctx).WithError(err).Errorf("failed to report new address of %x to transport", msg.Message.From) //} // Reject vote requests from unreachable peers if msg.Message.Type == raftpb.MsgVote { member := n.cluster.GetMember(msg.Message.From) if member == nil { n.processRaftMessageLogger(ctx, msg).Debug("received message from unknown member") return &api.ProcessRaftMessageResponse{}, nil } if err := n.transport.HealthCheck(ctx, msg.Message.From); err != nil { n.processRaftMessageLogger(ctx, msg).WithError(err).Debug("member which sent vote request failed health check") return &api.ProcessRaftMessageResponse{}, nil } } if msg.Message.Type == raftpb.MsgProp { // We don't accept forwarded proposals. Our // current architecture depends on only the leader // making proposals, so in-flight proposals can be // guaranteed not to conflict. n.processRaftMessageLogger(ctx, msg).Debug("dropped forwarded proposal") return &api.ProcessRaftMessageResponse{}, nil } // can't stop the raft node while an async RPC is in progress n.stopMu.RLock() defer n.stopMu.RUnlock() if n.IsMember() { if msg.Message.To != n.Config.ID { n.processRaftMessageLogger(ctx, msg).Errorf("received message intended for raft_id %x", msg.Message.To) return &api.ProcessRaftMessageResponse{}, nil } if err := n.raftNode.Step(ctx, *msg.Message); err != nil { n.processRaftMessageLogger(ctx, msg).WithError(err).Debug("raft Step failed") } } return &api.ProcessRaftMessageResponse{}, nil }
go
func (n *Node) ProcessRaftMessage(ctx context.Context, msg *api.ProcessRaftMessageRequest) (*api.ProcessRaftMessageResponse, error) { if msg == nil || msg.Message == nil { n.processRaftMessageLogger(ctx, msg).Debug("received empty message") return &api.ProcessRaftMessageResponse{}, nil } // Don't process the message if this comes from // a node in the remove set if n.cluster.IsIDRemoved(msg.Message.From) { n.processRaftMessageLogger(ctx, msg).Debug("received message from removed member") return nil, status.Errorf(codes.NotFound, "%s", membership.ErrMemberRemoved.Error()) } ctx, cancel := n.WithContext(ctx) defer cancel() // TODO(aaronl): Address changes are temporarily disabled. // See https://github.com/docker/docker/issues/30455. // This should be reenabled in the future with additional // safeguards (perhaps storing multiple addresses per node). //if err := n.reportNewAddress(ctx, msg.Message.From); err != nil { // log.G(ctx).WithError(err).Errorf("failed to report new address of %x to transport", msg.Message.From) //} // Reject vote requests from unreachable peers if msg.Message.Type == raftpb.MsgVote { member := n.cluster.GetMember(msg.Message.From) if member == nil { n.processRaftMessageLogger(ctx, msg).Debug("received message from unknown member") return &api.ProcessRaftMessageResponse{}, nil } if err := n.transport.HealthCheck(ctx, msg.Message.From); err != nil { n.processRaftMessageLogger(ctx, msg).WithError(err).Debug("member which sent vote request failed health check") return &api.ProcessRaftMessageResponse{}, nil } } if msg.Message.Type == raftpb.MsgProp { // We don't accept forwarded proposals. Our // current architecture depends on only the leader // making proposals, so in-flight proposals can be // guaranteed not to conflict. n.processRaftMessageLogger(ctx, msg).Debug("dropped forwarded proposal") return &api.ProcessRaftMessageResponse{}, nil } // can't stop the raft node while an async RPC is in progress n.stopMu.RLock() defer n.stopMu.RUnlock() if n.IsMember() { if msg.Message.To != n.Config.ID { n.processRaftMessageLogger(ctx, msg).Errorf("received message intended for raft_id %x", msg.Message.To) return &api.ProcessRaftMessageResponse{}, nil } if err := n.raftNode.Step(ctx, *msg.Message); err != nil { n.processRaftMessageLogger(ctx, msg).WithError(err).Debug("raft Step failed") } } return &api.ProcessRaftMessageResponse{}, nil }
[ "func", "(", "n", "*", "Node", ")", "ProcessRaftMessage", "(", "ctx", "context", ".", "Context", ",", "msg", "*", "api", ".", "ProcessRaftMessageRequest", ")", "(", "*", "api", ".", "ProcessRaftMessageResponse", ",", "error", ")", "{", "if", "msg", "==", ...
// ProcessRaftMessage calls 'Step', which advances the // raft state machine with the provided message on the // receiving node.
[ "ProcessRaftMessage", "calls", "Step", "which", "advances", "the", "raft", "state", "machine", "with", "the", "provided", "message", "on", "the", "receiving", "node", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1394-L1457
train
docker/swarmkit
manager/state/raft/raft.go
ResolveAddress
func (n *Node) ResolveAddress(ctx context.Context, msg *api.ResolveAddressRequest) (*api.ResolveAddressResponse, error) { if !n.IsMember() { return nil, ErrNoRaftMember } nodeInfo, err := ca.RemoteNode(ctx) if err != nil { return nil, err } fields := logrus.Fields{ "node.id": nodeInfo.NodeID, "method": "(*Node).ResolveAddress", "raft_id": fmt.Sprintf("%x", n.Config.ID), } if nodeInfo.ForwardedBy != nil { fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID } log.G(ctx).WithFields(fields).Debug("") member := n.cluster.GetMember(msg.RaftID) if member == nil { return nil, status.Errorf(codes.NotFound, "member %x not found", msg.RaftID) } return &api.ResolveAddressResponse{Addr: member.Addr}, nil }
go
func (n *Node) ResolveAddress(ctx context.Context, msg *api.ResolveAddressRequest) (*api.ResolveAddressResponse, error) { if !n.IsMember() { return nil, ErrNoRaftMember } nodeInfo, err := ca.RemoteNode(ctx) if err != nil { return nil, err } fields := logrus.Fields{ "node.id": nodeInfo.NodeID, "method": "(*Node).ResolveAddress", "raft_id": fmt.Sprintf("%x", n.Config.ID), } if nodeInfo.ForwardedBy != nil { fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID } log.G(ctx).WithFields(fields).Debug("") member := n.cluster.GetMember(msg.RaftID) if member == nil { return nil, status.Errorf(codes.NotFound, "member %x not found", msg.RaftID) } return &api.ResolveAddressResponse{Addr: member.Addr}, nil }
[ "func", "(", "n", "*", "Node", ")", "ResolveAddress", "(", "ctx", "context", ".", "Context", ",", "msg", "*", "api", ".", "ResolveAddressRequest", ")", "(", "*", "api", ".", "ResolveAddressResponse", ",", "error", ")", "{", "if", "!", "n", ".", "IsMemb...
// ResolveAddress returns the address for reaching a given node ID.
[ "ResolveAddress", "returns", "the", "address", "for", "reaching", "a", "given", "node", "ID", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1460-L1485
train
docker/swarmkit
manager/state/raft/raft.go
LeaderConn
func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) { cc, err := n.getLeaderConn() if err == nil { return cc, nil } if err == raftselector.ErrIsLeader { return nil, err } if atomic.LoadUint32(&n.ticksWithNoLeader) > lostQuorumTimeout { return nil, errLostQuorum } ticker := time.NewTicker(1 * time.Second) defer ticker.Stop() for { select { case <-ticker.C: cc, err := n.getLeaderConn() if err == nil { return cc, nil } if err == raftselector.ErrIsLeader { return nil, err } case <-ctx.Done(): return nil, ctx.Err() } } }
go
func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) { cc, err := n.getLeaderConn() if err == nil { return cc, nil } if err == raftselector.ErrIsLeader { return nil, err } if atomic.LoadUint32(&n.ticksWithNoLeader) > lostQuorumTimeout { return nil, errLostQuorum } ticker := time.NewTicker(1 * time.Second) defer ticker.Stop() for { select { case <-ticker.C: cc, err := n.getLeaderConn() if err == nil { return cc, nil } if err == raftselector.ErrIsLeader { return nil, err } case <-ctx.Done(): return nil, ctx.Err() } } }
[ "func", "(", "n", "*", "Node", ")", "LeaderConn", "(", "ctx", "context", ".", "Context", ")", "(", "*", "grpc", ".", "ClientConn", ",", "error", ")", "{", "cc", ",", "err", ":=", "n", ".", "getLeaderConn", "(", ")", "\n", "if", "err", "==", "nil"...
// LeaderConn returns the current connection to the cluster leader, or raftselector.ErrIsLeader // if the current machine is the leader.
[ "LeaderConn", "returns", "the", "current", "connection", "to", "the", "cluster", "leader", "or", "raftselector", ".", "ErrIsLeader", "if", "the", "current", "machine", "is", "the", "leader", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1505-L1533
train
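A caller-side sketch for the LeaderConn record above (hypothetical helper; the raftselector import path is assumed from this repo): resolve a leader connection under a deadline and fall back to local handling when this node is itself the leader.

package raftexample

import (
	"context"
	"time"

	"github.com/docker/swarmkit/manager/raftselector"
	"github.com/docker/swarmkit/manager/state/raft"
	"google.golang.org/grpc"
)

// leaderConnOrLocal is a hypothetical helper around LeaderConn. It returns
// (nil, nil) when the local node is the leader, signalling the caller to
// serve the request locally instead of forwarding it.
func leaderConnOrLocal(n *raft.Node) (*grpc.ClientConn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	cc, err := n.LeaderConn(ctx)
	if err == raftselector.ErrIsLeader {
		return nil, nil // handle the request locally
	}
	return cc, err
}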
docker/swarmkit
manager/state/raft/raft.go
registerNode
func (n *Node) registerNode(node *api.RaftMember) error { if n.cluster.IsIDRemoved(node.RaftID) { return nil } member := &membership.Member{} existingMember := n.cluster.GetMember(node.RaftID) if existingMember != nil { // Member already exists // If the address is different from what we thought it was, // update it. This can happen if we just joined a cluster // and are adding ourself now with the remotely-reachable // address. if existingMember.Addr != node.Addr { if node.RaftID != n.Config.ID { if err := n.transport.UpdatePeer(node.RaftID, node.Addr); err != nil { return err } } member.RaftMember = node n.cluster.AddMember(member) } return nil } // Avoid opening a connection to the local node if node.RaftID != n.Config.ID { if err := n.transport.AddPeer(node.RaftID, node.Addr); err != nil { return err } } member.RaftMember = node err := n.cluster.AddMember(member) if err != nil { if rerr := n.transport.RemovePeer(node.RaftID); rerr != nil { return errors.Wrapf(rerr, "failed to remove peer after error %v", err) } return err } return nil }
go
func (n *Node) registerNode(node *api.RaftMember) error { if n.cluster.IsIDRemoved(node.RaftID) { return nil } member := &membership.Member{} existingMember := n.cluster.GetMember(node.RaftID) if existingMember != nil { // Member already exists // If the address is different from what we thought it was, // update it. This can happen if we just joined a cluster // and are adding ourself now with the remotely-reachable // address. if existingMember.Addr != node.Addr { if node.RaftID != n.Config.ID { if err := n.transport.UpdatePeer(node.RaftID, node.Addr); err != nil { return err } } member.RaftMember = node n.cluster.AddMember(member) } return nil } // Avoid opening a connection to the local node if node.RaftID != n.Config.ID { if err := n.transport.AddPeer(node.RaftID, node.Addr); err != nil { return err } } member.RaftMember = node err := n.cluster.AddMember(member) if err != nil { if rerr := n.transport.RemovePeer(node.RaftID); rerr != nil { return errors.Wrapf(rerr, "failed to remove peer after error %v", err) } return err } return nil }
[ "func", "(", "n", "*", "Node", ")", "registerNode", "(", "node", "*", "api", ".", "RaftMember", ")", "error", "{", "if", "n", ".", "cluster", ".", "IsIDRemoved", "(", "node", ".", "RaftID", ")", "{", "return", "nil", "\n", "}", "\n", "member", ":="...
// registerNode registers a new node on the cluster memberlist
[ "registerNode", "registers", "a", "new", "node", "on", "the", "cluster", "memberlist" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1536-L1581
train
docker/swarmkit
manager/state/raft/raft.go
GetVersion
func (n *Node) GetVersion() *api.Version { n.stopMu.RLock() defer n.stopMu.RUnlock() if !n.IsMember() { return nil } status := n.Status() return &api.Version{Index: status.Commit} }
go
func (n *Node) GetVersion() *api.Version { n.stopMu.RLock() defer n.stopMu.RUnlock() if !n.IsMember() { return nil } status := n.Status() return &api.Version{Index: status.Commit} }
[ "func", "(", "n", "*", "Node", ")", "GetVersion", "(", ")", "*", "api", ".", "Version", "{", "n", ".", "stopMu", ".", "RLock", "(", ")", "\n", "defer", "n", ".", "stopMu", ".", "RUnlock", "(", ")", "\n", "if", "!", "n", ".", "IsMember", "(", ...
// GetVersion returns the sequence information for the current raft round.
[ "GetVersion", "returns", "the", "sequence", "information", "for", "the", "current", "raft", "round", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1595-L1605
train
docker/swarmkit
manager/state/raft/raft.go
ChangesBetween
func (n *Node) ChangesBetween(from, to api.Version) ([]state.Change, error) { n.stopMu.RLock() defer n.stopMu.RUnlock() if from.Index > to.Index { return nil, errors.New("versions are out of order") } if !n.IsMember() { return nil, ErrNoRaftMember } // never returns error last, _ := n.raftStore.LastIndex() if to.Index > last { return nil, errors.New("last version is out of bounds") } pbs, err := n.raftStore.Entries(from.Index+1, to.Index+1, math.MaxUint64) if err != nil { return nil, err } var changes []state.Change for _, pb := range pbs { if pb.Type != raftpb.EntryNormal || pb.Data == nil { continue } r := &api.InternalRaftRequest{} err := proto.Unmarshal(pb.Data, r) if err != nil { return nil, errors.Wrap(err, "error unmarshalling internal raft request") } if r.Action != nil { changes = append(changes, state.Change{StoreActions: r.Action, Version: api.Version{Index: pb.Index}}) } } return changes, nil }
go
func (n *Node) ChangesBetween(from, to api.Version) ([]state.Change, error) { n.stopMu.RLock() defer n.stopMu.RUnlock() if from.Index > to.Index { return nil, errors.New("versions are out of order") } if !n.IsMember() { return nil, ErrNoRaftMember } // never returns error last, _ := n.raftStore.LastIndex() if to.Index > last { return nil, errors.New("last version is out of bounds") } pbs, err := n.raftStore.Entries(from.Index+1, to.Index+1, math.MaxUint64) if err != nil { return nil, err } var changes []state.Change for _, pb := range pbs { if pb.Type != raftpb.EntryNormal || pb.Data == nil { continue } r := &api.InternalRaftRequest{} err := proto.Unmarshal(pb.Data, r) if err != nil { return nil, errors.Wrap(err, "error unmarshalling internal raft request") } if r.Action != nil { changes = append(changes, state.Change{StoreActions: r.Action, Version: api.Version{Index: pb.Index}}) } } return changes, nil }
[ "func", "(", "n", "*", "Node", ")", "ChangesBetween", "(", "from", ",", "to", "api", ".", "Version", ")", "(", "[", "]", "state", ".", "Change", ",", "error", ")", "{", "n", ".", "stopMu", ".", "RLock", "(", ")", "\n", "defer", "n", ".", "stopM...
// ChangesBetween returns the changes starting after "from", up to and // including "to". If these changes are not available because the log // has been compacted, an error will be returned.
[ "ChangesBetween", "returns", "the", "changes", "starting", "after", "from", "up", "to", "and", "including", "to", ".", "If", "these", "changes", "are", "not", "available", "because", "the", "log", "has", "been", "compacted", "an", "error", "will", "be", "ret...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1610-L1651
train
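A usage sketch for the ChangesBetween record above (hypothetical helper; the StoreActions field type is assumed to be []api.StoreAction as in this repo): replay the store actions committed between two observed versions, e.g. to catch a watcher up after reconnecting.

package raftexample

import (
	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/raft"
)

// replayChanges is a hypothetical helper that fetches the changes between
// two versions and hands each batch of store actions to a caller-supplied
// function in commit order.
func replayChanges(n *raft.Node, from, to api.Version, apply func(actions []api.StoreAction) error) error {
	changes, err := n.ChangesBetween(from, to)
	if err != nil {
		return err // e.g. versions out of order, or log already compacted
	}
	for _, c := range changes {
		if err := apply(c.StoreActions); err != nil {
			return err
		}
	}
	return nil
}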
docker/swarmkit
manager/state/raft/raft.go
SubscribePeers
func (n *Node) SubscribePeers() (q chan events.Event, cancel func()) { return n.cluster.PeersBroadcast.Watch() }
go
func (n *Node) SubscribePeers() (q chan events.Event, cancel func()) { return n.cluster.PeersBroadcast.Watch() }
[ "func", "(", "n", "*", "Node", ")", "SubscribePeers", "(", ")", "(", "q", "chan", "events", ".", "Event", ",", "cancel", "func", "(", ")", ")", "{", "return", "n", ".", "cluster", ".", "PeersBroadcast", ".", "Watch", "(", ")", "\n", "}" ]
// SubscribePeers subscribes to peer updates in the cluster. It always sends the full // list of peers.
[ "SubscribePeers", "subscribes", "to", "peer", "updates", "in", "the", "cluster", ".", "It", "always", "sends", "the", "full", "list", "of", "peers", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1655-L1657
train
docker/swarmkit
manager/state/raft/raft.go
GetMemberlist
func (n *Node) GetMemberlist() map[uint64]*api.RaftMember { memberlist := make(map[uint64]*api.RaftMember) members := n.cluster.Members() leaderID, err := n.Leader() if err != nil { leaderID = raft.None } for id, member := range members { reachability := api.RaftMemberStatus_REACHABLE leader := false if member.RaftID != n.Config.ID { if !n.transport.Active(member.RaftID) { reachability = api.RaftMemberStatus_UNREACHABLE } } if member.RaftID == leaderID { leader = true } memberlist[id] = &api.RaftMember{ RaftID: member.RaftID, NodeID: member.NodeID, Addr: member.Addr, Status: api.RaftMemberStatus{ Leader: leader, Reachability: reachability, }, } } return memberlist }
go
func (n *Node) GetMemberlist() map[uint64]*api.RaftMember { memberlist := make(map[uint64]*api.RaftMember) members := n.cluster.Members() leaderID, err := n.Leader() if err != nil { leaderID = raft.None } for id, member := range members { reachability := api.RaftMemberStatus_REACHABLE leader := false if member.RaftID != n.Config.ID { if !n.transport.Active(member.RaftID) { reachability = api.RaftMemberStatus_UNREACHABLE } } if member.RaftID == leaderID { leader = true } memberlist[id] = &api.RaftMember{ RaftID: member.RaftID, NodeID: member.NodeID, Addr: member.Addr, Status: api.RaftMemberStatus{ Leader: leader, Reachability: reachability, }, } } return memberlist }
[ "func", "(", "n", "*", "Node", ")", "GetMemberlist", "(", ")", "map", "[", "uint64", "]", "*", "api", ".", "RaftMember", "{", "memberlist", ":=", "make", "(", "map", "[", "uint64", "]", "*", "api", ".", "RaftMember", ")", "\n", "members", ":=", "n"...
// GetMemberlist returns the current list of raft members in the cluster.
[ "GetMemberlist", "returns", "the", "current", "list", "of", "raft", "members", "in", "the", "cluster", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1660-L1694
train
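A read-side sketch for the GetMemberlist record above (hypothetical helper): summarize the member list for diagnostics, flagging unreachable members and the current leader.

package raftexample

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/raft"
)

// printMembers is a hypothetical helper that renders GetMemberlist output.
func printMembers(n *raft.Node) {
	for id, m := range n.GetMemberlist() {
		state := "reachable"
		if m.Status.Reachability == api.RaftMemberStatus_UNREACHABLE {
			state = "UNREACHABLE"
		}
		leader := ""
		if m.Status.Leader {
			leader = " (leader)"
		}
		fmt.Printf("raft %x node=%s addr=%s %s%s\n", id, m.NodeID, m.Addr, state, leader)
	}
}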
docker/swarmkit
manager/state/raft/raft.go
GetMemberByNodeID
func (n *Node) GetMemberByNodeID(nodeID string) *membership.Member { members := n.cluster.Members() for _, member := range members { if member.NodeID == nodeID { return member } } return nil }
go
func (n *Node) GetMemberByNodeID(nodeID string) *membership.Member { members := n.cluster.Members() for _, member := range members { if member.NodeID == nodeID { return member } } return nil }
[ "func", "(", "n", "*", "Node", ")", "GetMemberByNodeID", "(", "nodeID", "string", ")", "*", "membership", ".", "Member", "{", "members", ":=", "n", ".", "cluster", ".", "Members", "(", ")", "\n", "for", "_", ",", "member", ":=", "range", "members", "...
// GetMemberByNodeID returns member information based // on its generic Node ID.
[ "GetMemberByNodeID", "returns", "member", "information", "based", "on", "its", "generic", "Node", "ID", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1703-L1711
train
docker/swarmkit
manager/state/raft/raft.go
GetNodeIDByRaftID
func (n *Node) GetNodeIDByRaftID(raftID uint64) (string, error) { if member, ok := n.cluster.Members()[raftID]; ok { return member.NodeID, nil } // this is the only possible error value that should be returned; the // manager code depends on this. if you need to add more errors later, make // sure that you update the callers of this method accordingly return "", ErrMemberUnknown }
go
func (n *Node) GetNodeIDByRaftID(raftID uint64) (string, error) { if member, ok := n.cluster.Members()[raftID]; ok { return member.NodeID, nil } // this is the only possible error value that should be returned; the // manager code depends on this. if you need to add more errors later, make // sure that you update the callers of this method accordingly return "", ErrMemberUnknown }
[ "func", "(", "n", "*", "Node", ")", "GetNodeIDByRaftID", "(", "raftID", "uint64", ")", "(", "string", ",", "error", ")", "{", "if", "member", ",", "ok", ":=", "n", ".", "cluster", ".", "Members", "(", ")", "[", "raftID", "]", ";", "ok", "{", "ret...
// GetNodeIDByRaftID returns the generic Node ID of a member given its raft ID. // It returns ErrMemberUnknown if the raft ID is unknown.
[ "GetNodeIDByRaftID", "returns", "the", "generic", "Node", "ID", "of", "a", "member", "given", "its", "raft", "ID", ".", "It", "returns", "ErrMemberUnknown", "if", "the", "raft", "ID", "is", "unknown", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1715-L1723
train
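Since ErrMemberUnknown is the only error GetNodeIDByRaftID can return, callers can branch on it directly; a short sketch (hypothetical helper):

package raftexample

import (
	"github.com/docker/swarmkit/manager/state/raft"
)

// nodeIDOrPlaceholder is a hypothetical helper: an unknown raft ID (e.g. a
// member that has already been removed) degrades to a placeholder string
// instead of failing the caller.
func nodeIDOrPlaceholder(n *raft.Node, raftID uint64) string {
	nodeID, err := n.GetNodeIDByRaftID(raftID)
	if err == raft.ErrMemberUnknown {
		return "<unknown>"
	}
	return nodeID
}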
docker/swarmkit
manager/state/raft/raft.go
saveToStorage
func (n *Node) saveToStorage( ctx context.Context, raftConfig *api.RaftConfig, hardState raftpb.HardState, entries []raftpb.Entry, snapshot raftpb.Snapshot, ) (err error) { if !raft.IsEmptySnap(snapshot) { if err := n.raftLogger.SaveSnapshot(snapshot); err != nil { return errors.Wrap(err, "failed to save snapshot") } if err := n.raftLogger.GC(snapshot.Metadata.Index, snapshot.Metadata.Term, raftConfig.KeepOldSnapshots); err != nil { log.G(ctx).WithError(err).Error("unable to clean old snapshots and WALs") } if err = n.raftStore.ApplySnapshot(snapshot); err != nil { return errors.Wrap(err, "failed to apply snapshot on raft node") } } if err := n.raftLogger.SaveEntries(hardState, entries); err != nil { return errors.Wrap(err, "failed to save raft log entries") } if len(entries) > 0 { lastIndex := entries[len(entries)-1].Index if lastIndex > n.writtenWALIndex { n.writtenWALIndex = lastIndex } } if err = n.raftStore.Append(entries); err != nil { return errors.Wrap(err, "failed to append raft log entries") } return nil }
go
func (n *Node) saveToStorage( ctx context.Context, raftConfig *api.RaftConfig, hardState raftpb.HardState, entries []raftpb.Entry, snapshot raftpb.Snapshot, ) (err error) { if !raft.IsEmptySnap(snapshot) { if err := n.raftLogger.SaveSnapshot(snapshot); err != nil { return errors.Wrap(err, "failed to save snapshot") } if err := n.raftLogger.GC(snapshot.Metadata.Index, snapshot.Metadata.Term, raftConfig.KeepOldSnapshots); err != nil { log.G(ctx).WithError(err).Error("unable to clean old snapshots and WALs") } if err = n.raftStore.ApplySnapshot(snapshot); err != nil { return errors.Wrap(err, "failed to apply snapshot on raft node") } } if err := n.raftLogger.SaveEntries(hardState, entries); err != nil { return errors.Wrap(err, "failed to save raft log entries") } if len(entries) > 0 { lastIndex := entries[len(entries)-1].Index if lastIndex > n.writtenWALIndex { n.writtenWALIndex = lastIndex } } if err = n.raftStore.Append(entries); err != nil { return errors.Wrap(err, "failed to append raft log entries") } return nil }
[ "func", "(", "n", "*", "Node", ")", "saveToStorage", "(", "ctx", "context", ".", "Context", ",", "raftConfig", "*", "api", ".", "RaftConfig", ",", "hardState", "raftpb", ".", "HardState", ",", "entries", "[", "]", "raftpb", ".", "Entry", ",", "snapshot",...
// saveToStorage saves the hard state, log entries, and (when present) the snapshot to the raft store.
[ "saveToStorage", "saves", "the", "hard", "state", "log", "entries", "and", "when", "present", "the", "snapshot", "to", "the", "raft", "store", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1732-L1768
train
docker/swarmkit
manager/state/raft/raft.go
configure
func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error { cc.ID = n.reqIDGen.Next() ctx, cancel := context.WithCancel(ctx) ch := n.wait.register(cc.ID, nil, cancel) if err := n.raftNode.ProposeConfChange(ctx, cc); err != nil { n.wait.cancel(cc.ID) return err } select { case x := <-ch: if err, ok := x.(error); ok { return err } if x != nil { log.G(ctx).Panic("raft: configuration change error, return type should always be error") } return nil case <-ctx.Done(): n.wait.cancel(cc.ID) return ctx.Err() } }
go
func (n *Node) configure(ctx context.Context, cc raftpb.ConfChange) error { cc.ID = n.reqIDGen.Next() ctx, cancel := context.WithCancel(ctx) ch := n.wait.register(cc.ID, nil, cancel) if err := n.raftNode.ProposeConfChange(ctx, cc); err != nil { n.wait.cancel(cc.ID) return err } select { case x := <-ch: if err, ok := x.(error); ok { return err } if x != nil { log.G(ctx).Panic("raft: configuration change error, return type should always be error") } return nil case <-ctx.Done(): n.wait.cancel(cc.ID) return ctx.Err() } }
[ "func", "(", "n", "*", "Node", ")", "configure", "(", "ctx", "context", ".", "Context", ",", "cc", "raftpb", ".", "ConfChange", ")", "error", "{", "cc", ".", "ID", "=", "n", ".", "reqIDGen", ".", "Next", "(", ")", "\n", "ctx", ",", "cancel", ":="...
// configure sends a configuration change through consensus and // then waits for it to be applied to the server. It will block // until the change is performed or there is an error.
[ "configure", "sends", "a", "configuration", "change", "through", "consensus", "and", "then", "waits", "for", "it", "to", "be", "applied", "to", "the", "server", ".", "It", "will", "block", "until", "the", "change", "is", "performed", "or", "there", "is", "...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1857-L1881
train
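configure is unexported, but the register-propose-wait pattern it uses is general; below is a standalone sketch of the same shape (all names hypothetical): reserve a channel under a unique ID before proposing, have the applier trigger it when the proposal commits, and let the proposer block on the channel or its context.

package raftexample

import (
	"context"
	"sync"
)

// waitRegistry is a hypothetical miniature of the "wait" mechanism that
// configure relies on.
type waitRegistry struct {
	mu    sync.Mutex
	slots map[uint64]chan error
}

func newWaitRegistry() *waitRegistry {
	return &waitRegistry{slots: make(map[uint64]chan error)}
}

// register reserves a buffered slot for a proposal ID.
func (w *waitRegistry) register(id uint64) <-chan error {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan error, 1)
	w.slots[id] = ch
	return ch
}

// trigger is called by the applier once the proposal with this ID commits.
func (w *waitRegistry) trigger(id uint64, err error) {
	w.mu.Lock()
	ch, ok := w.slots[id]
	delete(w.slots, id)
	w.mu.Unlock()
	if ok {
		ch <- err // buffered, so this never blocks
	}
}

// cancel abandons a slot, e.g. when the proposal failed to submit.
func (w *waitRegistry) cancel(id uint64) {
	w.mu.Lock()
	delete(w.slots, id)
	w.mu.Unlock()
}

// proposeAndWait shows the proposer side: propose, then block until the
// applier triggers the slot or the context expires.
func proposeAndWait(ctx context.Context, w *waitRegistry, id uint64, propose func(context.Context) error) error {
	ch := w.register(id)
	if err := propose(ctx); err != nil {
		w.cancel(id)
		return err
	}
	select {
	case err := <-ch:
		return err
	case <-ctx.Done():
		w.cancel(id)
		return ctx.Err()
	}
}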
docker/swarmkit
manager/state/raft/raft.go
applyAddNode
func (n *Node) applyAddNode(cc raftpb.ConfChange) error { member := &api.RaftMember{} err := proto.Unmarshal(cc.Context, member) if err != nil { return err } // ID must be non zero if member.RaftID == 0 { return nil } return n.registerNode(member) }
go
func (n *Node) applyAddNode(cc raftpb.ConfChange) error { member := &api.RaftMember{} err := proto.Unmarshal(cc.Context, member) if err != nil { return err } // ID must be non zero if member.RaftID == 0 { return nil } return n.registerNode(member) }
[ "func", "(", "n", "*", "Node", ")", "applyAddNode", "(", "cc", "raftpb", ".", "ConfChange", ")", "error", "{", "member", ":=", "&", "api", ".", "RaftMember", "{", "}", "\n", "err", ":=", "proto", ".", "Unmarshal", "(", "cc", ".", "Context", ",", "m...
// applyAddNode is called when we receive a ConfChange // from a member in the raft cluster; this adds a new // node to the existing raft cluster
[ "applyAddNode", "is", "called", "when", "we", "receive", "a", "ConfChange", "from", "a", "member", "in", "the", "raft", "cluster", "this", "adds", "a", "new", "node", "to", "the", "existing", "raft", "cluster" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1967-L1980
train
docker/swarmkit
manager/state/raft/raft.go
applyUpdateNode
func (n *Node) applyUpdateNode(ctx context.Context, cc raftpb.ConfChange) error { newMember := &api.RaftMember{} err := proto.Unmarshal(cc.Context, newMember) if err != nil { return err } if newMember.RaftID == n.Config.ID { return nil } if err := n.transport.UpdatePeer(newMember.RaftID, newMember.Addr); err != nil { return err } return n.cluster.UpdateMember(newMember.RaftID, newMember) }
go
func (n *Node) applyUpdateNode(ctx context.Context, cc raftpb.ConfChange) error { newMember := &api.RaftMember{} err := proto.Unmarshal(cc.Context, newMember) if err != nil { return err } if newMember.RaftID == n.Config.ID { return nil } if err := n.transport.UpdatePeer(newMember.RaftID, newMember.Addr); err != nil { return err } return n.cluster.UpdateMember(newMember.RaftID, newMember) }
[ "func", "(", "n", "*", "Node", ")", "applyUpdateNode", "(", "ctx", "context", ".", "Context", ",", "cc", "raftpb", ".", "ConfChange", ")", "error", "{", "newMember", ":=", "&", "api", ".", "RaftMember", "{", "}", "\n", "err", ":=", "proto", ".", "Unm...
// applyUpdateNode is called when we receive a ConfChange from a member in the // raft cluster which updates the address of an existing node.
[ "applyUpdateNode", "is", "called", "when", "we", "receive", "a", "ConfChange", "from", "a", "member", "in", "the", "raft", "cluster", "which", "updates", "the", "address", "of", "an", "existing", "node", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L1984-L1998
train
docker/swarmkit
manager/state/raft/raft.go
applyRemoveNode
func (n *Node) applyRemoveNode(ctx context.Context, cc raftpb.ConfChange) (err error) { // If the node from where the remove is issued is // a follower and the leader steps down, Campaign // to be the leader. if cc.NodeID == n.leader() && !n.isLeader() { if err = n.raftNode.Campaign(ctx); err != nil { return err } } if cc.NodeID == n.Config.ID { // wait for the commit ack to be sent before closing connection n.asyncTasks.Wait() n.NodeRemoved() } else if err := n.transport.RemovePeer(cc.NodeID); err != nil { return err } return n.cluster.RemoveMember(cc.NodeID) }
go
func (n *Node) applyRemoveNode(ctx context.Context, cc raftpb.ConfChange) (err error) { // If the node from where the remove is issued is // a follower and the leader steps down, Campaign // to be the leader. if cc.NodeID == n.leader() && !n.isLeader() { if err = n.raftNode.Campaign(ctx); err != nil { return err } } if cc.NodeID == n.Config.ID { // wait for the commit ack to be sent before closing connection n.asyncTasks.Wait() n.NodeRemoved() } else if err := n.transport.RemovePeer(cc.NodeID); err != nil { return err } return n.cluster.RemoveMember(cc.NodeID) }
[ "func", "(", "n", "*", "Node", ")", "applyRemoveNode", "(", "ctx", "context", ".", "Context", ",", "cc", "raftpb", ".", "ConfChange", ")", "(", "err", "error", ")", "{", "if", "cc", ".", "NodeID", "==", "n", ".", "leader", "(", ")", "&&", "!", "n...
// applyRemoveNode is called when we receive a ConfChange // from a member in the raft cluster; this removes a node // from the existing raft cluster
[ "applyRemoveNode", "is", "called", "when", "we", "receive", "a", "ConfChange", "from", "a", "member", "in", "the", "raft", "cluster", "this", "removes", "a", "node", "from", "the", "existing", "raft", "cluster" ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L2003-L2024
train
docker/swarmkit
manager/state/raft/raft.go
SubscribeLeadership
func (n *Node) SubscribeLeadership() (q chan events.Event, cancel func()) { return n.leadershipBroadcast.Watch() }
go
func (n *Node) SubscribeLeadership() (q chan events.Event, cancel func()) { return n.leadershipBroadcast.Watch() }
[ "func", "(", "n", "*", "Node", ")", "SubscribeLeadership", "(", ")", "(", "q", "chan", "events", ".", "Event", ",", "cancel", "func", "(", ")", ")", "{", "return", "n", ".", "leadershipBroadcast", ".", "Watch", "(", ")", "\n", "}" ]
// SubscribeLeadership returns a channel to which events about leadership changes // will be sent in the form of raft.LeadershipState. A cancel func is also returned; // it should be called when the listener is no longer interested in events.
[ "SubscribeLeadership", "returns", "channel", "to", "which", "events", "about", "leadership", "change", "will", "be", "sent", "in", "form", "of", "raft", ".", "LeadershipState", ".", "Also", "cancel", "func", "is", "returned", "-", "it", "should", "be", "called...
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/raft/raft.go#L2029-L2031
train
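A subscriber-side sketch for the SubscribeLeadership record above (hypothetical helper; the exported raft.LeadershipState type and raft.IsLeader value are assumed from this repo):

package raftexample

import (
	"fmt"

	"github.com/docker/swarmkit/manager/state/raft"
)

// watchLeadership is a hypothetical helper that logs leadership transitions
// until stop is closed; the cancel func returned by SubscribeLeadership is
// invoked on the way out.
func watchLeadership(n *raft.Node, stop <-chan struct{}) {
	ch, cancel := n.SubscribeLeadership()
	defer cancel()
	for {
		select {
		case ev := <-ch:
			state, ok := ev.(raft.LeadershipState)
			if !ok {
				continue
			}
			if state == raft.IsLeader {
				fmt.Println("became leader")
			} else {
				fmt.Println("became follower")
			}
		case <-stop:
			return
		}
	}
}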
docker/swarmkit
manager/constraint/constraint.go
Parse
func Parse(env []string) ([]Constraint, error) { exprs := []Constraint{} for _, e := range env { found := false // each expr is in the form of "key op value" for i, op := range operators { if !strings.Contains(e, op) { continue } // split with the op parts := strings.SplitN(e, op, 2) if len(parts) < 2 { return nil, fmt.Errorf("invalid expr: %s", e) } part0 := strings.TrimSpace(parts[0]) // validate key matched := alphaNumeric.MatchString(part0) if !matched { return nil, fmt.Errorf("key '%s' is invalid", part0) } part1 := strings.TrimSpace(parts[1]) // validate Value matched = valuePattern.MatchString(part1) if !matched { return nil, fmt.Errorf("value '%s' is invalid", part1) } // TODO(dongluochen): revisit requirements to see if globbing or regex are useful exprs = append(exprs, Constraint{key: part0, operator: i, exp: part1}) found = true break // found an op, move to next entry } if !found { return nil, fmt.Errorf("constraint expected one operator from %s", strings.Join(operators, ", ")) } } return exprs, nil }
go
func Parse(env []string) ([]Constraint, error) { exprs := []Constraint{} for _, e := range env { found := false // each expr is in the form of "key op value" for i, op := range operators { if !strings.Contains(e, op) { continue } // split with the op parts := strings.SplitN(e, op, 2) if len(parts) < 2 { return nil, fmt.Errorf("invalid expr: %s", e) } part0 := strings.TrimSpace(parts[0]) // validate key matched := alphaNumeric.MatchString(part0) if !matched { return nil, fmt.Errorf("key '%s' is invalid", part0) } part1 := strings.TrimSpace(parts[1]) // validate Value matched = valuePattern.MatchString(part1) if !matched { return nil, fmt.Errorf("value '%s' is invalid", part1) } // TODO(dongluochen): revisit requirements to see if globbing or regex are useful exprs = append(exprs, Constraint{key: part0, operator: i, exp: part1}) found = true break // found an op, move to next entry } if !found { return nil, fmt.Errorf("constraint expected one operator from %s", strings.Join(operators, ", ")) } } return exprs, nil }
[ "func", "Parse", "(", "env", "[", "]", "string", ")", "(", "[", "]", "Constraint", ",", "error", ")", "{", "exprs", ":=", "[", "]", "Constraint", "{", "}", "\n", "for", "_", ",", "e", ":=", "range", "env", "{", "found", ":=", "false", "\n", "fo...
// Parse parses a list of constraints.
[ "Parse", "parses", "a", "list", "of", "constraints", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/constraint/constraint.go#L40-L81
train
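A usage sketch for the Parse record above (the expressions are illustrative; the constraint import path is assumed from this repo):

package raftexample

import (
	"fmt"

	"github.com/docker/swarmkit/manager/constraint"
)

// parseExample feeds a couple of illustrative expressions through Parse;
// an unsupported operator or a malformed key/value yields an error.
func parseExample() ([]constraint.Constraint, error) {
	exprs := []string{
		"node.role==manager",
		"node.labels.zone!=us-east-1",
	}
	cs, err := constraint.Parse(exprs)
	if err != nil {
		return nil, fmt.Errorf("bad constraint: %v", err)
	}
	return cs, nil
}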
docker/swarmkit
manager/constraint/constraint.go
Match
func (c *Constraint) Match(whats ...string) bool { var match bool // full string match for _, what := range whats { // case insensitive compare if strings.EqualFold(c.exp, what) { match = true break } } switch c.operator { case eq: return match case noteq: return !match } return false }
go
func (c *Constraint) Match(whats ...string) bool { var match bool // full string match for _, what := range whats { // case insensitive compare if strings.EqualFold(c.exp, what) { match = true break } } switch c.operator { case eq: return match case noteq: return !match } return false }
[ "func", "(", "c", "*", "Constraint", ")", "Match", "(", "whats", "...", "string", ")", "bool", "{", "var", "match", "bool", "\n", "for", "_", ",", "what", ":=", "range", "whats", "{", "if", "strings", ".", "EqualFold", "(", "c", ".", "exp", ",", ...
// Match checks if the Constraint matches the target strings.
[ "Match", "checks", "if", "the", "Constraint", "matches", "the", "target", "strings", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/constraint/constraint.go#L84-L104
train
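And the matching side, building on the Parse sketch above (hypothetical helper; real callers would select candidate values per constraint key, as noted in the comment):

package raftexample

import (
	"github.com/docker/swarmkit/manager/constraint"
)

// allMatch is a hypothetical helper: a candidate satisfies the constraint
// set only if every parsed Constraint matches one of the supplied values.
// In practice each constraint would be matched against values chosen for
// its own key; a single shared value list keeps this sketch short.
func allMatch(cs []constraint.Constraint, values ...string) bool {
	for _, c := range cs {
		if !c.Match(values...) {
			return false
		}
	}
	return true
}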
docker/swarmkit
agent/storage.go
InitDB
func InitDB(db *bolt.DB) error { return db.Update(func(tx *bolt.Tx) error { _, err := createBucketIfNotExists(tx, bucketKeyStorageVersion, bucketKeyTasks) return err }) }
go
func InitDB(db *bolt.DB) error { return db.Update(func(tx *bolt.Tx) error { _, err := createBucketIfNotExists(tx, bucketKeyStorageVersion, bucketKeyTasks) return err }) }
[ "func", "InitDB", "(", "db", "*", "bolt", ".", "DB", ")", "error", "{", "return", "db", ".", "Update", "(", "func", "(", "tx", "*", "bolt", ".", "Tx", ")", "error", "{", "_", ",", "err", ":=", "createBucketIfNotExists", "(", "tx", ",", "bucketKeySt...
// InitDB prepares a database for writing task data. // // Proper buckets will be created if they don't already exist.
[ "InitDB", "prepares", "a", "database", "for", "writing", "task", "data", ".", "Proper", "buckets", "will", "be", "created", "if", "they", "don", "t", "already", "exist", "." ]
59163bf75df38489d4a10392265d27156dc473c5
https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/agent/storage.go#L26-L31
train
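A setup sketch for the InitDB record above (hypothetical helper; the classic github.com/boltdb/bolt import path is an assumption, since the vendored bolt package at this sha may differ): open or create the agent's task database file and make sure the expected buckets exist before use.

package raftexample

import (
	"time"

	"github.com/boltdb/bolt"
	"github.com/docker/swarmkit/agent"
)

// openTaskDB is a hypothetical helper: open (or create) the task database
// and initialize the bucket layout that the agent expects.
func openTaskDB(path string) (*bolt.DB, error) {
	db, err := bolt.Open(path, 0600, &bolt.Options{Timeout: time.Second})
	if err != nil {
		return nil, err
	}
	if err := agent.InitDB(db); err != nil {
		db.Close()
		return nil, err
	}
	return db, nil
}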