repo stringlengths 5 67 | path stringlengths 4 218 | func_name stringlengths 0 151 | original_string stringlengths 52 373k | language stringclasses 6 values | code stringlengths 52 373k | code_tokens listlengths 10 512 | docstring stringlengths 3 47.2k | docstring_tokens listlengths 3 234 | sha stringlengths 40 40 | url stringlengths 85 339 | partition stringclasses 3 values |
|---|---|---|---|---|---|---|---|---|---|---|---|
docker/swarmkit | integration/node.go | Pause | func (n *testNode) Pause(forceNewCluster bool) error {
rAddr, err := n.node.RemoteAPIAddr()
if err != nil {
rAddr = "127.0.0.1:0"
}
if err := n.stop(); err != nil {
return err
}
cfg := n.config
cfg.ListenRemoteAPI = rAddr
// If JoinAddr is set, the node will connect to the join addr and ignore any
// other remotes that are stored in the raft directory.
cfg.JoinAddr = ""
cfg.JoinToken = ""
cfg.ForceNewCluster = forceNewCluster
node, err := node.New(cfg)
if err != nil {
return err
}
n.node = node
return nil
} | go | func (n *testNode) Pause(forceNewCluster bool) error {
rAddr, err := n.node.RemoteAPIAddr()
if err != nil {
rAddr = "127.0.0.1:0"
}
if err := n.stop(); err != nil {
return err
}
cfg := n.config
cfg.ListenRemoteAPI = rAddr
// If JoinAddr is set, the node will connect to the join addr and ignore any
// other remotes that are stored in the raft directory.
cfg.JoinAddr = ""
cfg.JoinToken = ""
cfg.ForceNewCluster = forceNewCluster
node, err := node.New(cfg)
if err != nil {
return err
}
n.node = node
return nil
} | [
"func",
"(",
"n",
"*",
"testNode",
")",
"Pause",
"(",
"forceNewCluster",
"bool",
")",
"error",
"{",
"rAddr",
",",
"err",
":=",
"n",
".",
"node",
".",
"RemoteAPIAddr",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"rAddr",
"=",
"\"127.0.0.1:0\"",
"\n... | // Pause stops the node, and creates a new swarm node while keeping all the state | [
"Pause",
"stops",
"the",
"node",
"and",
"creates",
"a",
"new",
"swarm",
"node",
"while",
"keeping",
"all",
"the",
"state"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/node.go#L88-L112 | train |
docker/swarmkit | integration/node.go | Stop | func (n *testNode) Stop() error {
if err := n.stop(); err != nil {
return err
}
return os.RemoveAll(n.stateDir)
} | go | func (n *testNode) Stop() error {
if err := n.stop(); err != nil {
return err
}
return os.RemoveAll(n.stateDir)
} | [
"func",
"(",
"n",
"*",
"testNode",
")",
"Stop",
"(",
")",
"error",
"{",
"if",
"err",
":=",
"n",
".",
"stop",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"return",
"os",
".",
"RemoveAll",
"(",
"n",
".",
"stateDir",
... | // Stop stops the node and removes its state directory. | [
"Stop",
"stops",
"the",
"node",
"and",
"removes",
"its",
"state",
"directory",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/node.go#L146-L151 | train |
docker/swarmkit | integration/node.go | ControlClient | func (n *testNode) ControlClient(ctx context.Context) (api.ControlClient, error) {
ctx, cancel := context.WithTimeout(ctx, opsTimeout)
defer cancel()
connChan := n.node.ListenControlSocket(ctx)
var controlConn *grpc.ClientConn
if err := testutils.PollFuncWithTimeout(nil, func() error {
select {
case controlConn = <-connChan:
default:
}
if controlConn == nil {
return fmt.Errorf("didn't get control api connection")
}
return nil
}, opsTimeout); err != nil {
return nil, err
}
return api.NewControlClient(controlConn), nil
} | go | func (n *testNode) ControlClient(ctx context.Context) (api.ControlClient, error) {
ctx, cancel := context.WithTimeout(ctx, opsTimeout)
defer cancel()
connChan := n.node.ListenControlSocket(ctx)
var controlConn *grpc.ClientConn
if err := testutils.PollFuncWithTimeout(nil, func() error {
select {
case controlConn = <-connChan:
default:
}
if controlConn == nil {
return fmt.Errorf("didn't get control api connection")
}
return nil
}, opsTimeout); err != nil {
return nil, err
}
return api.NewControlClient(controlConn), nil
} | [
"func",
"(",
"n",
"*",
"testNode",
")",
"ControlClient",
"(",
"ctx",
"context",
".",
"Context",
")",
"(",
"api",
".",
"ControlClient",
",",
"error",
")",
"{",
"ctx",
",",
"cancel",
":=",
"context",
".",
"WithTimeout",
"(",
"ctx",
",",
"opsTimeout",
")"... | // ControlClient returns grpc client to ControlAPI of node. It will panic for
// non-manager nodes. | [
"ControlClient",
"returns",
"grpc",
"client",
"to",
"ControlAPI",
"of",
"node",
".",
"It",
"will",
"panic",
"for",
"non",
"-",
"manager",
"nodes",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/integration/node.go#L155-L173 | train |
docker/swarmkit | manager/orchestrator/task.go | NewTask | func NewTask(cluster *api.Cluster, service *api.Service, slot uint64, nodeID string) *api.Task {
var logDriver *api.Driver
if service.Spec.Task.LogDriver != nil {
// use the log driver specific to the task, if we have it.
logDriver = service.Spec.Task.LogDriver
} else if cluster != nil {
// pick up the cluster default, if available.
logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here.
}
taskID := identity.NewID()
task := api.Task{
ID: taskID,
ServiceAnnotations: service.Spec.Annotations,
Spec: service.Spec.Task,
SpecVersion: service.SpecVersion,
ServiceID: service.ID,
Slot: slot,
Status: api.TaskStatus{
State: api.TaskStateNew,
Timestamp: ptypes.MustTimestampProto(time.Now()),
Message: "created",
},
Endpoint: &api.Endpoint{
Spec: service.Spec.Endpoint.Copy(),
},
DesiredState: api.TaskStateRunning,
LogDriver: logDriver,
}
// In global mode we also set the NodeID
if nodeID != "" {
task.NodeID = nodeID
}
return &task
} | go | func NewTask(cluster *api.Cluster, service *api.Service, slot uint64, nodeID string) *api.Task {
var logDriver *api.Driver
if service.Spec.Task.LogDriver != nil {
// use the log driver specific to the task, if we have it.
logDriver = service.Spec.Task.LogDriver
} else if cluster != nil {
// pick up the cluster default, if available.
logDriver = cluster.Spec.TaskDefaults.LogDriver // nil is okay here.
}
taskID := identity.NewID()
task := api.Task{
ID: taskID,
ServiceAnnotations: service.Spec.Annotations,
Spec: service.Spec.Task,
SpecVersion: service.SpecVersion,
ServiceID: service.ID,
Slot: slot,
Status: api.TaskStatus{
State: api.TaskStateNew,
Timestamp: ptypes.MustTimestampProto(time.Now()),
Message: "created",
},
Endpoint: &api.Endpoint{
Spec: service.Spec.Endpoint.Copy(),
},
DesiredState: api.TaskStateRunning,
LogDriver: logDriver,
}
// In global mode we also set the NodeID
if nodeID != "" {
task.NodeID = nodeID
}
return &task
} | [
"func",
"NewTask",
"(",
"cluster",
"*",
"api",
".",
"Cluster",
",",
"service",
"*",
"api",
".",
"Service",
",",
"slot",
"uint64",
",",
"nodeID",
"string",
")",
"*",
"api",
".",
"Task",
"{",
"var",
"logDriver",
"*",
"api",
".",
"Driver",
"\n",
"if",
... | // NewTask creates a new task. | [
"NewTask",
"creates",
"a",
"new",
"task",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/task.go#L16-L52 | train |
docker/swarmkit | manager/orchestrator/task.go | RestartCondition | func RestartCondition(task *api.Task) api.RestartPolicy_RestartCondition {
restartCondition := defaults.Service.Task.Restart.Condition
if task.Spec.Restart != nil {
restartCondition = task.Spec.Restart.Condition
}
return restartCondition
} | go | func RestartCondition(task *api.Task) api.RestartPolicy_RestartCondition {
restartCondition := defaults.Service.Task.Restart.Condition
if task.Spec.Restart != nil {
restartCondition = task.Spec.Restart.Condition
}
return restartCondition
} | [
"func",
"RestartCondition",
"(",
"task",
"*",
"api",
".",
"Task",
")",
"api",
".",
"RestartPolicy_RestartCondition",
"{",
"restartCondition",
":=",
"defaults",
".",
"Service",
".",
"Task",
".",
"Restart",
".",
"Condition",
"\n",
"if",
"task",
".",
"Spec",
".... | // RestartCondition returns the restart condition to apply to this task. | [
"RestartCondition",
"returns",
"the",
"restart",
"condition",
"to",
"apply",
"to",
"this",
"task",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/task.go#L55-L61 | train |
docker/swarmkit | manager/orchestrator/task.go | nodeMatches | func nodeMatches(s *api.Service, n *api.Node) bool {
if n == nil {
return false
}
constraints, _ := constraint.Parse(s.Spec.Task.Placement.Constraints)
return constraint.NodeMatches(constraints, n)
} | go | func nodeMatches(s *api.Service, n *api.Node) bool {
if n == nil {
return false
}
constraints, _ := constraint.Parse(s.Spec.Task.Placement.Constraints)
return constraint.NodeMatches(constraints, n)
} | [
"func",
"nodeMatches",
"(",
"s",
"*",
"api",
".",
"Service",
",",
"n",
"*",
"api",
".",
"Node",
")",
"bool",
"{",
"if",
"n",
"==",
"nil",
"{",
"return",
"false",
"\n",
"}",
"\n",
"constraints",
",",
"_",
":=",
"constraint",
".",
"Parse",
"(",
"s"... | // Checks if the current assigned node matches the Placement.Constraints
// specified in the task spec for Updater.newService. | [
"Checks",
"if",
"the",
"current",
"assigned",
"node",
"matches",
"the",
"Placement",
".",
"Constraints",
"specified",
"in",
"the",
"task",
"spec",
"for",
"Updater",
".",
"newService",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/task.go#L117-L124 | train |
docker/swarmkit | manager/orchestrator/task.go | IsTaskDirtyPlacementConstraintsOnly | func IsTaskDirtyPlacementConstraintsOnly(serviceTaskSpec api.TaskSpec, t *api.Task) bool {
// Compare the task placement constraints.
if reflect.DeepEqual(serviceTaskSpec.Placement, t.Spec.Placement) {
return false
}
// Update spec placement to only the fields
// other than the placement constraints in the spec.
serviceTaskSpec.Placement = t.Spec.Placement
return reflect.DeepEqual(serviceTaskSpec, t.Spec)
} | go | func IsTaskDirtyPlacementConstraintsOnly(serviceTaskSpec api.TaskSpec, t *api.Task) bool {
// Compare the task placement constraints.
if reflect.DeepEqual(serviceTaskSpec.Placement, t.Spec.Placement) {
return false
}
// Update spec placement to only the fields
// other than the placement constraints in the spec.
serviceTaskSpec.Placement = t.Spec.Placement
return reflect.DeepEqual(serviceTaskSpec, t.Spec)
} | [
"func",
"IsTaskDirtyPlacementConstraintsOnly",
"(",
"serviceTaskSpec",
"api",
".",
"TaskSpec",
",",
"t",
"*",
"api",
".",
"Task",
")",
"bool",
"{",
"if",
"reflect",
".",
"DeepEqual",
"(",
"serviceTaskSpec",
".",
"Placement",
",",
"t",
".",
"Spec",
".",
"Plac... | // IsTaskDirtyPlacementConstraintsOnly checks if the Placement field alone
// in the spec has changed. | [
"IsTaskDirtyPlacementConstraintsOnly",
"checks",
"if",
"the",
"Placement",
"field",
"alone",
"in",
"the",
"spec",
"has",
"changed",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/task.go#L128-L138 | train |
docker/swarmkit | manager/orchestrator/task.go | InvalidNode | func InvalidNode(n *api.Node) bool {
return n == nil ||
n.Status.State == api.NodeStatus_DOWN ||
n.Spec.Availability == api.NodeAvailabilityDrain
} | go | func InvalidNode(n *api.Node) bool {
return n == nil ||
n.Status.State == api.NodeStatus_DOWN ||
n.Spec.Availability == api.NodeAvailabilityDrain
} | [
"func",
"InvalidNode",
"(",
"n",
"*",
"api",
".",
"Node",
")",
"bool",
"{",
"return",
"n",
"==",
"nil",
"||",
"n",
".",
"Status",
".",
"State",
"==",
"api",
".",
"NodeStatus_DOWN",
"||",
"n",
".",
"Spec",
".",
"Availability",
"==",
"api",
".",
"Nod... | // InvalidNode is true if the node is nil, down, or drained | [
"InvalidNode",
"is",
"true",
"if",
"the",
"node",
"is",
"nil",
"down",
"or",
"drained"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/task.go#L141-L145 | train |
docker/swarmkit | manager/orchestrator/task.go | Swap | func (t TasksByTimestamp) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
} | go | func (t TasksByTimestamp) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
} | [
"func",
"(",
"t",
"TasksByTimestamp",
")",
"Swap",
"(",
"i",
",",
"j",
"int",
")",
"{",
"t",
"[",
"i",
"]",
",",
"t",
"[",
"j",
"]",
"=",
"t",
"[",
"j",
"]",
",",
"t",
"[",
"i",
"]",
"\n",
"}"
] | // Swap implements the Swap method for sorting. | [
"Swap",
"implements",
"the",
"Swap",
"method",
"for",
"sorting",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/task.go#L165-L167 | train |
docker/swarmkit | manager/orchestrator/task.go | Less | func (t TasksByTimestamp) Less(i, j int) bool {
iTimestamp := taskTimestamp(t[i])
jTimestamp := taskTimestamp(t[j])
if iTimestamp == nil {
return true
}
if jTimestamp == nil {
return false
}
if iTimestamp.Seconds < jTimestamp.Seconds {
return true
}
if iTimestamp.Seconds > jTimestamp.Seconds {
return false
}
return iTimestamp.Nanos < jTimestamp.Nanos
} | go | func (t TasksByTimestamp) Less(i, j int) bool {
iTimestamp := taskTimestamp(t[i])
jTimestamp := taskTimestamp(t[j])
if iTimestamp == nil {
return true
}
if jTimestamp == nil {
return false
}
if iTimestamp.Seconds < jTimestamp.Seconds {
return true
}
if iTimestamp.Seconds > jTimestamp.Seconds {
return false
}
return iTimestamp.Nanos < jTimestamp.Nanos
} | [
"func",
"(",
"t",
"TasksByTimestamp",
")",
"Less",
"(",
"i",
",",
"j",
"int",
")",
"bool",
"{",
"iTimestamp",
":=",
"taskTimestamp",
"(",
"t",
"[",
"i",
"]",
")",
"\n",
"jTimestamp",
":=",
"taskTimestamp",
"(",
"t",
"[",
"j",
"]",
")",
"\n",
"if",
... | // Less implements the Less method for sorting. | [
"Less",
"implements",
"the",
"Less",
"method",
"for",
"sorting",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/task.go#L170-L187 | train |
docker/swarmkit | watch/sinks.go | NewChannelSink | func (s *TimeoutDropErrChanGen) NewChannelSink() (events.Sink, *events.Channel) {
ch := events.NewChannel(0)
return timeoutSink{
timeout: s.timeout,
sink: dropErrClosed{
sink: ch,
},
}, ch
} | go | func (s *TimeoutDropErrChanGen) NewChannelSink() (events.Sink, *events.Channel) {
ch := events.NewChannel(0)
return timeoutSink{
timeout: s.timeout,
sink: dropErrClosed{
sink: ch,
},
}, ch
} | [
"func",
"(",
"s",
"*",
"TimeoutDropErrChanGen",
")",
"NewChannelSink",
"(",
")",
"(",
"events",
".",
"Sink",
",",
"*",
"events",
".",
"Channel",
")",
"{",
"ch",
":=",
"events",
".",
"NewChannel",
"(",
"0",
")",
"\n",
"return",
"timeoutSink",
"{",
"time... | // NewChannelSink creates a new sink chain of timeoutSink->dropErrClosed->Channel | [
"NewChannelSink",
"creates",
"a",
"new",
"sink",
"chain",
"of",
"timeoutSink",
"-",
">",
"dropErrClosed",
"-",
">",
"Channel"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/watch/sinks.go#L81-L89 | train |
docker/swarmkit | manager/state/store/networks.go | CreateNetwork | func CreateNetwork(tx Tx, n *api.Network) error {
// Ensure the name is not already in use.
if tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)) != nil {
return ErrNameConflict
}
return tx.create(tableNetwork, n)
} | go | func CreateNetwork(tx Tx, n *api.Network) error {
// Ensure the name is not already in use.
if tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)) != nil {
return ErrNameConflict
}
return tx.create(tableNetwork, n)
} | [
"func",
"CreateNetwork",
"(",
"tx",
"Tx",
",",
"n",
"*",
"api",
".",
"Network",
")",
"error",
"{",
"if",
"tx",
".",
"lookup",
"(",
"tableNetwork",
",",
"indexName",
",",
"strings",
".",
"ToLower",
"(",
"n",
".",
"Spec",
".",
"Annotations",
".",
"Name... | // CreateNetwork adds a new network to the store.
// Returns ErrExist if the ID is already taken. | [
"CreateNetwork",
"adds",
"a",
"new",
"network",
"to",
"the",
"store",
".",
"Returns",
"ErrExist",
"if",
"the",
"ID",
"is",
"already",
"taken",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/networks.go#L66-L73 | train |
docker/swarmkit | manager/state/store/networks.go | UpdateNetwork | func UpdateNetwork(tx Tx, n *api.Network) error {
// Ensure the name is either not in use or already used by this same Network.
if existing := tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)); existing != nil {
if existing.GetID() != n.ID {
return ErrNameConflict
}
}
return tx.update(tableNetwork, n)
} | go | func UpdateNetwork(tx Tx, n *api.Network) error {
// Ensure the name is either not in use or already used by this same Network.
if existing := tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)); existing != nil {
if existing.GetID() != n.ID {
return ErrNameConflict
}
}
return tx.update(tableNetwork, n)
} | [
"func",
"UpdateNetwork",
"(",
"tx",
"Tx",
",",
"n",
"*",
"api",
".",
"Network",
")",
"error",
"{",
"if",
"existing",
":=",
"tx",
".",
"lookup",
"(",
"tableNetwork",
",",
"indexName",
",",
"strings",
".",
"ToLower",
"(",
"n",
".",
"Spec",
".",
"Annota... | // UpdateNetwork updates an existing network in the store.
// Returns ErrNotExist if the network doesn't exist. | [
"UpdateNetwork",
"updates",
"an",
"existing",
"network",
"in",
"the",
"store",
".",
"Returns",
"ErrNotExist",
"if",
"the",
"network",
"doesn",
"t",
"exist",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/networks.go#L77-L86 | train |
docker/swarmkit | manager/state/store/networks.go | DeleteNetwork | func DeleteNetwork(tx Tx, id string) error {
return tx.delete(tableNetwork, id)
} | go | func DeleteNetwork(tx Tx, id string) error {
return tx.delete(tableNetwork, id)
} | [
"func",
"DeleteNetwork",
"(",
"tx",
"Tx",
",",
"id",
"string",
")",
"error",
"{",
"return",
"tx",
".",
"delete",
"(",
"tableNetwork",
",",
"id",
")",
"\n",
"}"
] | // DeleteNetwork removes a network from the store.
// Returns ErrNotExist if the network doesn't exist. | [
"DeleteNetwork",
"removes",
"a",
"network",
"from",
"the",
"store",
".",
"Returns",
"ErrNotExist",
"if",
"the",
"network",
"doesn",
"t",
"exist",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/networks.go#L90-L92 | train |
docker/swarmkit | manager/state/store/networks.go | GetNetwork | func GetNetwork(tx ReadTx, id string) *api.Network {
n := tx.get(tableNetwork, id)
if n == nil {
return nil
}
return n.(*api.Network)
} | go | func GetNetwork(tx ReadTx, id string) *api.Network {
n := tx.get(tableNetwork, id)
if n == nil {
return nil
}
return n.(*api.Network)
} | [
"func",
"GetNetwork",
"(",
"tx",
"ReadTx",
",",
"id",
"string",
")",
"*",
"api",
".",
"Network",
"{",
"n",
":=",
"tx",
".",
"get",
"(",
"tableNetwork",
",",
"id",
")",
"\n",
"if",
"n",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"return",... | // GetNetwork looks up a network by ID.
// Returns nil if the network doesn't exist. | [
"GetNetwork",
"looks",
"up",
"a",
"network",
"by",
"ID",
".",
"Returns",
"nil",
"if",
"the",
"network",
"doesn",
"t",
"exist",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/networks.go#L96-L102 | train |
docker/swarmkit | manager/state/store/networks.go | FindNetworks | func FindNetworks(tx ReadTx, by By) ([]*api.Network, error) {
checkType := func(by By) error {
switch by.(type) {
case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix, byAll:
return nil
default:
return ErrInvalidFindBy
}
}
networkList := []*api.Network{}
appendResult := func(o api.StoreObject) {
networkList = append(networkList, o.(*api.Network))
}
err := tx.find(tableNetwork, by, checkType, appendResult)
return networkList, err
} | go | func FindNetworks(tx ReadTx, by By) ([]*api.Network, error) {
checkType := func(by By) error {
switch by.(type) {
case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix, byAll:
return nil
default:
return ErrInvalidFindBy
}
}
networkList := []*api.Network{}
appendResult := func(o api.StoreObject) {
networkList = append(networkList, o.(*api.Network))
}
err := tx.find(tableNetwork, by, checkType, appendResult)
return networkList, err
} | [
"func",
"FindNetworks",
"(",
"tx",
"ReadTx",
",",
"by",
"By",
")",
"(",
"[",
"]",
"*",
"api",
".",
"Network",
",",
"error",
")",
"{",
"checkType",
":=",
"func",
"(",
"by",
"By",
")",
"error",
"{",
"switch",
"by",
".",
"(",
"type",
")",
"{",
"ca... | // FindNetworks selects a set of networks and returns them. | [
"FindNetworks",
"selects",
"a",
"set",
"of",
"networks",
"and",
"returns",
"them",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/networks.go#L105-L122 | train |
docker/swarmkit | agent/resource.go | AttachNetwork | func (r *resourceAllocator) AttachNetwork(ctx context.Context, id, target string, addresses []string) (string, error) {
var taskID string
if err := r.agent.withSession(ctx, func(session *session) error {
client := api.NewResourceAllocatorClient(session.conn.ClientConn)
r, err := client.AttachNetwork(ctx, &api.AttachNetworkRequest{
Config: &api.NetworkAttachmentConfig{
Target: target,
Addresses: addresses,
},
ContainerID: id,
})
if err != nil {
return err
}
taskID = r.AttachmentID
return nil
}); err != nil {
return "", err
}
return taskID, nil
} | go | func (r *resourceAllocator) AttachNetwork(ctx context.Context, id, target string, addresses []string) (string, error) {
var taskID string
if err := r.agent.withSession(ctx, func(session *session) error {
client := api.NewResourceAllocatorClient(session.conn.ClientConn)
r, err := client.AttachNetwork(ctx, &api.AttachNetworkRequest{
Config: &api.NetworkAttachmentConfig{
Target: target,
Addresses: addresses,
},
ContainerID: id,
})
if err != nil {
return err
}
taskID = r.AttachmentID
return nil
}); err != nil {
return "", err
}
return taskID, nil
} | [
"func",
"(",
"r",
"*",
"resourceAllocator",
")",
"AttachNetwork",
"(",
"ctx",
"context",
".",
"Context",
",",
"id",
",",
"target",
"string",
",",
"addresses",
"[",
"]",
"string",
")",
"(",
"string",
",",
"error",
")",
"{",
"var",
"taskID",
"string",
"\... | // AttachNetwork creates a network attachment. | [
"AttachNetwork",
"creates",
"a",
"network",
"attachment",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/agent/resource.go#L31-L52 | train |
docker/swarmkit | agent/resource.go | DetachNetwork | func (r *resourceAllocator) DetachNetwork(ctx context.Context, aID string) error {
return r.agent.withSession(ctx, func(session *session) error {
client := api.NewResourceAllocatorClient(session.conn.ClientConn)
_, err := client.DetachNetwork(ctx, &api.DetachNetworkRequest{
AttachmentID: aID,
})
return err
})
} | go | func (r *resourceAllocator) DetachNetwork(ctx context.Context, aID string) error {
return r.agent.withSession(ctx, func(session *session) error {
client := api.NewResourceAllocatorClient(session.conn.ClientConn)
_, err := client.DetachNetwork(ctx, &api.DetachNetworkRequest{
AttachmentID: aID,
})
return err
})
} | [
"func",
"(",
"r",
"*",
"resourceAllocator",
")",
"DetachNetwork",
"(",
"ctx",
"context",
".",
"Context",
",",
"aID",
"string",
")",
"error",
"{",
"return",
"r",
".",
"agent",
".",
"withSession",
"(",
"ctx",
",",
"func",
"(",
"session",
"*",
"session",
... | // DetachNetwork deletes a network attachment. | [
"DetachNetwork",
"deletes",
"a",
"network",
"attachment",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/agent/resource.go#L55-L64 | train |
docker/swarmkit | manager/state/store/secrets.go | CreateSecret | func CreateSecret(tx Tx, s *api.Secret) error {
// Ensure the name is not already in use.
if tx.lookup(tableSecret, indexName, strings.ToLower(s.Spec.Annotations.Name)) != nil {
return ErrNameConflict
}
return tx.create(tableSecret, s)
} | go | func CreateSecret(tx Tx, s *api.Secret) error {
// Ensure the name is not already in use.
if tx.lookup(tableSecret, indexName, strings.ToLower(s.Spec.Annotations.Name)) != nil {
return ErrNameConflict
}
return tx.create(tableSecret, s)
} | [
"func",
"CreateSecret",
"(",
"tx",
"Tx",
",",
"s",
"*",
"api",
".",
"Secret",
")",
"error",
"{",
"if",
"tx",
".",
"lookup",
"(",
"tableSecret",
",",
"indexName",
",",
"strings",
".",
"ToLower",
"(",
"s",
".",
"Spec",
".",
"Annotations",
".",
"Name",
... | // CreateSecret adds a new secret to the store.
// Returns ErrExist if the ID is already taken. | [
"CreateSecret",
"adds",
"a",
"new",
"secret",
"to",
"the",
"store",
".",
"Returns",
"ErrExist",
"if",
"the",
"ID",
"is",
"already",
"taken",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/secrets.go#L66-L73 | train |
docker/swarmkit | manager/state/store/secrets.go | UpdateSecret | func UpdateSecret(tx Tx, s *api.Secret) error {
// Ensure the name is either not in use or already used by this same Secret.
if existing := tx.lookup(tableSecret, indexName, strings.ToLower(s.Spec.Annotations.Name)); existing != nil {
if existing.GetID() != s.ID {
return ErrNameConflict
}
}
return tx.update(tableSecret, s)
} | go | func UpdateSecret(tx Tx, s *api.Secret) error {
// Ensure the name is either not in use or already used by this same Secret.
if existing := tx.lookup(tableSecret, indexName, strings.ToLower(s.Spec.Annotations.Name)); existing != nil {
if existing.GetID() != s.ID {
return ErrNameConflict
}
}
return tx.update(tableSecret, s)
} | [
"func",
"UpdateSecret",
"(",
"tx",
"Tx",
",",
"s",
"*",
"api",
".",
"Secret",
")",
"error",
"{",
"if",
"existing",
":=",
"tx",
".",
"lookup",
"(",
"tableSecret",
",",
"indexName",
",",
"strings",
".",
"ToLower",
"(",
"s",
".",
"Spec",
".",
"Annotatio... | // UpdateSecret updates an existing secret in the store.
// Returns ErrNotExist if the secret doesn't exist. | [
"UpdateSecret",
"updates",
"an",
"existing",
"secret",
"in",
"the",
"store",
".",
"Returns",
"ErrNotExist",
"if",
"the",
"secret",
"doesn",
"t",
"exist",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/secrets.go#L77-L86 | train |
docker/swarmkit | manager/state/store/secrets.go | DeleteSecret | func DeleteSecret(tx Tx, id string) error {
return tx.delete(tableSecret, id)
} | go | func DeleteSecret(tx Tx, id string) error {
return tx.delete(tableSecret, id)
} | [
"func",
"DeleteSecret",
"(",
"tx",
"Tx",
",",
"id",
"string",
")",
"error",
"{",
"return",
"tx",
".",
"delete",
"(",
"tableSecret",
",",
"id",
")",
"\n",
"}"
] | // DeleteSecret removes a secret from the store.
// Returns ErrNotExist if the secret doesn't exist. | [
"DeleteSecret",
"removes",
"a",
"secret",
"from",
"the",
"store",
".",
"Returns",
"ErrNotExist",
"if",
"the",
"secret",
"doesn",
"t",
"exist",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/secrets.go#L90-L92 | train |
docker/swarmkit | manager/state/store/secrets.go | GetSecret | func GetSecret(tx ReadTx, id string) *api.Secret {
n := tx.get(tableSecret, id)
if n == nil {
return nil
}
return n.(*api.Secret)
} | go | func GetSecret(tx ReadTx, id string) *api.Secret {
n := tx.get(tableSecret, id)
if n == nil {
return nil
}
return n.(*api.Secret)
} | [
"func",
"GetSecret",
"(",
"tx",
"ReadTx",
",",
"id",
"string",
")",
"*",
"api",
".",
"Secret",
"{",
"n",
":=",
"tx",
".",
"get",
"(",
"tableSecret",
",",
"id",
")",
"\n",
"if",
"n",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"return",
... | // GetSecret looks up a secret by ID.
// Returns nil if the secret doesn't exist. | [
"GetSecret",
"looks",
"up",
"a",
"secret",
"by",
"ID",
".",
"Returns",
"nil",
"if",
"the",
"secret",
"doesn",
"t",
"exist",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/secrets.go#L96-L102 | train |
docker/swarmkit | manager/state/store/secrets.go | FindSecrets | func FindSecrets(tx ReadTx, by By) ([]*api.Secret, error) {
checkType := func(by By) error {
switch by.(type) {
case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix:
return nil
default:
return ErrInvalidFindBy
}
}
secretList := []*api.Secret{}
appendResult := func(o api.StoreObject) {
secretList = append(secretList, o.(*api.Secret))
}
err := tx.find(tableSecret, by, checkType, appendResult)
return secretList, err
} | go | func FindSecrets(tx ReadTx, by By) ([]*api.Secret, error) {
checkType := func(by By) error {
switch by.(type) {
case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix:
return nil
default:
return ErrInvalidFindBy
}
}
secretList := []*api.Secret{}
appendResult := func(o api.StoreObject) {
secretList = append(secretList, o.(*api.Secret))
}
err := tx.find(tableSecret, by, checkType, appendResult)
return secretList, err
} | [
"func",
"FindSecrets",
"(",
"tx",
"ReadTx",
",",
"by",
"By",
")",
"(",
"[",
"]",
"*",
"api",
".",
"Secret",
",",
"error",
")",
"{",
"checkType",
":=",
"func",
"(",
"by",
"By",
")",
"error",
"{",
"switch",
"by",
".",
"(",
"type",
")",
"{",
"case... | // FindSecrets selects a set of secrets and returns them. | [
"FindSecrets",
"selects",
"a",
"set",
"of",
"secrets",
"and",
"returns",
"them",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/secrets.go#L105-L122 | train |
docker/swarmkit | manager/allocator/network.go | getNodeNetworks | func (a *Allocator) getNodeNetworks(nodeID string) ([]*api.Network, error) {
var (
// no need to initialize networks. we only append to it, and appending
// to a nil slice is valid. this has the added bonus of making this nil
// if we return an error
networks []*api.Network
err error
)
a.store.View(func(tx store.ReadTx) {
// get all tasks currently assigned to this node. it's no big deal if
// the tasks change in the meantime, there's no race to clean up
// unneeded network attachments on a node.
var tasks []*api.Task
tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
if err != nil {
return
}
// we need to keep track of network IDs that we've already added to the
// list of networks we're going to return. we could do
// map[string]*api.Network and then convert to []*api.Network and
// return that, but it seems cleaner to have a separate set and list.
networkIDs := map[string]struct{}{}
for _, task := range tasks {
// we don't need to check if a task is before the Assigned state.
// the only way we have a task with a NodeID that isn't yet in
// Assigned is if it's a global service task. this check is not
// necessary:
// if task.Status.State < api.TaskStateAssigned {
// continue
// }
if task.Status.State > api.TaskStateRunning {
// we don't need to have network attachments for a task that's
// already in a terminal state
continue
}
// now go through the task's network attachments and find all of
// the networks
for _, attachment := range task.Networks {
// if the network is an overlay network, and the network ID is
// not yet in the set of network IDs, then add it to the set
// and add the network to the list of networks we'll be
// returning
if _, ok := networkIDs[attachment.Network.ID]; isOverlayNetwork(attachment.Network) && !ok {
networkIDs[attachment.Network.ID] = struct{}{}
// we don't need to worry about retrieving the network from
// the store, because the network in the attachment is an
// identical copy of the network in the store.
networks = append(networks, attachment.Network)
}
}
}
})
// finally, we need the ingress network if one exists.
if a.netCtx != nil && a.netCtx.ingressNetwork != nil {
networks = append(networks, a.netCtx.ingressNetwork)
}
return networks, err
} | go | func (a *Allocator) getNodeNetworks(nodeID string) ([]*api.Network, error) {
var (
// no need to initialize networks. we only append to it, and appending
// to a nil slice is valid. this has the added bonus of making this nil
// if we return an error
networks []*api.Network
err error
)
a.store.View(func(tx store.ReadTx) {
// get all tasks currently assigned to this node. it's no big deal if
// the tasks change in the meantime, there's no race to clean up
// unneeded network attachments on a node.
var tasks []*api.Task
tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
if err != nil {
return
}
// we need to keep track of network IDs that we've already added to the
// list of networks we're going to return. we could do
// map[string]*api.Network and then convert to []*api.Network and
// return that, but it seems cleaner to have a separate set and list.
networkIDs := map[string]struct{}{}
for _, task := range tasks {
// we don't need to check if a task is before the Assigned state.
// the only way we have a task with a NodeID that isn't yet in
// Assigned is if it's a global service task. this check is not
// necessary:
// if task.Status.State < api.TaskStateAssigned {
// continue
// }
if task.Status.State > api.TaskStateRunning {
// we don't need to have network attachments for a task that's
// already in a terminal state
continue
}
// now go through the task's network attachments and find all of
// the networks
for _, attachment := range task.Networks {
// if the network is an overlay network, and the network ID is
// not yet in the set of network IDs, then add it to the set
// and add the network to the list of networks we'll be
// returning
if _, ok := networkIDs[attachment.Network.ID]; isOverlayNetwork(attachment.Network) && !ok {
networkIDs[attachment.Network.ID] = struct{}{}
// we don't need to worry about retrieving the network from
// the store, because the network in the attachment is an
// identical copy of the network in the store.
networks = append(networks, attachment.Network)
}
}
}
})
// finally, we need the ingress network if one exists.
if a.netCtx != nil && a.netCtx.ingressNetwork != nil {
networks = append(networks, a.netCtx.ingressNetwork)
}
return networks, err
} | [
"func",
"(",
"a",
"*",
"Allocator",
")",
"getNodeNetworks",
"(",
"nodeID",
"string",
")",
"(",
"[",
"]",
"*",
"api",
".",
"Network",
",",
"error",
")",
"{",
"var",
"(",
"networks",
"[",
"]",
"*",
"api",
".",
"Network",
"\n",
"err",
"error",
"\n",
... | // getNodeNetworks returns all networks that should be allocated for a node | [
"getNodeNetworks",
"returns",
"all",
"networks",
"that",
"should",
"be",
"allocated",
"for",
"a",
"node"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/allocator/network.go#L407-L467 | train |
docker/swarmkit | manager/allocator/network.go | allocateServices | func (a *Allocator) allocateServices(ctx context.Context, existingAddressesOnly bool) error {
var (
nc = a.netCtx
services []*api.Service
err error
)
a.store.View(func(tx store.ReadTx) {
services, err = store.FindServices(tx, store.All)
})
if err != nil {
return errors.Wrap(err, "error listing all services in store while trying to allocate during init")
}
var allocatedServices []*api.Service
for _, s := range services {
if nc.nwkAllocator.IsServiceAllocated(s, networkallocator.OnInit) {
continue
}
if existingAddressesOnly &&
(s.Endpoint == nil ||
len(s.Endpoint.VirtualIPs) == 0) {
continue
}
if err := a.allocateService(ctx, s, existingAddressesOnly); err != nil {
log.G(ctx).WithField("existingAddressesOnly", existingAddressesOnly).WithError(err).Errorf("failed allocating service %s during init", s.ID)
continue
}
allocatedServices = append(allocatedServices, s)
}
if err := a.store.Batch(func(batch *store.Batch) error {
for _, s := range allocatedServices {
if err := a.commitAllocatedService(ctx, batch, s); err != nil {
log.G(ctx).WithError(err).Errorf("failed committing allocation of service %s during init", s.ID)
}
}
return nil
}); err != nil {
for _, s := range allocatedServices {
log.G(ctx).WithError(err).Errorf("failed committing allocation of service %v during init", s.GetID())
}
}
return nil
} | go | func (a *Allocator) allocateServices(ctx context.Context, existingAddressesOnly bool) error {
var (
nc = a.netCtx
services []*api.Service
err error
)
a.store.View(func(tx store.ReadTx) {
services, err = store.FindServices(tx, store.All)
})
if err != nil {
return errors.Wrap(err, "error listing all services in store while trying to allocate during init")
}
var allocatedServices []*api.Service
for _, s := range services {
if nc.nwkAllocator.IsServiceAllocated(s, networkallocator.OnInit) {
continue
}
if existingAddressesOnly &&
(s.Endpoint == nil ||
len(s.Endpoint.VirtualIPs) == 0) {
continue
}
if err := a.allocateService(ctx, s, existingAddressesOnly); err != nil {
log.G(ctx).WithField("existingAddressesOnly", existingAddressesOnly).WithError(err).Errorf("failed allocating service %s during init", s.ID)
continue
}
allocatedServices = append(allocatedServices, s)
}
if err := a.store.Batch(func(batch *store.Batch) error {
for _, s := range allocatedServices {
if err := a.commitAllocatedService(ctx, batch, s); err != nil {
log.G(ctx).WithError(err).Errorf("failed committing allocation of service %s during init", s.ID)
}
}
return nil
}); err != nil {
for _, s := range allocatedServices {
log.G(ctx).WithError(err).Errorf("failed committing allocation of service %v during init", s.GetID())
}
}
return nil
} | [
"func",
"(",
"a",
"*",
"Allocator",
")",
"allocateServices",
"(",
"ctx",
"context",
".",
"Context",
",",
"existingAddressesOnly",
"bool",
")",
"error",
"{",
"var",
"(",
"nc",
"=",
"a",
".",
"netCtx",
"\n",
"services",
"[",
"]",
"*",
"api",
".",
"Servic... | // allocateServices allocates services in the store so far before we process
// watched events. | [
"allocateServices",
"allocates",
"services",
"in",
"the",
"store",
"so",
"far",
"before",
"we",
"process",
"watched",
"events",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/allocator/network.go#L667-L712 | train |
docker/swarmkit | manager/allocator/network.go | taskReadyForNetworkVote | func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bool {
// Task is ready for vote if the following is true:
//
// Task has no network attached or networks attached but all
// of them allocated AND Task's service has no endpoint or
// network configured or service endpoints have been
// allocated.
return (len(t.Networks) == 0 || nc.nwkAllocator.IsTaskAllocated(t)) &&
(s == nil || nc.nwkAllocator.IsServiceAllocated(s))
} | go | func taskReadyForNetworkVote(t *api.Task, s *api.Service, nc *networkContext) bool {
// Task is ready for vote if the following is true:
//
// Task has no network attached or networks attached but all
// of them allocated AND Task's service has no endpoint or
// network configured or service endpoints have been
// allocated.
return (len(t.Networks) == 0 || nc.nwkAllocator.IsTaskAllocated(t)) &&
(s == nil || nc.nwkAllocator.IsServiceAllocated(s))
} | [
"func",
"taskReadyForNetworkVote",
"(",
"t",
"*",
"api",
".",
"Task",
",",
"s",
"*",
"api",
".",
"Service",
",",
"nc",
"*",
"networkContext",
")",
"bool",
"{",
"return",
"(",
"len",
"(",
"t",
".",
"Networks",
")",
"==",
"0",
"||",
"nc",
".",
"nwkAl... | // taskReadyForNetworkVote checks if the task is ready for a network
// vote to move it to PENDING state. | [
"taskReadyForNetworkVote",
"checks",
"if",
"the",
"task",
"is",
"ready",
"for",
"a",
"network",
"vote",
"to",
"move",
"it",
"to",
"PENDING",
"state",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/allocator/network.go#L808-L817 | train |
docker/swarmkit | manager/allocator/network.go | allocateNode | func (a *Allocator) allocateNode(ctx context.Context, node *api.Node, existingAddressesOnly bool, networks []*api.Network) bool {
var allocated bool
nc := a.netCtx
var nwIDs = make(map[string]struct{}, len(networks))
// go through all of the networks we've passed in
for _, network := range networks {
nwIDs[network.ID] = struct{}{}
// for each one, create space for an attachment. then, search through
// all of the attachments already on the node. if the attachment
// exists, then copy it to the node. if not, we'll allocate it below.
var lbAttachment *api.NetworkAttachment
for _, na := range node.Attachments {
if na.Network != nil && na.Network.ID == network.ID {
lbAttachment = na
break
}
}
if lbAttachment != nil {
if nc.nwkAllocator.IsAttachmentAllocated(node, lbAttachment) {
continue
}
}
if lbAttachment == nil {
// if we're restoring state, we should not add an attachment here.
if existingAddressesOnly {
continue
}
lbAttachment = &api.NetworkAttachment{}
node.Attachments = append(node.Attachments, lbAttachment)
}
if existingAddressesOnly && len(lbAttachment.Addresses) == 0 {
continue
}
lbAttachment.Network = network.Copy()
if err := a.netCtx.nwkAllocator.AllocateAttachment(node, lbAttachment); err != nil {
log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID)
// TODO: Should we add a unallocatedNode and retry allocating resources like we do for network, tasks, services?
// right now, we will only retry allocating network resources for the node when the node is updated.
continue
}
allocated = true
}
// if we're only initializing existing addresses, we should stop here and
// not deallocate anything
if existingAddressesOnly {
return allocated
}
// now that we've allocated everything new, we have to remove things that
// do not belong. we have to do this last because we can easily roll back
// attachments we've allocated if something goes wrong by freeing them, but
// we can't roll back deallocating attachments by reacquiring them.
// we're using a trick to filter without allocating see the official go
// wiki on github:
// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating
attachments := node.Attachments[:0]
for _, attach := range node.Attachments {
if _, ok := nwIDs[attach.Network.ID]; ok {
// attachment belongs to one of the networks, so keep it
attachments = append(attachments, attach)
} else {
// free the attachment and remove it from the node's attachments by
// re-slicing
if err := a.netCtx.nwkAllocator.DeallocateAttachment(node, attach); err != nil {
// if deallocation fails, there's nothing we can do besides log
// an error and keep going
log.G(ctx).WithError(err).Errorf(
"error deallocating attachment for network %v on node %v",
attach.Network.ID, node.ID,
)
}
// strictly speaking, nothing was allocated, but something was
// deallocated and that counts.
allocated = true
// also, set the somethingWasDeallocated flag so the allocator
// knows that it can now try again.
a.netCtx.somethingWasDeallocated = true
}
}
node.Attachments = attachments
return allocated
} | go | func (a *Allocator) allocateNode(ctx context.Context, node *api.Node, existingAddressesOnly bool, networks []*api.Network) bool {
var allocated bool
nc := a.netCtx
var nwIDs = make(map[string]struct{}, len(networks))
// go through all of the networks we've passed in
for _, network := range networks {
nwIDs[network.ID] = struct{}{}
// for each one, create space for an attachment. then, search through
// all of the attachments already on the node. if the attachment
// exists, then copy it to the node. if not, we'll allocate it below.
var lbAttachment *api.NetworkAttachment
for _, na := range node.Attachments {
if na.Network != nil && na.Network.ID == network.ID {
lbAttachment = na
break
}
}
if lbAttachment != nil {
if nc.nwkAllocator.IsAttachmentAllocated(node, lbAttachment) {
continue
}
}
if lbAttachment == nil {
// if we're restoring state, we should not add an attachment here.
if existingAddressesOnly {
continue
}
lbAttachment = &api.NetworkAttachment{}
node.Attachments = append(node.Attachments, lbAttachment)
}
if existingAddressesOnly && len(lbAttachment.Addresses) == 0 {
continue
}
lbAttachment.Network = network.Copy()
if err := a.netCtx.nwkAllocator.AllocateAttachment(node, lbAttachment); err != nil {
log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID)
// TODO: Should we add a unallocatedNode and retry allocating resources like we do for network, tasks, services?
// right now, we will only retry allocating network resources for the node when the node is updated.
continue
}
allocated = true
}
// if we're only initializing existing addresses, we should stop here and
// not deallocate anything
if existingAddressesOnly {
return allocated
}
// now that we've allocated everything new, we have to remove things that
// do not belong. we have to do this last because we can easily roll back
// attachments we've allocated if something goes wrong by freeing them, but
// we can't roll back deallocating attachments by reacquiring them.
// we're using a trick to filter without allocating see the official go
// wiki on github:
// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating
attachments := node.Attachments[:0]
for _, attach := range node.Attachments {
if _, ok := nwIDs[attach.Network.ID]; ok {
// attachment belongs to one of the networks, so keep it
attachments = append(attachments, attach)
} else {
// free the attachment and remove it from the node's attachments by
// re-slicing
if err := a.netCtx.nwkAllocator.DeallocateAttachment(node, attach); err != nil {
// if deallocation fails, there's nothing we can do besides log
// an error and keep going
log.G(ctx).WithError(err).Errorf(
"error deallocating attachment for network %v on node %v",
attach.Network.ID, node.ID,
)
}
// strictly speaking, nothing was allocated, but something was
// deallocated and that counts.
allocated = true
// also, set the somethingWasDeallocated flag so the allocator
// knows that it can now try again.
a.netCtx.somethingWasDeallocated = true
}
}
node.Attachments = attachments
return allocated
} | [
"func",
"(",
"a",
"*",
"Allocator",
")",
"allocateNode",
"(",
"ctx",
"context",
".",
"Context",
",",
"node",
"*",
"api",
".",
"Node",
",",
"existingAddressesOnly",
"bool",
",",
"networks",
"[",
"]",
"*",
"api",
".",
"Network",
")",
"bool",
"{",
"var",
... | // allocateNode takes a context, a node, whether or not new allocations should
// be made, and the networks to allocate. it then makes sure an attachment is
// allocated for every network in the provided networks, allocating new
// attachments if existingAddressesOnly is false. it return true if something
// new was allocated or something was removed, or false otherwise.
//
// additionally, allocateNode will remove and free any attachments for networks
// not in the set of networks passed in. | [
"allocateNode",
"takes",
"a",
"context",
"a",
"node",
"whether",
"or",
"not",
"new",
"allocations",
"should",
"be",
"made",
"and",
"the",
"networks",
"to",
"allocate",
".",
"it",
"then",
"makes",
"sure",
"an",
"attachment",
"is",
"allocated",
"for",
"every",... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/allocator/network.go#L987-L1080 | train |
docker/swarmkit | manager/allocator/network.go | allocateService | func (a *Allocator) allocateService(ctx context.Context, s *api.Service, existingAddressesOnly bool) error {
nc := a.netCtx
if s.Spec.Endpoint != nil {
// service has user-defined endpoint
if s.Endpoint == nil {
// service currently has no allocated endpoint, need allocated.
s.Endpoint = &api.Endpoint{
Spec: s.Spec.Endpoint.Copy(),
}
}
// The service is trying to expose ports to the external
// world. Automatically attach the service to the ingress
// network only if it is not already done.
if IsIngressNetworkNeeded(s) {
if nc.ingressNetwork == nil {
return fmt.Errorf("ingress network is missing")
}
var found bool
for _, vip := range s.Endpoint.VirtualIPs {
if vip.NetworkID == nc.ingressNetwork.ID {
found = true
break
}
}
if !found {
s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs,
&api.Endpoint_VirtualIP{NetworkID: nc.ingressNetwork.ID})
}
}
} else if s.Endpoint != nil && !existingAddressesOnly {
// if we are in the restart phase there is no reason to try to deallocate anything because the state
// is not there
// service has no user-defined endpoints while has already allocated network resources,
// need deallocated.
if err := nc.nwkAllocator.DeallocateService(s); err != nil {
return err
}
nc.somethingWasDeallocated = true
}
if err := nc.nwkAllocator.AllocateService(s); err != nil {
nc.unallocatedServices[s.ID] = s
return err
}
// If the service doesn't expose ports any more and if we have
// any lingering virtual IP references for ingress network
// clean them up here.
if !IsIngressNetworkNeeded(s) && nc.ingressNetwork != nil {
if s.Endpoint != nil {
for i, vip := range s.Endpoint.VirtualIPs {
if vip.NetworkID == nc.ingressNetwork.ID {
n := len(s.Endpoint.VirtualIPs)
s.Endpoint.VirtualIPs[i], s.Endpoint.VirtualIPs[n-1] = s.Endpoint.VirtualIPs[n-1], nil
s.Endpoint.VirtualIPs = s.Endpoint.VirtualIPs[:n-1]
break
}
}
}
}
return nil
} | go | func (a *Allocator) allocateService(ctx context.Context, s *api.Service, existingAddressesOnly bool) error {
nc := a.netCtx
if s.Spec.Endpoint != nil {
// service has user-defined endpoint
if s.Endpoint == nil {
// service currently has no allocated endpoint, need allocated.
s.Endpoint = &api.Endpoint{
Spec: s.Spec.Endpoint.Copy(),
}
}
// The service is trying to expose ports to the external
// world. Automatically attach the service to the ingress
// network only if it is not already done.
if IsIngressNetworkNeeded(s) {
if nc.ingressNetwork == nil {
return fmt.Errorf("ingress network is missing")
}
var found bool
for _, vip := range s.Endpoint.VirtualIPs {
if vip.NetworkID == nc.ingressNetwork.ID {
found = true
break
}
}
if !found {
s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs,
&api.Endpoint_VirtualIP{NetworkID: nc.ingressNetwork.ID})
}
}
} else if s.Endpoint != nil && !existingAddressesOnly {
// if we are in the restart phase there is no reason to try to deallocate anything because the state
// is not there
// service has no user-defined endpoints while has already allocated network resources,
// need deallocated.
if err := nc.nwkAllocator.DeallocateService(s); err != nil {
return err
}
nc.somethingWasDeallocated = true
}
if err := nc.nwkAllocator.AllocateService(s); err != nil {
nc.unallocatedServices[s.ID] = s
return err
}
// If the service doesn't expose ports any more and if we have
// any lingering virtual IP references for ingress network
// clean them up here.
if !IsIngressNetworkNeeded(s) && nc.ingressNetwork != nil {
if s.Endpoint != nil {
for i, vip := range s.Endpoint.VirtualIPs {
if vip.NetworkID == nc.ingressNetwork.ID {
n := len(s.Endpoint.VirtualIPs)
s.Endpoint.VirtualIPs[i], s.Endpoint.VirtualIPs[n-1] = s.Endpoint.VirtualIPs[n-1], nil
s.Endpoint.VirtualIPs = s.Endpoint.VirtualIPs[:n-1]
break
}
}
}
}
return nil
} | [
"func",
"(",
"a",
"*",
"Allocator",
")",
"allocateService",
"(",
"ctx",
"context",
".",
"Context",
",",
"s",
"*",
"api",
".",
"Service",
",",
"existingAddressesOnly",
"bool",
")",
"error",
"{",
"nc",
":=",
"a",
".",
"netCtx",
"\n",
"if",
"s",
".",
"S... | // allocateService takes care to align the desired state with the spec passed
// the last parameter is true only during restart when the data is read from raft
// and used to build internal state | [
"allocateService",
"takes",
"care",
"to",
"align",
"the",
"desired",
"state",
"with",
"the",
"spec",
"passed",
"the",
"last",
"parameter",
"is",
"true",
"only",
"during",
"restart",
"when",
"the",
"data",
"is",
"read",
"from",
"raft",
"and",
"used",
"to",
... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/allocator/network.go#L1164-L1228 | train |
docker/swarmkit | manager/allocator/network.go | updateTaskStatus | func updateTaskStatus(t *api.Task, newStatus api.TaskState, message string) {
t.Status = api.TaskStatus{
State: newStatus,
Message: message,
Timestamp: ptypes.MustTimestampProto(time.Now()),
}
} | go | func updateTaskStatus(t *api.Task, newStatus api.TaskState, message string) {
t.Status = api.TaskStatus{
State: newStatus,
Message: message,
Timestamp: ptypes.MustTimestampProto(time.Now()),
}
} | [
"func",
"updateTaskStatus",
"(",
"t",
"*",
"api",
".",
"Task",
",",
"newStatus",
"api",
".",
"TaskState",
",",
"message",
"string",
")",
"{",
"t",
".",
"Status",
"=",
"api",
".",
"TaskStatus",
"{",
"State",
":",
"newStatus",
",",
"Message",
":",
"messa... | // updateTaskStatus sets TaskStatus and updates timestamp. | [
"updateTaskStatus",
"sets",
"TaskStatus",
"and",
"updates",
"timestamp",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/allocator/network.go#L1521-L1527 | train |
docker/swarmkit | manager/allocator/network.go | GetIngressNetwork | func GetIngressNetwork(s *store.MemoryStore) (*api.Network, error) {
var (
networks []*api.Network
err error
)
s.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.All)
})
if err != nil {
return nil, err
}
for _, n := range networks {
if IsIngressNetwork(n) {
return n, nil
}
}
return nil, ErrNoIngress
} | go | func GetIngressNetwork(s *store.MemoryStore) (*api.Network, error) {
var (
networks []*api.Network
err error
)
s.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.All)
})
if err != nil {
return nil, err
}
for _, n := range networks {
if IsIngressNetwork(n) {
return n, nil
}
}
return nil, ErrNoIngress
} | [
"func",
"GetIngressNetwork",
"(",
"s",
"*",
"store",
".",
"MemoryStore",
")",
"(",
"*",
"api",
".",
"Network",
",",
"error",
")",
"{",
"var",
"(",
"networks",
"[",
"]",
"*",
"api",
".",
"Network",
"\n",
"err",
"error",
"\n",
")",
"\n",
"s",
".",
... | // GetIngressNetwork fetches the ingress network from store.
// ErrNoIngress will be returned if the ingress network is not present,
// nil otherwise. In case of any other failure in accessing the store,
// the respective error will be reported as is. | [
"GetIngressNetwork",
"fetches",
"the",
"ingress",
"network",
"from",
"store",
".",
"ErrNoIngress",
"will",
"be",
"returned",
"if",
"the",
"ingress",
"network",
"is",
"not",
"present",
"nil",
"otherwise",
".",
"In",
"case",
"of",
"any",
"other",
"failure",
"in"... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/allocator/network.go#L1538-L1555 | train |
docker/swarmkit | manager/health/health.go | Check | func (s *Server) Check(ctx context.Context, in *api.HealthCheckRequest) (*api.HealthCheckResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
if in.Service == "" {
// check the server overall health status.
return &api.HealthCheckResponse{
Status: api.HealthCheckResponse_SERVING,
}, nil
}
if status, ok := s.statusMap[in.Service]; ok {
return &api.HealthCheckResponse{
Status: status,
}, nil
}
return nil, status.Errorf(codes.NotFound, "unknown service")
} | go | func (s *Server) Check(ctx context.Context, in *api.HealthCheckRequest) (*api.HealthCheckResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
if in.Service == "" {
// check the server overall health status.
return &api.HealthCheckResponse{
Status: api.HealthCheckResponse_SERVING,
}, nil
}
if status, ok := s.statusMap[in.Service]; ok {
return &api.HealthCheckResponse{
Status: status,
}, nil
}
return nil, status.Errorf(codes.NotFound, "unknown service")
} | [
"func",
"(",
"s",
"*",
"Server",
")",
"Check",
"(",
"ctx",
"context",
".",
"Context",
",",
"in",
"*",
"api",
".",
"HealthCheckRequest",
")",
"(",
"*",
"api",
".",
"HealthCheckResponse",
",",
"error",
")",
"{",
"s",
".",
"mu",
".",
"Lock",
"(",
")",... | // Check checks if the grpc server is healthy and running. | [
"Check",
"checks",
"if",
"the",
"grpc",
"server",
"is",
"healthy",
"and",
"running",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/health/health.go#L35-L50 | train |
docker/swarmkit | manager/controlapi/extension.go | CreateExtension | func (s *Server) CreateExtension(ctx context.Context, request *api.CreateExtensionRequest) (*api.CreateExtensionResponse, error) {
if request.Annotations == nil || request.Annotations.Name == "" {
return nil, status.Errorf(codes.InvalidArgument, "extension name must be provided")
}
extension := &api.Extension{
ID: identity.NewID(),
Annotations: *request.Annotations,
Description: request.Description,
}
err := s.store.Update(func(tx store.Tx) error {
return store.CreateExtension(tx, extension)
})
switch err {
case store.ErrNameConflict:
return nil, status.Errorf(codes.AlreadyExists, "extension %s already exists", request.Annotations.Name)
case nil:
log.G(ctx).WithFields(logrus.Fields{
"extension.Name": request.Annotations.Name,
"method": "CreateExtension",
}).Debugf("extension created")
return &api.CreateExtensionResponse{Extension: extension}, nil
default:
return nil, status.Errorf(codes.Internal, "could not create extension: %v", err.Error())
}
} | go | func (s *Server) CreateExtension(ctx context.Context, request *api.CreateExtensionRequest) (*api.CreateExtensionResponse, error) {
if request.Annotations == nil || request.Annotations.Name == "" {
return nil, status.Errorf(codes.InvalidArgument, "extension name must be provided")
}
extension := &api.Extension{
ID: identity.NewID(),
Annotations: *request.Annotations,
Description: request.Description,
}
err := s.store.Update(func(tx store.Tx) error {
return store.CreateExtension(tx, extension)
})
switch err {
case store.ErrNameConflict:
return nil, status.Errorf(codes.AlreadyExists, "extension %s already exists", request.Annotations.Name)
case nil:
log.G(ctx).WithFields(logrus.Fields{
"extension.Name": request.Annotations.Name,
"method": "CreateExtension",
}).Debugf("extension created")
return &api.CreateExtensionResponse{Extension: extension}, nil
default:
return nil, status.Errorf(codes.Internal, "could not create extension: %v", err.Error())
}
} | [
"func",
"(",
"s",
"*",
"Server",
")",
"CreateExtension",
"(",
"ctx",
"context",
".",
"Context",
",",
"request",
"*",
"api",
".",
"CreateExtensionRequest",
")",
"(",
"*",
"api",
".",
"CreateExtensionResponse",
",",
"error",
")",
"{",
"if",
"request",
".",
... | // CreateExtension creates an `Extension` based on the provided `CreateExtensionRequest.Extension`
// and returns a `CreateExtensionResponse`.
// - Returns `InvalidArgument` if the `CreateExtensionRequest.Extension` is malformed,
// or fails validation.
// - Returns an error if the creation fails. | [
"CreateExtension",
"creates",
"an",
"Extension",
"based",
"on",
"the",
"provided",
"CreateExtensionRequest",
".",
"Extension",
"and",
"returns",
"a",
"CreateExtensionResponse",
".",
"-",
"Returns",
"InvalidArgument",
"if",
"the",
"CreateExtensionRequest",
".",
"Extensio... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/extension.go#L21-L49 | train |
docker/swarmkit | manager/controlapi/extension.go | GetExtension | func (s *Server) GetExtension(ctx context.Context, request *api.GetExtensionRequest) (*api.GetExtensionResponse, error) {
if request.ExtensionID == "" {
return nil, status.Errorf(codes.InvalidArgument, "extension ID must be provided")
}
var extension *api.Extension
s.store.View(func(tx store.ReadTx) {
extension = store.GetExtension(tx, request.ExtensionID)
})
if extension == nil {
return nil, status.Errorf(codes.NotFound, "extension %s not found", request.ExtensionID)
}
return &api.GetExtensionResponse{Extension: extension}, nil
} | go | func (s *Server) GetExtension(ctx context.Context, request *api.GetExtensionRequest) (*api.GetExtensionResponse, error) {
if request.ExtensionID == "" {
return nil, status.Errorf(codes.InvalidArgument, "extension ID must be provided")
}
var extension *api.Extension
s.store.View(func(tx store.ReadTx) {
extension = store.GetExtension(tx, request.ExtensionID)
})
if extension == nil {
return nil, status.Errorf(codes.NotFound, "extension %s not found", request.ExtensionID)
}
return &api.GetExtensionResponse{Extension: extension}, nil
} | [
"func",
"(",
"s",
"*",
"Server",
")",
"GetExtension",
"(",
"ctx",
"context",
".",
"Context",
",",
"request",
"*",
"api",
".",
"GetExtensionRequest",
")",
"(",
"*",
"api",
".",
"GetExtensionResponse",
",",
"error",
")",
"{",
"if",
"request",
".",
"Extensi... | // GetExtension returns a `GetExtensionResponse` with a `Extension` with the same
// id as `GetExtensionRequest.extension_id`
// - Returns `NotFound` if the Extension with the given id is not found.
// - Returns `InvalidArgument` if the `GetExtensionRequest.extension_id` is empty.
// - Returns an error if the get fails. | [
"GetExtension",
"returns",
"a",
"GetExtensionResponse",
"with",
"a",
"Extension",
"with",
"the",
"same",
"id",
"as",
"GetExtensionRequest",
".",
"extension_id",
"-",
"Returns",
"NotFound",
"if",
"the",
"Extension",
"with",
"the",
"given",
"id",
"is",
"not",
"fou... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/extension.go#L56-L71 | train |
docker/swarmkit | manager/controlapi/extension.go | RemoveExtension | func (s *Server) RemoveExtension(ctx context.Context, request *api.RemoveExtensionRequest) (*api.RemoveExtensionResponse, error) {
if request.ExtensionID == "" {
return nil, status.Errorf(codes.InvalidArgument, "extension ID must be provided")
}
err := s.store.Update(func(tx store.Tx) error {
// Check if the extension exists
extension := store.GetExtension(tx, request.ExtensionID)
if extension == nil {
return status.Errorf(codes.NotFound, "could not find extension %s", request.ExtensionID)
}
// Check if any resources of this type present in the store, return error if so
resources, err := store.FindResources(tx, store.ByKind(request.ExtensionID))
if err != nil {
return status.Errorf(codes.Internal, "could not find resources using extension %s: %v", request.ExtensionID, err)
}
if len(resources) != 0 {
resourceNames := make([]string, 0, len(resources))
// Number of resources for an extension could be quite large.
// Show a limited number of resources for debugging.
attachedResourceForDebug := 10
for _, resource := range resources {
resourceNames = append(resourceNames, resource.Annotations.Name)
attachedResourceForDebug = attachedResourceForDebug - 1
if attachedResourceForDebug == 0 {
break
}
}
extensionName := extension.Annotations.Name
resourceNameStr := strings.Join(resourceNames, ", ")
resourceStr := "resources"
if len(resourceNames) == 1 {
resourceStr = "resource"
}
return status.Errorf(codes.InvalidArgument, "extension '%s' is in use by the following %s: %v", extensionName, resourceStr, resourceNameStr)
}
return store.DeleteExtension(tx, request.ExtensionID)
})
switch err {
case store.ErrNotExist:
return nil, status.Errorf(codes.NotFound, "extension %s not found", request.ExtensionID)
case nil:
log.G(ctx).WithFields(logrus.Fields{
"extension.ID": request.ExtensionID,
"method": "RemoveExtension",
}).Debugf("extension removed")
return &api.RemoveExtensionResponse{}, nil
default:
return nil, err
}
} | go | func (s *Server) RemoveExtension(ctx context.Context, request *api.RemoveExtensionRequest) (*api.RemoveExtensionResponse, error) {
if request.ExtensionID == "" {
return nil, status.Errorf(codes.InvalidArgument, "extension ID must be provided")
}
err := s.store.Update(func(tx store.Tx) error {
// Check if the extension exists
extension := store.GetExtension(tx, request.ExtensionID)
if extension == nil {
return status.Errorf(codes.NotFound, "could not find extension %s", request.ExtensionID)
}
// Check if any resources of this type present in the store, return error if so
resources, err := store.FindResources(tx, store.ByKind(request.ExtensionID))
if err != nil {
return status.Errorf(codes.Internal, "could not find resources using extension %s: %v", request.ExtensionID, err)
}
if len(resources) != 0 {
resourceNames := make([]string, 0, len(resources))
// Number of resources for an extension could be quite large.
// Show a limited number of resources for debugging.
attachedResourceForDebug := 10
for _, resource := range resources {
resourceNames = append(resourceNames, resource.Annotations.Name)
attachedResourceForDebug = attachedResourceForDebug - 1
if attachedResourceForDebug == 0 {
break
}
}
extensionName := extension.Annotations.Name
resourceNameStr := strings.Join(resourceNames, ", ")
resourceStr := "resources"
if len(resourceNames) == 1 {
resourceStr = "resource"
}
return status.Errorf(codes.InvalidArgument, "extension '%s' is in use by the following %s: %v", extensionName, resourceStr, resourceNameStr)
}
return store.DeleteExtension(tx, request.ExtensionID)
})
switch err {
case store.ErrNotExist:
return nil, status.Errorf(codes.NotFound, "extension %s not found", request.ExtensionID)
case nil:
log.G(ctx).WithFields(logrus.Fields{
"extension.ID": request.ExtensionID,
"method": "RemoveExtension",
}).Debugf("extension removed")
return &api.RemoveExtensionResponse{}, nil
default:
return nil, err
}
} | [
"func",
"(",
"s",
"*",
"Server",
")",
"RemoveExtension",
"(",
"ctx",
"context",
".",
"Context",
",",
"request",
"*",
"api",
".",
"RemoveExtensionRequest",
")",
"(",
"*",
"api",
".",
"RemoveExtensionResponse",
",",
"error",
")",
"{",
"if",
"request",
".",
... | // RemoveExtension removes the extension referenced by `RemoveExtensionRequest.ID`.
// - Returns `InvalidArgument` if `RemoveExtensionRequest.extension_id` is empty.
// - Returns `NotFound` if the an extension named `RemoveExtensionRequest.extension_id` is not found.
// - Returns an error if the deletion fails. | [
"RemoveExtension",
"removes",
"the",
"extension",
"referenced",
"by",
"RemoveExtensionRequest",
".",
"ID",
".",
"-",
"Returns",
"InvalidArgument",
"if",
"RemoveExtensionRequest",
".",
"extension_id",
"is",
"empty",
".",
"-",
"Returns",
"NotFound",
"if",
"the",
"an",... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/extension.go#L77-L133 | train |
docker/swarmkit | log/context.go | WithFields | func WithFields(ctx context.Context, fields logrus.Fields) context.Context {
logger := ctx.Value(loggerKey{})
if logger == nil {
logger = L
}
return WithLogger(ctx, logger.(*logrus.Entry).WithFields(fields))
} | go | func WithFields(ctx context.Context, fields logrus.Fields) context.Context {
logger := ctx.Value(loggerKey{})
if logger == nil {
logger = L
}
return WithLogger(ctx, logger.(*logrus.Entry).WithFields(fields))
} | [
"func",
"WithFields",
"(",
"ctx",
"context",
".",
"Context",
",",
"fields",
"logrus",
".",
"Fields",
")",
"context",
".",
"Context",
"{",
"logger",
":=",
"ctx",
".",
"Value",
"(",
"loggerKey",
"{",
"}",
")",
"\n",
"if",
"logger",
"==",
"nil",
"{",
"l... | // WithFields returns a new context with added fields to logger. | [
"WithFields",
"returns",
"a",
"new",
"context",
"with",
"added",
"fields",
"to",
"logger",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/log/context.go#L33-L40 | train |
docker/swarmkit | log/context.go | WithField | func WithField(ctx context.Context, key, value string) context.Context {
return WithFields(ctx, logrus.Fields{key: value})
} | go | func WithField(ctx context.Context, key, value string) context.Context {
return WithFields(ctx, logrus.Fields{key: value})
} | [
"func",
"WithField",
"(",
"ctx",
"context",
".",
"Context",
",",
"key",
",",
"value",
"string",
")",
"context",
".",
"Context",
"{",
"return",
"WithFields",
"(",
"ctx",
",",
"logrus",
".",
"Fields",
"{",
"key",
":",
"value",
"}",
")",
"\n",
"}"
] | // WithField is convenience wrapper around WithFields. | [
"WithField",
"is",
"convenience",
"wrapper",
"around",
"WithFields",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/log/context.go#L43-L45 | train |
docker/swarmkit | log/context.go | GetModulePath | func GetModulePath(ctx context.Context) string {
module := ctx.Value(moduleKey{})
if module == nil {
return ""
}
return module.(string)
} | go | func GetModulePath(ctx context.Context) string {
module := ctx.Value(moduleKey{})
if module == nil {
return ""
}
return module.(string)
} | [
"func",
"GetModulePath",
"(",
"ctx",
"context",
".",
"Context",
")",
"string",
"{",
"module",
":=",
"ctx",
".",
"Value",
"(",
"moduleKey",
"{",
"}",
")",
"\n",
"if",
"module",
"==",
"nil",
"{",
"return",
"\"\"",
"\n",
"}",
"\n",
"return",
"module",
"... | // GetModulePath returns the module path for the provided context. If no module
// is set, an empty string is returned. | [
"GetModulePath",
"returns",
"the",
"module",
"path",
"for",
"the",
"provided",
"context",
".",
"If",
"no",
"module",
"is",
"set",
"an",
"empty",
"string",
"is",
"returned",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/log/context.go#L89-L96 | train |
docker/swarmkit | api/genericresource/resource_management.go | Claim | func Claim(nodeAvailableResources, taskAssigned *[]*api.GenericResource,
taskReservations []*api.GenericResource) error {
var resSelected []*api.GenericResource
for _, res := range taskReservations {
tr := res.GetDiscreteResourceSpec()
if tr == nil {
return fmt.Errorf("task should only hold Discrete type")
}
// Select the resources
nrs, err := selectNodeResources(*nodeAvailableResources, tr)
if err != nil {
return err
}
resSelected = append(resSelected, nrs...)
}
ClaimResources(nodeAvailableResources, taskAssigned, resSelected)
return nil
} | go | func Claim(nodeAvailableResources, taskAssigned *[]*api.GenericResource,
taskReservations []*api.GenericResource) error {
var resSelected []*api.GenericResource
for _, res := range taskReservations {
tr := res.GetDiscreteResourceSpec()
if tr == nil {
return fmt.Errorf("task should only hold Discrete type")
}
// Select the resources
nrs, err := selectNodeResources(*nodeAvailableResources, tr)
if err != nil {
return err
}
resSelected = append(resSelected, nrs...)
}
ClaimResources(nodeAvailableResources, taskAssigned, resSelected)
return nil
} | [
"func",
"Claim",
"(",
"nodeAvailableResources",
",",
"taskAssigned",
"*",
"[",
"]",
"*",
"api",
".",
"GenericResource",
",",
"taskReservations",
"[",
"]",
"*",
"api",
".",
"GenericResource",
")",
"error",
"{",
"var",
"resSelected",
"[",
"]",
"*",
"api",
".... | // Claim assigns GenericResources to a task by taking them from the
// node's GenericResource list and storing them in the task's available list | [
"Claim",
"assigns",
"GenericResources",
"to",
"a",
"task",
"by",
"taking",
"them",
"from",
"the",
"node",
"s",
"GenericResource",
"list",
"and",
"storing",
"them",
"in",
"the",
"task",
"s",
"available",
"list"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/api/genericresource/resource_management.go#L11-L32 | train |
docker/swarmkit | api/genericresource/resource_management.go | ClaimResources | func ClaimResources(nodeAvailableResources, taskAssigned *[]*api.GenericResource,
resSelected []*api.GenericResource) {
*taskAssigned = append(*taskAssigned, resSelected...)
ConsumeNodeResources(nodeAvailableResources, resSelected)
} | go | func ClaimResources(nodeAvailableResources, taskAssigned *[]*api.GenericResource,
resSelected []*api.GenericResource) {
*taskAssigned = append(*taskAssigned, resSelected...)
ConsumeNodeResources(nodeAvailableResources, resSelected)
} | [
"func",
"ClaimResources",
"(",
"nodeAvailableResources",
",",
"taskAssigned",
"*",
"[",
"]",
"*",
"api",
".",
"GenericResource",
",",
"resSelected",
"[",
"]",
"*",
"api",
".",
"GenericResource",
")",
"{",
"*",
"taskAssigned",
"=",
"append",
"(",
"*",
"taskAs... | // ClaimResources adds the specified resources to the task's list
// and removes them from the node's generic resource list | [
"ClaimResources",
"adds",
"the",
"specified",
"resources",
"to",
"the",
"task",
"s",
"list",
"and",
"removes",
"them",
"from",
"the",
"node",
"s",
"generic",
"resource",
"list"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/api/genericresource/resource_management.go#L36-L40 | train |
docker/swarmkit | api/genericresource/resource_management.go | Reclaim | func Reclaim(nodeAvailableResources *[]*api.GenericResource, taskAssigned, nodeRes []*api.GenericResource) error {
err := reclaimResources(nodeAvailableResources, taskAssigned)
if err != nil {
return err
}
sanitize(nodeRes, nodeAvailableResources)
return nil
} | go | func Reclaim(nodeAvailableResources *[]*api.GenericResource, taskAssigned, nodeRes []*api.GenericResource) error {
err := reclaimResources(nodeAvailableResources, taskAssigned)
if err != nil {
return err
}
sanitize(nodeRes, nodeAvailableResources)
return nil
} | [
"func",
"Reclaim",
"(",
"nodeAvailableResources",
"*",
"[",
"]",
"*",
"api",
".",
"GenericResource",
",",
"taskAssigned",
",",
"nodeRes",
"[",
"]",
"*",
"api",
".",
"GenericResource",
")",
"error",
"{",
"err",
":=",
"reclaimResources",
"(",
"nodeAvailableResou... | // Reclaim adds the resources taken by the task to the node's store | [
"Reclaim",
"adds",
"the",
"resources",
"taken",
"by",
"the",
"task",
"to",
"the",
"node",
"s",
"store"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/api/genericresource/resource_management.go#L75-L84 | train |
docker/swarmkit | api/genericresource/string.go | Kind | func Kind(res *api.GenericResource) string {
switch r := res.Resource.(type) {
case *api.GenericResource_DiscreteResourceSpec:
return r.DiscreteResourceSpec.Kind
case *api.GenericResource_NamedResourceSpec:
return r.NamedResourceSpec.Kind
}
return ""
} | go | func Kind(res *api.GenericResource) string {
switch r := res.Resource.(type) {
case *api.GenericResource_DiscreteResourceSpec:
return r.DiscreteResourceSpec.Kind
case *api.GenericResource_NamedResourceSpec:
return r.NamedResourceSpec.Kind
}
return ""
} | [
"func",
"Kind",
"(",
"res",
"*",
"api",
".",
"GenericResource",
")",
"string",
"{",
"switch",
"r",
":=",
"res",
".",
"Resource",
".",
"(",
"type",
")",
"{",
"case",
"*",
"api",
".",
"GenericResource_DiscreteResourceSpec",
":",
"return",
"r",
".",
"Discre... | // Kind returns the kind key as a string | [
"Kind",
"returns",
"the",
"kind",
"key",
"as",
"a",
"string"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/api/genericresource/string.go#L15-L24 | train |
docker/swarmkit | api/genericresource/string.go | Value | func Value(res *api.GenericResource) string {
switch res := res.Resource.(type) {
case *api.GenericResource_DiscreteResourceSpec:
return discreteToString(res)
case *api.GenericResource_NamedResourceSpec:
return res.NamedResourceSpec.Value
}
return ""
} | go | func Value(res *api.GenericResource) string {
switch res := res.Resource.(type) {
case *api.GenericResource_DiscreteResourceSpec:
return discreteToString(res)
case *api.GenericResource_NamedResourceSpec:
return res.NamedResourceSpec.Value
}
return ""
} | [
"func",
"Value",
"(",
"res",
"*",
"api",
".",
"GenericResource",
")",
"string",
"{",
"switch",
"res",
":=",
"res",
".",
"Resource",
".",
"(",
"type",
")",
"{",
"case",
"*",
"api",
".",
"GenericResource_DiscreteResourceSpec",
":",
"return",
"discreteToString"... | // Value returns the value key as a string | [
"Value",
"returns",
"the",
"value",
"key",
"as",
"a",
"string"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/api/genericresource/string.go#L27-L36 | train |
docker/swarmkit | api/genericresource/string.go | EnvFormat | func EnvFormat(res []*api.GenericResource, prefix string) []string {
envs := make(map[string][]string)
for _, v := range res {
key := Kind(v)
val := Value(v)
envs[key] = append(envs[key], val)
}
env := make([]string, 0, len(res))
for k, v := range envs {
k = strings.ToUpper(prefix + "_" + k)
env = append(env, k+"="+strings.Join(v, ","))
}
return env
} | go | func EnvFormat(res []*api.GenericResource, prefix string) []string {
envs := make(map[string][]string)
for _, v := range res {
key := Kind(v)
val := Value(v)
envs[key] = append(envs[key], val)
}
env := make([]string, 0, len(res))
for k, v := range envs {
k = strings.ToUpper(prefix + "_" + k)
env = append(env, k+"="+strings.Join(v, ","))
}
return env
} | [
"func",
"EnvFormat",
"(",
"res",
"[",
"]",
"*",
"api",
".",
"GenericResource",
",",
"prefix",
"string",
")",
"[",
"]",
"string",
"{",
"envs",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"[",
"]",
"string",
")",
"\n",
"for",
"_",
",",
"v",
":=",... | // EnvFormat returns the environment string version of the resource | [
"EnvFormat",
"returns",
"the",
"environment",
"string",
"version",
"of",
"the",
"resource"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/api/genericresource/string.go#L39-L54 | train |
docker/swarmkit | cmd/swarmctl/service/flagparser/flags.go | Merge | func Merge(cmd *cobra.Command, spec *api.ServiceSpec, c api.ControlClient) error {
flags := cmd.Flags()
if flags.Changed("force") {
force, err := flags.GetBool("force")
if err != nil {
return err
}
if force {
spec.Task.ForceUpdate++
}
}
if flags.Changed("name") {
name, err := flags.GetString("name")
if err != nil {
return err
}
spec.Annotations.Name = name
}
if flags.Changed("label") {
labels, err := flags.GetStringSlice("label")
if err != nil {
return err
}
spec.Annotations.Labels = map[string]string{}
for _, l := range labels {
parts := strings.SplitN(l, "=", 2)
if len(parts) != 2 {
return fmt.Errorf("malformed label: %s", l)
}
spec.Annotations.Labels[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
}
}
if err := parseMode(flags, spec); err != nil {
return err
}
if err := parseContainer(flags, spec); err != nil {
return err
}
if err := parseResource(flags, spec); err != nil {
return err
}
if err := parsePorts(flags, spec); err != nil {
return err
}
if err := parseNetworks(cmd, spec, c); err != nil {
return err
}
if err := parseRestart(flags, spec); err != nil {
return err
}
if err := parseUpdate(flags, spec); err != nil {
return err
}
if err := parsePlacement(flags, spec); err != nil {
return err
}
if err := parseBind(flags, spec); err != nil {
return err
}
if err := parseVolume(flags, spec); err != nil {
return err
}
if err := parseTmpfs(flags, spec); err != nil {
return err
}
if err := parseNpipe(flags, spec); err != nil {
return err
}
driver, err := common.ParseLogDriverFlags(flags)
if err != nil {
return err
}
spec.Task.LogDriver = driver
return nil
} | go | func Merge(cmd *cobra.Command, spec *api.ServiceSpec, c api.ControlClient) error {
flags := cmd.Flags()
if flags.Changed("force") {
force, err := flags.GetBool("force")
if err != nil {
return err
}
if force {
spec.Task.ForceUpdate++
}
}
if flags.Changed("name") {
name, err := flags.GetString("name")
if err != nil {
return err
}
spec.Annotations.Name = name
}
if flags.Changed("label") {
labels, err := flags.GetStringSlice("label")
if err != nil {
return err
}
spec.Annotations.Labels = map[string]string{}
for _, l := range labels {
parts := strings.SplitN(l, "=", 2)
if len(parts) != 2 {
return fmt.Errorf("malformed label: %s", l)
}
spec.Annotations.Labels[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
}
}
if err := parseMode(flags, spec); err != nil {
return err
}
if err := parseContainer(flags, spec); err != nil {
return err
}
if err := parseResource(flags, spec); err != nil {
return err
}
if err := parsePorts(flags, spec); err != nil {
return err
}
if err := parseNetworks(cmd, spec, c); err != nil {
return err
}
if err := parseRestart(flags, spec); err != nil {
return err
}
if err := parseUpdate(flags, spec); err != nil {
return err
}
if err := parsePlacement(flags, spec); err != nil {
return err
}
if err := parseBind(flags, spec); err != nil {
return err
}
if err := parseVolume(flags, spec); err != nil {
return err
}
if err := parseTmpfs(flags, spec); err != nil {
return err
}
if err := parseNpipe(flags, spec); err != nil {
return err
}
driver, err := common.ParseLogDriverFlags(flags)
if err != nil {
return err
}
spec.Task.LogDriver = driver
return nil
} | [
"func",
"Merge",
"(",
"cmd",
"*",
"cobra",
".",
"Command",
",",
"spec",
"*",
"api",
".",
"ServiceSpec",
",",
"c",
"api",
".",
"ControlClient",
")",
"error",
"{",
"flags",
":=",
"cmd",
".",
"Flags",
"(",
")",
"\n",
"if",
"flags",
".",
"Changed",
"("... | // Merge merges a flagset into a service spec. | [
"Merge",
"merges",
"a",
"flagset",
"into",
"a",
"service",
"spec",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/cmd/swarmctl/service/flagparser/flags.go#L68-L159 | train |
docker/swarmkit | node/node.go | observe | func (l *lastSeenRole) observe(newRole api.NodeRole) bool {
changed := l.role != newRole
l.role = newRole
return changed
} | go | func (l *lastSeenRole) observe(newRole api.NodeRole) bool {
changed := l.role != newRole
l.role = newRole
return changed
} | [
"func",
"(",
"l",
"*",
"lastSeenRole",
")",
"observe",
"(",
"newRole",
"api",
".",
"NodeRole",
")",
"bool",
"{",
"changed",
":=",
"l",
".",
"role",
"!=",
"newRole",
"\n",
"l",
".",
"role",
"=",
"newRole",
"\n",
"return",
"changed",
"\n",
"}"
] | // observe notes the latest value of this node role, and returns true if it
// is the first seen value, or is different from the most recently seen value. | [
"observe",
"notes",
"the",
"latest",
"value",
"of",
"this",
"node",
"role",
"and",
"returns",
"true",
"if",
"it",
"is",
"the",
"first",
"seen",
"value",
"or",
"is",
"different",
"from",
"the",
"most",
"recently",
"seen",
"value",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L174-L178 | train |
docker/swarmkit | node/node.go | RemoteAPIAddr | func (n *Node) RemoteAPIAddr() (string, error) {
n.RLock()
defer n.RUnlock()
if n.manager == nil {
return "", errors.New("manager is not running")
}
addr := n.manager.Addr()
if addr == "" {
return "", errors.New("manager addr is not set")
}
return addr, nil
} | go | func (n *Node) RemoteAPIAddr() (string, error) {
n.RLock()
defer n.RUnlock()
if n.manager == nil {
return "", errors.New("manager is not running")
}
addr := n.manager.Addr()
if addr == "" {
return "", errors.New("manager addr is not set")
}
return addr, nil
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"RemoteAPIAddr",
"(",
")",
"(",
"string",
",",
"error",
")",
"{",
"n",
".",
"RLock",
"(",
")",
"\n",
"defer",
"n",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"n",
".",
"manager",
"==",
"nil",
"{",
"return",
"\"\""... | // RemoteAPIAddr returns address on which remote manager api listens.
// Returns nil if node is not manager. | [
"RemoteAPIAddr",
"returns",
"address",
"on",
"which",
"remote",
"manager",
"api",
"listens",
".",
"Returns",
"nil",
"if",
"node",
"is",
"not",
"manager",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L182-L193 | train |
docker/swarmkit | node/node.go | New | func New(c *Config) (*Node, error) {
if err := os.MkdirAll(c.StateDir, 0700); err != nil {
return nil, err
}
stateFile := filepath.Join(c.StateDir, stateFilename)
dt, err := ioutil.ReadFile(stateFile)
var p []api.Peer
if err != nil && !os.IsNotExist(err) {
return nil, err
}
if err == nil {
if err := json.Unmarshal(dt, &p); err != nil {
return nil, err
}
}
n := &Node{
remotes: newPersistentRemotes(stateFile, p...),
role: ca.WorkerRole,
config: c,
started: make(chan struct{}),
stopped: make(chan struct{}),
closed: make(chan struct{}),
ready: make(chan struct{}),
notifyNodeChange: make(chan *agent.NodeChanges, 1),
unlockKey: c.UnlockKey,
}
if n.config.JoinAddr != "" || n.config.ForceNewCluster {
n.remotes = newPersistentRemotes(filepath.Join(n.config.StateDir, stateFilename))
if n.config.JoinAddr != "" {
n.remotes.Observe(api.Peer{Addr: n.config.JoinAddr}, remotes.DefaultObservationWeight)
}
}
n.connBroker = connectionbroker.New(n.remotes)
n.roleCond = sync.NewCond(n.RLocker())
n.connCond = sync.NewCond(n.RLocker())
return n, nil
} | go | func New(c *Config) (*Node, error) {
if err := os.MkdirAll(c.StateDir, 0700); err != nil {
return nil, err
}
stateFile := filepath.Join(c.StateDir, stateFilename)
dt, err := ioutil.ReadFile(stateFile)
var p []api.Peer
if err != nil && !os.IsNotExist(err) {
return nil, err
}
if err == nil {
if err := json.Unmarshal(dt, &p); err != nil {
return nil, err
}
}
n := &Node{
remotes: newPersistentRemotes(stateFile, p...),
role: ca.WorkerRole,
config: c,
started: make(chan struct{}),
stopped: make(chan struct{}),
closed: make(chan struct{}),
ready: make(chan struct{}),
notifyNodeChange: make(chan *agent.NodeChanges, 1),
unlockKey: c.UnlockKey,
}
if n.config.JoinAddr != "" || n.config.ForceNewCluster {
n.remotes = newPersistentRemotes(filepath.Join(n.config.StateDir, stateFilename))
if n.config.JoinAddr != "" {
n.remotes.Observe(api.Peer{Addr: n.config.JoinAddr}, remotes.DefaultObservationWeight)
}
}
n.connBroker = connectionbroker.New(n.remotes)
n.roleCond = sync.NewCond(n.RLocker())
n.connCond = sync.NewCond(n.RLocker())
return n, nil
} | [
"func",
"New",
"(",
"c",
"*",
"Config",
")",
"(",
"*",
"Node",
",",
"error",
")",
"{",
"if",
"err",
":=",
"os",
".",
"MkdirAll",
"(",
"c",
".",
"StateDir",
",",
"0700",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}"... | // New returns new Node instance. | [
"New",
"returns",
"new",
"Node",
"instance",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L196-L235 | train |
docker/swarmkit | node/node.go | BindRemote | func (n *Node) BindRemote(ctx context.Context, listenAddr string, advertiseAddr string) error {
n.RLock()
defer n.RUnlock()
if n.manager == nil {
return errors.New("manager is not running")
}
return n.manager.BindRemote(ctx, manager.RemoteAddrs{
ListenAddr: listenAddr,
AdvertiseAddr: advertiseAddr,
})
} | go | func (n *Node) BindRemote(ctx context.Context, listenAddr string, advertiseAddr string) error {
n.RLock()
defer n.RUnlock()
if n.manager == nil {
return errors.New("manager is not running")
}
return n.manager.BindRemote(ctx, manager.RemoteAddrs{
ListenAddr: listenAddr,
AdvertiseAddr: advertiseAddr,
})
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"BindRemote",
"(",
"ctx",
"context",
".",
"Context",
",",
"listenAddr",
"string",
",",
"advertiseAddr",
"string",
")",
"error",
"{",
"n",
".",
"RLock",
"(",
")",
"\n",
"defer",
"n",
".",
"RUnlock",
"(",
")",
"\n",
... | // BindRemote starts a listener that exposes the remote API. | [
"BindRemote",
"starts",
"a",
"listener",
"that",
"exposes",
"the",
"remote",
"API",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L238-L250 | train |
docker/swarmkit | node/node.go | Start | func (n *Node) Start(ctx context.Context) error {
err := errNodeStarted
n.startOnce.Do(func() {
close(n.started)
go n.run(ctx)
err = nil // clear error above, only once.
})
return err
} | go | func (n *Node) Start(ctx context.Context) error {
err := errNodeStarted
n.startOnce.Do(func() {
close(n.started)
go n.run(ctx)
err = nil // clear error above, only once.
})
return err
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"Start",
"(",
"ctx",
"context",
".",
"Context",
")",
"error",
"{",
"err",
":=",
"errNodeStarted",
"\n",
"n",
".",
"startOnce",
".",
"Do",
"(",
"func",
"(",
")",
"{",
"close",
"(",
"n",
".",
"started",
")",
"\n"... | // Start starts a node instance. | [
"Start",
"starts",
"a",
"node",
"instance",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L253-L262 | train |
docker/swarmkit | node/node.go | configVXLANUDPPort | func configVXLANUDPPort(ctx context.Context, vxlanUDPPort uint32) {
if err := overlayutils.ConfigVXLANUDPPort(vxlanUDPPort); err != nil {
log.G(ctx).WithError(err).Error("failed to configure VXLAN UDP port")
return
}
logrus.Infof("initialized VXLAN UDP port to %d ", vxlanUDPPort)
} | go | func configVXLANUDPPort(ctx context.Context, vxlanUDPPort uint32) {
if err := overlayutils.ConfigVXLANUDPPort(vxlanUDPPort); err != nil {
log.G(ctx).WithError(err).Error("failed to configure VXLAN UDP port")
return
}
logrus.Infof("initialized VXLAN UDP port to %d ", vxlanUDPPort)
} | [
"func",
"configVXLANUDPPort",
"(",
"ctx",
"context",
".",
"Context",
",",
"vxlanUDPPort",
"uint32",
")",
"{",
"if",
"err",
":=",
"overlayutils",
".",
"ConfigVXLANUDPPort",
"(",
"vxlanUDPPort",
")",
";",
"err",
"!=",
"nil",
"{",
"log",
".",
"G",
"(",
"ctx",... | // configVXLANUDPPort sets vxlan port in libnetwork | [
"configVXLANUDPPort",
"sets",
"vxlan",
"port",
"in",
"libnetwork"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L275-L281 | train |
docker/swarmkit | node/node.go | Stop | func (n *Node) Stop(ctx context.Context) error {
select {
case <-n.started:
default:
return errNodeNotStarted
}
// ask agent to clean up assignments
n.Lock()
if n.agent != nil {
if err := n.agent.Leave(ctx); err != nil {
log.G(ctx).WithError(err).Error("agent failed to clean up assignments")
}
}
n.Unlock()
n.stopOnce.Do(func() {
close(n.stopped)
})
select {
case <-n.closed:
return nil
case <-ctx.Done():
return ctx.Err()
}
} | go | func (n *Node) Stop(ctx context.Context) error {
select {
case <-n.started:
default:
return errNodeNotStarted
}
// ask agent to clean up assignments
n.Lock()
if n.agent != nil {
if err := n.agent.Leave(ctx); err != nil {
log.G(ctx).WithError(err).Error("agent failed to clean up assignments")
}
}
n.Unlock()
n.stopOnce.Do(func() {
close(n.stopped)
})
select {
case <-n.closed:
return nil
case <-ctx.Done():
return ctx.Err()
}
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"Stop",
"(",
"ctx",
"context",
".",
"Context",
")",
"error",
"{",
"select",
"{",
"case",
"<-",
"n",
".",
"started",
":",
"default",
":",
"return",
"errNodeNotStarted",
"\n",
"}",
"\n",
"n",
".",
"Lock",
"(",
")",... | // Stop stops node execution | [
"Stop",
"stops",
"node",
"execution"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L533-L558 | train |
docker/swarmkit | node/node.go | Err | func (n *Node) Err(ctx context.Context) error {
select {
case <-n.closed:
return n.err
case <-ctx.Done():
return ctx.Err()
}
} | go | func (n *Node) Err(ctx context.Context) error {
select {
case <-n.closed:
return n.err
case <-ctx.Done():
return ctx.Err()
}
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"Err",
"(",
"ctx",
"context",
".",
"Context",
")",
"error",
"{",
"select",
"{",
"case",
"<-",
"n",
".",
"closed",
":",
"return",
"n",
".",
"err",
"\n",
"case",
"<-",
"ctx",
".",
"Done",
"(",
")",
":",
"return... | // Err returns the error that caused the node to shutdown or nil. Err blocks
// until the node has fully shut down. | [
"Err",
"returns",
"the",
"error",
"that",
"caused",
"the",
"node",
"to",
"shutdown",
"or",
"nil",
".",
"Err",
"blocks",
"until",
"the",
"node",
"has",
"fully",
"shut",
"down",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L562-L569 | train |
docker/swarmkit | node/node.go | runAgent | func (n *Node) runAgent(ctx context.Context, db *bolt.DB, securityConfig *ca.SecurityConfig, ready chan<- struct{}) error {
// First, get a channel for knowing when a remote peer has been selected.
// The value returned from the remotesCh is ignored, we just need to know
// when the peer is selected
remotesCh := n.remotes.WaitSelect(ctx)
// then, we set up a new context to pass specifically to
// ListenControlSocket, and start that method to wait on a connection on
// the cluster control API.
waitCtx, waitCancel := context.WithCancel(ctx)
controlCh := n.ListenControlSocket(waitCtx)
// The goal here to wait either until we have a remote peer selected, or
// connection to the control
// socket. These are both ways to connect the
// agent to a manager, and we need to wait until one or the other is
// available to start the agent
waitPeer:
for {
select {
case <-ctx.Done():
break waitPeer
case <-remotesCh:
break waitPeer
case conn := <-controlCh:
// conn will probably be nil the first time we call this, probably,
// but only a non-nil conn represent an actual connection.
if conn != nil {
break waitPeer
}
}
}
// We can stop listening for new control socket connections once we're
// ready
waitCancel()
// NOTE(dperny): not sure why we need to recheck the context here. I guess
// it avoids a race if the context was canceled at the same time that a
// connection or peer was available. I think it's just an optimization.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
// Now we can go ahead and configure, create, and start the agent.
secChangesCh, secChangesCancel := securityConfig.Watch()
defer secChangesCancel()
rootCA := securityConfig.RootCA()
issuer := securityConfig.IssuerInfo()
agentConfig := &agent.Config{
Hostname: n.config.Hostname,
ConnBroker: n.connBroker,
Executor: n.config.Executor,
DB: db,
NotifyNodeChange: n.notifyNodeChange,
NotifyTLSChange: secChangesCh,
Credentials: securityConfig.ClientTLSCreds,
NodeTLSInfo: &api.NodeTLSInfo{
TrustRoot: rootCA.Certs,
CertIssuerPublicKey: issuer.PublicKey,
CertIssuerSubject: issuer.Subject,
},
FIPS: n.config.FIPS,
}
// if a join address has been specified, then if the agent fails to connect
// due to a TLS error, fail fast - don't keep re-trying to join
if n.config.JoinAddr != "" {
agentConfig.SessionTracker = &firstSessionErrorTracker{}
}
a, err := agent.New(agentConfig)
if err != nil {
return err
}
if err := a.Start(ctx); err != nil {
return err
}
n.Lock()
n.agent = a
n.Unlock()
defer func() {
n.Lock()
n.agent = nil
n.Unlock()
}()
// when the agent indicates that it is ready, we close the ready channel.
go func() {
<-a.Ready()
close(ready)
}()
// todo: manually call stop on context cancellation?
return a.Err(context.Background())
} | go | func (n *Node) runAgent(ctx context.Context, db *bolt.DB, securityConfig *ca.SecurityConfig, ready chan<- struct{}) error {
	// runAgent starts the node's agent. When the agent has started, the
	// provided ready channel is closed. When the agent exits, this will
	// return the error that caused it.
	//
	// First, get a channel for knowing when a remote peer has been selected.
	// The value returned from the remotesCh is ignored, we just need to know
	// when the peer is selected
	remotesCh := n.remotes.WaitSelect(ctx)
	// then, we set up a new context to pass specifically to
	// ListenControlSocket, and start that method to wait on a connection on
	// the cluster control API.
	waitCtx, waitCancel := context.WithCancel(ctx)
	controlCh := n.ListenControlSocket(waitCtx)
	// The goal here to wait either until we have a remote peer selected, or
	// connection to the control
	// socket. These are both ways to connect the
	// agent to a manager, and we need to wait until one or the other is
	// available to start the agent
waitPeer:
	for {
		select {
		case <-ctx.Done():
			break waitPeer
		case <-remotesCh:
			break waitPeer
		case conn := <-controlCh:
			// conn will probably be nil the first time we call this, probably,
			// but only a non-nil conn represent an actual connection.
			if conn != nil {
				break waitPeer
			}
		}
	}
	// We can stop listening for new control socket connections once we're
	// ready
	waitCancel()
	// NOTE(dperny): not sure why we need to recheck the context here. I guess
	// it avoids a race if the context was canceled at the same time that a
	// connection or peer was available. I think it's just an optimization.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	// Now we can go ahead and configure, create, and start the agent.
	secChangesCh, secChangesCancel := securityConfig.Watch()
	defer secChangesCancel()
	rootCA := securityConfig.RootCA()
	issuer := securityConfig.IssuerInfo()
	agentConfig := &agent.Config{
		Hostname:         n.config.Hostname,
		ConnBroker:       n.connBroker,
		Executor:         n.config.Executor,
		DB:               db,
		NotifyNodeChange: n.notifyNodeChange,
		NotifyTLSChange:  secChangesCh,
		Credentials:      securityConfig.ClientTLSCreds,
		NodeTLSInfo: &api.NodeTLSInfo{
			TrustRoot:           rootCA.Certs,
			CertIssuerPublicKey: issuer.PublicKey,
			CertIssuerSubject:   issuer.Subject,
		},
		FIPS: n.config.FIPS,
	}
	// if a join address has been specified, then if the agent fails to connect
	// due to a TLS error, fail fast - don't keep re-trying to join
	if n.config.JoinAddr != "" {
		agentConfig.SessionTracker = &firstSessionErrorTracker{}
	}
	a, err := agent.New(agentConfig)
	if err != nil {
		return err
	}
	if err := a.Start(ctx); err != nil {
		return err
	}
	// Publish the running agent on the node while it is alive, and clear it
	// again on exit so accessors only ever see a live agent.
	n.Lock()
	n.agent = a
	n.Unlock()
	defer func() {
		n.Lock()
		n.agent = nil
		n.Unlock()
	}()
	// when the agent indicates that it is ready, we close the ready channel.
	go func() {
		<-a.Ready()
		close(ready)
	}()
	// todo: manually call stop on context cancellation?
	return a.Err(context.Background())
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"runAgent",
"(",
"ctx",
"context",
".",
"Context",
",",
"db",
"*",
"bolt",
".",
"DB",
",",
"securityConfig",
"*",
"ca",
".",
"SecurityConfig",
",",
"ready",
"chan",
"<-",
"struct",
"{",
"}",
")",
"error",
"{",
"r... | // runAgent starts the node's agent. When the agent has started, the provided
// ready channel is closed. When the agent exits, this will return the error
// that caused it. | [
"runAgent",
"starts",
"the",
"node",
"s",
"agent",
".",
"When",
"the",
"agent",
"has",
"started",
"the",
"provided",
"ready",
"channel",
"is",
"closed",
".",
"When",
"the",
"agent",
"exits",
"this",
"will",
"return",
"the",
"error",
"that",
"caused",
"it",... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L574-L674 | train |
docker/swarmkit | node/node.go | ListenControlSocket | func (n *Node) ListenControlSocket(ctx context.Context) <-chan *grpc.ClientConn {
c := make(chan *grpc.ClientConn, 1)
n.RLock()
conn := n.conn
c <- conn
done := make(chan struct{})
go func() {
select {
case <-ctx.Done():
n.connCond.Broadcast()
case <-done:
}
}()
go func() {
defer close(c)
defer close(done)
defer n.RUnlock()
for {
select {
case <-ctx.Done():
return
default:
}
if conn == n.conn {
n.connCond.Wait()
continue
}
conn = n.conn
select {
case c <- conn:
case <-ctx.Done():
return
}
}
}()
return c
} | go | func (n *Node) ListenControlSocket(ctx context.Context) <-chan *grpc.ClientConn {
	// ListenControlSocket listens for changes of the connection used for
	// managing the cluster control api. The current value of n.conn
	// (possibly nil) is delivered first; after that, each change of n.conn
	// is forwarded on the returned channel until ctx is canceled, at which
	// point the channel is closed.
	c := make(chan *grpc.ClientConn, 1)
	n.RLock()
	conn := n.conn
	c <- conn
	done := make(chan struct{})
	// This goroutine exists to wake the cond-variable waiter below when the
	// caller's context is canceled, so the forwarding goroutine can observe
	// ctx.Done() and return instead of blocking in Wait forever.
	go func() {
		select {
		case <-ctx.Done():
			n.connCond.Broadcast()
		case <-done:
		}
	}()
	go func() {
		defer close(c)
		defer close(done)
		defer n.RUnlock()
		for {
			select {
			case <-ctx.Done():
				return
			default:
			}
			// Block until n.conn differs from the value we last delivered.
			if conn == n.conn {
				n.connCond.Wait()
				continue
			}
			conn = n.conn
			select {
			case c <- conn:
			case <-ctx.Done():
				return
			}
		}
	}()
	return c
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"ListenControlSocket",
"(",
"ctx",
"context",
".",
"Context",
")",
"<-",
"chan",
"*",
"grpc",
".",
"ClientConn",
"{",
"c",
":=",
"make",
"(",
"chan",
"*",
"grpc",
".",
"ClientConn",
",",
"1",
")",
"\n",
"n",
".",... | // ListenControlSocket listens changes of a connection for managing the
// cluster control api | [
"ListenControlSocket",
"listens",
"changes",
"of",
"a",
"connection",
"for",
"managing",
"the",
"cluster",
"control",
"api"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L695-L731 | train |
docker/swarmkit | node/node.go | NodeID | func (n *Node) NodeID() string {
n.RLock()
defer n.RUnlock()
return n.nodeID
} | go | func (n *Node) NodeID() string {
	// NodeID returns the current node's ID. It may be empty if no ID has
	// been assigned yet.
	n.RLock()
	id := n.nodeID
	n.RUnlock()
	return id
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"NodeID",
"(",
")",
"string",
"{",
"n",
".",
"RLock",
"(",
")",
"\n",
"defer",
"n",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"n",
".",
"nodeID",
"\n",
"}"
] | // NodeID returns current node's ID. May be empty if not set. | [
"NodeID",
"returns",
"current",
"node",
"s",
"ID",
".",
"May",
"be",
"empty",
"if",
"not",
"set",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L734-L738 | train |
docker/swarmkit | node/node.go | Manager | func (n *Node) Manager() *manager.Manager {
n.RLock()
defer n.RUnlock()
return n.manager
} | go | func (n *Node) Manager() *manager.Manager {
	// Manager returns the manager instance started by this node.
	// It may be nil.
	n.RLock()
	m := n.manager
	n.RUnlock()
	return m
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"Manager",
"(",
")",
"*",
"manager",
".",
"Manager",
"{",
"n",
".",
"RLock",
"(",
")",
"\n",
"defer",
"n",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"n",
".",
"manager",
"\n",
"}"
] | // Manager returns manager instance started by node. May be nil. | [
"Manager",
"returns",
"manager",
"instance",
"started",
"by",
"node",
".",
"May",
"be",
"nil",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L741-L745 | train |
docker/swarmkit | node/node.go | Agent | func (n *Node) Agent() *agent.Agent {
n.RLock()
defer n.RUnlock()
return n.agent
} | go | func (n *Node) Agent() *agent.Agent {
	// Agent returns the agent instance started by this node.
	// It may be nil.
	n.RLock()
	a := n.agent
	n.RUnlock()
	return a
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"Agent",
"(",
")",
"*",
"agent",
".",
"Agent",
"{",
"n",
".",
"RLock",
"(",
")",
"\n",
"defer",
"n",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"n",
".",
"agent",
"\n",
"}"
] | // Agent returns agent instance started by node. May be nil. | [
"Agent",
"returns",
"agent",
"instance",
"started",
"by",
"node",
".",
"May",
"be",
"nil",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L748-L752 | train |
docker/swarmkit | node/node.go | Remotes | func (n *Node) Remotes() []api.Peer {
weights := n.remotes.Weights()
remotes := make([]api.Peer, 0, len(weights))
for p := range weights {
remotes = append(remotes, p)
}
return remotes
} | go | func (n *Node) Remotes() []api.Peer {
	// Remotes returns the list of peers currently known to this node.
	weights := n.remotes.Weights()
	peers := make([]api.Peer, 0, len(weights))
	for peer := range weights {
		peers = append(peers, peer)
	}
	return peers
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"Remotes",
"(",
")",
"[",
"]",
"api",
".",
"Peer",
"{",
"weights",
":=",
"n",
".",
"remotes",
".",
"Weights",
"(",
")",
"\n",
"remotes",
":=",
"make",
"(",
"[",
"]",
"api",
".",
"Peer",
",",
"0",
",",
"len"... | // Remotes returns a list of known peers known to node. | [
"Remotes",
"returns",
"a",
"list",
"of",
"known",
"peers",
"known",
"to",
"node",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L769-L776 | train |
docker/swarmkit | node/node.go | isMandatoryFIPSClusterID | func isMandatoryFIPSClusterID(securityConfig *ca.SecurityConfig) bool {
return strings.HasPrefix(securityConfig.ClientTLSCreds.Organization(), "FIPS.")
} | go | func isMandatoryFIPSClusterID(securityConfig *ca.SecurityConfig) bool {
	// A cluster mandates FIPS mode when its cluster ID (carried in the
	// certificate organization) starts with the "FIPS." prefix.
	org := securityConfig.ClientTLSCreds.Organization()
	return strings.HasPrefix(org, "FIPS.")
} | [
"func",
"isMandatoryFIPSClusterID",
"(",
"securityConfig",
"*",
"ca",
".",
"SecurityConfig",
")",
"bool",
"{",
"return",
"strings",
".",
"HasPrefix",
"(",
"securityConfig",
".",
"ClientTLSCreds",
".",
"Organization",
"(",
")",
",",
"\"FIPS.\"",
")",
"\n",
"}"
] | // Given a cluster ID, returns whether the cluster ID indicates that the cluster
// mandates FIPS mode. These cluster IDs start with "FIPS." as a prefix. | [
"Given",
"a",
"cluster",
"ID",
"returns",
"whether",
"the",
"cluster",
"ID",
"indicates",
"that",
"the",
"cluster",
"mandates",
"FIPS",
"mode",
".",
"These",
"cluster",
"IDs",
"start",
"with",
"FIPS",
".",
"as",
"a",
"prefix",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L780-L782 | train |
docker/swarmkit | node/node.go | isMandatoryFIPSClusterJoinToken | func isMandatoryFIPSClusterJoinToken(joinToken string) bool {
if parsed, err := ca.ParseJoinToken(joinToken); err == nil {
return parsed.FIPS
}
return false
} | go | func isMandatoryFIPSClusterJoinToken(joinToken string) bool {
	// A join token indicates a FIPS-only cluster via its FIPS flag. Tokens
	// that fail to parse are treated as non-FIPS.
	parsed, err := ca.ParseJoinToken(joinToken)
	if err != nil {
		return false
	}
	return parsed.FIPS
} | [
"func",
"isMandatoryFIPSClusterJoinToken",
"(",
"joinToken",
"string",
")",
"bool",
"{",
"if",
"parsed",
",",
"err",
":=",
"ca",
".",
"ParseJoinToken",
"(",
"joinToken",
")",
";",
"err",
"==",
"nil",
"{",
"return",
"parsed",
".",
"FIPS",
"\n",
"}",
"\n",
... | // Given a join token, returns whether it indicates that the cluster mandates FIPS
// mode. | [
"Given",
"a",
"join",
"token",
"returns",
"whether",
"it",
"indicates",
"that",
"the",
"cluster",
"mandates",
"FIPS",
"mode",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L786-L791 | train |
docker/swarmkit | node/node.go | superviseManager | func (n *Node) superviseManager(ctx context.Context, securityConfig *ca.SecurityConfig, rootPaths ca.CertPaths, ready chan struct{}, renewer *ca.TLSRenewer) error {
// superviseManager is a loop, because we can come in and out of being a
// manager, and need to appropriately handle that without disrupting the
// node functionality.
for {
// if we're not a manager, we're just gonna park here and wait until we
// are. For normal agent nodes, we'll stay here forever, as intended.
if err := n.waitRole(ctx, ca.ManagerRole); err != nil {
return err
}
// Once we know we are a manager, we get ourselves ready for when we
// lose that role. we create a channel to signal that we've become a
// worker, and close it when n.waitRole completes.
workerRole := make(chan struct{})
waitRoleCtx, waitRoleCancel := context.WithCancel(ctx)
go func() {
if n.waitRole(waitRoleCtx, ca.WorkerRole) == nil {
close(workerRole)
}
}()
// the ready channel passed to superviseManager is in turn passed down
// to the runManager function. It's used to signal to the caller that
// the manager has started.
wasRemoved, err := n.runManager(ctx, securityConfig, rootPaths, ready, workerRole)
if err != nil {
waitRoleCancel()
return errors.Wrap(err, "manager stopped")
}
// If the manager stopped running and our role is still
// "manager", it's possible that the manager was demoted and
// the agent hasn't realized this yet. We should wait for the
// role to change instead of restarting the manager immediately.
err = func() error {
timer := time.NewTimer(roleChangeTimeout)
defer timer.Stop()
defer waitRoleCancel()
select {
case <-timer.C:
case <-workerRole:
return nil
case <-ctx.Done():
return ctx.Err()
}
if !wasRemoved {
log.G(ctx).Warn("failed to get worker role after manager stop, restarting manager")
return nil
}
// We need to be extra careful about restarting the
// manager. It may cause the node to wrongly join under
// a new Raft ID. Since we didn't see a role change
// yet, force a certificate renewal. If the certificate
// comes back with a worker role, we know we shouldn't
// restart the manager. However, if we don't see
// workerRole get closed, it means we didn't switch to
// a worker certificate, either because we couldn't
// contact a working CA, or because we've been
// re-promoted. In this case, we must assume we were
// re-promoted, and restart the manager.
log.G(ctx).Warn("failed to get worker role after manager stop, forcing certificate renewal")
// We can safely reset this timer without stopping/draining the timer
// first because the only way the code has reached this point is if the timer
// has already expired - if the role changed or the context were canceled,
// then we would have returned already.
timer.Reset(roleChangeTimeout)
renewer.Renew()
// Now that the renewal request has been sent to the
// renewal goroutine, wait for a change in role.
select {
case <-timer.C:
log.G(ctx).Warn("failed to get worker role after manager stop, restarting manager")
case <-workerRole:
case <-ctx.Done():
return ctx.Err()
}
return nil
}()
if err != nil {
return err
}
// set ready to nil after the first time we've gone through this, as we
// don't need to signal after the first time that the manager is ready.
ready = nil
}
} | go | func (n *Node) superviseManager(ctx context.Context, securityConfig *ca.SecurityConfig, rootPaths ca.CertPaths, ready chan struct{}, renewer *ca.TLSRenewer) error {
	// superviseManager controls whether or not we are running a manager on
	// this node.
	//
	// superviseManager is a loop, because we can come in and out of being a
	// manager, and need to appropriately handle that without disrupting the
	// node functionality.
	for {
		// if we're not a manager, we're just gonna park here and wait until we
		// are. For normal agent nodes, we'll stay here forever, as intended.
		if err := n.waitRole(ctx, ca.ManagerRole); err != nil {
			return err
		}
		// Once we know we are a manager, we get ourselves ready for when we
		// lose that role. we create a channel to signal that we've become a
		// worker, and close it when n.waitRole completes.
		workerRole := make(chan struct{})
		waitRoleCtx, waitRoleCancel := context.WithCancel(ctx)
		go func() {
			if n.waitRole(waitRoleCtx, ca.WorkerRole) == nil {
				close(workerRole)
			}
		}()
		// the ready channel passed to superviseManager is in turn passed down
		// to the runManager function. It's used to signal to the caller that
		// the manager has started.
		wasRemoved, err := n.runManager(ctx, securityConfig, rootPaths, ready, workerRole)
		if err != nil {
			waitRoleCancel()
			return errors.Wrap(err, "manager stopped")
		}
		// If the manager stopped running and our role is still
		// "manager", it's possible that the manager was demoted and
		// the agent hasn't realized this yet. We should wait for the
		// role to change instead of restarting the manager immediately.
		err = func() error {
			timer := time.NewTimer(roleChangeTimeout)
			defer timer.Stop()
			defer waitRoleCancel()
			select {
			case <-timer.C:
			case <-workerRole:
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
			if !wasRemoved {
				log.G(ctx).Warn("failed to get worker role after manager stop, restarting manager")
				return nil
			}
			// We need to be extra careful about restarting the
			// manager. It may cause the node to wrongly join under
			// a new Raft ID. Since we didn't see a role change
			// yet, force a certificate renewal. If the certificate
			// comes back with a worker role, we know we shouldn't
			// restart the manager. However, if we don't see
			// workerRole get closed, it means we didn't switch to
			// a worker certificate, either because we couldn't
			// contact a working CA, or because we've been
			// re-promoted. In this case, we must assume we were
			// re-promoted, and restart the manager.
			log.G(ctx).Warn("failed to get worker role after manager stop, forcing certificate renewal")
			// We can safely reset this timer without stopping/draining the timer
			// first because the only way the code has reached this point is if the timer
			// has already expired - if the role changed or the context were canceled,
			// then we would have returned already.
			timer.Reset(roleChangeTimeout)
			renewer.Renew()
			// Now that the renewal request has been sent to the
			// renewal goroutine, wait for a change in role.
			select {
			case <-timer.C:
				log.G(ctx).Warn("failed to get worker role after manager stop, restarting manager")
			case <-workerRole:
			case <-ctx.Done():
				return ctx.Err()
			}
			return nil
		}()
		if err != nil {
			return err
		}
		// set ready to nil after the first time we've gone through this, as we
		// don't need to signal after the first time that the manager is ready.
		ready = nil
	}
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"superviseManager",
"(",
"ctx",
"context",
".",
"Context",
",",
"securityConfig",
"*",
"ca",
".",
"SecurityConfig",
",",
"rootPaths",
"ca",
".",
"CertPaths",
",",
"ready",
"chan",
"struct",
"{",
"}",
",",
"renewer",
"*... | // superviseManager controls whether or not we are running a manager on this
// node | [
"superviseManager",
"controls",
"whether",
"or",
"not",
"we",
"are",
"running",
"a",
"manager",
"on",
"this",
"node"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L1095-L1187 | train |
docker/swarmkit | node/node.go | DowngradeKey | func (n *Node) DowngradeKey() error {
paths := ca.NewConfigPaths(filepath.Join(n.config.StateDir, certDirectory))
krw := ca.NewKeyReadWriter(paths.Node, n.config.UnlockKey, nil)
return krw.DowngradeKey()
} | go | func (n *Node) DowngradeKey() error {
	// DowngradeKey reverts the node key to the older format so that the node
	// can run on an older version of swarmkit.
	certDir := filepath.Join(n.config.StateDir, certDirectory)
	paths := ca.NewConfigPaths(certDir)
	return ca.NewKeyReadWriter(paths.Node, n.config.UnlockKey, nil).DowngradeKey()
} | [
"func",
"(",
"n",
"*",
"Node",
")",
"DowngradeKey",
"(",
")",
"error",
"{",
"paths",
":=",
"ca",
".",
"NewConfigPaths",
"(",
"filepath",
".",
"Join",
"(",
"n",
".",
"config",
".",
"StateDir",
",",
"certDirectory",
")",
")",
"\n",
"krw",
":=",
"ca",
... | // DowngradeKey reverts the node key to older format so that it can
// run on older version of swarmkit | [
"DowngradeKey",
"reverts",
"the",
"node",
"key",
"to",
"older",
"format",
"so",
"that",
"it",
"can",
"run",
"on",
"older",
"version",
"of",
"swarmkit"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L1191-L1196 | train |
docker/swarmkit | node/node.go | WaitSelect | func (s *persistentRemotes) WaitSelect(ctx context.Context) <-chan api.Peer {
c := make(chan api.Peer, 1)
s.RLock()
done := make(chan struct{})
go func() {
select {
case <-ctx.Done():
s.c.Broadcast()
case <-done:
}
}()
go func() {
defer s.RUnlock()
defer close(c)
defer close(done)
for {
if ctx.Err() != nil {
return
}
p, err := s.Select()
if err == nil {
c <- p
return
}
s.c.Wait()
}
}()
return c
} | go | func (s *persistentRemotes) WaitSelect(ctx context.Context) <-chan api.Peer {
	// WaitSelect waits until at least one remote becomes available and then
	// selects one, delivering it on the returned channel. The channel is
	// closed after a peer has been sent, or when ctx is canceled.
	c := make(chan api.Peer, 1)
	s.RLock()
	done := make(chan struct{})
	// Wake the cond waiter below on context cancellation so the selecting
	// goroutine can notice ctx.Err() and exit instead of waiting forever.
	go func() {
		select {
		case <-ctx.Done():
			s.c.Broadcast()
		case <-done:
		}
	}()
	go func() {
		defer s.RUnlock()
		defer close(c)
		defer close(done)
		for {
			if ctx.Err() != nil {
				return
			}
			p, err := s.Select()
			if err == nil {
				c <- p
				return
			}
			// No remote available yet: block until something broadcasts on
			// s.c (either a remote being added elsewhere — presumably — or
			// the cancellation goroutine above), then retry.
			s.c.Wait()
		}
	}()
	return c
} | [
"func",
"(",
"s",
"*",
"persistentRemotes",
")",
"WaitSelect",
"(",
"ctx",
"context",
".",
"Context",
")",
"<-",
"chan",
"api",
".",
"Peer",
"{",
"c",
":=",
"make",
"(",
"chan",
"api",
".",
"Peer",
",",
"1",
")",
"\n",
"s",
".",
"RLock",
"(",
")"... | // WaitSelect waits until at least one remote becomes available and then selects one. | [
"WaitSelect",
"waits",
"until",
"at",
"least",
"one",
"remote",
"becomes",
"available",
"and",
"then",
"selects",
"one",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L1253-L1281 | train |
docker/swarmkit | node/node.go | SessionClosed | func (fs *firstSessionErrorTracker) SessionClosed() error {
fs.mu.Lock()
defer fs.mu.Unlock()
// if we've successfully established at least 1 session, never return
// errors
if fs.pastFirstSession {
return nil
}
// get the GRPC status from the error, because we only care about GRPC
// errors
grpcStatus, ok := status.FromError(fs.err)
// if this isn't a GRPC error, it's not an error we return from this method
if !ok {
return nil
}
// NOTE(dperny, cyli): grpc does not expose the error type, which means we have
// to string matching to figure out if it's an x509 error.
//
// The error we're looking for has "connection error:", then says
// "transport:" and finally has "x509:"
// specifically, the connection error description reads:
//
// transport: authentication handshake failed: x509: certificate signed by unknown authority
//
// This string matching has caused trouble in the past. specifically, at
// some point between grpc versions 1.3.0 and 1.7.5, the string we were
// matching changed from "transport: x509" to "transport: authentication
// handshake failed: x509", which was an issue because we were matching for
// string "transport: x509:".
//
// In GRPC >= 1.10.x, transient errors like TLS errors became hidden by the
// load balancing that GRPC does. In GRPC 1.11.x, they were exposed again
// (usually) in RPC calls, but the error string then became:
// rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection error: desc = "transport: authentication handshake failed: x509: certificate signed by unknown authority"
//
// It also went from an Internal error to an Unavailable error. So we're just going
// to search for the string: "transport: authentication handshake failed: x509:" since
// we want to fail for ALL x509 failures, not just unknown authority errors.
if !strings.Contains(grpcStatus.Message(), "connection error") ||
!strings.Contains(grpcStatus.Message(), "transport: authentication handshake failed: x509:") {
return nil
}
return fs.err
} | go | func (fs *firstSessionErrorTracker) SessionClosed() error {
	// SessionClosed reports a fatal error only when we have not yet
	// established a single session and the recorded error is a gRPC x509
	// (TLS handshake) failure; in every other case it returns nil so the
	// caller keeps retrying.
	fs.mu.Lock()
	defer fs.mu.Unlock()
	// Once at least one session succeeded, errors are never fatal.
	if fs.pastFirstSession {
		return nil
	}
	// Only gRPC status errors are of interest here.
	grpcStatus, ok := status.FromError(fs.err)
	if !ok {
		return nil
	}
	// NOTE(dperny, cyli): grpc does not expose the error type, so we must
	// string-match to detect x509 errors. The message we are after contains
	// "connection error:", then "transport:", and finally "x509:", e.g.:
	//
	//   transport: authentication handshake failed: x509: certificate signed by unknown authority
	//
	// This matching has broken before: between grpc 1.3.0 and 1.7.5 the
	// text changed from "transport: x509" to "transport: authentication
	// handshake failed: x509". In grpc >= 1.10.x transient TLS errors were
	// hidden by load balancing; in 1.11.x they surfaced again (usually) on
	// RPC calls as:
	//
	//   rpc error: code = Unavailable desc = all SubConns are in TransientFailure, latest connection error: connection error: desc = "transport: authentication handshake failed: x509: certificate signed by unknown authority"
	//
	// (also moving from an Internal to an Unavailable error). So we search
	// for "transport: authentication handshake failed: x509:" to fail on
	// ALL x509 failures, not just unknown-authority ones.
	msg := grpcStatus.Message()
	if strings.Contains(msg, "connection error") &&
		strings.Contains(msg, "transport: authentication handshake failed: x509:") {
		return fs.err
	}
	return nil
} | [
"func",
"(",
"fs",
"*",
"firstSessionErrorTracker",
")",
"SessionClosed",
"(",
")",
"error",
"{",
"fs",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"fs",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"if",
"fs",
".",
"pastFirstSession",
"{",
"return... | // SessionClosed returns an error if we haven't yet established a session, and
// we get a gprc error as a result of an X509 failure. | [
"SessionClosed",
"returns",
"an",
"error",
"if",
"we",
"haven",
"t",
"yet",
"established",
"a",
"session",
"and",
"we",
"get",
"a",
"gprc",
"error",
"as",
"a",
"result",
"of",
"an",
"X509",
"failure",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/node/node.go#L1317-L1364 | train |
docker/swarmkit | manager/state/store/apply.go | Apply | func Apply(store *MemoryStore, item events.Event) (err error) {
return store.Update(func(tx Tx) error {
switch v := item.(type) {
case api.EventCreateTask:
return CreateTask(tx, v.Task)
case api.EventUpdateTask:
return UpdateTask(tx, v.Task)
case api.EventDeleteTask:
return DeleteTask(tx, v.Task.ID)
case api.EventCreateService:
return CreateService(tx, v.Service)
case api.EventUpdateService:
return UpdateService(tx, v.Service)
case api.EventDeleteService:
return DeleteService(tx, v.Service.ID)
case api.EventCreateNetwork:
return CreateNetwork(tx, v.Network)
case api.EventUpdateNetwork:
return UpdateNetwork(tx, v.Network)
case api.EventDeleteNetwork:
return DeleteNetwork(tx, v.Network.ID)
case api.EventCreateNode:
return CreateNode(tx, v.Node)
case api.EventUpdateNode:
return UpdateNode(tx, v.Node)
case api.EventDeleteNode:
return DeleteNode(tx, v.Node.ID)
case state.EventCommit:
return nil
}
return errors.New("unrecognized event type")
})
} | go | func Apply(store *MemoryStore, item events.Event) (err error) {
	// Apply takes an item from the event stream of one Store and replays it
	// against a second Store inside a single update transaction.
	return store.Update(func(tx Tx) error {
		switch ev := item.(type) {
		case api.EventCreateTask:
			return CreateTask(tx, ev.Task)
		case api.EventUpdateTask:
			return UpdateTask(tx, ev.Task)
		case api.EventDeleteTask:
			return DeleteTask(tx, ev.Task.ID)
		case api.EventCreateService:
			return CreateService(tx, ev.Service)
		case api.EventUpdateService:
			return UpdateService(tx, ev.Service)
		case api.EventDeleteService:
			return DeleteService(tx, ev.Service.ID)
		case api.EventCreateNetwork:
			return CreateNetwork(tx, ev.Network)
		case api.EventUpdateNetwork:
			return UpdateNetwork(tx, ev.Network)
		case api.EventDeleteNetwork:
			return DeleteNetwork(tx, ev.Network.ID)
		case api.EventCreateNode:
			return CreateNode(tx, ev.Node)
		case api.EventUpdateNode:
			return UpdateNode(tx, ev.Node)
		case api.EventDeleteNode:
			return DeleteNode(tx, ev.Node.ID)
		case state.EventCommit:
			// Commit markers carry no state change of their own.
			return nil
		}
		return errors.New("unrecognized event type")
	})
} | [
"func",
"Apply",
"(",
"store",
"*",
"MemoryStore",
",",
"item",
"events",
".",
"Event",
")",
"(",
"err",
"error",
")",
"{",
"return",
"store",
".",
"Update",
"(",
"func",
"(",
"tx",
"Tx",
")",
"error",
"{",
"switch",
"v",
":=",
"item",
".",
"(",
... | // Apply takes an item from the event stream of one Store and applies it to
// a second Store. | [
"Apply",
"takes",
"an",
"item",
"from",
"the",
"event",
"stream",
"of",
"one",
"Store",
"and",
"applies",
"it",
"to",
"a",
"second",
"Store",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/state/store/apply.go#L13-L49 | train |
docker/swarmkit | manager/allocator/cnmallocator/portallocator.go | getPortConfigKey | func getPortConfigKey(p *api.PortConfig) api.PortConfig {
return api.PortConfig{
Name: p.Name,
Protocol: p.Protocol,
TargetPort: p.TargetPort,
}
} | go | func getPortConfigKey(p *api.PortConfig) api.PortConfig {
return api.PortConfig{
Name: p.Name,
Protocol: p.Protocol,
TargetPort: p.TargetPort,
}
} | [
"func",
"getPortConfigKey",
"(",
"p",
"*",
"api",
".",
"PortConfig",
")",
"api",
".",
"PortConfig",
"{",
"return",
"api",
".",
"PortConfig",
"{",
"Name",
":",
"p",
".",
"Name",
",",
"Protocol",
":",
"p",
".",
"Protocol",
",",
"TargetPort",
":",
"p",
... | // getPortConfigKey returns a map key for doing set operations with
// ports. The key consists of name, protocol and target port which
// uniquely identifies a port within a single Endpoint. | [
"getPortConfigKey",
"returns",
"a",
"map",
"key",
"for",
"doing",
"set",
"operations",
"with",
"ports",
".",
"The",
"key",
"consists",
"of",
"name",
"protocol",
"and",
"target",
"port",
"which",
"uniquely",
"identifies",
"a",
"port",
"within",
"a",
"single",
... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/allocator/cnmallocator/portallocator.go#L144-L150 | train |
docker/swarmkit | manager/role_manager.go | newRoleManager | func newRoleManager(store *store.MemoryStore, raftNode *raft.Node) *roleManager {
ctx, cancel := context.WithCancel(context.Background())
return &roleManager{
ctx: ctx,
cancel: cancel,
store: store,
raft: raftNode,
doneChan: make(chan struct{}),
pendingReconciliation: make(map[string]*api.Node),
pendingRemoval: make(map[string]struct{}),
}
} | go | func newRoleManager(store *store.MemoryStore, raftNode *raft.Node) *roleManager {
	// newRoleManager creates a new roleManager bound to the given store and
	// raft node, with its own cancelable background context.
	ctx, cancel := context.WithCancel(context.Background())
	rm := &roleManager{
		ctx:    ctx,
		cancel: cancel,
		store:  store,
		raft:   raftNode,
	}
	rm.doneChan = make(chan struct{})
	rm.pendingReconciliation = make(map[string]*api.Node)
	rm.pendingRemoval = make(map[string]struct{})
	return rm
} | [
"func",
"newRoleManager",
"(",
"store",
"*",
"store",
".",
"MemoryStore",
",",
"raftNode",
"*",
"raft",
".",
"Node",
")",
"*",
"roleManager",
"{",
"ctx",
",",
"cancel",
":=",
"context",
".",
"WithCancel",
"(",
"context",
".",
"Background",
"(",
")",
")",... | // newRoleManager creates a new roleManager. | [
"newRoleManager",
"creates",
"a",
"new",
"roleManager",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/role_manager.go#L47-L58 | train |
docker/swarmkit | manager/role_manager.go | getTicker | func (rm *roleManager) getTicker(interval time.Duration) clock.Ticker {
if rm.clocksource == nil {
return clock.NewClock().NewTicker(interval)
}
return rm.clocksource.NewTicker(interval)
} | go | func (rm *roleManager) getTicker(interval time.Duration) clock.Ticker {
	// getTicker returns a ticker based on the configured clock source,
	// falling back to a real clock when no source is set.
	src := rm.clocksource
	if src == nil {
		src = clock.NewClock()
	}
	return src.NewTicker(interval)
} | [
"func",
"(",
"rm",
"*",
"roleManager",
")",
"getTicker",
"(",
"interval",
"time",
".",
"Duration",
")",
"clock",
".",
"Ticker",
"{",
"if",
"rm",
".",
"clocksource",
"==",
"nil",
"{",
"return",
"clock",
".",
"NewClock",
"(",
")",
".",
"NewTicker",
"(",
... | // getTicker returns a ticker based on the configured clock source | [
"getTicker",
"returns",
"a",
"ticker",
"based",
"on",
"the",
"configured",
"clock",
"source"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/role_manager.go#L61-L67 | train |
docker/swarmkit | manager/role_manager.go | removeMember | func (rm *roleManager) removeMember(ctx context.Context, member *membership.Member) {
// Quorum safeguard - quorum should have been checked before a node was allowed to be demoted, but if in the
// intervening time some other node disconnected, removing this node would result in a loss of cluster quorum.
// We leave it
if !rm.raft.CanRemoveMember(member.RaftID) {
// TODO(aaronl): Retry later
log.G(ctx).Debugf("can't demote node %s at this time: removing member from raft would result in a loss of quorum", member.NodeID)
return
}
rmCtx, rmCancel := context.WithTimeout(rm.ctx, removalTimeout)
defer rmCancel()
if member.RaftID == rm.raft.Config.ID {
// Don't use rmCtx, because we expect to lose
// leadership, which will cancel this context.
log.G(ctx).Info("demoted; transferring leadership")
err := rm.raft.TransferLeadership(context.Background())
if err == nil {
return
}
log.G(ctx).WithError(err).Info("failed to transfer leadership")
}
if err := rm.raft.RemoveMember(rmCtx, member.RaftID); err != nil {
// TODO(aaronl): Retry later
log.G(ctx).WithError(err).Debugf("can't demote node %s at this time", member.NodeID)
}
} | go | func (rm *roleManager) removeMember(ctx context.Context, member *membership.Member) {
// Quorum safeguard - quorum should have been checked before a node was allowed to be demoted, but if in the
// intervening time some other node disconnected, removing this node would result in a loss of cluster quorum.
// We leave it
if !rm.raft.CanRemoveMember(member.RaftID) {
// TODO(aaronl): Retry later
log.G(ctx).Debugf("can't demote node %s at this time: removing member from raft would result in a loss of quorum", member.NodeID)
return
}
rmCtx, rmCancel := context.WithTimeout(rm.ctx, removalTimeout)
defer rmCancel()
if member.RaftID == rm.raft.Config.ID {
// Don't use rmCtx, because we expect to lose
// leadership, which will cancel this context.
log.G(ctx).Info("demoted; transferring leadership")
err := rm.raft.TransferLeadership(context.Background())
if err == nil {
return
}
log.G(ctx).WithError(err).Info("failed to transfer leadership")
}
if err := rm.raft.RemoveMember(rmCtx, member.RaftID); err != nil {
// TODO(aaronl): Retry later
log.G(ctx).WithError(err).Debugf("can't demote node %s at this time", member.NodeID)
}
} | [
"func",
"(",
"rm",
"*",
"roleManager",
")",
"removeMember",
"(",
"ctx",
"context",
".",
"Context",
",",
"member",
"*",
"membership",
".",
"Member",
")",
"{",
"if",
"!",
"rm",
".",
"raft",
".",
"CanRemoveMember",
"(",
"member",
".",
"RaftID",
")",
"{",
... | // removeMember removes a member from the raft cluster membership | [
"removeMember",
"removes",
"a",
"member",
"from",
"the",
"raft",
"cluster",
"membership"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/role_manager.go#L200-L227 | train |
docker/swarmkit | manager/role_manager.go | reconcileRole | func (rm *roleManager) reconcileRole(ctx context.Context, node *api.Node) {
if node.Role == node.Spec.DesiredRole {
// Nothing to do.
delete(rm.pendingReconciliation, node.ID)
return
}
// Promotion can proceed right away.
if node.Spec.DesiredRole == api.NodeRoleManager && node.Role == api.NodeRoleWorker {
err := rm.store.Update(func(tx store.Tx) error {
updatedNode := store.GetNode(tx, node.ID)
if updatedNode == nil || updatedNode.Spec.DesiredRole != node.Spec.DesiredRole || updatedNode.Role != node.Role {
return nil
}
updatedNode.Role = api.NodeRoleManager
return store.UpdateNode(tx, updatedNode)
})
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to promote node %s", node.ID)
} else {
delete(rm.pendingReconciliation, node.ID)
}
} else if node.Spec.DesiredRole == api.NodeRoleWorker && node.Role == api.NodeRoleManager {
// Check for node in memberlist
member := rm.raft.GetMemberByNodeID(node.ID)
if member != nil {
// We first try to remove the raft node from the raft cluster. On the next tick, if the node
// has been removed from the cluster membership, we then update the store to reflect the fact
// that it has been successfully demoted, and if that works, remove it from the pending list.
rm.removeMember(ctx, member)
return
}
err := rm.store.Update(func(tx store.Tx) error {
updatedNode := store.GetNode(tx, node.ID)
if updatedNode == nil || updatedNode.Spec.DesiredRole != node.Spec.DesiredRole || updatedNode.Role != node.Role {
return nil
}
updatedNode.Role = api.NodeRoleWorker
return store.UpdateNode(tx, updatedNode)
})
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to demote node %s", node.ID)
} else {
delete(rm.pendingReconciliation, node.ID)
}
}
} | go | func (rm *roleManager) reconcileRole(ctx context.Context, node *api.Node) {
if node.Role == node.Spec.DesiredRole {
// Nothing to do.
delete(rm.pendingReconciliation, node.ID)
return
}
// Promotion can proceed right away.
if node.Spec.DesiredRole == api.NodeRoleManager && node.Role == api.NodeRoleWorker {
err := rm.store.Update(func(tx store.Tx) error {
updatedNode := store.GetNode(tx, node.ID)
if updatedNode == nil || updatedNode.Spec.DesiredRole != node.Spec.DesiredRole || updatedNode.Role != node.Role {
return nil
}
updatedNode.Role = api.NodeRoleManager
return store.UpdateNode(tx, updatedNode)
})
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to promote node %s", node.ID)
} else {
delete(rm.pendingReconciliation, node.ID)
}
} else if node.Spec.DesiredRole == api.NodeRoleWorker && node.Role == api.NodeRoleManager {
// Check for node in memberlist
member := rm.raft.GetMemberByNodeID(node.ID)
if member != nil {
// We first try to remove the raft node from the raft cluster. On the next tick, if the node
// has been removed from the cluster membership, we then update the store to reflect the fact
// that it has been successfully demoted, and if that works, remove it from the pending list.
rm.removeMember(ctx, member)
return
}
err := rm.store.Update(func(tx store.Tx) error {
updatedNode := store.GetNode(tx, node.ID)
if updatedNode == nil || updatedNode.Spec.DesiredRole != node.Spec.DesiredRole || updatedNode.Role != node.Role {
return nil
}
updatedNode.Role = api.NodeRoleWorker
return store.UpdateNode(tx, updatedNode)
})
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to demote node %s", node.ID)
} else {
delete(rm.pendingReconciliation, node.ID)
}
}
} | [
"func",
"(",
"rm",
"*",
"roleManager",
")",
"reconcileRole",
"(",
"ctx",
"context",
".",
"Context",
",",
"node",
"*",
"api",
".",
"Node",
")",
"{",
"if",
"node",
".",
"Role",
"==",
"node",
".",
"Spec",
".",
"DesiredRole",
"{",
"delete",
"(",
"rm",
... | // reconcileRole looks at the desired role for a node, and if it is being demoted or promoted, updates the
// node role accordingly. If the node is being demoted, it also removes the node from the raft cluster membership. | [
"reconcileRole",
"looks",
"at",
"the",
"desired",
"role",
"for",
"a",
"node",
"and",
"if",
"it",
"is",
"being",
"demoted",
"or",
"promoted",
"updates",
"the",
"node",
"role",
"accordingly",
".",
"If",
"the",
"node",
"is",
"being",
"demoted",
"it",
"also",
... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/role_manager.go#L231-L279 | train |
docker/swarmkit | manager/scheduler/pipeline.go | NewPipeline | func NewPipeline() *Pipeline {
p := &Pipeline{}
for _, f := range defaultFilters {
p.checklist = append(p.checklist, checklistEntry{f: f})
}
return p
} | go | func NewPipeline() *Pipeline {
p := &Pipeline{}
for _, f := range defaultFilters {
p.checklist = append(p.checklist, checklistEntry{f: f})
}
return p
} | [
"func",
"NewPipeline",
"(",
")",
"*",
"Pipeline",
"{",
"p",
":=",
"&",
"Pipeline",
"{",
"}",
"\n",
"for",
"_",
",",
"f",
":=",
"range",
"defaultFilters",
"{",
"p",
".",
"checklist",
"=",
"append",
"(",
"p",
".",
"checklist",
",",
"checklistEntry",
"{... | // NewPipeline returns a pipeline with the default set of filters. | [
"NewPipeline",
"returns",
"a",
"pipeline",
"with",
"the",
"default",
"set",
"of",
"filters",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/pipeline.go#L44-L52 | train |
docker/swarmkit | manager/scheduler/pipeline.go | Process | func (p *Pipeline) Process(n *NodeInfo) bool {
for i, entry := range p.checklist {
if entry.enabled && !entry.f.Check(n) {
// Immediately stop on first failure.
p.checklist[i].failureCount++
return false
}
}
for i := range p.checklist {
p.checklist[i].failureCount = 0
}
return true
} | go | func (p *Pipeline) Process(n *NodeInfo) bool {
for i, entry := range p.checklist {
if entry.enabled && !entry.f.Check(n) {
// Immediately stop on first failure.
p.checklist[i].failureCount++
return false
}
}
for i := range p.checklist {
p.checklist[i].failureCount = 0
}
return true
} | [
"func",
"(",
"p",
"*",
"Pipeline",
")",
"Process",
"(",
"n",
"*",
"NodeInfo",
")",
"bool",
"{",
"for",
"i",
",",
"entry",
":=",
"range",
"p",
".",
"checklist",
"{",
"if",
"entry",
".",
"enabled",
"&&",
"!",
"entry",
".",
"f",
".",
"Check",
"(",
... | // Process a node through the filter pipeline.
// Returns true if all filters pass, false otherwise. | [
"Process",
"a",
"node",
"through",
"the",
"filter",
"pipeline",
".",
"Returns",
"true",
"if",
"all",
"filters",
"pass",
"false",
"otherwise",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/pipeline.go#L56-L68 | train |
docker/swarmkit | manager/scheduler/pipeline.go | SetTask | func (p *Pipeline) SetTask(t *api.Task) {
for i := range p.checklist {
p.checklist[i].enabled = p.checklist[i].f.SetTask(t)
p.checklist[i].failureCount = 0
}
} | go | func (p *Pipeline) SetTask(t *api.Task) {
for i := range p.checklist {
p.checklist[i].enabled = p.checklist[i].f.SetTask(t)
p.checklist[i].failureCount = 0
}
} | [
"func",
"(",
"p",
"*",
"Pipeline",
")",
"SetTask",
"(",
"t",
"*",
"api",
".",
"Task",
")",
"{",
"for",
"i",
":=",
"range",
"p",
".",
"checklist",
"{",
"p",
".",
"checklist",
"[",
"i",
"]",
".",
"enabled",
"=",
"p",
".",
"checklist",
"[",
"i",
... | // SetTask sets up the filters to process a new task. Once this is called,
// Process can be called repeatedly to try to assign the task various nodes. | [
"SetTask",
"sets",
"up",
"the",
"filters",
"to",
"process",
"a",
"new",
"task",
".",
"Once",
"this",
"is",
"called",
"Process",
"can",
"be",
"called",
"repeatedly",
"to",
"try",
"to",
"assign",
"the",
"task",
"various",
"nodes",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/pipeline.go#L72-L77 | train |
docker/swarmkit | manager/scheduler/pipeline.go | Explain | func (p *Pipeline) Explain() string {
var explanation string
// Sort from most failures to least
sortedByFailures := make([]checklistEntry, len(p.checklist))
copy(sortedByFailures, p.checklist)
sort.Sort(sort.Reverse(checklistByFailures(sortedByFailures)))
for _, entry := range sortedByFailures {
if entry.failureCount > 0 {
if len(explanation) > 0 {
explanation += "; "
}
explanation += entry.f.Explain(entry.failureCount)
}
}
return explanation
} | go | func (p *Pipeline) Explain() string {
var explanation string
// Sort from most failures to least
sortedByFailures := make([]checklistEntry, len(p.checklist))
copy(sortedByFailures, p.checklist)
sort.Sort(sort.Reverse(checklistByFailures(sortedByFailures)))
for _, entry := range sortedByFailures {
if entry.failureCount > 0 {
if len(explanation) > 0 {
explanation += "; "
}
explanation += entry.f.Explain(entry.failureCount)
}
}
return explanation
} | [
"func",
"(",
"p",
"*",
"Pipeline",
")",
"Explain",
"(",
")",
"string",
"{",
"var",
"explanation",
"string",
"\n",
"sortedByFailures",
":=",
"make",
"(",
"[",
"]",
"checklistEntry",
",",
"len",
"(",
"p",
".",
"checklist",
")",
")",
"\n",
"copy",
"(",
... | // Explain returns a string explaining why a task could not be scheduled. | [
"Explain",
"returns",
"a",
"string",
"explaining",
"why",
"a",
"task",
"could",
"not",
"be",
"scheduled",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/pipeline.go#L80-L99 | train |
docker/swarmkit | manager/controlapi/service.go | validateImage | func validateImage(image string) error {
if image == "" {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: image reference must be provided")
}
if _, err := reference.ParseNormalizedNamed(image); err != nil {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: %q is not a valid repository/tag", image)
}
return nil
} | go | func validateImage(image string) error {
if image == "" {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: image reference must be provided")
}
if _, err := reference.ParseNormalizedNamed(image); err != nil {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: %q is not a valid repository/tag", image)
}
return nil
} | [
"func",
"validateImage",
"(",
"image",
"string",
")",
"error",
"{",
"if",
"image",
"==",
"\"\"",
"{",
"return",
"status",
".",
"Errorf",
"(",
"codes",
".",
"InvalidArgument",
",",
"\"ContainerSpec: image reference must be provided\"",
")",
"\n",
"}",
"\n",
"if",... | // validateImage validates image name in containerSpec | [
"validateImage",
"validates",
"image",
"name",
"in",
"containerSpec"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L165-L174 | train |
docker/swarmkit | manager/controlapi/service.go | validateMounts | func validateMounts(mounts []api.Mount) error {
mountMap := make(map[string]bool)
for _, mount := range mounts {
if _, exists := mountMap[mount.Target]; exists {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: duplicate mount point: %s", mount.Target)
}
mountMap[mount.Target] = true
}
return nil
} | go | func validateMounts(mounts []api.Mount) error {
mountMap := make(map[string]bool)
for _, mount := range mounts {
if _, exists := mountMap[mount.Target]; exists {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: duplicate mount point: %s", mount.Target)
}
mountMap[mount.Target] = true
}
return nil
} | [
"func",
"validateMounts",
"(",
"mounts",
"[",
"]",
"api",
".",
"Mount",
")",
"error",
"{",
"mountMap",
":=",
"make",
"(",
"map",
"[",
"string",
"]",
"bool",
")",
"\n",
"for",
"_",
",",
"mount",
":=",
"range",
"mounts",
"{",
"if",
"_",
",",
"exists"... | // validateMounts validates if there are duplicate mounts in containerSpec | [
"validateMounts",
"validates",
"if",
"there",
"are",
"duplicate",
"mounts",
"in",
"containerSpec"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L177-L187 | train |
docker/swarmkit | manager/controlapi/service.go | validateHealthCheck | func validateHealthCheck(hc *api.HealthConfig) error {
if hc == nil {
return nil
}
if hc.Interval != nil {
interval, err := gogotypes.DurationFromProto(hc.Interval)
if err != nil {
return err
}
if interval != 0 && interval < minimumDuration {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: Interval in HealthConfig cannot be less than %s", minimumDuration)
}
}
if hc.Timeout != nil {
timeout, err := gogotypes.DurationFromProto(hc.Timeout)
if err != nil {
return err
}
if timeout != 0 && timeout < minimumDuration {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: Timeout in HealthConfig cannot be less than %s", minimumDuration)
}
}
if hc.StartPeriod != nil {
sp, err := gogotypes.DurationFromProto(hc.StartPeriod)
if err != nil {
return err
}
if sp != 0 && sp < minimumDuration {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: StartPeriod in HealthConfig cannot be less than %s", minimumDuration)
}
}
if hc.Retries < 0 {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: Retries in HealthConfig cannot be negative")
}
return nil
} | go | func validateHealthCheck(hc *api.HealthConfig) error {
if hc == nil {
return nil
}
if hc.Interval != nil {
interval, err := gogotypes.DurationFromProto(hc.Interval)
if err != nil {
return err
}
if interval != 0 && interval < minimumDuration {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: Interval in HealthConfig cannot be less than %s", minimumDuration)
}
}
if hc.Timeout != nil {
timeout, err := gogotypes.DurationFromProto(hc.Timeout)
if err != nil {
return err
}
if timeout != 0 && timeout < minimumDuration {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: Timeout in HealthConfig cannot be less than %s", minimumDuration)
}
}
if hc.StartPeriod != nil {
sp, err := gogotypes.DurationFromProto(hc.StartPeriod)
if err != nil {
return err
}
if sp != 0 && sp < minimumDuration {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: StartPeriod in HealthConfig cannot be less than %s", minimumDuration)
}
}
if hc.Retries < 0 {
return status.Errorf(codes.InvalidArgument, "ContainerSpec: Retries in HealthConfig cannot be negative")
}
return nil
} | [
"func",
"validateHealthCheck",
"(",
"hc",
"*",
"api",
".",
"HealthConfig",
")",
"error",
"{",
"if",
"hc",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"if",
"hc",
".",
"Interval",
"!=",
"nil",
"{",
"interval",
",",
"err",
":=",
"gogotypes",
".... | // validateHealthCheck validates configs about container's health check | [
"validateHealthCheck",
"validates",
"configs",
"about",
"container",
"s",
"health",
"check"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L190-L230 | train |
docker/swarmkit | manager/controlapi/service.go | validateSecretRefsSpec | func validateSecretRefsSpec(spec api.TaskSpec) error {
container := spec.GetContainer()
if container == nil {
return nil
}
// Keep a map to track all the targets that will be exposed
// The string returned is only used for logging. It could as well be struct{}{}
existingTargets := make(map[string]string)
for _, secretRef := range container.Secrets {
// SecretID and SecretName are mandatory, we have invalid references without them
if secretRef.SecretID == "" || secretRef.SecretName == "" {
return status.Errorf(codes.InvalidArgument, "malformed secret reference")
}
// Every secret reference requires a Target
if secretRef.GetTarget() == nil {
return status.Errorf(codes.InvalidArgument, "malformed secret reference, no target provided")
}
// If this is a file target, we will ensure filename uniqueness
if secretRef.GetFile() != nil {
fileName := secretRef.GetFile().Name
if fileName == "" {
return status.Errorf(codes.InvalidArgument, "malformed file secret reference, invalid target file name provided")
}
// If this target is already in use, we have conflicting targets
if prevSecretName, ok := existingTargets[fileName]; ok {
return status.Errorf(codes.InvalidArgument, "secret references '%s' and '%s' have a conflicting target: '%s'", prevSecretName, secretRef.SecretName, fileName)
}
existingTargets[fileName] = secretRef.SecretName
}
}
return nil
} | go | func validateSecretRefsSpec(spec api.TaskSpec) error {
container := spec.GetContainer()
if container == nil {
return nil
}
// Keep a map to track all the targets that will be exposed
// The string returned is only used for logging. It could as well be struct{}{}
existingTargets := make(map[string]string)
for _, secretRef := range container.Secrets {
// SecretID and SecretName are mandatory, we have invalid references without them
if secretRef.SecretID == "" || secretRef.SecretName == "" {
return status.Errorf(codes.InvalidArgument, "malformed secret reference")
}
// Every secret reference requires a Target
if secretRef.GetTarget() == nil {
return status.Errorf(codes.InvalidArgument, "malformed secret reference, no target provided")
}
// If this is a file target, we will ensure filename uniqueness
if secretRef.GetFile() != nil {
fileName := secretRef.GetFile().Name
if fileName == "" {
return status.Errorf(codes.InvalidArgument, "malformed file secret reference, invalid target file name provided")
}
// If this target is already in use, we have conflicting targets
if prevSecretName, ok := existingTargets[fileName]; ok {
return status.Errorf(codes.InvalidArgument, "secret references '%s' and '%s' have a conflicting target: '%s'", prevSecretName, secretRef.SecretName, fileName)
}
existingTargets[fileName] = secretRef.SecretName
}
}
return nil
} | [
"func",
"validateSecretRefsSpec",
"(",
"spec",
"api",
".",
"TaskSpec",
")",
"error",
"{",
"container",
":=",
"spec",
".",
"GetContainer",
"(",
")",
"\n",
"if",
"container",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"existingTargets",
":=",
"make"... | // validateSecretRefsSpec finds if the secrets passed in spec are valid and have no
// conflicting targets. | [
"validateSecretRefsSpec",
"finds",
"if",
"the",
"secrets",
"passed",
"in",
"spec",
"are",
"valid",
"and",
"have",
"no",
"conflicting",
"targets",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L349-L385 | train |
docker/swarmkit | manager/controlapi/service.go | validateConfigRefsSpec | func validateConfigRefsSpec(spec api.TaskSpec) error {
container := spec.GetContainer()
if container == nil {
return nil
}
// check if we're using a config as a CredentialSpec -- if so, we need to
// verify
var (
credSpecConfig string
credSpecConfigFound bool
)
if p := container.Privileges; p != nil {
if cs := p.CredentialSpec; cs != nil {
// if there is no config in the credspec, then this will just be
// assigned to emptystring anyway, so we don't need to check
// existence.
credSpecConfig = cs.GetConfig()
}
}
// Keep a map to track all the targets that will be exposed
// The string returned is only used for logging. It could as well be struct{}{}
existingTargets := make(map[string]string)
for _, configRef := range container.Configs {
// ConfigID and ConfigName are mandatory, we have invalid references without them
if configRef.ConfigID == "" || configRef.ConfigName == "" {
return status.Errorf(codes.InvalidArgument, "malformed config reference")
}
// Every config reference requires a Target
if configRef.GetTarget() == nil {
return status.Errorf(codes.InvalidArgument, "malformed config reference, no target provided")
}
// If this is a file target, we will ensure filename uniqueness
if configRef.GetFile() != nil {
fileName := configRef.GetFile().Name
// Validate the file name
if fileName == "" {
return status.Errorf(codes.InvalidArgument, "malformed file config reference, invalid target file name provided")
}
// If this target is already in use, we have conflicting targets
if prevConfigName, ok := existingTargets[fileName]; ok {
return status.Errorf(codes.InvalidArgument, "config references '%s' and '%s' have a conflicting target: '%s'", prevConfigName, configRef.ConfigName, fileName)
}
existingTargets[fileName] = configRef.ConfigName
}
if configRef.GetRuntime() != nil {
if configRef.ConfigID == credSpecConfig {
credSpecConfigFound = true
}
}
}
if credSpecConfig != "" && !credSpecConfigFound {
return status.Errorf(
codes.InvalidArgument,
"CredentialSpec references config '%s', but that config isn't in config references with RuntimeTarget",
credSpecConfig,
)
}
return nil
} | go | func validateConfigRefsSpec(spec api.TaskSpec) error {
container := spec.GetContainer()
if container == nil {
return nil
}
// check if we're using a config as a CredentialSpec -- if so, we need to
// verify
var (
credSpecConfig string
credSpecConfigFound bool
)
if p := container.Privileges; p != nil {
if cs := p.CredentialSpec; cs != nil {
// if there is no config in the credspec, then this will just be
// assigned to emptystring anyway, so we don't need to check
// existence.
credSpecConfig = cs.GetConfig()
}
}
// Keep a map to track all the targets that will be exposed
// The string returned is only used for logging. It could as well be struct{}{}
existingTargets := make(map[string]string)
for _, configRef := range container.Configs {
// ConfigID and ConfigName are mandatory, we have invalid references without them
if configRef.ConfigID == "" || configRef.ConfigName == "" {
return status.Errorf(codes.InvalidArgument, "malformed config reference")
}
// Every config reference requires a Target
if configRef.GetTarget() == nil {
return status.Errorf(codes.InvalidArgument, "malformed config reference, no target provided")
}
// If this is a file target, we will ensure filename uniqueness
if configRef.GetFile() != nil {
fileName := configRef.GetFile().Name
// Validate the file name
if fileName == "" {
return status.Errorf(codes.InvalidArgument, "malformed file config reference, invalid target file name provided")
}
// If this target is already in use, we have conflicting targets
if prevConfigName, ok := existingTargets[fileName]; ok {
return status.Errorf(codes.InvalidArgument, "config references '%s' and '%s' have a conflicting target: '%s'", prevConfigName, configRef.ConfigName, fileName)
}
existingTargets[fileName] = configRef.ConfigName
}
if configRef.GetRuntime() != nil {
if configRef.ConfigID == credSpecConfig {
credSpecConfigFound = true
}
}
}
if credSpecConfig != "" && !credSpecConfigFound {
return status.Errorf(
codes.InvalidArgument,
"CredentialSpec references config '%s', but that config isn't in config references with RuntimeTarget",
credSpecConfig,
)
}
return nil
} | [
"func",
"validateConfigRefsSpec",
"(",
"spec",
"api",
".",
"TaskSpec",
")",
"error",
"{",
"container",
":=",
"spec",
".",
"GetContainer",
"(",
")",
"\n",
"if",
"container",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"var",
"(",
"credSpecConfig",
... | // validateConfigRefsSpec finds if the configs passed in spec are valid and have no
// conflicting targets. | [
"validateConfigRefsSpec",
"finds",
"if",
"the",
"configs",
"passed",
"in",
"spec",
"are",
"valid",
"and",
"have",
"no",
"conflicting",
"targets",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L389-L456 | train |
docker/swarmkit | manager/controlapi/service.go | checkSecretExistence | func (s *Server) checkSecretExistence(tx store.Tx, spec *api.ServiceSpec) error {
container := spec.Task.GetContainer()
if container == nil {
return nil
}
var failedSecrets []string
for _, secretRef := range container.Secrets {
secret := store.GetSecret(tx, secretRef.SecretID)
// Check to see if the secret exists and secretRef.SecretName matches the actual secretName
if secret == nil || secret.Spec.Annotations.Name != secretRef.SecretName {
failedSecrets = append(failedSecrets, secretRef.SecretName)
}
}
if len(failedSecrets) > 0 {
secretStr := "secrets"
if len(failedSecrets) == 1 {
secretStr = "secret"
}
return status.Errorf(codes.InvalidArgument, "%s not found: %v", secretStr, strings.Join(failedSecrets, ", "))
}
return nil
} | go | func (s *Server) checkSecretExistence(tx store.Tx, spec *api.ServiceSpec) error {
container := spec.Task.GetContainer()
if container == nil {
return nil
}
var failedSecrets []string
for _, secretRef := range container.Secrets {
secret := store.GetSecret(tx, secretRef.SecretID)
// Check to see if the secret exists and secretRef.SecretName matches the actual secretName
if secret == nil || secret.Spec.Annotations.Name != secretRef.SecretName {
failedSecrets = append(failedSecrets, secretRef.SecretName)
}
}
if len(failedSecrets) > 0 {
secretStr := "secrets"
if len(failedSecrets) == 1 {
secretStr = "secret"
}
return status.Errorf(codes.InvalidArgument, "%s not found: %v", secretStr, strings.Join(failedSecrets, ", "))
}
return nil
} | [
"func",
"(",
"s",
"*",
"Server",
")",
"checkSecretExistence",
"(",
"tx",
"store",
".",
"Tx",
",",
"spec",
"*",
"api",
".",
"ServiceSpec",
")",
"error",
"{",
"container",
":=",
"spec",
".",
"Task",
".",
"GetContainer",
"(",
")",
"\n",
"if",
"container",... | // checkSecretExistence finds if the secret exists | [
"checkSecretExistence",
"finds",
"if",
"the",
"secret",
"exists"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L609-L635 | train |
docker/swarmkit | manager/controlapi/service.go | checkConfigExistence | func (s *Server) checkConfigExistence(tx store.Tx, spec *api.ServiceSpec) error {
container := spec.Task.GetContainer()
if container == nil {
return nil
}
var failedConfigs []string
for _, configRef := range container.Configs {
config := store.GetConfig(tx, configRef.ConfigID)
// Check to see if the config exists and configRef.ConfigName matches the actual configName
if config == nil || config.Spec.Annotations.Name != configRef.ConfigName {
failedConfigs = append(failedConfigs, configRef.ConfigName)
}
}
if len(failedConfigs) > 0 {
configStr := "configs"
if len(failedConfigs) == 1 {
configStr = "config"
}
return status.Errorf(codes.InvalidArgument, "%s not found: %v", configStr, strings.Join(failedConfigs, ", "))
}
return nil
} | go | func (s *Server) checkConfigExistence(tx store.Tx, spec *api.ServiceSpec) error {
container := spec.Task.GetContainer()
if container == nil {
return nil
}
var failedConfigs []string
for _, configRef := range container.Configs {
config := store.GetConfig(tx, configRef.ConfigID)
// Check to see if the config exists and configRef.ConfigName matches the actual configName
if config == nil || config.Spec.Annotations.Name != configRef.ConfigName {
failedConfigs = append(failedConfigs, configRef.ConfigName)
}
}
if len(failedConfigs) > 0 {
configStr := "configs"
if len(failedConfigs) == 1 {
configStr = "config"
}
return status.Errorf(codes.InvalidArgument, "%s not found: %v", configStr, strings.Join(failedConfigs, ", "))
}
return nil
} | [
"func",
"(",
"s",
"*",
"Server",
")",
"checkConfigExistence",
"(",
"tx",
"store",
".",
"Tx",
",",
"spec",
"*",
"api",
".",
"ServiceSpec",
")",
"error",
"{",
"container",
":=",
"spec",
".",
"Task",
".",
"GetContainer",
"(",
")",
"\n",
"if",
"container",... | // checkConfigExistence finds if the config exists | [
"checkConfigExistence",
"finds",
"if",
"the",
"config",
"exists"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L638-L664 | train |
docker/swarmkit | manager/controlapi/service.go | CreateService | func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRequest) (*api.CreateServiceResponse, error) {
if err := validateServiceSpec(request.Spec); err != nil {
return nil, err
}
if err := s.validateNetworks(request.Spec.Task.Networks); err != nil {
return nil, err
}
if err := s.checkPortConflicts(request.Spec, ""); err != nil {
return nil, err
}
// TODO(aluzzardi): Consider using `Name` as a primary key to handle
// duplicate creations. See #65
service := &api.Service{
ID: identity.NewID(),
Spec: *request.Spec,
SpecVersion: &api.Version{},
}
if allocator.IsIngressNetworkNeeded(service) {
if _, err := allocator.GetIngressNetwork(s.store); err == allocator.ErrNoIngress {
return nil, status.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present")
}
}
err := s.store.Update(func(tx store.Tx) error {
// Check to see if all the secrets being added exist as objects
// in our datastore
err := s.checkSecretExistence(tx, request.Spec)
if err != nil {
return err
}
err = s.checkConfigExistence(tx, request.Spec)
if err != nil {
return err
}
return store.CreateService(tx, service)
})
switch err {
case store.ErrNameConflict:
// Enhance the name-confict error to include the service name. The original
// `ErrNameConflict` error-message is included for backward-compatibility
// with older consumers of the API performing string-matching.
return nil, status.Errorf(codes.AlreadyExists, "%s: service %s already exists", err.Error(), request.Spec.Annotations.Name)
case nil:
return &api.CreateServiceResponse{Service: service}, nil
default:
return nil, err
}
} | go | func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRequest) (*api.CreateServiceResponse, error) {
if err := validateServiceSpec(request.Spec); err != nil {
return nil, err
}
if err := s.validateNetworks(request.Spec.Task.Networks); err != nil {
return nil, err
}
if err := s.checkPortConflicts(request.Spec, ""); err != nil {
return nil, err
}
// TODO(aluzzardi): Consider using `Name` as a primary key to handle
// duplicate creations. See #65
service := &api.Service{
ID: identity.NewID(),
Spec: *request.Spec,
SpecVersion: &api.Version{},
}
if allocator.IsIngressNetworkNeeded(service) {
if _, err := allocator.GetIngressNetwork(s.store); err == allocator.ErrNoIngress {
return nil, status.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present")
}
}
err := s.store.Update(func(tx store.Tx) error {
// Check to see if all the secrets being added exist as objects
// in our datastore
err := s.checkSecretExistence(tx, request.Spec)
if err != nil {
return err
}
err = s.checkConfigExistence(tx, request.Spec)
if err != nil {
return err
}
return store.CreateService(tx, service)
})
switch err {
case store.ErrNameConflict:
// Enhance the name-confict error to include the service name. The original
// `ErrNameConflict` error-message is included for backward-compatibility
// with older consumers of the API performing string-matching.
return nil, status.Errorf(codes.AlreadyExists, "%s: service %s already exists", err.Error(), request.Spec.Annotations.Name)
case nil:
return &api.CreateServiceResponse{Service: service}, nil
default:
return nil, err
}
} | [
"func",
"(",
"s",
"*",
"Server",
")",
"CreateService",
"(",
"ctx",
"context",
".",
"Context",
",",
"request",
"*",
"api",
".",
"CreateServiceRequest",
")",
"(",
"*",
"api",
".",
"CreateServiceResponse",
",",
"error",
")",
"{",
"if",
"err",
":=",
"validat... | // CreateService creates and returns a Service based on the provided ServiceSpec.
// - Returns `InvalidArgument` if the ServiceSpec is malformed.
// - Returns `Unimplemented` if the ServiceSpec references unimplemented features.
// - Returns `AlreadyExists` if the ServiceID conflicts.
// - Returns an error if the creation fails. | [
"CreateService",
"creates",
"and",
"returns",
"a",
"Service",
"based",
"on",
"the",
"provided",
"ServiceSpec",
".",
"-",
"Returns",
"InvalidArgument",
"if",
"the",
"ServiceSpec",
"is",
"malformed",
".",
"-",
"Returns",
"Unimplemented",
"if",
"the",
"ServiceSpec",
... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L671-L723 | train |
docker/swarmkit | manager/controlapi/service.go | GetService | func (s *Server) GetService(ctx context.Context, request *api.GetServiceRequest) (*api.GetServiceResponse, error) {
if request.ServiceID == "" {
return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
var service *api.Service
s.store.View(func(tx store.ReadTx) {
service = store.GetService(tx, request.ServiceID)
})
if service == nil {
return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
}
if request.InsertDefaults {
service.Spec = *defaults.InterpolateService(&service.Spec)
}
return &api.GetServiceResponse{
Service: service,
}, nil
} | go | func (s *Server) GetService(ctx context.Context, request *api.GetServiceRequest) (*api.GetServiceResponse, error) {
if request.ServiceID == "" {
return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
var service *api.Service
s.store.View(func(tx store.ReadTx) {
service = store.GetService(tx, request.ServiceID)
})
if service == nil {
return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
}
if request.InsertDefaults {
service.Spec = *defaults.InterpolateService(&service.Spec)
}
return &api.GetServiceResponse{
Service: service,
}, nil
} | [
"func",
"(",
"s",
"*",
"Server",
")",
"GetService",
"(",
"ctx",
"context",
".",
"Context",
",",
"request",
"*",
"api",
".",
"GetServiceRequest",
")",
"(",
"*",
"api",
".",
"GetServiceResponse",
",",
"error",
")",
"{",
"if",
"request",
".",
"ServiceID",
... | // GetService returns a Service given a ServiceID.
// - Returns `InvalidArgument` if ServiceID is not provided.
// - Returns `NotFound` if the Service is not found. | [
"GetService",
"returns",
"a",
"Service",
"given",
"a",
"ServiceID",
".",
"-",
"Returns",
"InvalidArgument",
"if",
"ServiceID",
"is",
"not",
"provided",
".",
"-",
"Returns",
"NotFound",
"if",
"the",
"Service",
"is",
"not",
"found",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L728-L748 | train |
docker/swarmkit | manager/controlapi/service.go | RemoveService | func (s *Server) RemoveService(ctx context.Context, request *api.RemoveServiceRequest) (*api.RemoveServiceResponse, error) {
if request.ServiceID == "" {
return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
err := s.store.Update(func(tx store.Tx) error {
return store.DeleteService(tx, request.ServiceID)
})
if err != nil {
if err == store.ErrNotExist {
return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
}
return nil, err
}
return &api.RemoveServiceResponse{}, nil
} | go | func (s *Server) RemoveService(ctx context.Context, request *api.RemoveServiceRequest) (*api.RemoveServiceResponse, error) {
if request.ServiceID == "" {
return nil, status.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
err := s.store.Update(func(tx store.Tx) error {
return store.DeleteService(tx, request.ServiceID)
})
if err != nil {
if err == store.ErrNotExist {
return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID)
}
return nil, err
}
return &api.RemoveServiceResponse{}, nil
} | [
"func",
"(",
"s",
"*",
"Server",
")",
"RemoveService",
"(",
"ctx",
"context",
".",
"Context",
",",
"request",
"*",
"api",
".",
"RemoveServiceRequest",
")",
"(",
"*",
"api",
".",
"RemoveServiceResponse",
",",
"error",
")",
"{",
"if",
"request",
".",
"Serv... | // RemoveService removes a Service referenced by ServiceID.
// - Returns `InvalidArgument` if ServiceID is not provided.
// - Returns `NotFound` if the Service is not found.
// - Returns an error if the deletion fails. | [
"RemoveService",
"removes",
"a",
"Service",
"referenced",
"by",
"ServiceID",
".",
"-",
"Returns",
"InvalidArgument",
"if",
"ServiceID",
"is",
"not",
"provided",
".",
"-",
"Returns",
"NotFound",
"if",
"the",
"Service",
"is",
"not",
"found",
".",
"-",
"Returns",... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L874-L889 | train |
docker/swarmkit | manager/controlapi/service.go | ListServices | func (s *Server) ListServices(ctx context.Context, request *api.ListServicesRequest) (*api.ListServicesResponse, error) {
var (
services []*api.Service
err error
)
s.store.View(func(tx store.ReadTx) {
switch {
case request.Filters != nil && len(request.Filters.Names) > 0:
services, err = store.FindServices(tx, buildFilters(store.ByName, request.Filters.Names))
case request.Filters != nil && len(request.Filters.NamePrefixes) > 0:
services, err = store.FindServices(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes))
case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
services, err = store.FindServices(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
case request.Filters != nil && len(request.Filters.Runtimes) > 0:
services, err = store.FindServices(tx, buildFilters(store.ByRuntime, request.Filters.Runtimes))
default:
services, err = store.FindServices(tx, store.All)
}
})
if err != nil {
switch err {
case store.ErrInvalidFindBy:
return nil, status.Errorf(codes.InvalidArgument, err.Error())
default:
return nil, err
}
}
if request.Filters != nil {
services = filterServices(services,
func(e *api.Service) bool {
return filterContains(e.Spec.Annotations.Name, request.Filters.Names)
},
func(e *api.Service) bool {
return filterContainsPrefix(e.Spec.Annotations.Name, request.Filters.NamePrefixes)
},
func(e *api.Service) bool {
return filterContainsPrefix(e.ID, request.Filters.IDPrefixes)
},
func(e *api.Service) bool {
return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels)
},
func(e *api.Service) bool {
if len(request.Filters.Runtimes) == 0 {
return true
}
r, err := naming.Runtime(e.Spec.Task)
if err != nil {
return false
}
return filterContains(r, request.Filters.Runtimes)
},
)
}
return &api.ListServicesResponse{
Services: services,
}, nil
} | go | func (s *Server) ListServices(ctx context.Context, request *api.ListServicesRequest) (*api.ListServicesResponse, error) {
var (
services []*api.Service
err error
)
s.store.View(func(tx store.ReadTx) {
switch {
case request.Filters != nil && len(request.Filters.Names) > 0:
services, err = store.FindServices(tx, buildFilters(store.ByName, request.Filters.Names))
case request.Filters != nil && len(request.Filters.NamePrefixes) > 0:
services, err = store.FindServices(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes))
case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
services, err = store.FindServices(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
case request.Filters != nil && len(request.Filters.Runtimes) > 0:
services, err = store.FindServices(tx, buildFilters(store.ByRuntime, request.Filters.Runtimes))
default:
services, err = store.FindServices(tx, store.All)
}
})
if err != nil {
switch err {
case store.ErrInvalidFindBy:
return nil, status.Errorf(codes.InvalidArgument, err.Error())
default:
return nil, err
}
}
if request.Filters != nil {
services = filterServices(services,
func(e *api.Service) bool {
return filterContains(e.Spec.Annotations.Name, request.Filters.Names)
},
func(e *api.Service) bool {
return filterContainsPrefix(e.Spec.Annotations.Name, request.Filters.NamePrefixes)
},
func(e *api.Service) bool {
return filterContainsPrefix(e.ID, request.Filters.IDPrefixes)
},
func(e *api.Service) bool {
return filterMatchLabels(e.Spec.Annotations.Labels, request.Filters.Labels)
},
func(e *api.Service) bool {
if len(request.Filters.Runtimes) == 0 {
return true
}
r, err := naming.Runtime(e.Spec.Task)
if err != nil {
return false
}
return filterContains(r, request.Filters.Runtimes)
},
)
}
return &api.ListServicesResponse{
Services: services,
}, nil
} | [
"func",
"(",
"s",
"*",
"Server",
")",
"ListServices",
"(",
"ctx",
"context",
".",
"Context",
",",
"request",
"*",
"api",
".",
"ListServicesRequest",
")",
"(",
"*",
"api",
".",
"ListServicesResponse",
",",
"error",
")",
"{",
"var",
"(",
"services",
"[",
... | // ListServices returns a list of all services. | [
"ListServices",
"returns",
"a",
"list",
"of",
"all",
"services",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/controlapi/service.go#L911-L970 | train |
docker/swarmkit | manager/logbroker/broker.go | Start | func (lb *LogBroker) Start(ctx context.Context) error {
lb.mu.Lock()
defer lb.mu.Unlock()
if lb.cancelAll != nil {
return errAlreadyRunning
}
lb.pctx, lb.cancelAll = context.WithCancel(ctx)
lb.logQueue = watch.NewQueue()
lb.subscriptionQueue = watch.NewQueue()
lb.registeredSubscriptions = make(map[string]*subscription)
lb.subscriptionsByNode = make(map[string]map[*subscription]struct{})
return nil
} | go | func (lb *LogBroker) Start(ctx context.Context) error {
lb.mu.Lock()
defer lb.mu.Unlock()
if lb.cancelAll != nil {
return errAlreadyRunning
}
lb.pctx, lb.cancelAll = context.WithCancel(ctx)
lb.logQueue = watch.NewQueue()
lb.subscriptionQueue = watch.NewQueue()
lb.registeredSubscriptions = make(map[string]*subscription)
lb.subscriptionsByNode = make(map[string]map[*subscription]struct{})
return nil
} | [
"func",
"(",
"lb",
"*",
"LogBroker",
")",
"Start",
"(",
"ctx",
"context",
".",
"Context",
")",
"error",
"{",
"lb",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"lb",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"if",
"lb",
".",
"cancelAll",
"!... | // Start starts the log broker | [
"Start",
"starts",
"the",
"log",
"broker"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/logbroker/broker.go#L60-L74 | train |
docker/swarmkit | manager/logbroker/broker.go | Stop | func (lb *LogBroker) Stop() error {
lb.mu.Lock()
defer lb.mu.Unlock()
if lb.cancelAll == nil {
return errNotRunning
}
lb.cancelAll()
lb.cancelAll = nil
lb.logQueue.Close()
lb.subscriptionQueue.Close()
return nil
} | go | func (lb *LogBroker) Stop() error {
lb.mu.Lock()
defer lb.mu.Unlock()
if lb.cancelAll == nil {
return errNotRunning
}
lb.cancelAll()
lb.cancelAll = nil
lb.logQueue.Close()
lb.subscriptionQueue.Close()
return nil
} | [
"func",
"(",
"lb",
"*",
"LogBroker",
")",
"Stop",
"(",
")",
"error",
"{",
"lb",
".",
"mu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"lb",
".",
"mu",
".",
"Unlock",
"(",
")",
"\n",
"if",
"lb",
".",
"cancelAll",
"==",
"nil",
"{",
"return",
"errNotRu... | // Stop stops the log broker | [
"Stop",
"stops",
"the",
"log",
"broker"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/logbroker/broker.go#L77-L91 | train |
docker/swarmkit | manager/logbroker/broker.go | watchSubscriptions | func (lb *LogBroker) watchSubscriptions(nodeID string) ([]*subscription, chan events.Event, func()) {
lb.mu.RLock()
defer lb.mu.RUnlock()
// Watch for subscription changes for this node.
ch, cancel := lb.subscriptionQueue.CallbackWatch(events.MatcherFunc(func(event events.Event) bool {
s := event.(*subscription)
return s.Contains(nodeID)
}))
// Grab current subscriptions.
var subscriptions []*subscription
for _, s := range lb.registeredSubscriptions {
if s.Contains(nodeID) {
subscriptions = append(subscriptions, s)
}
}
return subscriptions, ch, cancel
} | go | func (lb *LogBroker) watchSubscriptions(nodeID string) ([]*subscription, chan events.Event, func()) {
lb.mu.RLock()
defer lb.mu.RUnlock()
// Watch for subscription changes for this node.
ch, cancel := lb.subscriptionQueue.CallbackWatch(events.MatcherFunc(func(event events.Event) bool {
s := event.(*subscription)
return s.Contains(nodeID)
}))
// Grab current subscriptions.
var subscriptions []*subscription
for _, s := range lb.registeredSubscriptions {
if s.Contains(nodeID) {
subscriptions = append(subscriptions, s)
}
}
return subscriptions, ch, cancel
} | [
"func",
"(",
"lb",
"*",
"LogBroker",
")",
"watchSubscriptions",
"(",
"nodeID",
"string",
")",
"(",
"[",
"]",
"*",
"subscription",
",",
"chan",
"events",
".",
"Event",
",",
"func",
"(",
")",
")",
"{",
"lb",
".",
"mu",
".",
"RLock",
"(",
")",
"\n",
... | // watchSubscriptions grabs all current subscriptions and notifies of any
// subscription change for this node.
//
// Subscriptions may fire multiple times and the caller has to protect against
// dupes. | [
"watchSubscriptions",
"grabs",
"all",
"current",
"subscriptions",
"and",
"notifies",
"of",
"any",
"subscription",
"change",
"for",
"this",
"node",
".",
"Subscriptions",
"may",
"fire",
"multiple",
"times",
"and",
"the",
"caller",
"has",
"to",
"protect",
"against",
... | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/logbroker/broker.go#L170-L189 | train |
docker/swarmkit | manager/logbroker/broker.go | SubscribeLogs | func (lb *LogBroker) SubscribeLogs(request *api.SubscribeLogsRequest, stream api.Logs_SubscribeLogsServer) error {
ctx := stream.Context()
if err := validateSelector(request.Selector); err != nil {
return err
}
lb.mu.Lock()
pctx := lb.pctx
lb.mu.Unlock()
if pctx == nil {
return errNotRunning
}
subscription := lb.newSubscription(request.Selector, request.Options)
subscription.Run(pctx)
defer subscription.Stop()
log := log.G(ctx).WithFields(
logrus.Fields{
"method": "(*LogBroker).SubscribeLogs",
"subscription.id": subscription.message.ID,
},
)
log.Debug("subscribed")
publishCh, publishCancel := lb.subscribe(subscription.message.ID)
defer publishCancel()
lb.registerSubscription(subscription)
defer lb.unregisterSubscription(subscription)
completed := subscription.Wait(ctx)
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-pctx.Done():
return pctx.Err()
case event := <-publishCh:
publish := event.(*logMessage)
if publish.completed {
return publish.err
}
if err := stream.Send(&api.SubscribeLogsMessage{
Messages: publish.Messages,
}); err != nil {
return err
}
case <-completed:
completed = nil
lb.logQueue.Publish(&logMessage{
PublishLogsMessage: &api.PublishLogsMessage{
SubscriptionID: subscription.message.ID,
},
completed: true,
err: subscription.Err(),
})
}
}
} | go | func (lb *LogBroker) SubscribeLogs(request *api.SubscribeLogsRequest, stream api.Logs_SubscribeLogsServer) error {
ctx := stream.Context()
if err := validateSelector(request.Selector); err != nil {
return err
}
lb.mu.Lock()
pctx := lb.pctx
lb.mu.Unlock()
if pctx == nil {
return errNotRunning
}
subscription := lb.newSubscription(request.Selector, request.Options)
subscription.Run(pctx)
defer subscription.Stop()
log := log.G(ctx).WithFields(
logrus.Fields{
"method": "(*LogBroker).SubscribeLogs",
"subscription.id": subscription.message.ID,
},
)
log.Debug("subscribed")
publishCh, publishCancel := lb.subscribe(subscription.message.ID)
defer publishCancel()
lb.registerSubscription(subscription)
defer lb.unregisterSubscription(subscription)
completed := subscription.Wait(ctx)
for {
select {
case <-ctx.Done():
return ctx.Err()
case <-pctx.Done():
return pctx.Err()
case event := <-publishCh:
publish := event.(*logMessage)
if publish.completed {
return publish.err
}
if err := stream.Send(&api.SubscribeLogsMessage{
Messages: publish.Messages,
}); err != nil {
return err
}
case <-completed:
completed = nil
lb.logQueue.Publish(&logMessage{
PublishLogsMessage: &api.PublishLogsMessage{
SubscriptionID: subscription.message.ID,
},
completed: true,
err: subscription.Err(),
})
}
}
} | [
"func",
"(",
"lb",
"*",
"LogBroker",
")",
"SubscribeLogs",
"(",
"request",
"*",
"api",
".",
"SubscribeLogsRequest",
",",
"stream",
"api",
".",
"Logs_SubscribeLogsServer",
")",
"error",
"{",
"ctx",
":=",
"stream",
".",
"Context",
"(",
")",
"\n",
"if",
"err"... | // SubscribeLogs creates a log subscription and streams back logs | [
"SubscribeLogs",
"creates",
"a",
"log",
"subscription",
"and",
"streams",
"back",
"logs"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/logbroker/broker.go#L224-L284 | train |
docker/swarmkit | manager/logbroker/broker.go | ListenSubscriptions | func (lb *LogBroker) ListenSubscriptions(request *api.ListenSubscriptionsRequest, stream api.LogBroker_ListenSubscriptionsServer) error {
remote, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
lb.mu.Lock()
pctx := lb.pctx
lb.mu.Unlock()
if pctx == nil {
return errNotRunning
}
lb.nodeConnected(remote.NodeID)
defer lb.nodeDisconnected(remote.NodeID)
log := log.G(stream.Context()).WithFields(
logrus.Fields{
"method": "(*LogBroker).ListenSubscriptions",
"node": remote.NodeID,
},
)
subscriptions, subscriptionCh, subscriptionCancel := lb.watchSubscriptions(remote.NodeID)
defer subscriptionCancel()
log.Debug("node registered")
activeSubscriptions := make(map[string]*subscription)
// Start by sending down all active subscriptions.
for _, subscription := range subscriptions {
select {
case <-stream.Context().Done():
return stream.Context().Err()
case <-pctx.Done():
return nil
default:
}
if err := stream.Send(subscription.message); err != nil {
log.Error(err)
return err
}
activeSubscriptions[subscription.message.ID] = subscription
}
// Send down new subscriptions.
for {
select {
case v := <-subscriptionCh:
subscription := v.(*subscription)
if subscription.Closed() {
delete(activeSubscriptions, subscription.message.ID)
} else {
// Avoid sending down the same subscription multiple times
if _, ok := activeSubscriptions[subscription.message.ID]; ok {
continue
}
activeSubscriptions[subscription.message.ID] = subscription
}
if err := stream.Send(subscription.message); err != nil {
log.Error(err)
return err
}
case <-stream.Context().Done():
return stream.Context().Err()
case <-pctx.Done():
return nil
}
}
} | go | func (lb *LogBroker) ListenSubscriptions(request *api.ListenSubscriptionsRequest, stream api.LogBroker_ListenSubscriptionsServer) error {
remote, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
lb.mu.Lock()
pctx := lb.pctx
lb.mu.Unlock()
if pctx == nil {
return errNotRunning
}
lb.nodeConnected(remote.NodeID)
defer lb.nodeDisconnected(remote.NodeID)
log := log.G(stream.Context()).WithFields(
logrus.Fields{
"method": "(*LogBroker).ListenSubscriptions",
"node": remote.NodeID,
},
)
subscriptions, subscriptionCh, subscriptionCancel := lb.watchSubscriptions(remote.NodeID)
defer subscriptionCancel()
log.Debug("node registered")
activeSubscriptions := make(map[string]*subscription)
// Start by sending down all active subscriptions.
for _, subscription := range subscriptions {
select {
case <-stream.Context().Done():
return stream.Context().Err()
case <-pctx.Done():
return nil
default:
}
if err := stream.Send(subscription.message); err != nil {
log.Error(err)
return err
}
activeSubscriptions[subscription.message.ID] = subscription
}
// Send down new subscriptions.
for {
select {
case v := <-subscriptionCh:
subscription := v.(*subscription)
if subscription.Closed() {
delete(activeSubscriptions, subscription.message.ID)
} else {
// Avoid sending down the same subscription multiple times
if _, ok := activeSubscriptions[subscription.message.ID]; ok {
continue
}
activeSubscriptions[subscription.message.ID] = subscription
}
if err := stream.Send(subscription.message); err != nil {
log.Error(err)
return err
}
case <-stream.Context().Done():
return stream.Context().Err()
case <-pctx.Done():
return nil
}
}
} | [
"func",
"(",
"lb",
"*",
"LogBroker",
")",
"ListenSubscriptions",
"(",
"request",
"*",
"api",
".",
"ListenSubscriptionsRequest",
",",
"stream",
"api",
".",
"LogBroker_ListenSubscriptionsServer",
")",
"error",
"{",
"remote",
",",
"err",
":=",
"ca",
".",
"RemoteNod... | // ListenSubscriptions returns a stream of matching subscriptions for the current node | [
"ListenSubscriptions",
"returns",
"a",
"stream",
"of",
"matching",
"subscriptions",
"for",
"the",
"current",
"node"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/logbroker/broker.go#L306-L377 | train |
docker/swarmkit | manager/logbroker/broker.go | PublishLogs | func (lb *LogBroker) PublishLogs(stream api.LogBroker_PublishLogsServer) (err error) {
remote, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
var currentSubscription *subscription
defer func() {
if currentSubscription != nil {
lb.markDone(currentSubscription, remote.NodeID, err)
}
}()
for {
logMsg, err := stream.Recv()
if err == io.EOF {
return stream.SendAndClose(&api.PublishLogsResponse{})
}
if err != nil {
return err
}
if logMsg.SubscriptionID == "" {
return status.Errorf(codes.InvalidArgument, "missing subscription ID")
}
if currentSubscription == nil {
currentSubscription = lb.getSubscription(logMsg.SubscriptionID)
if currentSubscription == nil {
return status.Errorf(codes.NotFound, "unknown subscription ID")
}
} else {
if logMsg.SubscriptionID != currentSubscription.message.ID {
return status.Errorf(codes.InvalidArgument, "different subscription IDs in the same session")
}
}
// if we have a close message, close out the subscription
if logMsg.Close {
// Mark done and then set to nil so if we error after this point,
// we don't try to close again in the defer
lb.markDone(currentSubscription, remote.NodeID, err)
currentSubscription = nil
return nil
}
// Make sure logs are emitted using the right Node ID to avoid impersonation.
for _, msg := range logMsg.Messages {
if msg.Context.NodeID != remote.NodeID {
return status.Errorf(codes.PermissionDenied, "invalid NodeID: expected=%s;received=%s", remote.NodeID, msg.Context.NodeID)
}
}
lb.publish(logMsg)
}
} | go | func (lb *LogBroker) PublishLogs(stream api.LogBroker_PublishLogsServer) (err error) {
remote, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
var currentSubscription *subscription
defer func() {
if currentSubscription != nil {
lb.markDone(currentSubscription, remote.NodeID, err)
}
}()
for {
logMsg, err := stream.Recv()
if err == io.EOF {
return stream.SendAndClose(&api.PublishLogsResponse{})
}
if err != nil {
return err
}
if logMsg.SubscriptionID == "" {
return status.Errorf(codes.InvalidArgument, "missing subscription ID")
}
if currentSubscription == nil {
currentSubscription = lb.getSubscription(logMsg.SubscriptionID)
if currentSubscription == nil {
return status.Errorf(codes.NotFound, "unknown subscription ID")
}
} else {
if logMsg.SubscriptionID != currentSubscription.message.ID {
return status.Errorf(codes.InvalidArgument, "different subscription IDs in the same session")
}
}
// if we have a close message, close out the subscription
if logMsg.Close {
// Mark done and then set to nil so if we error after this point,
// we don't try to close again in the defer
lb.markDone(currentSubscription, remote.NodeID, err)
currentSubscription = nil
return nil
}
// Make sure logs are emitted using the right Node ID to avoid impersonation.
for _, msg := range logMsg.Messages {
if msg.Context.NodeID != remote.NodeID {
return status.Errorf(codes.PermissionDenied, "invalid NodeID: expected=%s;received=%s", remote.NodeID, msg.Context.NodeID)
}
}
lb.publish(logMsg)
}
} | [
"func",
"(",
"lb",
"*",
"LogBroker",
")",
"PublishLogs",
"(",
"stream",
"api",
".",
"LogBroker_PublishLogsServer",
")",
"(",
"err",
"error",
")",
"{",
"remote",
",",
"err",
":=",
"ca",
".",
"RemoteNode",
"(",
"stream",
".",
"Context",
"(",
")",
")",
"\... | // PublishLogs publishes log messages for a given subscription | [
"PublishLogs",
"publishes",
"log",
"messages",
"for",
"a",
"given",
"subscription"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/logbroker/broker.go#L380-L435 | train |
docker/swarmkit | cmd/swarmctl/service/flagparser/secret.go | ParseAddSecret | func ParseAddSecret(cmd *cobra.Command, spec *api.ServiceSpec, flagName string) error {
flags := cmd.Flags()
if flags.Changed(flagName) {
secrets, err := flags.GetStringSlice(flagName)
if err != nil {
return err
}
container := spec.Task.GetContainer()
if container == nil {
spec.Task.Runtime = &api.TaskSpec_Container{
Container: &api.ContainerSpec{},
}
}
lookupSecretNames := []string{}
var needSecrets []*api.SecretReference
for _, secret := range secrets {
n, p, err := parseSecretString(secret)
if err != nil {
return err
}
// TODO(diogo): defaults to File targets, but in the future will take different types
secretRef := &api.SecretReference{
SecretName: n,
Target: &api.SecretReference_File{
File: &api.FileTarget{
Name: p,
Mode: 0444,
},
},
}
lookupSecretNames = append(lookupSecretNames, n)
needSecrets = append(needSecrets, secretRef)
}
client, err := common.Dial(cmd)
if err != nil {
return err
}
r, err := client.ListSecrets(common.Context(cmd),
&api.ListSecretsRequest{Filters: &api.ListSecretsRequest_Filters{Names: lookupSecretNames}})
if err != nil {
return err
}
foundSecrets := make(map[string]*api.Secret)
for _, secret := range r.Secrets {
foundSecrets[secret.Spec.Annotations.Name] = secret
}
for _, secretRef := range needSecrets {
secret, ok := foundSecrets[secretRef.SecretName]
if !ok {
return fmt.Errorf("secret not found: %s", secretRef.SecretName)
}
secretRef.SecretID = secret.ID
container.Secrets = append(container.Secrets, secretRef)
}
}
return nil
} | go | func ParseAddSecret(cmd *cobra.Command, spec *api.ServiceSpec, flagName string) error {
flags := cmd.Flags()
if flags.Changed(flagName) {
secrets, err := flags.GetStringSlice(flagName)
if err != nil {
return err
}
container := spec.Task.GetContainer()
if container == nil {
spec.Task.Runtime = &api.TaskSpec_Container{
Container: &api.ContainerSpec{},
}
}
lookupSecretNames := []string{}
var needSecrets []*api.SecretReference
for _, secret := range secrets {
n, p, err := parseSecretString(secret)
if err != nil {
return err
}
// TODO(diogo): defaults to File targets, but in the future will take different types
secretRef := &api.SecretReference{
SecretName: n,
Target: &api.SecretReference_File{
File: &api.FileTarget{
Name: p,
Mode: 0444,
},
},
}
lookupSecretNames = append(lookupSecretNames, n)
needSecrets = append(needSecrets, secretRef)
}
client, err := common.Dial(cmd)
if err != nil {
return err
}
r, err := client.ListSecrets(common.Context(cmd),
&api.ListSecretsRequest{Filters: &api.ListSecretsRequest_Filters{Names: lookupSecretNames}})
if err != nil {
return err
}
foundSecrets := make(map[string]*api.Secret)
for _, secret := range r.Secrets {
foundSecrets[secret.Spec.Annotations.Name] = secret
}
for _, secretRef := range needSecrets {
secret, ok := foundSecrets[secretRef.SecretName]
if !ok {
return fmt.Errorf("secret not found: %s", secretRef.SecretName)
}
secretRef.SecretID = secret.ID
container.Secrets = append(container.Secrets, secretRef)
}
}
return nil
} | [
"func",
"ParseAddSecret",
"(",
"cmd",
"*",
"cobra",
".",
"Command",
",",
"spec",
"*",
"api",
".",
"ServiceSpec",
",",
"flagName",
"string",
")",
"error",
"{",
"flags",
":=",
"cmd",
".",
"Flags",
"(",
")",
"\n",
"if",
"flags",
".",
"Changed",
"(",
"fl... | // ParseAddSecret validates secrets passed on the command line | [
"ParseAddSecret",
"validates",
"secrets",
"passed",
"on",
"the",
"command",
"line"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/cmd/swarmctl/service/flagparser/secret.go#L36-L104 | train |
docker/swarmkit | cmd/swarmctl/service/flagparser/secret.go | ParseRemoveSecret | func ParseRemoveSecret(cmd *cobra.Command, spec *api.ServiceSpec, flagName string) error {
flags := cmd.Flags()
if flags.Changed(flagName) {
secrets, err := flags.GetStringSlice(flagName)
if err != nil {
return err
}
container := spec.Task.GetContainer()
if container == nil {
return nil
}
wantToDelete := make(map[string]struct{})
for _, secret := range secrets {
n, _, err := parseSecretString(secret)
if err != nil {
return err
}
wantToDelete[n] = struct{}{}
}
secretRefs := []*api.SecretReference{}
for _, secretRef := range container.Secrets {
if _, ok := wantToDelete[secretRef.SecretName]; ok {
continue
}
secretRefs = append(secretRefs, secretRef)
}
container.Secrets = secretRefs
}
return nil
} | go | func ParseRemoveSecret(cmd *cobra.Command, spec *api.ServiceSpec, flagName string) error {
flags := cmd.Flags()
if flags.Changed(flagName) {
secrets, err := flags.GetStringSlice(flagName)
if err != nil {
return err
}
container := spec.Task.GetContainer()
if container == nil {
return nil
}
wantToDelete := make(map[string]struct{})
for _, secret := range secrets {
n, _, err := parseSecretString(secret)
if err != nil {
return err
}
wantToDelete[n] = struct{}{}
}
secretRefs := []*api.SecretReference{}
for _, secretRef := range container.Secrets {
if _, ok := wantToDelete[secretRef.SecretName]; ok {
continue
}
secretRefs = append(secretRefs, secretRef)
}
container.Secrets = secretRefs
}
return nil
} | [
"func",
"ParseRemoveSecret",
"(",
"cmd",
"*",
"cobra",
".",
"Command",
",",
"spec",
"*",
"api",
".",
"ServiceSpec",
",",
"flagName",
"string",
")",
"error",
"{",
"flags",
":=",
"cmd",
".",
"Flags",
"(",
")",
"\n",
"if",
"flags",
".",
"Changed",
"(",
... | // ParseRemoveSecret removes a set of secrets from the task spec's secret references | [
"ParseRemoveSecret",
"removes",
"a",
"set",
"of",
"secrets",
"from",
"the",
"task",
"spec",
"s",
"secret",
"references"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/cmd/swarmctl/service/flagparser/secret.go#L107-L144 | train |
docker/swarmkit | manager/orchestrator/global/global.go | NewGlobalOrchestrator | func NewGlobalOrchestrator(store *store.MemoryStore) *Orchestrator {
restartSupervisor := restart.NewSupervisor(store)
updater := update.NewSupervisor(store, restartSupervisor)
return &Orchestrator{
store: store,
nodes: make(map[string]*api.Node),
globalServices: make(map[string]globalService),
stopChan: make(chan struct{}),
doneChan: make(chan struct{}),
updater: updater,
restarts: restartSupervisor,
restartTasks: make(map[string]struct{}),
}
} | go | func NewGlobalOrchestrator(store *store.MemoryStore) *Orchestrator {
restartSupervisor := restart.NewSupervisor(store)
updater := update.NewSupervisor(store, restartSupervisor)
return &Orchestrator{
store: store,
nodes: make(map[string]*api.Node),
globalServices: make(map[string]globalService),
stopChan: make(chan struct{}),
doneChan: make(chan struct{}),
updater: updater,
restarts: restartSupervisor,
restartTasks: make(map[string]struct{}),
}
} | [
"func",
"NewGlobalOrchestrator",
"(",
"store",
"*",
"store",
".",
"MemoryStore",
")",
"*",
"Orchestrator",
"{",
"restartSupervisor",
":=",
"restart",
".",
"NewSupervisor",
"(",
"store",
")",
"\n",
"updater",
":=",
"update",
".",
"NewSupervisor",
"(",
"store",
... | // NewGlobalOrchestrator creates a new global Orchestrator | [
"NewGlobalOrchestrator",
"creates",
"a",
"new",
"global",
"Orchestrator"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/global/global.go#L45-L58 | train |
docker/swarmkit | manager/orchestrator/global/global.go | FixTask | func (g *Orchestrator) FixTask(ctx context.Context, batch *store.Batch, t *api.Task) {
if _, exists := g.globalServices[t.ServiceID]; !exists {
return
}
// if a task's DesiredState has past running, the task has been processed
if t.DesiredState > api.TaskStateRunning {
return
}
var node *api.Node
if t.NodeID != "" {
node = g.nodes[t.NodeID]
}
// if the node no longer valid, remove the task
if t.NodeID == "" || orchestrator.InvalidNode(node) {
g.shutdownTask(ctx, batch, t)
return
}
// restart a task if it fails
if t.Status.State > api.TaskStateRunning {
g.restartTasks[t.ID] = struct{}{}
}
} | go | func (g *Orchestrator) FixTask(ctx context.Context, batch *store.Batch, t *api.Task) {
if _, exists := g.globalServices[t.ServiceID]; !exists {
return
}
// if a task's DesiredState has past running, the task has been processed
if t.DesiredState > api.TaskStateRunning {
return
}
var node *api.Node
if t.NodeID != "" {
node = g.nodes[t.NodeID]
}
// if the node no longer valid, remove the task
if t.NodeID == "" || orchestrator.InvalidNode(node) {
g.shutdownTask(ctx, batch, t)
return
}
// restart a task if it fails
if t.Status.State > api.TaskStateRunning {
g.restartTasks[t.ID] = struct{}{}
}
} | [
"func",
"(",
"g",
"*",
"Orchestrator",
")",
"FixTask",
"(",
"ctx",
"context",
".",
"Context",
",",
"batch",
"*",
"store",
".",
"Batch",
",",
"t",
"*",
"api",
".",
"Task",
")",
"{",
"if",
"_",
",",
"exists",
":=",
"g",
".",
"globalServices",
"[",
... | // FixTask validates a task with the current cluster settings, and takes
// action to make it conformant to node state and service constraint
// it's called at orchestrator initialization | [
"FixTask",
"validates",
"a",
"task",
"with",
"the",
"current",
"cluster",
"settings",
"and",
"takes",
"action",
"to",
"make",
"it",
"conformant",
"to",
"node",
"state",
"and",
"service",
"constraint",
"it",
"s",
"called",
"at",
"orchestrator",
"initialization"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/global/global.go#L177-L200 | train |
docker/swarmkit | manager/orchestrator/global/global.go | handleTaskChange | func (g *Orchestrator) handleTaskChange(ctx context.Context, t *api.Task) {
if _, exists := g.globalServices[t.ServiceID]; !exists {
return
}
// if a task's DesiredState has passed running, it
// means the task has been processed
if t.DesiredState > api.TaskStateRunning {
return
}
// if a task has passed running, restart it
if t.Status.State > api.TaskStateRunning {
g.restartTasks[t.ID] = struct{}{}
}
} | go | func (g *Orchestrator) handleTaskChange(ctx context.Context, t *api.Task) {
if _, exists := g.globalServices[t.ServiceID]; !exists {
return
}
// if a task's DesiredState has passed running, it
// means the task has been processed
if t.DesiredState > api.TaskStateRunning {
return
}
// if a task has passed running, restart it
if t.Status.State > api.TaskStateRunning {
g.restartTasks[t.ID] = struct{}{}
}
} | [
"func",
"(",
"g",
"*",
"Orchestrator",
")",
"handleTaskChange",
"(",
"ctx",
"context",
".",
"Context",
",",
"t",
"*",
"api",
".",
"Task",
")",
"{",
"if",
"_",
",",
"exists",
":=",
"g",
".",
"globalServices",
"[",
"t",
".",
"ServiceID",
"]",
";",
"!... | // handleTaskChange defines what orchestrator does when a task is updated by agent | [
"handleTaskChange",
"defines",
"what",
"orchestrator",
"does",
"when",
"a",
"task",
"is",
"updated",
"by",
"agent"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/global/global.go#L203-L217 | train |
docker/swarmkit | manager/orchestrator/global/global.go | updateNode | func (g *Orchestrator) updateNode(node *api.Node) {
if node.Spec.Availability == api.NodeAvailabilityDrain || node.Status.State == api.NodeStatus_DOWN {
delete(g.nodes, node.ID)
} else {
g.nodes[node.ID] = node
}
} | go | func (g *Orchestrator) updateNode(node *api.Node) {
if node.Spec.Availability == api.NodeAvailabilityDrain || node.Status.State == api.NodeStatus_DOWN {
delete(g.nodes, node.ID)
} else {
g.nodes[node.ID] = node
}
} | [
"func",
"(",
"g",
"*",
"Orchestrator",
")",
"updateNode",
"(",
"node",
"*",
"api",
".",
"Node",
")",
"{",
"if",
"node",
".",
"Spec",
".",
"Availability",
"==",
"api",
".",
"NodeAvailabilityDrain",
"||",
"node",
".",
"Status",
".",
"State",
"==",
"api",... | // updateNode updates g.nodes based on the current node value | [
"updateNode",
"updates",
"g",
".",
"nodes",
"based",
"on",
"the",
"current",
"node",
"value"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/global/global.go#L353-L359 | train |
docker/swarmkit | manager/orchestrator/global/global.go | updateService | func (g *Orchestrator) updateService(service *api.Service) {
var constraints []constraint.Constraint
if service.Spec.Task.Placement != nil && len(service.Spec.Task.Placement.Constraints) != 0 {
constraints, _ = constraint.Parse(service.Spec.Task.Placement.Constraints)
}
g.globalServices[service.ID] = globalService{
Service: service,
constraints: constraints,
}
} | go | func (g *Orchestrator) updateService(service *api.Service) {
var constraints []constraint.Constraint
if service.Spec.Task.Placement != nil && len(service.Spec.Task.Placement.Constraints) != 0 {
constraints, _ = constraint.Parse(service.Spec.Task.Placement.Constraints)
}
g.globalServices[service.ID] = globalService{
Service: service,
constraints: constraints,
}
} | [
"func",
"(",
"g",
"*",
"Orchestrator",
")",
"updateService",
"(",
"service",
"*",
"api",
".",
"Service",
")",
"{",
"var",
"constraints",
"[",
"]",
"constraint",
".",
"Constraint",
"\n",
"if",
"service",
".",
"Spec",
".",
"Task",
".",
"Placement",
"!=",
... | // updateService updates g.globalServices based on the current service value | [
"updateService",
"updates",
"g",
".",
"globalServices",
"based",
"on",
"the",
"current",
"service",
"value"
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/global/global.go#L362-L373 | train |
docker/swarmkit | manager/orchestrator/global/global.go | SlotTuple | func (g *Orchestrator) SlotTuple(t *api.Task) orchestrator.SlotTuple {
return orchestrator.SlotTuple{
ServiceID: t.ServiceID,
NodeID: t.NodeID,
}
} | go | func (g *Orchestrator) SlotTuple(t *api.Task) orchestrator.SlotTuple {
return orchestrator.SlotTuple{
ServiceID: t.ServiceID,
NodeID: t.NodeID,
}
} | [
"func",
"(",
"g",
"*",
"Orchestrator",
")",
"SlotTuple",
"(",
"t",
"*",
"api",
".",
"Task",
")",
"orchestrator",
".",
"SlotTuple",
"{",
"return",
"orchestrator",
".",
"SlotTuple",
"{",
"ServiceID",
":",
"t",
".",
"ServiceID",
",",
"NodeID",
":",
"t",
"... | // SlotTuple returns a slot tuple for the global service task. | [
"SlotTuple",
"returns",
"a",
"slot",
"tuple",
"for",
"the",
"global",
"service",
"task",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/orchestrator/global/global.go#L583-L588 | train |
docker/swarmkit | manager/scheduler/nodeinfo.go | removeTask | func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
oldTask, ok := nodeInfo.Tasks[t.ID]
if !ok {
return false
}
delete(nodeInfo.Tasks, t.ID)
if oldTask.DesiredState <= api.TaskStateRunning {
nodeInfo.ActiveTasksCount--
nodeInfo.ActiveTasksCountByService[t.ServiceID]--
}
if t.Endpoint != nil {
for _, port := range t.Endpoint.Ports {
if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 {
portSpec := hostPortSpec{protocol: port.Protocol, publishedPort: port.PublishedPort}
delete(nodeInfo.usedHostPorts, portSpec)
}
}
}
reservations := taskReservations(t.Spec)
resources := nodeInfo.AvailableResources
resources.MemoryBytes += reservations.MemoryBytes
resources.NanoCPUs += reservations.NanoCPUs
if nodeInfo.Description == nil || nodeInfo.Description.Resources == nil ||
nodeInfo.Description.Resources.Generic == nil {
return true
}
taskAssigned := t.AssignedGenericResources
nodeAvailableResources := &resources.Generic
nodeRes := nodeInfo.Description.Resources.Generic
genericresource.Reclaim(nodeAvailableResources, taskAssigned, nodeRes)
return true
} | go | func (nodeInfo *NodeInfo) removeTask(t *api.Task) bool {
oldTask, ok := nodeInfo.Tasks[t.ID]
if !ok {
return false
}
delete(nodeInfo.Tasks, t.ID)
if oldTask.DesiredState <= api.TaskStateRunning {
nodeInfo.ActiveTasksCount--
nodeInfo.ActiveTasksCountByService[t.ServiceID]--
}
if t.Endpoint != nil {
for _, port := range t.Endpoint.Ports {
if port.PublishMode == api.PublishModeHost && port.PublishedPort != 0 {
portSpec := hostPortSpec{protocol: port.Protocol, publishedPort: port.PublishedPort}
delete(nodeInfo.usedHostPorts, portSpec)
}
}
}
reservations := taskReservations(t.Spec)
resources := nodeInfo.AvailableResources
resources.MemoryBytes += reservations.MemoryBytes
resources.NanoCPUs += reservations.NanoCPUs
if nodeInfo.Description == nil || nodeInfo.Description.Resources == nil ||
nodeInfo.Description.Resources.Generic == nil {
return true
}
taskAssigned := t.AssignedGenericResources
nodeAvailableResources := &resources.Generic
nodeRes := nodeInfo.Description.Resources.Generic
genericresource.Reclaim(nodeAvailableResources, taskAssigned, nodeRes)
return true
} | [
"func",
"(",
"nodeInfo",
"*",
"NodeInfo",
")",
"removeTask",
"(",
"t",
"*",
"api",
".",
"Task",
")",
"bool",
"{",
"oldTask",
",",
"ok",
":=",
"nodeInfo",
".",
"Tasks",
"[",
"t",
".",
"ID",
"]",
"\n",
"if",
"!",
"ok",
"{",
"return",
"false",
"\n",... | // removeTask removes a task from nodeInfo if it's tracked there, and returns true
// if nodeInfo was modified. | [
"removeTask",
"removes",
"a",
"task",
"from",
"nodeInfo",
"if",
"it",
"s",
"tracked",
"there",
"and",
"returns",
"true",
"if",
"nodeInfo",
"was",
"modified",
"."
] | 59163bf75df38489d4a10392265d27156dc473c5 | https://github.com/docker/swarmkit/blob/59163bf75df38489d4a10392265d27156dc473c5/manager/scheduler/nodeinfo.go#L66-L104 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.