repo stringlengths 5 67 | path stringlengths 4 218 | func_name stringlengths 0 151 | original_string stringlengths 52 373k | language stringclasses 6 values | code stringlengths 52 373k | code_tokens listlengths 10 512 | docstring stringlengths 3 47.2k | docstring_tokens listlengths 3 234 | sha stringlengths 40 40 | url stringlengths 85 339 | partition stringclasses 3 values |
|---|---|---|---|---|---|---|---|---|---|---|---|
nats-io/nats-streaming-server | server/raft_log.go | StoreLog | func (r *raftLog) StoreLog(log *raft.Log) error {
return r.StoreLogs([]*raft.Log{log})
} | go | func (r *raftLog) StoreLog(log *raft.Log) error {
return r.StoreLogs([]*raft.Log{log})
} | [
"func",
"(",
"r",
"*",
"raftLog",
")",
"StoreLog",
"(",
"log",
"*",
"raft",
".",
"Log",
")",
"error",
"{",
"return",
"r",
".",
"StoreLogs",
"(",
"[",
"]",
"*",
"raft",
".",
"Log",
"{",
"log",
"}",
")",
"\n",
"}"
] | // StoreLog implements the LogStore interface | [
"StoreLog",
"implements",
"the",
"LogStore",
"interface"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_log.go#L234-L236 | train |
nats-io/nats-streaming-server | server/raft_log.go | StoreLogs | func (r *raftLog) StoreLogs(logs []*raft.Log) error {
r.Lock()
tx, err := r.conn.Begin(true)
if err != nil {
r.Unlock()
return err
}
bucket := tx.Bucket(logsBucket)
for _, log := range logs {
var (
key [8]byte
val []byte
)
binary.BigEndian.PutUint64(key[:], log.Index)
val, err = r.encodeRaftLog(log)
if err != nil {
break
}
err = bucket.Put(key[:], val)
if err != nil {
break
}
}
if err != nil {
tx.Rollback()
} else {
err = tx.Commit()
}
r.Unlock()
return err
} | go | func (r *raftLog) StoreLogs(logs []*raft.Log) error {
r.Lock()
tx, err := r.conn.Begin(true)
if err != nil {
r.Unlock()
return err
}
bucket := tx.Bucket(logsBucket)
for _, log := range logs {
var (
key [8]byte
val []byte
)
binary.BigEndian.PutUint64(key[:], log.Index)
val, err = r.encodeRaftLog(log)
if err != nil {
break
}
err = bucket.Put(key[:], val)
if err != nil {
break
}
}
if err != nil {
tx.Rollback()
} else {
err = tx.Commit()
}
r.Unlock()
return err
} | [
"func",
"(",
"r",
"*",
"raftLog",
")",
"StoreLogs",
"(",
"logs",
"[",
"]",
"*",
"raft",
".",
"Log",
")",
"error",
"{",
"r",
".",
"Lock",
"(",
")",
"\n",
"tx",
",",
"err",
":=",
"r",
".",
"conn",
".",
"Begin",
"(",
"true",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"Unlock",
"(",
")",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"bucket",
":=",
"tx",
".",
"Bucket",
"(",
"logsBucket",
")",
"\n",
"for",
"_",
",",
"log",
":=",
"range",
"logs",
"{",
"var",
"(",
"key",
"[",
"8",
"]",
"byte",
"\n",
"val",
"[",
"]",
"byte",
"\n",
")",
"\n",
"binary",
".",
"BigEndian",
".",
"PutUint64",
"(",
"key",
"[",
":",
"]",
",",
"log",
".",
"Index",
")",
"\n",
"val",
",",
"err",
"=",
"r",
".",
"encodeRaftLog",
"(",
"log",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"break",
"\n",
"}",
"\n",
"err",
"=",
"bucket",
".",
"Put",
"(",
"key",
"[",
":",
"]",
",",
"val",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"tx",
".",
"Rollback",
"(",
")",
"\n",
"}",
"else",
"{",
"err",
"=",
"tx",
".",
"Commit",
"(",
")",
"\n",
"}",
"\n",
"r",
".",
"Unlock",
"(",
")",
"\n",
"return",
"err",
"\n",
"}"
] | // StoreLogs implements the LogStore interface | [
"StoreLogs",
"implements",
"the",
"LogStore",
"interface"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_log.go#L239-L269 | train |
nats-io/nats-streaming-server | server/raft_log.go | DeleteRange | func (r *raftLog) DeleteRange(min, max uint64) (retErr error) {
r.Lock()
defer r.Unlock()
start := time.Now()
r.log.Noticef("Deleting raft logs from %v to %v", min, max)
err := r.deleteRange(min, max)
dur := time.Since(start)
durTxt := fmt.Sprintf("Deletion took %v", dur)
if dur > 2*time.Second {
r.log.Errorf(durTxt)
} else {
r.log.Noticef(durTxt)
}
return err
} | go | func (r *raftLog) DeleteRange(min, max uint64) (retErr error) {
r.Lock()
defer r.Unlock()
start := time.Now()
r.log.Noticef("Deleting raft logs from %v to %v", min, max)
err := r.deleteRange(min, max)
dur := time.Since(start)
durTxt := fmt.Sprintf("Deletion took %v", dur)
if dur > 2*time.Second {
r.log.Errorf(durTxt)
} else {
r.log.Noticef(durTxt)
}
return err
} | [
"func",
"(",
"r",
"*",
"raftLog",
")",
"DeleteRange",
"(",
"min",
",",
"max",
"uint64",
")",
"(",
"retErr",
"error",
")",
"{",
"r",
".",
"Lock",
"(",
")",
"\n",
"defer",
"r",
".",
"Unlock",
"(",
")",
"\n",
"start",
":=",
"time",
".",
"Now",
"(",
")",
"\n",
"r",
".",
"log",
".",
"Noticef",
"(",
"\"Deleting raft logs from %v to %v\"",
",",
"min",
",",
"max",
")",
"\n",
"err",
":=",
"r",
".",
"deleteRange",
"(",
"min",
",",
"max",
")",
"\n",
"dur",
":=",
"time",
".",
"Since",
"(",
"start",
")",
"\n",
"durTxt",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"Deletion took %v\"",
",",
"dur",
")",
"\n",
"if",
"dur",
">",
"2",
"*",
"time",
".",
"Second",
"{",
"r",
".",
"log",
".",
"Errorf",
"(",
"durTxt",
")",
"\n",
"}",
"else",
"{",
"r",
".",
"log",
".",
"Noticef",
"(",
"durTxt",
")",
"\n",
"}",
"\n",
"return",
"err",
"\n",
"}"
] | // DeleteRange implements the LogStore interface | [
"DeleteRange",
"implements",
"the",
"LogStore",
"interface"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_log.go#L272-L287 | train |
nats-io/nats-streaming-server | server/raft_log.go | Set | func (r *raftLog) Set(k, v []byte) error {
r.Lock()
tx, err := r.conn.Begin(true)
if err != nil {
r.Unlock()
return err
}
bucket := tx.Bucket(confBucket)
err = bucket.Put(k, v)
if err != nil {
tx.Rollback()
} else {
err = tx.Commit()
}
r.Unlock()
return err
} | go | func (r *raftLog) Set(k, v []byte) error {
r.Lock()
tx, err := r.conn.Begin(true)
if err != nil {
r.Unlock()
return err
}
bucket := tx.Bucket(confBucket)
err = bucket.Put(k, v)
if err != nil {
tx.Rollback()
} else {
err = tx.Commit()
}
r.Unlock()
return err
} | [
"func",
"(",
"r",
"*",
"raftLog",
")",
"Set",
"(",
"k",
",",
"v",
"[",
"]",
"byte",
")",
"error",
"{",
"r",
".",
"Lock",
"(",
")",
"\n",
"tx",
",",
"err",
":=",
"r",
".",
"conn",
".",
"Begin",
"(",
"true",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"Unlock",
"(",
")",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"bucket",
":=",
"tx",
".",
"Bucket",
"(",
"confBucket",
")",
"\n",
"err",
"=",
"bucket",
".",
"Put",
"(",
"k",
",",
"v",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"tx",
".",
"Rollback",
"(",
")",
"\n",
"}",
"else",
"{",
"err",
"=",
"tx",
".",
"Commit",
"(",
")",
"\n",
"}",
"\n",
"r",
".",
"Unlock",
"(",
")",
"\n",
"return",
"err",
"\n",
"}"
] | // Set implements the Stable interface | [
"Set",
"implements",
"the",
"Stable",
"interface"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_log.go#L314-L330 | train |
nats-io/nats-streaming-server | server/raft_log.go | Get | func (r *raftLog) Get(k []byte) ([]byte, error) {
r.RLock()
tx, err := r.conn.Begin(false)
if err != nil {
r.RUnlock()
return nil, err
}
var v []byte
bucket := tx.Bucket(confBucket)
val := bucket.Get(k)
if val == nil {
err = errKeyNotFound
} else {
// Make a copy
v = append([]byte(nil), val...)
}
tx.Rollback()
r.RUnlock()
return v, err
} | go | func (r *raftLog) Get(k []byte) ([]byte, error) {
r.RLock()
tx, err := r.conn.Begin(false)
if err != nil {
r.RUnlock()
return nil, err
}
var v []byte
bucket := tx.Bucket(confBucket)
val := bucket.Get(k)
if val == nil {
err = errKeyNotFound
} else {
// Make a copy
v = append([]byte(nil), val...)
}
tx.Rollback()
r.RUnlock()
return v, err
} | [
"func",
"(",
"r",
"*",
"raftLog",
")",
"Get",
"(",
"k",
"[",
"]",
"byte",
")",
"(",
"[",
"]",
"byte",
",",
"error",
")",
"{",
"r",
".",
"RLock",
"(",
")",
"\n",
"tx",
",",
"err",
":=",
"r",
".",
"conn",
".",
"Begin",
"(",
"false",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"r",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"var",
"v",
"[",
"]",
"byte",
"\n",
"bucket",
":=",
"tx",
".",
"Bucket",
"(",
"confBucket",
")",
"\n",
"val",
":=",
"bucket",
".",
"Get",
"(",
"k",
")",
"\n",
"if",
"val",
"==",
"nil",
"{",
"err",
"=",
"errKeyNotFound",
"\n",
"}",
"else",
"{",
"v",
"=",
"append",
"(",
"[",
"]",
"byte",
"(",
"nil",
")",
",",
"val",
"...",
")",
"\n",
"}",
"\n",
"tx",
".",
"Rollback",
"(",
")",
"\n",
"r",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"v",
",",
"err",
"\n",
"}"
] | // Get implements the Stable interface | [
"Get",
"implements",
"the",
"Stable",
"interface"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_log.go#L333-L352 | train |
nats-io/nats-streaming-server | server/raft_log.go | SetUint64 | func (r *raftLog) SetUint64(k []byte, v uint64) error {
var vbytes [8]byte
binary.BigEndian.PutUint64(vbytes[:], v)
err := r.Set(k, vbytes[:])
return err
} | go | func (r *raftLog) SetUint64(k []byte, v uint64) error {
var vbytes [8]byte
binary.BigEndian.PutUint64(vbytes[:], v)
err := r.Set(k, vbytes[:])
return err
} | [
"func",
"(",
"r",
"*",
"raftLog",
")",
"SetUint64",
"(",
"k",
"[",
"]",
"byte",
",",
"v",
"uint64",
")",
"error",
"{",
"var",
"vbytes",
"[",
"8",
"]",
"byte",
"\n",
"binary",
".",
"BigEndian",
".",
"PutUint64",
"(",
"vbytes",
"[",
":",
"]",
",",
"v",
")",
"\n",
"err",
":=",
"r",
".",
"Set",
"(",
"k",
",",
"vbytes",
"[",
":",
"]",
")",
"\n",
"return",
"err",
"\n",
"}"
] | // SetUint64 implements the Stable interface | [
"SetUint64",
"implements",
"the",
"Stable",
"interface"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_log.go#L355-L360 | train |
nats-io/nats-streaming-server | server/raft_log.go | GetUint64 | func (r *raftLog) GetUint64(k []byte) (uint64, error) {
var v uint64
vbytes, err := r.Get(k)
if err == nil {
v = binary.BigEndian.Uint64(vbytes)
}
return v, err
} | go | func (r *raftLog) GetUint64(k []byte) (uint64, error) {
var v uint64
vbytes, err := r.Get(k)
if err == nil {
v = binary.BigEndian.Uint64(vbytes)
}
return v, err
} | [
"func",
"(",
"r",
"*",
"raftLog",
")",
"GetUint64",
"(",
"k",
"[",
"]",
"byte",
")",
"(",
"uint64",
",",
"error",
")",
"{",
"var",
"v",
"uint64",
"\n",
"vbytes",
",",
"err",
":=",
"r",
".",
"Get",
"(",
"k",
")",
"\n",
"if",
"err",
"==",
"nil",
"{",
"v",
"=",
"binary",
".",
"BigEndian",
".",
"Uint64",
"(",
"vbytes",
")",
"\n",
"}",
"\n",
"return",
"v",
",",
"err",
"\n",
"}"
] | // GetUint64 implements the Stable interface | [
"GetUint64",
"implements",
"the",
"Stable",
"interface"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/raft_log.go#L363-L370 | train |
nats-io/nats-streaming-server | server/snapshot.go | Restore | func (r *raftFSM) Restore(snapshot io.ReadCloser) (retErr error) {
s := r.server
defer snapshot.Close()
r.Lock()
defer r.Unlock()
// This function may be invoked directly from raft.NewRaft() when
// the node is initialized and if there were exisiting local snapshots,
// or later, when catching up with a leader. We behave differently
// depending on the situation. So we need to know if we are called
// from NewRaft().
//
// To do so, we first look at the number of local snapshots before
// calling NewRaft(). If the number is > 0, it means that Raft will
// call us within NewRaft(). Raft will restore the latest snapshot
// first, and only in case of Restore() returning an error will move
// to the next (earliest) one. When there are none and Restore() still
// returns an error raft.NewRaft() will return an error.
//
// So on error we decrement the number of snapshots, on success we set
// it to 0. This means that next time Restore() is invoked, we know it
// is restoring from a leader, not from the local snapshots.
inNewRaftCall := r.snapshotsOnInit != 0
if inNewRaftCall {
defer func() {
if retErr != nil {
r.snapshotsOnInit--
} else {
r.snapshotsOnInit = 0
}
}()
} else {
s.log.Noticef("restoring from snapshot")
defer s.log.Noticef("done restoring from snapshot")
}
// We need to drop current state. The server will recover from snapshot
// and all newer Raft entry logs (basically the entire state is being
// reconstructed from this point on).
for _, c := range s.channels.getAll() {
for _, sub := range c.ss.getAllSubs() {
sub.RLock()
clientID := sub.ClientID
sub.RUnlock()
if err := s.unsubscribeSub(c, clientID, "unsub", sub, false, false); err != nil {
return err
}
}
c.store.Subs.Flush()
}
for clientID := range s.clients.getClients() {
if _, err := s.clients.unregister(clientID); err != nil {
return err
}
}
sizeBuf := make([]byte, 4)
// Read the snapshot size.
if _, err := io.ReadFull(snapshot, sizeBuf); err != nil {
if err == io.EOF {
return nil
}
return err
}
// Read the snapshot.
size := util.ByteOrder.Uint32(sizeBuf)
buf := make([]byte, size)
if _, err := io.ReadFull(snapshot, buf); err != nil {
return err
}
serverSnap := &spb.RaftSnapshot{}
if err := serverSnap.Unmarshal(buf); err != nil {
panic(err)
}
if err := r.restoreClientsFromSnapshot(serverSnap); err != nil {
return err
}
return r.restoreChannelsFromSnapshot(serverSnap, inNewRaftCall)
} | go | func (r *raftFSM) Restore(snapshot io.ReadCloser) (retErr error) {
s := r.server
defer snapshot.Close()
r.Lock()
defer r.Unlock()
// This function may be invoked directly from raft.NewRaft() when
// the node is initialized and if there were exisiting local snapshots,
// or later, when catching up with a leader. We behave differently
// depending on the situation. So we need to know if we are called
// from NewRaft().
//
// To do so, we first look at the number of local snapshots before
// calling NewRaft(). If the number is > 0, it means that Raft will
// call us within NewRaft(). Raft will restore the latest snapshot
// first, and only in case of Restore() returning an error will move
// to the next (earliest) one. When there are none and Restore() still
// returns an error raft.NewRaft() will return an error.
//
// So on error we decrement the number of snapshots, on success we set
// it to 0. This means that next time Restore() is invoked, we know it
// is restoring from a leader, not from the local snapshots.
inNewRaftCall := r.snapshotsOnInit != 0
if inNewRaftCall {
defer func() {
if retErr != nil {
r.snapshotsOnInit--
} else {
r.snapshotsOnInit = 0
}
}()
} else {
s.log.Noticef("restoring from snapshot")
defer s.log.Noticef("done restoring from snapshot")
}
// We need to drop current state. The server will recover from snapshot
// and all newer Raft entry logs (basically the entire state is being
// reconstructed from this point on).
for _, c := range s.channels.getAll() {
for _, sub := range c.ss.getAllSubs() {
sub.RLock()
clientID := sub.ClientID
sub.RUnlock()
if err := s.unsubscribeSub(c, clientID, "unsub", sub, false, false); err != nil {
return err
}
}
c.store.Subs.Flush()
}
for clientID := range s.clients.getClients() {
if _, err := s.clients.unregister(clientID); err != nil {
return err
}
}
sizeBuf := make([]byte, 4)
// Read the snapshot size.
if _, err := io.ReadFull(snapshot, sizeBuf); err != nil {
if err == io.EOF {
return nil
}
return err
}
// Read the snapshot.
size := util.ByteOrder.Uint32(sizeBuf)
buf := make([]byte, size)
if _, err := io.ReadFull(snapshot, buf); err != nil {
return err
}
serverSnap := &spb.RaftSnapshot{}
if err := serverSnap.Unmarshal(buf); err != nil {
panic(err)
}
if err := r.restoreClientsFromSnapshot(serverSnap); err != nil {
return err
}
return r.restoreChannelsFromSnapshot(serverSnap, inNewRaftCall)
} | [
"func",
"(",
"r",
"*",
"raftFSM",
")",
"Restore",
"(",
"snapshot",
"io",
".",
"ReadCloser",
")",
"(",
"retErr",
"error",
")",
"{",
"s",
":=",
"r",
".",
"server",
"\n",
"defer",
"snapshot",
".",
"Close",
"(",
")",
"\n",
"r",
".",
"Lock",
"(",
")",
"\n",
"defer",
"r",
".",
"Unlock",
"(",
")",
"\n",
"inNewRaftCall",
":=",
"r",
".",
"snapshotsOnInit",
"!=",
"0",
"\n",
"if",
"inNewRaftCall",
"{",
"defer",
"func",
"(",
")",
"{",
"if",
"retErr",
"!=",
"nil",
"{",
"r",
".",
"snapshotsOnInit",
"--",
"\n",
"}",
"else",
"{",
"r",
".",
"snapshotsOnInit",
"=",
"0",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"log",
".",
"Noticef",
"(",
"\"restoring from snapshot\"",
")",
"\n",
"defer",
"s",
".",
"log",
".",
"Noticef",
"(",
"\"done restoring from snapshot\"",
")",
"\n",
"}",
"\n",
"for",
"_",
",",
"c",
":=",
"range",
"s",
".",
"channels",
".",
"getAll",
"(",
")",
"{",
"for",
"_",
",",
"sub",
":=",
"range",
"c",
".",
"ss",
".",
"getAllSubs",
"(",
")",
"{",
"sub",
".",
"RLock",
"(",
")",
"\n",
"clientID",
":=",
"sub",
".",
"ClientID",
"\n",
"sub",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"err",
":=",
"s",
".",
"unsubscribeSub",
"(",
"c",
",",
"clientID",
",",
"\"unsub\"",
",",
"sub",
",",
"false",
",",
"false",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"c",
".",
"store",
".",
"Subs",
".",
"Flush",
"(",
")",
"\n",
"}",
"\n",
"for",
"clientID",
":=",
"range",
"s",
".",
"clients",
".",
"getClients",
"(",
")",
"{",
"if",
"_",
",",
"err",
":=",
"s",
".",
"clients",
".",
"unregister",
"(",
"clientID",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"sizeBuf",
":=",
"make",
"(",
"[",
"]",
"byte",
",",
"4",
")",
"\n",
"if",
"_",
",",
"err",
":=",
"io",
".",
"ReadFull",
"(",
"snapshot",
",",
"sizeBuf",
")",
";",
"err",
"!=",
"nil",
"{",
"if",
"err",
"==",
"io",
".",
"EOF",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"size",
":=",
"util",
".",
"ByteOrder",
".",
"Uint32",
"(",
"sizeBuf",
")",
"\n",
"buf",
":=",
"make",
"(",
"[",
"]",
"byte",
",",
"size",
")",
"\n",
"if",
"_",
",",
"err",
":=",
"io",
".",
"ReadFull",
"(",
"snapshot",
",",
"buf",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"serverSnap",
":=",
"&",
"spb",
".",
"RaftSnapshot",
"{",
"}",
"\n",
"if",
"err",
":=",
"serverSnap",
".",
"Unmarshal",
"(",
"buf",
")",
";",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"r",
".",
"restoreClientsFromSnapshot",
"(",
"serverSnap",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"return",
"r",
".",
"restoreChannelsFromSnapshot",
"(",
"serverSnap",
",",
"inNewRaftCall",
")",
"\n",
"}"
] | // Restore is used to restore an FSM from a snapshot. It is not called
// concurrently with any other command. The FSM must discard all previous
// state. | [
"Restore",
"is",
"used",
"to",
"restore",
"an",
"FSM",
"from",
"a",
"snapshot",
".",
"It",
"is",
"not",
"called",
"concurrently",
"with",
"any",
"other",
"command",
".",
"The",
"FSM",
"must",
"discard",
"all",
"previous",
"state",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/snapshot.go#L225-L305 | train |
nats-io/nats-streaming-server | stores/raftstore.go | NewRaftStore | func NewRaftStore(log logger.Logger, s Store, limits *StoreLimits) *RaftStore {
return &RaftStore{Store: s, log: log}
} | go | func NewRaftStore(log logger.Logger, s Store, limits *StoreLimits) *RaftStore {
return &RaftStore{Store: s, log: log}
} | [
"func",
"NewRaftStore",
"(",
"log",
"logger",
".",
"Logger",
",",
"s",
"Store",
",",
"limits",
"*",
"StoreLimits",
")",
"*",
"RaftStore",
"{",
"return",
"&",
"RaftStore",
"{",
"Store",
":",
"s",
",",
"log",
":",
"log",
"}",
"\n",
"}"
] | // NewRaftStore returns an instarce of a RaftStore | [
"NewRaftStore",
"returns",
"an",
"instarce",
"of",
"a",
"RaftStore"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/raftstore.go#L40-L42 | train |
nats-io/nats-streaming-server | stores/common.go | setLimits | func (gs *genericStore) setLimits(limits *StoreLimits) error {
// Make a copy.
gs.limits = limits.Clone()
// Build will validate and apply inheritance if no error.
if err := gs.limits.Build(); err != nil {
return err
}
// We don't need the PerChannel map and the sublist. So replace
// the map with the sublist instead.
gs.sublist = util.NewSublist()
for key, val := range gs.limits.PerChannel {
// val is already a copy of the original limits.PerChannel[key],
// so don't need to make a copy again, we own this.
gs.sublist.Insert(key, val)
}
// Get rid of the map now.
gs.limits.PerChannel = nil
return nil
} | go | func (gs *genericStore) setLimits(limits *StoreLimits) error {
// Make a copy.
gs.limits = limits.Clone()
// Build will validate and apply inheritance if no error.
if err := gs.limits.Build(); err != nil {
return err
}
// We don't need the PerChannel map and the sublist. So replace
// the map with the sublist instead.
gs.sublist = util.NewSublist()
for key, val := range gs.limits.PerChannel {
// val is already a copy of the original limits.PerChannel[key],
// so don't need to make a copy again, we own this.
gs.sublist.Insert(key, val)
}
// Get rid of the map now.
gs.limits.PerChannel = nil
return nil
} | [
"func",
"(",
"gs",
"*",
"genericStore",
")",
"setLimits",
"(",
"limits",
"*",
"StoreLimits",
")",
"error",
"{",
"gs",
".",
"limits",
"=",
"limits",
".",
"Clone",
"(",
")",
"\n",
"if",
"err",
":=",
"gs",
".",
"limits",
".",
"Build",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"gs",
".",
"sublist",
"=",
"util",
".",
"NewSublist",
"(",
")",
"\n",
"for",
"key",
",",
"val",
":=",
"range",
"gs",
".",
"limits",
".",
"PerChannel",
"{",
"gs",
".",
"sublist",
".",
"Insert",
"(",
"key",
",",
"val",
")",
"\n",
"}",
"\n",
"gs",
".",
"limits",
".",
"PerChannel",
"=",
"nil",
"\n",
"return",
"nil",
"\n",
"}"
] | // setLimits makes a copy of the given StoreLimits,
// validates the limits and if ok, applies the inheritance. | [
"setLimits",
"makes",
"a",
"copy",
"of",
"the",
"given",
"StoreLimits",
"validates",
"the",
"limits",
"and",
"if",
"ok",
"applies",
"the",
"inheritance",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L115-L133 | train |
nats-io/nats-streaming-server | stores/common.go | getChannelLimits | func (gs *genericStore) getChannelLimits(channel string) *ChannelLimits {
r := gs.sublist.Match(channel)
if len(r) == 0 {
// If there is no match, that means we need to use the global limits.
return &gs.limits.ChannelLimits
}
// If there is a match, use the limits from the last element because
// we know that the returned array is ordered from widest to narrowest,
// and the only literal that there is would be the channel we are
// looking up.
return r[len(r)-1].(*ChannelLimits)
} | go | func (gs *genericStore) getChannelLimits(channel string) *ChannelLimits {
r := gs.sublist.Match(channel)
if len(r) == 0 {
// If there is no match, that means we need to use the global limits.
return &gs.limits.ChannelLimits
}
// If there is a match, use the limits from the last element because
// we know that the returned array is ordered from widest to narrowest,
// and the only literal that there is would be the channel we are
// looking up.
return r[len(r)-1].(*ChannelLimits)
} | [
"func",
"(",
"gs",
"*",
"genericStore",
")",
"getChannelLimits",
"(",
"channel",
"string",
")",
"*",
"ChannelLimits",
"{",
"r",
":=",
"gs",
".",
"sublist",
".",
"Match",
"(",
"channel",
")",
"\n",
"if",
"len",
"(",
"r",
")",
"==",
"0",
"{",
"return",
"&",
"gs",
".",
"limits",
".",
"ChannelLimits",
"\n",
"}",
"\n",
"return",
"r",
"[",
"len",
"(",
"r",
")",
"-",
"1",
"]",
".",
"(",
"*",
"ChannelLimits",
")",
"\n",
"}"
] | // Returns the appropriate limits for this channel based on inheritance.
// The channel is assumed to be a literal, and the store lock held on entry. | [
"Returns",
"the",
"appropriate",
"limits",
"for",
"this",
"channel",
"based",
"on",
"inheritance",
".",
"The",
"channel",
"is",
"assumed",
"to",
"be",
"a",
"literal",
"and",
"the",
"store",
"lock",
"held",
"on",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L137-L148 | train |
nats-io/nats-streaming-server | stores/common.go | GetChannelLimits | func (gs *genericStore) GetChannelLimits(channel string) *ChannelLimits {
gs.RLock()
defer gs.RUnlock()
c := gs.channels[channel]
if c == nil {
return nil
}
// Return a copy
cl := *gs.getChannelLimits(channel)
return &cl
} | go | func (gs *genericStore) GetChannelLimits(channel string) *ChannelLimits {
gs.RLock()
defer gs.RUnlock()
c := gs.channels[channel]
if c == nil {
return nil
}
// Return a copy
cl := *gs.getChannelLimits(channel)
return &cl
} | [
"func",
"(",
"gs",
"*",
"genericStore",
")",
"GetChannelLimits",
"(",
"channel",
"string",
")",
"*",
"ChannelLimits",
"{",
"gs",
".",
"RLock",
"(",
")",
"\n",
"defer",
"gs",
".",
"RUnlock",
"(",
")",
"\n",
"c",
":=",
"gs",
".",
"channels",
"[",
"channel",
"]",
"\n",
"if",
"c",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"cl",
":=",
"*",
"gs",
".",
"getChannelLimits",
"(",
"channel",
")",
"\n",
"return",
"&",
"cl",
"\n",
"}"
] | // GetChannelLimits implements the Store interface | [
"GetChannelLimits",
"implements",
"the",
"Store",
"interface"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L151-L161 | train |
nats-io/nats-streaming-server | stores/common.go | SetLimits | func (gs *genericStore) SetLimits(limits *StoreLimits) error {
gs.Lock()
err := gs.setLimits(limits)
gs.Unlock()
return err
} | go | func (gs *genericStore) SetLimits(limits *StoreLimits) error {
gs.Lock()
err := gs.setLimits(limits)
gs.Unlock()
return err
} | [
"func",
"(",
"gs",
"*",
"genericStore",
")",
"SetLimits",
"(",
"limits",
"*",
"StoreLimits",
")",
"error",
"{",
"gs",
".",
"Lock",
"(",
")",
"\n",
"err",
":=",
"gs",
".",
"setLimits",
"(",
"limits",
")",
"\n",
"gs",
".",
"Unlock",
"(",
")",
"\n",
"return",
"err",
"\n",
"}"
] | // SetLimits sets limits for this store | [
"SetLimits",
"sets",
"limits",
"for",
"this",
"store"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L164-L169 | train |
nats-io/nats-streaming-server | stores/common.go | canAddChannel | func (gs *genericStore) canAddChannel(name string) error {
if gs.channels[name] != nil {
return ErrAlreadyExists
}
if gs.limits.MaxChannels > 0 && len(gs.channels) >= gs.limits.MaxChannels {
return ErrTooManyChannels
}
return nil
} | go | func (gs *genericStore) canAddChannel(name string) error {
if gs.channels[name] != nil {
return ErrAlreadyExists
}
if gs.limits.MaxChannels > 0 && len(gs.channels) >= gs.limits.MaxChannels {
return ErrTooManyChannels
}
return nil
} | [
"func",
"(",
"gs",
"*",
"genericStore",
")",
"canAddChannel",
"(",
"name",
"string",
")",
"error",
"{",
"if",
"gs",
".",
"channels",
"[",
"name",
"]",
"!=",
"nil",
"{",
"return",
"ErrAlreadyExists",
"\n",
"}",
"\n",
"if",
"gs",
".",
"limits",
".",
"MaxChannels",
">",
"0",
"&&",
"len",
"(",
"gs",
".",
"channels",
")",
">=",
"gs",
".",
"limits",
".",
"MaxChannels",
"{",
"return",
"ErrTooManyChannels",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // canAddChannel returns true if the current number of channels is below the limit.
// If a channel named `channelName` alreadt exists, an error is returned.
// Store lock is assumed to be locked. | [
"canAddChannel",
"returns",
"true",
"if",
"the",
"current",
"number",
"of",
"channels",
"is",
"below",
"the",
"limit",
".",
"If",
"a",
"channel",
"named",
"channelName",
"alreadt",
"exists",
"an",
"error",
"is",
"returned",
".",
"Store",
"lock",
"is",
"assumed",
"to",
"be",
"locked",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L203-L211 | train |
nats-io/nats-streaming-server | stores/common.go | Close | func (gs *genericStore) Close() error {
gs.Lock()
defer gs.Unlock()
if gs.closed {
return nil
}
gs.closed = true
return gs.close()
} | go | func (gs *genericStore) Close() error {
gs.Lock()
defer gs.Unlock()
if gs.closed {
return nil
}
gs.closed = true
return gs.close()
} | [
"func",
"(",
"gs",
"*",
"genericStore",
")",
"Close",
"(",
")",
"error",
"{",
"gs",
".",
"Lock",
"(",
")",
"\n",
"defer",
"gs",
".",
"Unlock",
"(",
")",
"\n",
"if",
"gs",
".",
"closed",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"gs",
".",
"closed",
"=",
"true",
"\n",
"return",
"gs",
".",
"close",
"(",
")",
"\n",
"}"
] | // Close closes all stores | [
"Close",
"closes",
"all",
"stores"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L224-L232 | train |
nats-io/nats-streaming-server | stores/common.go | close | func (gs *genericStore) close() error {
var err error
var lerr error
for _, cs := range gs.channels {
lerr = cs.Subs.Close()
if lerr != nil && err == nil {
err = lerr
}
lerr = cs.Msgs.Close()
if lerr != nil && err == nil {
err = lerr
}
}
return err
} | go | func (gs *genericStore) close() error {
var err error
var lerr error
for _, cs := range gs.channels {
lerr = cs.Subs.Close()
if lerr != nil && err == nil {
err = lerr
}
lerr = cs.Msgs.Close()
if lerr != nil && err == nil {
err = lerr
}
}
return err
} | [
"func",
"(",
"gs",
"*",
"genericStore",
")",
"close",
"(",
")",
"error",
"{",
"var",
"err",
"error",
"\n",
"var",
"lerr",
"error",
"\n",
"for",
"_",
",",
"cs",
":=",
"range",
"gs",
".",
"channels",
"{",
"lerr",
"=",
"cs",
".",
"Subs",
".",
"Close",
"(",
")",
"\n",
"if",
"lerr",
"!=",
"nil",
"&&",
"err",
"==",
"nil",
"{",
"err",
"=",
"lerr",
"\n",
"}",
"\n",
"lerr",
"=",
"cs",
".",
"Msgs",
".",
"Close",
"(",
")",
"\n",
"if",
"lerr",
"!=",
"nil",
"&&",
"err",
"==",
"nil",
"{",
"err",
"=",
"lerr",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"err",
"\n",
"}"
] | // close closes all stores. Store lock is assumed held on entry | [
"close",
"closes",
"all",
"stores",
".",
"Store",
"lock",
"is",
"assumed",
"held",
"on",
"entry"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L235-L250 | train |
nats-io/nats-streaming-server | stores/common.go | State | func (gms *genericMsgStore) State() (numMessages int, byteSize uint64, err error) {
gms.RLock()
c, b := gms.totalCount, gms.totalBytes
gms.RUnlock()
return c, b, nil
} | go | func (gms *genericMsgStore) State() (numMessages int, byteSize uint64, err error) {
gms.RLock()
c, b := gms.totalCount, gms.totalBytes
gms.RUnlock()
return c, b, nil
} | [
"func",
"(",
"gms",
"*",
"genericMsgStore",
")",
"State",
"(",
")",
"(",
"numMessages",
"int",
",",
"byteSize",
"uint64",
",",
"err",
"error",
")",
"{",
"gms",
".",
"RLock",
"(",
")",
"\n",
"c",
",",
"b",
":=",
"gms",
".",
"totalCount",
",",
"gms",
".",
"totalBytes",
"\n",
"gms",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"c",
",",
"b",
",",
"nil",
"\n",
"}"
] | // State returns some statistics related to this store | [
"State",
"returns",
"some",
"statistics",
"related",
"to",
"this",
"store"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L264-L269 | train |
nats-io/nats-streaming-server | stores/common.go | FirstSequence | func (gms *genericMsgStore) FirstSequence() (uint64, error) {
gms.RLock()
first := gms.first
gms.RUnlock()
return first, nil
} | go | func (gms *genericMsgStore) FirstSequence() (uint64, error) {
gms.RLock()
first := gms.first
gms.RUnlock()
return first, nil
} | [
"func",
"(",
"gms",
"*",
"genericMsgStore",
")",
"FirstSequence",
"(",
")",
"(",
"uint64",
",",
"error",
")",
"{",
"gms",
".",
"RLock",
"(",
")",
"\n",
"first",
":=",
"gms",
".",
"first",
"\n",
"gms",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"first",
",",
"nil",
"\n",
"}"
] | // FirstSequence returns sequence for first message stored. | [
"FirstSequence",
"returns",
"sequence",
"for",
"first",
"message",
"stored",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L278-L283 | train |
nats-io/nats-streaming-server | stores/common.go | LastSequence | func (gms *genericMsgStore) LastSequence() (uint64, error) {
gms.RLock()
last := gms.last
gms.RUnlock()
return last, nil
} | go | func (gms *genericMsgStore) LastSequence() (uint64, error) {
gms.RLock()
last := gms.last
gms.RUnlock()
return last, nil
} | [
"func",
"(",
"gms",
"*",
"genericMsgStore",
")",
"LastSequence",
"(",
")",
"(",
"uint64",
",",
"error",
")",
"{",
"gms",
".",
"RLock",
"(",
")",
"\n",
"last",
":=",
"gms",
".",
"last",
"\n",
"gms",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"last",
",",
"nil",
"\n",
"}"
] | // LastSequence returns sequence for last message stored. | [
"LastSequence",
"returns",
"sequence",
"for",
"last",
"message",
"stored",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L286-L291 | train |
nats-io/nats-streaming-server | stores/common.go | FirstAndLastSequence | func (gms *genericMsgStore) FirstAndLastSequence() (uint64, uint64, error) {
gms.RLock()
first, last := gms.first, gms.last
gms.RUnlock()
return first, last, nil
} | go | func (gms *genericMsgStore) FirstAndLastSequence() (uint64, uint64, error) {
gms.RLock()
first, last := gms.first, gms.last
gms.RUnlock()
return first, last, nil
} | [
"func",
"(",
"gms",
"*",
"genericMsgStore",
")",
"FirstAndLastSequence",
"(",
")",
"(",
"uint64",
",",
"uint64",
",",
"error",
")",
"{",
"gms",
".",
"RLock",
"(",
")",
"\n",
"first",
",",
"last",
":=",
"gms",
".",
"first",
",",
"gms",
".",
"last",
"\n",
"gms",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"first",
",",
"last",
",",
"nil",
"\n",
"}"
] | // FirstAndLastSequence returns sequences for the first and last messages stored. | [
"FirstAndLastSequence",
"returns",
"sequences",
"for",
"the",
"first",
"and",
"last",
"messages",
"stored",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L294-L299 | train |
nats-io/nats-streaming-server | stores/common.go | CreateSub | func (gss *genericSubStore) CreateSub(sub *spb.SubState) error {
gss.Lock()
err := gss.createSub(sub)
gss.Unlock()
return err
} | go | func (gss *genericSubStore) CreateSub(sub *spb.SubState) error {
gss.Lock()
err := gss.createSub(sub)
gss.Unlock()
return err
} | [
"func",
"(",
"gss",
"*",
"genericSubStore",
")",
"CreateSub",
"(",
"sub",
"*",
"spb",
".",
"SubState",
")",
"error",
"{",
"gss",
".",
"Lock",
"(",
")",
"\n",
"err",
":=",
"gss",
".",
"createSub",
"(",
"sub",
")",
"\n",
"gss",
".",
"Unlock",
"(",
")",
"\n",
"return",
"err",
"\n",
"}"
] | // CreateSub records a new subscription represented by SubState. On success,
// it records the subscription's ID in SubState.ID. This ID is to be used
// by the other SubStore methods. | [
"CreateSub",
"records",
"a",
"new",
"subscription",
"represented",
"by",
"SubState",
".",
"On",
"success",
"it",
"records",
"the",
"subscription",
"s",
"ID",
"in",
"SubState",
".",
"ID",
".",
"This",
"ID",
"is",
"to",
"be",
"used",
"by",
"the",
"other",
"SubStore",
"methods",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L354-L359 | train |
nats-io/nats-streaming-server | stores/common.go | createSub | func (gss *genericSubStore) createSub(sub *spb.SubState) error {
if gss.limits.MaxSubscriptions > 0 && len(gss.subs) >= gss.limits.MaxSubscriptions {
return ErrTooManySubs
}
// Bump the max value before assigning it to the new subscription.
gss.maxSubID++
// This new subscription has the max value.
sub.ID = gss.maxSubID
// Store anything. Some implementations may replace with specific
// object.
gss.subs[sub.ID] = emptySub
return nil
} | go | func (gss *genericSubStore) createSub(sub *spb.SubState) error {
if gss.limits.MaxSubscriptions > 0 && len(gss.subs) >= gss.limits.MaxSubscriptions {
return ErrTooManySubs
}
// Bump the max value before assigning it to the new subscription.
gss.maxSubID++
// This new subscription has the max value.
sub.ID = gss.maxSubID
// Store anything. Some implementations may replace with specific
// object.
gss.subs[sub.ID] = emptySub
return nil
} | [
"func",
"(",
"gss",
"*",
"genericSubStore",
")",
"createSub",
"(",
"sub",
"*",
"spb",
".",
"SubState",
")",
"error",
"{",
"if",
"gss",
".",
"limits",
".",
"MaxSubscriptions",
">",
"0",
"&&",
"len",
"(",
"gss",
".",
"subs",
")",
">=",
"gss",
".",
"limits",
".",
"MaxSubscriptions",
"{",
"return",
"ErrTooManySubs",
"\n",
"}",
"\n",
"gss",
".",
"maxSubID",
"++",
"\n",
"sub",
".",
"ID",
"=",
"gss",
".",
"maxSubID",
"\n",
"gss",
".",
"subs",
"[",
"sub",
".",
"ID",
"]",
"=",
"emptySub",
"\n",
"return",
"nil",
"\n",
"}"
] | // createSub checks that the number of subscriptions is below the max
// and if so, assigns a new subscription ID and keep track of it in a map.
// Lock is assumed to be held on entry. | [
"createSub",
"checks",
"that",
"the",
"number",
"of",
"subscriptions",
"is",
"below",
"the",
"max",
"and",
"if",
"so",
"assigns",
"a",
"new",
"subscription",
"ID",
"and",
"keep",
"track",
"of",
"it",
"in",
"a",
"map",
".",
"Lock",
"is",
"assumed",
"to",
"be",
"held",
"on",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/stores/common.go#L369-L384 | train |
nats-io/nats-streaming-server | server/partitions.go | initPartitions | func (s *StanServer) initPartitions() error {
// The option says that the server should only use the pre-defined channels,
// but none was specified. Don't see the point in continuing...
if len(s.opts.StoreLimits.PerChannel) == 0 {
return ErrNoChannel
}
nc, err := s.createNatsClientConn("pc")
if err != nil {
return err
}
p := &partitions{
s: s,
nc: nc,
}
// Now that the connection is created, we need to set s.partitioning to cp
// so that server shutdown can properly close this connection.
s.partitions = p
p.createChannelsMapAndSublist(s.opts.StoreLimits.PerChannel)
p.sendListSubject = partitionsPrefix + "." + s.opts.ID
// Use the partitions' own connection for channels list requests
p.processChanSub, err = p.nc.Subscribe(p.sendListSubject, p.processChannelsListRequests)
if err != nil {
return fmt.Errorf("unable to subscribe: %v", err)
}
p.processChanSub.SetPendingLimits(-1, -1)
p.inboxSub, err = p.nc.SubscribeSync(nats.NewInbox())
if err != nil {
return fmt.Errorf("unable to subscribe: %v", err)
}
p.Lock()
// Set this before the first attempt so we don't miss any notification
// of a change in topology. Since we hold the lock, and even if there
// was a notification happening now, the callback will execute only
// after we are done with the initial check.
nc.SetDiscoveredServersHandler(p.topologyChanged)
// Now send our list and check if any server is complaining
// about having one channel in common.
if err := p.checkChannelsUniqueInCluster(); err != nil {
p.Unlock()
return err
}
p.Unlock()
return nil
} | go | func (s *StanServer) initPartitions() error {
// The option says that the server should only use the pre-defined channels,
// but none was specified. Don't see the point in continuing...
if len(s.opts.StoreLimits.PerChannel) == 0 {
return ErrNoChannel
}
nc, err := s.createNatsClientConn("pc")
if err != nil {
return err
}
p := &partitions{
s: s,
nc: nc,
}
// Now that the connection is created, we need to set s.partitioning to cp
// so that server shutdown can properly close this connection.
s.partitions = p
p.createChannelsMapAndSublist(s.opts.StoreLimits.PerChannel)
p.sendListSubject = partitionsPrefix + "." + s.opts.ID
// Use the partitions' own connection for channels list requests
p.processChanSub, err = p.nc.Subscribe(p.sendListSubject, p.processChannelsListRequests)
if err != nil {
return fmt.Errorf("unable to subscribe: %v", err)
}
p.processChanSub.SetPendingLimits(-1, -1)
p.inboxSub, err = p.nc.SubscribeSync(nats.NewInbox())
if err != nil {
return fmt.Errorf("unable to subscribe: %v", err)
}
p.Lock()
// Set this before the first attempt so we don't miss any notification
// of a change in topology. Since we hold the lock, and even if there
// was a notification happening now, the callback will execute only
// after we are done with the initial check.
nc.SetDiscoveredServersHandler(p.topologyChanged)
// Now send our list and check if any server is complaining
// about having one channel in common.
if err := p.checkChannelsUniqueInCluster(); err != nil {
p.Unlock()
return err
}
p.Unlock()
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"initPartitions",
"(",
")",
"error",
"{",
"if",
"len",
"(",
"s",
".",
"opts",
".",
"StoreLimits",
".",
"PerChannel",
")",
"==",
"0",
"{",
"return",
"ErrNoChannel",
"\n",
"}",
"\n",
"nc",
",",
"err",
":=",
"s",
".",
"createNatsClientConn",
"(",
"\"pc\"",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"p",
":=",
"&",
"partitions",
"{",
"s",
":",
"s",
",",
"nc",
":",
"nc",
",",
"}",
"\n",
"s",
".",
"partitions",
"=",
"p",
"\n",
"p",
".",
"createChannelsMapAndSublist",
"(",
"s",
".",
"opts",
".",
"StoreLimits",
".",
"PerChannel",
")",
"\n",
"p",
".",
"sendListSubject",
"=",
"partitionsPrefix",
"+",
"\".\"",
"+",
"s",
".",
"opts",
".",
"ID",
"\n",
"p",
".",
"processChanSub",
",",
"err",
"=",
"p",
".",
"nc",
".",
"Subscribe",
"(",
"p",
".",
"sendListSubject",
",",
"p",
".",
"processChannelsListRequests",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"unable to subscribe: %v\"",
",",
"err",
")",
"\n",
"}",
"\n",
"p",
".",
"processChanSub",
".",
"SetPendingLimits",
"(",
"-",
"1",
",",
"-",
"1",
")",
"\n",
"p",
".",
"inboxSub",
",",
"err",
"=",
"p",
".",
"nc",
".",
"SubscribeSync",
"(",
"nats",
".",
"NewInbox",
"(",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"unable to subscribe: %v\"",
",",
"err",
")",
"\n",
"}",
"\n",
"p",
".",
"Lock",
"(",
")",
"\n",
"nc",
".",
"SetDiscoveredServersHandler",
"(",
"p",
".",
"topologyChanged",
")",
"\n",
"if",
"err",
":=",
"p",
".",
"checkChannelsUniqueInCluster",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"p",
".",
"Unlock",
"(",
")",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"p",
".",
"Unlock",
"(",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // Initialize the channels partitions objects and issue the first
// request to check if other servers in the cluster incorrectly have
// any of the channel that this server is supposed to handle. | [
"Initialize",
"the",
"channels",
"partitions",
"objects",
"and",
"issue",
"the",
"first",
"request",
"to",
"check",
"if",
"other",
"servers",
"in",
"the",
"cluster",
"incorrectly",
"have",
"any",
"of",
"the",
"channel",
"that",
"this",
"server",
"is",
"supposed",
"to",
"handle",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L64-L107 | train |
nats-io/nats-streaming-server | server/partitions.go | createChannelsMapAndSublist | func (p *partitions) createChannelsMapAndSublist(storeChannels map[string]*stores.ChannelLimits) {
p.channels = make([]string, 0, len(storeChannels))
p.sl = util.NewSublist()
for c := range storeChannels {
p.channels = append(p.channels, c)
// When creating the store, we have already checked that channel names
// were valid. So this call cannot fail.
p.sl.Insert(c, channelInterest)
}
} | go | func (p *partitions) createChannelsMapAndSublist(storeChannels map[string]*stores.ChannelLimits) {
p.channels = make([]string, 0, len(storeChannels))
p.sl = util.NewSublist()
for c := range storeChannels {
p.channels = append(p.channels, c)
// When creating the store, we have already checked that channel names
// were valid. So this call cannot fail.
p.sl.Insert(c, channelInterest)
}
} | [
"func",
"(",
"p",
"*",
"partitions",
")",
"createChannelsMapAndSublist",
"(",
"storeChannels",
"map",
"[",
"string",
"]",
"*",
"stores",
".",
"ChannelLimits",
")",
"{",
"p",
".",
"channels",
"=",
"make",
"(",
"[",
"]",
"string",
",",
"0",
",",
"len",
"(",
"storeChannels",
")",
")",
"\n",
"p",
".",
"sl",
"=",
"util",
".",
"NewSublist",
"(",
")",
"\n",
"for",
"c",
":=",
"range",
"storeChannels",
"{",
"p",
".",
"channels",
"=",
"append",
"(",
"p",
".",
"channels",
",",
"c",
")",
"\n",
"p",
".",
"sl",
".",
"Insert",
"(",
"c",
",",
"channelInterest",
")",
"\n",
"}",
"\n",
"}"
] | // Creates the channels map based on the store's PerChannel map that was given. | [
"Creates",
"the",
"channels",
"map",
"based",
"on",
"the",
"store",
"s",
"PerChannel",
"map",
"that",
"was",
"given",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L110-L119 | train |
nats-io/nats-streaming-server | server/partitions.go | topologyChanged | func (p *partitions) topologyChanged(_ *nats.Conn) {
p.Lock()
defer p.Unlock()
if p.isShutdown {
return
}
// Let's wait before checking (sending the list and waiting for a reply)
// so that the new NATS Server has a chance to send its local
// subscriptions to the rest of the cluster. That will reduce the risk
// of missing the reply from the new server.
time.Sleep(partitionsWaitOnChange)
if err := p.checkChannelsUniqueInCluster(); err != nil {
// If server is started from command line, the Fatalf
// call will cause the process to exit. If the server
// is run programmatically and no logger has been set
// we need to exit with the panic.
p.s.log.Fatalf("Partitioning error: %v", err)
// For tests
if partitionsNoPanic {
p.s.setLastError(err)
return
}
panic(err)
}
} | go | func (p *partitions) topologyChanged(_ *nats.Conn) {
p.Lock()
defer p.Unlock()
if p.isShutdown {
return
}
// Let's wait before checking (sending the list and waiting for a reply)
// so that the new NATS Server has a chance to send its local
// subscriptions to the rest of the cluster. That will reduce the risk
// of missing the reply from the new server.
time.Sleep(partitionsWaitOnChange)
if err := p.checkChannelsUniqueInCluster(); err != nil {
// If server is started from command line, the Fatalf
// call will cause the process to exit. If the server
// is run programmatically and no logger has been set
// we need to exit with the panic.
p.s.log.Fatalf("Partitioning error: %v", err)
// For tests
if partitionsNoPanic {
p.s.setLastError(err)
return
}
panic(err)
}
} | [
"func",
"(",
"p",
"*",
"partitions",
")",
"topologyChanged",
"(",
"_",
"*",
"nats",
".",
"Conn",
")",
"{",
"p",
".",
"Lock",
"(",
")",
"\n",
"defer",
"p",
".",
"Unlock",
"(",
")",
"\n",
"if",
"p",
".",
"isShutdown",
"{",
"return",
"\n",
"}",
"\n",
"time",
".",
"Sleep",
"(",
"partitionsWaitOnChange",
")",
"\n",
"if",
"err",
":=",
"p",
".",
"checkChannelsUniqueInCluster",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"p",
".",
"s",
".",
"log",
".",
"Fatalf",
"(",
"\"Partitioning error: %v\"",
",",
"err",
")",
"\n",
"if",
"partitionsNoPanic",
"{",
"p",
".",
"s",
".",
"setLastError",
"(",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"}"
] | // Topology changed. Sends the list of channels. | [
"Topology",
"changed",
".",
"Sends",
"the",
"list",
"of",
"channels",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L122-L146 | train |
nats-io/nats-streaming-server | server/partitions.go | initSubscriptions | func (p *partitions) initSubscriptions() error {
// NOTE: Use the server's nc connection here, not the partitions' one.
for _, channelName := range p.channels {
pubSubject := fmt.Sprintf("%s.%s", p.s.info.Publish, channelName)
if _, err := p.s.nc.Subscribe(pubSubject, p.s.processClientPublish); err != nil {
return fmt.Errorf("could not subscribe to publish subject %q, %v", channelName, err)
}
}
return nil
} | go | func (p *partitions) initSubscriptions() error {
// NOTE: Use the server's nc connection here, not the partitions' one.
for _, channelName := range p.channels {
pubSubject := fmt.Sprintf("%s.%s", p.s.info.Publish, channelName)
if _, err := p.s.nc.Subscribe(pubSubject, p.s.processClientPublish); err != nil {
return fmt.Errorf("could not subscribe to publish subject %q, %v", channelName, err)
}
}
return nil
} | [
"func",
"(",
"p",
"*",
"partitions",
")",
"initSubscriptions",
"(",
")",
"error",
"{",
"for",
"_",
",",
"channelName",
":=",
"range",
"p",
".",
"channels",
"{",
"pubSubject",
":=",
"fmt",
".",
"Sprintf",
"(",
"\"%s.%s\"",
",",
"p",
".",
"s",
".",
"info",
".",
"Publish",
",",
"channelName",
")",
"\n",
"if",
"_",
",",
"err",
":=",
"p",
".",
"s",
".",
"nc",
".",
"Subscribe",
"(",
"pubSubject",
",",
"p",
".",
"s",
".",
"processClientPublish",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"could not subscribe to publish subject %q, %v\"",
",",
"channelName",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // Create the internal subscriptions on the list of channels. | [
"Create",
"the",
"internal",
"subscriptions",
"on",
"the",
"list",
"of",
"channels",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L149-L158 | train |
nats-io/nats-streaming-server | server/partitions.go | processChannelsListRequests | func (p *partitions) processChannelsListRequests(m *nats.Msg) {
// Message cannot be empty, we are supposed to receive
// a spb.CtrlMsg_Partitioning protocol. We should also
// have a repy subject
if len(m.Data) == 0 || m.Reply == "" {
return
}
req := spb.CtrlMsg{}
if err := req.Unmarshal(m.Data); err != nil {
p.s.log.Errorf("Error processing partitioning request: %v", err)
return
}
// If this is our own request, ignore
if req.ServerID == p.s.serverID {
return
}
channels, err := util.DecodeChannels(req.Data)
if err != nil {
p.s.log.Errorf("Error processing partitioning request: %v", err)
return
}
// Check that we don't have any of these channels defined.
// If we do, send a reply with simply the name of the offending
// channel in reply.Data
reply := spb.CtrlMsg{
ServerID: p.s.serverID,
MsgType: spb.CtrlMsg_Partitioning,
}
gotError := false
sl := util.NewSublist()
for _, c := range channels {
if r := p.sl.Match(c); len(r) > 0 {
reply.Data = []byte(c)
gotError = true
break
}
sl.Insert(c, channelInterest)
}
if !gotError {
// Go over our channels and check with the other server sublist
for _, c := range p.channels {
if r := sl.Match(c); len(r) > 0 {
reply.Data = []byte(c)
break
}
}
}
replyBytes, _ := reply.Marshal()
// If there is no duplicate, reply.Data will be empty, which means
// that there was no conflict.
if err := p.nc.Publish(m.Reply, replyBytes); err != nil {
p.s.log.Errorf("Error sending reply to partitioning request: %v", err)
}
} | go | func (p *partitions) processChannelsListRequests(m *nats.Msg) {
// Message cannot be empty, we are supposed to receive
// a spb.CtrlMsg_Partitioning protocol. We should also
// have a repy subject
if len(m.Data) == 0 || m.Reply == "" {
return
}
req := spb.CtrlMsg{}
if err := req.Unmarshal(m.Data); err != nil {
p.s.log.Errorf("Error processing partitioning request: %v", err)
return
}
// If this is our own request, ignore
if req.ServerID == p.s.serverID {
return
}
channels, err := util.DecodeChannels(req.Data)
if err != nil {
p.s.log.Errorf("Error processing partitioning request: %v", err)
return
}
// Check that we don't have any of these channels defined.
// If we do, send a reply with simply the name of the offending
// channel in reply.Data
reply := spb.CtrlMsg{
ServerID: p.s.serverID,
MsgType: spb.CtrlMsg_Partitioning,
}
gotError := false
sl := util.NewSublist()
for _, c := range channels {
if r := p.sl.Match(c); len(r) > 0 {
reply.Data = []byte(c)
gotError = true
break
}
sl.Insert(c, channelInterest)
}
if !gotError {
// Go over our channels and check with the other server sublist
for _, c := range p.channels {
if r := sl.Match(c); len(r) > 0 {
reply.Data = []byte(c)
break
}
}
}
replyBytes, _ := reply.Marshal()
// If there is no duplicate, reply.Data will be empty, which means
// that there was no conflict.
if err := p.nc.Publish(m.Reply, replyBytes); err != nil {
p.s.log.Errorf("Error sending reply to partitioning request: %v", err)
}
} | [
"func",
"(",
"p",
"*",
"partitions",
")",
"processChannelsListRequests",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"if",
"len",
"(",
"m",
".",
"Data",
")",
"==",
"0",
"||",
"m",
".",
"Reply",
"==",
"\"\"",
"{",
"return",
"\n",
"}",
"\n",
"req",
":=",
"spb",
".",
"CtrlMsg",
"{",
"}",
"\n",
"if",
"err",
":=",
"req",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
";",
"err",
"!=",
"nil",
"{",
"p",
".",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Error processing partitioning request: %v\"",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"req",
".",
"ServerID",
"==",
"p",
".",
"s",
".",
"serverID",
"{",
"return",
"\n",
"}",
"\n",
"channels",
",",
"err",
":=",
"util",
".",
"DecodeChannels",
"(",
"req",
".",
"Data",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"p",
".",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Error processing partitioning request: %v\"",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"reply",
":=",
"spb",
".",
"CtrlMsg",
"{",
"ServerID",
":",
"p",
".",
"s",
".",
"serverID",
",",
"MsgType",
":",
"spb",
".",
"CtrlMsg_Partitioning",
",",
"}",
"\n",
"gotError",
":=",
"false",
"\n",
"sl",
":=",
"util",
".",
"NewSublist",
"(",
")",
"\n",
"for",
"_",
",",
"c",
":=",
"range",
"channels",
"{",
"if",
"r",
":=",
"p",
".",
"sl",
".",
"Match",
"(",
"c",
")",
";",
"len",
"(",
"r",
")",
">",
"0",
"{",
"reply",
".",
"Data",
"=",
"[",
"]",
"byte",
"(",
"c",
")",
"\n",
"gotError",
"=",
"true",
"\n",
"break",
"\n",
"}",
"\n",
"sl",
".",
"Insert",
"(",
"c",
",",
"channelInterest",
")",
"\n",
"}",
"\n",
"if",
"!",
"gotError",
"{",
"for",
"_",
",",
"c",
":=",
"range",
"p",
".",
"channels",
"{",
"if",
"r",
":=",
"sl",
".",
"Match",
"(",
"c",
")",
";",
"len",
"(",
"r",
")",
">",
"0",
"{",
"reply",
".",
"Data",
"=",
"[",
"]",
"byte",
"(",
"c",
")",
"\n",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"replyBytes",
",",
"_",
":=",
"reply",
".",
"Marshal",
"(",
")",
"\n",
"if",
"err",
":=",
"p",
".",
"nc",
".",
"Publish",
"(",
"m",
".",
"Reply",
",",
"replyBytes",
")",
";",
"err",
"!=",
"nil",
"{",
"p",
".",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Error sending reply to partitioning request: %v\"",
",",
"err",
")",
"\n",
"}",
"\n",
"}"
] | // Decode the incoming partitioning protocol message.
// It can be an HB, in which case, if it is from a new server
// we send our list to the cluster, or it can be a request
// from another server. If so, we reply to the given inbox
// with either an empty Data field or the name of the first
// channel we have in common. | [
"Decode",
"the",
"incoming",
"partitioning",
"protocol",
"message",
".",
"It",
"can",
"be",
"an",
"HB",
"in",
"which",
"case",
"if",
"it",
"is",
"from",
"a",
"new",
"server",
"we",
"send",
"our",
"list",
"to",
"the",
"cluster",
"or",
"it",
"can",
"be",
"a",
"request",
"from",
"another",
"server",
".",
"If",
"so",
"we",
"reply",
"to",
"the",
"given",
"inbox",
"with",
"either",
"an",
"empty",
"Data",
"field",
"or",
"the",
"name",
"of",
"the",
"first",
"channel",
"we",
"have",
"in",
"common",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L196-L249 | train |
nats-io/nats-streaming-server | server/partitions.go | shutdown | func (p *partitions) shutdown() {
p.Lock()
defer p.Unlock()
if p.isShutdown {
return
}
p.isShutdown = true
if p.nc != nil {
p.nc.Close()
}
} | go | func (p *partitions) shutdown() {
p.Lock()
defer p.Unlock()
if p.isShutdown {
return
}
p.isShutdown = true
if p.nc != nil {
p.nc.Close()
}
} | [
"func",
"(",
"p",
"*",
"partitions",
")",
"shutdown",
"(",
")",
"{",
"p",
".",
"Lock",
"(",
")",
"\n",
"defer",
"p",
".",
"Unlock",
"(",
")",
"\n",
"if",
"p",
".",
"isShutdown",
"{",
"return",
"\n",
"}",
"\n",
"p",
".",
"isShutdown",
"=",
"true",
"\n",
"if",
"p",
".",
"nc",
"!=",
"nil",
"{",
"p",
".",
"nc",
".",
"Close",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // Notifies all go-routines used by partitioning code that the
// server is shuting down and closes the internal NATS connection. | [
"Notifies",
"all",
"go",
"-",
"routines",
"used",
"by",
"partitioning",
"code",
"that",
"the",
"server",
"is",
"shuting",
"down",
"and",
"closes",
"the",
"internal",
"NATS",
"connection",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/partitions.go#L253-L263 | train |
nats-io/nats-streaming-server | server/conf.go | checkType | func checkType(name string, kind reflect.Kind, v interface{}) error {
actualKind := reflect.TypeOf(v).Kind()
if actualKind != kind {
return fmt.Errorf("parameter %q value is expected to be %v, got %v",
name, kind.String(), actualKind.String())
}
return nil
} | go | func checkType(name string, kind reflect.Kind, v interface{}) error {
actualKind := reflect.TypeOf(v).Kind()
if actualKind != kind {
return fmt.Errorf("parameter %q value is expected to be %v, got %v",
name, kind.String(), actualKind.String())
}
return nil
} | [
"func",
"checkType",
"(",
"name",
"string",
",",
"kind",
"reflect",
".",
"Kind",
",",
"v",
"interface",
"{",
"}",
")",
"error",
"{",
"actualKind",
":=",
"reflect",
".",
"TypeOf",
"(",
"v",
")",
".",
"Kind",
"(",
")",
"\n",
"if",
"actualKind",
"!=",
"kind",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"parameter %q value is expected to be %v, got %v\"",
",",
"name",
",",
"kind",
".",
"String",
"(",
")",
",",
"actualKind",
".",
"String",
"(",
")",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // checkType returns a formatted error if `v` is not of the expected kind. | [
"checkType",
"returns",
"a",
"formatted",
"error",
"if",
"v",
"is",
"not",
"of",
"the",
"expected",
"kind",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L179-L186 | train |
nats-io/nats-streaming-server | server/conf.go | parseTLS | func parseTLS(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("expected TLS to be a map/struct, got %v", itf)
}
for k, v := range m {
name := strings.ToLower(k)
switch name {
case "client_cert":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientCert = v.(string)
case "client_key":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientKey = v.(string)
case "client_ca", "client_cacert":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientCA = v.(string)
}
}
return nil
} | go | func parseTLS(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("expected TLS to be a map/struct, got %v", itf)
}
for k, v := range m {
name := strings.ToLower(k)
switch name {
case "client_cert":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientCert = v.(string)
case "client_key":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientKey = v.(string)
case "client_ca", "client_cacert":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
opts.ClientCA = v.(string)
}
}
return nil
} | [
"func",
"parseTLS",
"(",
"itf",
"interface",
"{",
"}",
",",
"opts",
"*",
"Options",
")",
"error",
"{",
"m",
",",
"ok",
":=",
"itf",
".",
"(",
"map",
"[",
"string",
"]",
"interface",
"{",
"}",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"expected TLS to be a map/struct, got %v\"",
",",
"itf",
")",
"\n",
"}",
"\n",
"for",
"k",
",",
"v",
":=",
"range",
"m",
"{",
"name",
":=",
"strings",
".",
"ToLower",
"(",
"k",
")",
"\n",
"switch",
"name",
"{",
"case",
"\"client_cert\"",
":",
"if",
"err",
":=",
"checkType",
"(",
"k",
",",
"reflect",
".",
"String",
",",
"v",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"opts",
".",
"ClientCert",
"=",
"v",
".",
"(",
"string",
")",
"\n",
"case",
"\"client_key\"",
":",
"if",
"err",
":=",
"checkType",
"(",
"k",
",",
"reflect",
".",
"String",
",",
"v",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"opts",
".",
"ClientKey",
"=",
"v",
".",
"(",
"string",
")",
"\n",
"case",
"\"client_ca\"",
",",
"\"client_cacert\"",
":",
"if",
"err",
":=",
"checkType",
"(",
"k",
",",
"reflect",
".",
"String",
",",
"v",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"opts",
".",
"ClientCA",
"=",
"v",
".",
"(",
"string",
")",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // parseTLS updates `opts` with TLS config | [
"parseTLS",
"updates",
"opts",
"with",
"TLS",
"config"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L189-L215 | train |
nats-io/nats-streaming-server | server/conf.go | parseStoreLimits | func parseStoreLimits(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("expected store limits to be a map/struct, got %v", itf)
}
for k, v := range m {
name := strings.ToLower(k)
switch name {
case "mc", "max_channels", "maxchannels":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.MaxChannels = int(v.(int64))
case "channels", "channels_limits", "channelslimits", "per_channel", "per_channel_limits":
if err := parsePerChannelLimits(v, opts); err != nil {
return err
}
default:
// Check for the global limits (MaxMsgs, MaxBytes, etc..)
if err := parseChannelLimits(&opts.ChannelLimits, k, name, v, true); err != nil {
return err
}
}
}
return nil
} | go | func parseStoreLimits(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("expected store limits to be a map/struct, got %v", itf)
}
for k, v := range m {
name := strings.ToLower(k)
switch name {
case "mc", "max_channels", "maxchannels":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
opts.MaxChannels = int(v.(int64))
case "channels", "channels_limits", "channelslimits", "per_channel", "per_channel_limits":
if err := parsePerChannelLimits(v, opts); err != nil {
return err
}
default:
// Check for the global limits (MaxMsgs, MaxBytes, etc..)
if err := parseChannelLimits(&opts.ChannelLimits, k, name, v, true); err != nil {
return err
}
}
}
return nil
} | [
"func",
"parseStoreLimits",
"(",
"itf",
"interface",
"{",
"}",
",",
"opts",
"*",
"Options",
")",
"error",
"{",
"m",
",",
"ok",
":=",
"itf",
".",
"(",
"map",
"[",
"string",
"]",
"interface",
"{",
"}",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"expected store limits to be a map/struct, got %v\"",
",",
"itf",
")",
"\n",
"}",
"\n",
"for",
"k",
",",
"v",
":=",
"range",
"m",
"{",
"name",
":=",
"strings",
".",
"ToLower",
"(",
"k",
")",
"\n",
"switch",
"name",
"{",
"case",
"\"mc\"",
",",
"\"max_channels\"",
",",
"\"maxchannels\"",
":",
"if",
"err",
":=",
"checkType",
"(",
"k",
",",
"reflect",
".",
"Int64",
",",
"v",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"opts",
".",
"MaxChannels",
"=",
"int",
"(",
"v",
".",
"(",
"int64",
")",
")",
"\n",
"case",
"\"channels\"",
",",
"\"channels_limits\"",
",",
"\"channelslimits\"",
",",
"\"per_channel\"",
",",
"\"per_channel_limits\"",
":",
"if",
"err",
":=",
"parsePerChannelLimits",
"(",
"v",
",",
"opts",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"default",
":",
"if",
"err",
":=",
"parseChannelLimits",
"(",
"&",
"opts",
".",
"ChannelLimits",
",",
"k",
",",
"name",
",",
"v",
",",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // parseStoreLimits updates `opts` with store limits | [
"parseStoreLimits",
"updates",
"opts",
"with",
"store",
"limits"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L306-L331 | train |
nats-io/nats-streaming-server | server/conf.go | parseChannelLimits | func parseChannelLimits(cl *stores.ChannelLimits, k, name string, v interface{}, isGlobal bool) error {
switch name {
case "msu", "max_subs", "max_subscriptions", "maxsubscriptions":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxSubscriptions = int(v.(int64))
if !isGlobal && cl.MaxSubscriptions == 0 {
cl.MaxSubscriptions = -1
}
case "mm", "max_msgs", "maxmsgs", "max_count", "maxcount":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxMsgs = int(v.(int64))
if !isGlobal && cl.MaxMsgs == 0 {
cl.MaxMsgs = -1
}
case "mb", "max_bytes", "maxbytes":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxBytes = v.(int64)
if !isGlobal && cl.MaxBytes == 0 {
cl.MaxBytes = -1
}
case "ma", "max_age", "maxage":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
dur, err := time.ParseDuration(v.(string))
if err != nil {
return err
}
cl.MaxAge = dur
if !isGlobal && cl.MaxAge == 0 {
cl.MaxAge = -1
}
case "mi", "max_inactivity", "maxinactivity":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
dur, err := time.ParseDuration(v.(string))
if err != nil {
return err
}
cl.MaxInactivity = dur
if !isGlobal && cl.MaxInactivity == 0 {
cl.MaxInactivity = -1
}
}
return nil
} | go | func parseChannelLimits(cl *stores.ChannelLimits, k, name string, v interface{}, isGlobal bool) error {
switch name {
case "msu", "max_subs", "max_subscriptions", "maxsubscriptions":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxSubscriptions = int(v.(int64))
if !isGlobal && cl.MaxSubscriptions == 0 {
cl.MaxSubscriptions = -1
}
case "mm", "max_msgs", "maxmsgs", "max_count", "maxcount":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxMsgs = int(v.(int64))
if !isGlobal && cl.MaxMsgs == 0 {
cl.MaxMsgs = -1
}
case "mb", "max_bytes", "maxbytes":
if err := checkType(k, reflect.Int64, v); err != nil {
return err
}
cl.MaxBytes = v.(int64)
if !isGlobal && cl.MaxBytes == 0 {
cl.MaxBytes = -1
}
case "ma", "max_age", "maxage":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
dur, err := time.ParseDuration(v.(string))
if err != nil {
return err
}
cl.MaxAge = dur
if !isGlobal && cl.MaxAge == 0 {
cl.MaxAge = -1
}
case "mi", "max_inactivity", "maxinactivity":
if err := checkType(k, reflect.String, v); err != nil {
return err
}
dur, err := time.ParseDuration(v.(string))
if err != nil {
return err
}
cl.MaxInactivity = dur
if !isGlobal && cl.MaxInactivity == 0 {
cl.MaxInactivity = -1
}
}
return nil
} | [
"func",
"parseChannelLimits",
"(",
"cl",
"*",
"stores",
".",
"ChannelLimits",
",",
"k",
",",
"name",
"string",
",",
"v",
"interface",
"{",
"}",
",",
"isGlobal",
"bool",
")",
"error",
"{",
"switch",
"name",
"{",
"case",
"\"msu\"",
",",
"\"max_subs\"",
",",
"\"max_subscriptions\"",
",",
"\"maxsubscriptions\"",
":",
"if",
"err",
":=",
"checkType",
"(",
"k",
",",
"reflect",
".",
"Int64",
",",
"v",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"cl",
".",
"MaxSubscriptions",
"=",
"int",
"(",
"v",
".",
"(",
"int64",
")",
")",
"\n",
"if",
"!",
"isGlobal",
"&&",
"cl",
".",
"MaxSubscriptions",
"==",
"0",
"{",
"cl",
".",
"MaxSubscriptions",
"=",
"-",
"1",
"\n",
"}",
"\n",
"case",
"\"mm\"",
",",
"\"max_msgs\"",
",",
"\"maxmsgs\"",
",",
"\"max_count\"",
",",
"\"maxcount\"",
":",
"if",
"err",
":=",
"checkType",
"(",
"k",
",",
"reflect",
".",
"Int64",
",",
"v",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"cl",
".",
"MaxMsgs",
"=",
"int",
"(",
"v",
".",
"(",
"int64",
")",
")",
"\n",
"if",
"!",
"isGlobal",
"&&",
"cl",
".",
"MaxMsgs",
"==",
"0",
"{",
"cl",
".",
"MaxMsgs",
"=",
"-",
"1",
"\n",
"}",
"\n",
"case",
"\"mb\"",
",",
"\"max_bytes\"",
",",
"\"maxbytes\"",
":",
"if",
"err",
":=",
"checkType",
"(",
"k",
",",
"reflect",
".",
"Int64",
",",
"v",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"cl",
".",
"MaxBytes",
"=",
"v",
".",
"(",
"int64",
")",
"\n",
"if",
"!",
"isGlobal",
"&&",
"cl",
".",
"MaxBytes",
"==",
"0",
"{",
"cl",
".",
"MaxBytes",
"=",
"-",
"1",
"\n",
"}",
"\n",
"case",
"\"ma\"",
",",
"\"max_age\"",
",",
"\"maxage\"",
":",
"if",
"err",
":=",
"checkType",
"(",
"k",
",",
"reflect",
".",
"String",
",",
"v",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"dur",
",",
"err",
":=",
"time",
".",
"ParseDuration",
"(",
"v",
".",
"(",
"string",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"cl",
".",
"MaxAge",
"=",
"dur",
"\n",
"if",
"!",
"isGlobal",
"&&",
"cl",
".",
"MaxAge",
"==",
"0",
"{",
"cl",
".",
"MaxAge",
"=",
"-",
"1",
"\n",
"}",
"\n",
"case",
"\"mi\"",
",",
"\"max_inactivity\"",
",",
"\"maxinactivity\"",
":",
"if",
"err",
":=",
"checkType",
"(",
"k",
",",
"reflect",
".",
"String",
",",
"v",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"dur",
",",
"err",
":=",
"time",
".",
"ParseDuration",
"(",
"v",
".",
"(",
"string",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"cl",
".",
"MaxInactivity",
"=",
"dur",
"\n",
"if",
"!",
"isGlobal",
"&&",
"cl",
".",
"MaxInactivity",
"==",
"0",
"{",
"cl",
".",
"MaxInactivity",
"=",
"-",
"1",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // parseChannelLimits updates `cl` with channel limits. | [
"parseChannelLimits",
"updates",
"cl",
"with",
"channel",
"limits",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L334-L386 | train |
nats-io/nats-streaming-server | server/conf.go | parsePerChannelLimits | func parsePerChannelLimits(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("expected per channel limits to be a map/struct, got %v", itf)
}
for channelName, limits := range m {
limitsMap, ok := limits.(map[string]interface{})
if !ok {
return fmt.Errorf("expected channel limits to be a map/struct, got %v", limits)
}
if !util.IsChannelNameValid(channelName, true) {
return fmt.Errorf("invalid channel name %q", channelName)
}
cl := &stores.ChannelLimits{}
for k, v := range limitsMap {
name := strings.ToLower(k)
if err := parseChannelLimits(cl, k, name, v, false); err != nil {
return err
}
}
sl := &opts.StoreLimits
sl.AddPerChannel(channelName, cl)
}
return nil
} | go | func parsePerChannelLimits(itf interface{}, opts *Options) error {
m, ok := itf.(map[string]interface{})
if !ok {
return fmt.Errorf("expected per channel limits to be a map/struct, got %v", itf)
}
for channelName, limits := range m {
limitsMap, ok := limits.(map[string]interface{})
if !ok {
return fmt.Errorf("expected channel limits to be a map/struct, got %v", limits)
}
if !util.IsChannelNameValid(channelName, true) {
return fmt.Errorf("invalid channel name %q", channelName)
}
cl := &stores.ChannelLimits{}
for k, v := range limitsMap {
name := strings.ToLower(k)
if err := parseChannelLimits(cl, k, name, v, false); err != nil {
return err
}
}
sl := &opts.StoreLimits
sl.AddPerChannel(channelName, cl)
}
return nil
} | [
"func",
"parsePerChannelLimits",
"(",
"itf",
"interface",
"{",
"}",
",",
"opts",
"*",
"Options",
")",
"error",
"{",
"m",
",",
"ok",
":=",
"itf",
".",
"(",
"map",
"[",
"string",
"]",
"interface",
"{",
"}",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"expected per channel limits to be a map/struct, got %v\"",
",",
"itf",
")",
"\n",
"}",
"\n",
"for",
"channelName",
",",
"limits",
":=",
"range",
"m",
"{",
"limitsMap",
",",
"ok",
":=",
"limits",
".",
"(",
"map",
"[",
"string",
"]",
"interface",
"{",
"}",
")",
"\n",
"if",
"!",
"ok",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"expected channel limits to be a map/struct, got %v\"",
",",
"limits",
")",
"\n",
"}",
"\n",
"if",
"!",
"util",
".",
"IsChannelNameValid",
"(",
"channelName",
",",
"true",
")",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"invalid channel name %q\"",
",",
"channelName",
")",
"\n",
"}",
"\n",
"cl",
":=",
"&",
"stores",
".",
"ChannelLimits",
"{",
"}",
"\n",
"for",
"k",
",",
"v",
":=",
"range",
"limitsMap",
"{",
"name",
":=",
"strings",
".",
"ToLower",
"(",
"k",
")",
"\n",
"if",
"err",
":=",
"parseChannelLimits",
"(",
"cl",
",",
"k",
",",
"name",
",",
"v",
",",
"false",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"sl",
":=",
"&",
"opts",
".",
"StoreLimits",
"\n",
"sl",
".",
"AddPerChannel",
"(",
"channelName",
",",
"cl",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // parsePerChannelLimits updates `opts` with per channel limits. | [
"parsePerChannelLimits",
"updates",
"opts",
"with",
"per",
"channel",
"limits",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L389-L413 | train |
nats-io/nats-streaming-server | server/conf.go | getBytes | func getBytes(f *flag.Flag) (int64, error) {
var res map[string]interface{}
// Use NATS parser to do the conversion for us.
res, err := conf.Parse(fmt.Sprintf("bytes: %v", f.Value.String()))
if err != nil {
return 0, err
}
resVal := res["bytes"]
if resVal == nil || reflect.TypeOf(resVal).Kind() != reflect.Int64 {
return 0, fmt.Errorf("%v should be a size, got '%v'", f.Name, resVal)
}
return resVal.(int64), nil
} | go | func getBytes(f *flag.Flag) (int64, error) {
var res map[string]interface{}
// Use NATS parser to do the conversion for us.
res, err := conf.Parse(fmt.Sprintf("bytes: %v", f.Value.String()))
if err != nil {
return 0, err
}
resVal := res["bytes"]
if resVal == nil || reflect.TypeOf(resVal).Kind() != reflect.Int64 {
return 0, fmt.Errorf("%v should be a size, got '%v'", f.Name, resVal)
}
return resVal.(int64), nil
} | [
"func",
"getBytes",
"(",
"f",
"*",
"flag",
".",
"Flag",
")",
"(",
"int64",
",",
"error",
")",
"{",
"var",
"res",
"map",
"[",
"string",
"]",
"interface",
"{",
"}",
"\n",
"res",
",",
"err",
":=",
"conf",
".",
"Parse",
"(",
"fmt",
".",
"Sprintf",
"(",
"\"bytes: %v\"",
",",
"f",
".",
"Value",
".",
"String",
"(",
")",
")",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"0",
",",
"err",
"\n",
"}",
"\n",
"resVal",
":=",
"res",
"[",
"\"bytes\"",
"]",
"\n",
"if",
"resVal",
"==",
"nil",
"||",
"reflect",
".",
"TypeOf",
"(",
"resVal",
")",
".",
"Kind",
"(",
")",
"!=",
"reflect",
".",
"Int64",
"{",
"return",
"0",
",",
"fmt",
".",
"Errorf",
"(",
"\"%v should be a size, got '%v'\"",
",",
"f",
".",
"Name",
",",
"resVal",
")",
"\n",
"}",
"\n",
"return",
"resVal",
".",
"(",
"int64",
")",
",",
"nil",
"\n",
"}"
] | // getBytes returns the number of bytes from the flag's String size.
// For instance, 1KB would return 1024. | [
"getBytes",
"returns",
"the",
"number",
"of",
"bytes",
"from",
"the",
"flag",
"s",
"String",
"size",
".",
"For",
"instance",
"1KB",
"would",
"return",
"1024",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/conf.go#L703-L715 | train |
nats-io/nats-streaming-server | util/sublist.go | addToCache | func (s *Sublist) addToCache(subject string, element interface{}) {
for k, r := range s.cache {
if matchLiteral(k, subject) {
// Copy since others may have a reference.
nr := append([]interface{}(nil), r...)
nr = append(nr, element)
s.cache[k] = nr
}
}
} | go | func (s *Sublist) addToCache(subject string, element interface{}) {
for k, r := range s.cache {
if matchLiteral(k, subject) {
// Copy since others may have a reference.
nr := append([]interface{}(nil), r...)
nr = append(nr, element)
s.cache[k] = nr
}
}
} | [
"func",
"(",
"s",
"*",
"Sublist",
")",
"addToCache",
"(",
"subject",
"string",
",",
"element",
"interface",
"{",
"}",
")",
"{",
"for",
"k",
",",
"r",
":=",
"range",
"s",
".",
"cache",
"{",
"if",
"matchLiteral",
"(",
"k",
",",
"subject",
")",
"{",
"nr",
":=",
"append",
"(",
"[",
"]",
"interface",
"{",
"}",
"(",
"nil",
")",
",",
"r",
"...",
")",
"\n",
"nr",
"=",
"append",
"(",
"nr",
",",
"element",
")",
"\n",
"s",
".",
"cache",
"[",
"k",
"]",
"=",
"nr",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // addToCache will add the new entry to existing cache
// entries if needed. Assumes write lock is held. | [
"addToCache",
"will",
"add",
"the",
"new",
"entry",
"to",
"existing",
"cache",
"entries",
"if",
"needed",
".",
"Assumes",
"write",
"lock",
"is",
"held",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L139-L148 | train |
nats-io/nats-streaming-server | util/sublist.go | removeFromCache | func (s *Sublist) removeFromCache(subject string) {
for k := range s.cache {
if !matchLiteral(k, subject) {
continue
}
// Since someone else may be referencing, can't modify the list
// safely, just let it re-populate.
delete(s.cache, k)
}
} | go | func (s *Sublist) removeFromCache(subject string) {
for k := range s.cache {
if !matchLiteral(k, subject) {
continue
}
// Since someone else may be referencing, can't modify the list
// safely, just let it re-populate.
delete(s.cache, k)
}
} | [
"func",
"(",
"s",
"*",
"Sublist",
")",
"removeFromCache",
"(",
"subject",
"string",
")",
"{",
"for",
"k",
":=",
"range",
"s",
".",
"cache",
"{",
"if",
"!",
"matchLiteral",
"(",
"k",
",",
"subject",
")",
"{",
"continue",
"\n",
"}",
"\n",
"delete",
"(",
"s",
".",
"cache",
",",
"k",
")",
"\n",
"}",
"\n",
"}"
] | // removeFromCache will remove any active cache entries on that subject.
// Assumes write lock is held. | [
"removeFromCache",
"will",
"remove",
"any",
"active",
"cache",
"entries",
"on",
"that",
"subject",
".",
"Assumes",
"write",
"lock",
"is",
"held",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L152-L161 | train |
nats-io/nats-streaming-server | util/sublist.go | Match | func (s *Sublist) Match(subject string) []interface{} {
s.RLock()
rc, ok := s.cache[subject]
s.RUnlock()
if ok {
return rc
}
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
result := make([]interface{}, 0, 4)
s.Lock()
matchLevel(s.root, tokens, &result)
// Add to our cache
s.cache[subject] = result
// Bound the number of entries to sublistMaxCache
if len(s.cache) > slCacheMax {
for k := range s.cache {
delete(s.cache, k)
break
}
}
s.Unlock()
return result
} | go | func (s *Sublist) Match(subject string) []interface{} {
s.RLock()
rc, ok := s.cache[subject]
s.RUnlock()
if ok {
return rc
}
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
result := make([]interface{}, 0, 4)
s.Lock()
matchLevel(s.root, tokens, &result)
// Add to our cache
s.cache[subject] = result
// Bound the number of entries to sublistMaxCache
if len(s.cache) > slCacheMax {
for k := range s.cache {
delete(s.cache, k)
break
}
}
s.Unlock()
return result
} | [
"func",
"(",
"s",
"*",
"Sublist",
")",
"Match",
"(",
"subject",
"string",
")",
"[",
"]",
"interface",
"{",
"}",
"{",
"s",
".",
"RLock",
"(",
")",
"\n",
"rc",
",",
"ok",
":=",
"s",
".",
"cache",
"[",
"subject",
"]",
"\n",
"s",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"ok",
"{",
"return",
"rc",
"\n",
"}",
"\n",
"tsa",
":=",
"[",
"32",
"]",
"string",
"{",
"}",
"\n",
"tokens",
":=",
"tsa",
"[",
":",
"0",
"]",
"\n",
"start",
":=",
"0",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"len",
"(",
"subject",
")",
";",
"i",
"++",
"{",
"if",
"subject",
"[",
"i",
"]",
"==",
"btsep",
"{",
"tokens",
"=",
"append",
"(",
"tokens",
",",
"subject",
"[",
"start",
":",
"i",
"]",
")",
"\n",
"start",
"=",
"i",
"+",
"1",
"\n",
"}",
"\n",
"}",
"\n",
"tokens",
"=",
"append",
"(",
"tokens",
",",
"subject",
"[",
"start",
":",
"]",
")",
"\n",
"result",
":=",
"make",
"(",
"[",
"]",
"interface",
"{",
"}",
",",
"0",
",",
"4",
")",
"\n",
"s",
".",
"Lock",
"(",
")",
"\n",
"matchLevel",
"(",
"s",
".",
"root",
",",
"tokens",
",",
"&",
"result",
")",
"\n",
"s",
".",
"cache",
"[",
"subject",
"]",
"=",
"result",
"\n",
"if",
"len",
"(",
"s",
".",
"cache",
")",
">",
"slCacheMax",
"{",
"for",
"k",
":=",
"range",
"s",
".",
"cache",
"{",
"delete",
"(",
"s",
".",
"cache",
",",
"k",
")",
"\n",
"break",
"\n",
"}",
"\n",
"}",
"\n",
"s",
".",
"Unlock",
"(",
")",
"\n",
"return",
"result",
"\n",
"}"
] | // Match will match all entries to the literal subject.
// It will return a set of results. | [
"Match",
"will",
"match",
"all",
"entries",
"to",
"the",
"literal",
"subject",
".",
"It",
"will",
"return",
"a",
"set",
"of",
"results",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L165-L200 | train |
nats-io/nats-streaming-server | util/sublist.go | Remove | func (s *Sublist) Remove(subject string, element interface{}) error {
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
s.Lock()
defer s.Unlock()
sfwc := false
l := s.root
var n *node
// Track levels for pruning
var lnts [32]lnt
levels := lnts[:0]
for _, t := range tokens {
if len(t) == 0 || sfwc {
return ErrInvalidSubject
}
if l == nil {
return ErrNotFound
}
switch t[0] {
case pwc:
n = l.pwc
case fwc:
n = l.fwc
sfwc = true
default:
n = l.nodes[t]
}
if n != nil {
levels = append(levels, lnt{l, n, t})
l = n.next
} else {
l = nil
}
}
if !s.removeFromNode(n, element) {
return ErrNotFound
}
s.count--
for i := len(levels) - 1; i >= 0; i-- {
l, n, t := levels[i].l, levels[i].n, levels[i].t
if n.isEmpty() {
l.pruneNode(n, t)
}
}
s.removeFromCache(subject)
return nil
} | go | func (s *Sublist) Remove(subject string, element interface{}) error {
tsa := [32]string{}
tokens := tsa[:0]
start := 0
for i := 0; i < len(subject); i++ {
if subject[i] == btsep {
tokens = append(tokens, subject[start:i])
start = i + 1
}
}
tokens = append(tokens, subject[start:])
s.Lock()
defer s.Unlock()
sfwc := false
l := s.root
var n *node
// Track levels for pruning
var lnts [32]lnt
levels := lnts[:0]
for _, t := range tokens {
if len(t) == 0 || sfwc {
return ErrInvalidSubject
}
if l == nil {
return ErrNotFound
}
switch t[0] {
case pwc:
n = l.pwc
case fwc:
n = l.fwc
sfwc = true
default:
n = l.nodes[t]
}
if n != nil {
levels = append(levels, lnt{l, n, t})
l = n.next
} else {
l = nil
}
}
if !s.removeFromNode(n, element) {
return ErrNotFound
}
s.count--
for i := len(levels) - 1; i >= 0; i-- {
l, n, t := levels[i].l, levels[i].n, levels[i].t
if n.isEmpty() {
l.pruneNode(n, t)
}
}
s.removeFromCache(subject)
return nil
} | [
"func",
"(",
"s",
"*",
"Sublist",
")",
"Remove",
"(",
"subject",
"string",
",",
"element",
"interface",
"{",
"}",
")",
"error",
"{",
"tsa",
":=",
"[",
"32",
"]",
"string",
"{",
"}",
"\n",
"tokens",
":=",
"tsa",
"[",
":",
"0",
"]",
"\n",
"start",
":=",
"0",
"\n",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"len",
"(",
"subject",
")",
";",
"i",
"++",
"{",
"if",
"subject",
"[",
"i",
"]",
"==",
"btsep",
"{",
"tokens",
"=",
"append",
"(",
"tokens",
",",
"subject",
"[",
"start",
":",
"i",
"]",
")",
"\n",
"start",
"=",
"i",
"+",
"1",
"\n",
"}",
"\n",
"}",
"\n",
"tokens",
"=",
"append",
"(",
"tokens",
",",
"subject",
"[",
"start",
":",
"]",
")",
"\n",
"s",
".",
"Lock",
"(",
")",
"\n",
"defer",
"s",
".",
"Unlock",
"(",
")",
"\n",
"sfwc",
":=",
"false",
"\n",
"l",
":=",
"s",
".",
"root",
"\n",
"var",
"n",
"*",
"node",
"\n",
"var",
"lnts",
"[",
"32",
"]",
"lnt",
"\n",
"levels",
":=",
"lnts",
"[",
":",
"0",
"]",
"\n",
"for",
"_",
",",
"t",
":=",
"range",
"tokens",
"{",
"if",
"len",
"(",
"t",
")",
"==",
"0",
"||",
"sfwc",
"{",
"return",
"ErrInvalidSubject",
"\n",
"}",
"\n",
"if",
"l",
"==",
"nil",
"{",
"return",
"ErrNotFound",
"\n",
"}",
"\n",
"switch",
"t",
"[",
"0",
"]",
"{",
"case",
"pwc",
":",
"n",
"=",
"l",
".",
"pwc",
"\n",
"case",
"fwc",
":",
"n",
"=",
"l",
".",
"fwc",
"\n",
"sfwc",
"=",
"true",
"\n",
"default",
":",
"n",
"=",
"l",
".",
"nodes",
"[",
"t",
"]",
"\n",
"}",
"\n",
"if",
"n",
"!=",
"nil",
"{",
"levels",
"=",
"append",
"(",
"levels",
",",
"lnt",
"{",
"l",
",",
"n",
",",
"t",
"}",
")",
"\n",
"l",
"=",
"n",
".",
"next",
"\n",
"}",
"else",
"{",
"l",
"=",
"nil",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"!",
"s",
".",
"removeFromNode",
"(",
"n",
",",
"element",
")",
"{",
"return",
"ErrNotFound",
"\n",
"}",
"\n",
"s",
".",
"count",
"--",
"\n",
"for",
"i",
":=",
"len",
"(",
"levels",
")",
"-",
"1",
";",
"i",
">=",
"0",
";",
"i",
"--",
"{",
"l",
",",
"n",
",",
"t",
":=",
"levels",
"[",
"i",
"]",
".",
"l",
",",
"levels",
"[",
"i",
"]",
".",
"n",
",",
"levels",
"[",
"i",
"]",
".",
"t",
"\n",
"if",
"n",
".",
"isEmpty",
"(",
")",
"{",
"l",
".",
"pruneNode",
"(",
"n",
",",
"t",
")",
"\n",
"}",
"\n",
"}",
"\n",
"s",
".",
"removeFromCache",
"(",
"subject",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // Remove will remove an element from the sublist. | [
"Remove",
"will",
"remove",
"an",
"element",
"from",
"the",
"sublist",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L238-L296 | train |
nats-io/nats-streaming-server | util/sublist.go | removeFromList | func removeFromList(element interface{}, l []interface{}) ([]interface{}, bool) {
for i := 0; i < len(l); i++ {
if l[i] == element {
last := len(l) - 1
l[i] = l[last]
l[last] = nil
l = l[:last]
return shrinkAsNeeded(l), true
}
}
return l, false
} | go | func removeFromList(element interface{}, l []interface{}) ([]interface{}, bool) {
for i := 0; i < len(l); i++ {
if l[i] == element {
last := len(l) - 1
l[i] = l[last]
l[last] = nil
l = l[:last]
return shrinkAsNeeded(l), true
}
}
return l, false
} | [
"func",
"removeFromList",
"(",
"element",
"interface",
"{",
"}",
",",
"l",
"[",
"]",
"interface",
"{",
"}",
")",
"(",
"[",
"]",
"interface",
"{",
"}",
",",
"bool",
")",
"{",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"len",
"(",
"l",
")",
";",
"i",
"++",
"{",
"if",
"l",
"[",
"i",
"]",
"==",
"element",
"{",
"last",
":=",
"len",
"(",
"l",
")",
"-",
"1",
"\n",
"l",
"[",
"i",
"]",
"=",
"l",
"[",
"last",
"]",
"\n",
"l",
"[",
"last",
"]",
"=",
"nil",
"\n",
"l",
"=",
"l",
"[",
":",
"last",
"]",
"\n",
"return",
"shrinkAsNeeded",
"(",
"l",
")",
",",
"true",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"l",
",",
"false",
"\n",
"}"
] | // Removes an element from a list. | [
"Removes",
"an",
"element",
"from",
"a",
"list",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L336-L347 | train |
nats-io/nats-streaming-server | util/sublist.go | shrinkAsNeeded | func shrinkAsNeeded(l []interface{}) []interface{} {
ll := len(l)
cl := cap(l)
// Don't bother if list not too big
if cl <= 8 {
return l
}
pFree := float32(cl-ll) / float32(cl)
if pFree > 0.50 {
return append([]interface{}(nil), l...)
}
return l
} | go | func shrinkAsNeeded(l []interface{}) []interface{} {
ll := len(l)
cl := cap(l)
// Don't bother if list not too big
if cl <= 8 {
return l
}
pFree := float32(cl-ll) / float32(cl)
if pFree > 0.50 {
return append([]interface{}(nil), l...)
}
return l
} | [
"func",
"shrinkAsNeeded",
"(",
"l",
"[",
"]",
"interface",
"{",
"}",
")",
"[",
"]",
"interface",
"{",
"}",
"{",
"ll",
":=",
"len",
"(",
"l",
")",
"\n",
"cl",
":=",
"cap",
"(",
"l",
")",
"\n",
"if",
"cl",
"<=",
"8",
"{",
"return",
"l",
"\n",
"}",
"\n",
"pFree",
":=",
"float32",
"(",
"cl",
"-",
"ll",
")",
"/",
"float32",
"(",
"cl",
")",
"\n",
"if",
"pFree",
">",
"0.50",
"{",
"return",
"append",
"(",
"[",
"]",
"interface",
"{",
"}",
"(",
"nil",
")",
",",
"l",
"...",
")",
"\n",
"}",
"\n",
"return",
"l",
"\n",
"}"
] | // Checks if we need to do a resize. This is for very large growth then
// subsequent return to a more normal size from unsubscribe. | [
"Checks",
"if",
"we",
"need",
"to",
"do",
"a",
"resize",
".",
"This",
"is",
"for",
"very",
"large",
"growth",
"then",
"subsequent",
"return",
"to",
"a",
"more",
"normal",
"size",
"from",
"unsubscribe",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L360-L372 | train |
nats-io/nats-streaming-server | util/sublist.go | CacheCount | func (s *Sublist) CacheCount() int {
s.RLock()
defer s.RUnlock()
return len(s.cache)
} | go | func (s *Sublist) CacheCount() int {
s.RLock()
defer s.RUnlock()
return len(s.cache)
} | [
"func",
"(",
"s",
"*",
"Sublist",
")",
"CacheCount",
"(",
")",
"int",
"{",
"s",
".",
"RLock",
"(",
")",
"\n",
"defer",
"s",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"len",
"(",
"s",
".",
"cache",
")",
"\n",
"}"
] | // CacheCount returns the number of result sets in the cache. | [
"CacheCount",
"returns",
"the",
"number",
"of",
"result",
"sets",
"in",
"the",
"cache",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/util/sublist.go#L382-L386 | train |
nats-io/nats-streaming-server | server/server.go | startDeleteTimer | func (c *channel) startDeleteTimer() {
c.activity.last = time.Now()
c.resetDeleteTimer(c.activity.maxInactivity)
} | go | func (c *channel) startDeleteTimer() {
c.activity.last = time.Now()
c.resetDeleteTimer(c.activity.maxInactivity)
} | [
"func",
"(",
"c",
"*",
"channel",
")",
"startDeleteTimer",
"(",
")",
"{",
"c",
".",
"activity",
".",
"last",
"=",
"time",
".",
"Now",
"(",
")",
"\n",
"c",
".",
"resetDeleteTimer",
"(",
"c",
".",
"activity",
".",
"maxInactivity",
")",
"\n",
"}"
] | // Starts the delete timer that when firing will post
// a channel delete request to the ioLoop.
// The channelStore's mutex must be held on entry. | [
"Starts",
"the",
"delete",
"timer",
"that",
"when",
"firing",
"will",
"post",
"a",
"channel",
"delete",
"request",
"to",
"the",
"ioLoop",
".",
"The",
"channelStore",
"s",
"mutex",
"must",
"be",
"held",
"on",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L453-L456 | train |
nats-io/nats-streaming-server | server/server.go | stopDeleteTimer | func (c *channel) stopDeleteTimer() {
if c.activity.timer != nil {
c.activity.timer.Stop()
c.activity.timerSet = false
if c.stan.debug {
c.stan.log.Debugf("Channel %q delete timer stopped", c.name)
}
}
} | go | func (c *channel) stopDeleteTimer() {
if c.activity.timer != nil {
c.activity.timer.Stop()
c.activity.timerSet = false
if c.stan.debug {
c.stan.log.Debugf("Channel %q delete timer stopped", c.name)
}
}
} | [
"func",
"(",
"c",
"*",
"channel",
")",
"stopDeleteTimer",
"(",
")",
"{",
"if",
"c",
".",
"activity",
".",
"timer",
"!=",
"nil",
"{",
"c",
".",
"activity",
".",
"timer",
".",
"Stop",
"(",
")",
"\n",
"c",
".",
"activity",
".",
"timerSet",
"=",
"false",
"\n",
"if",
"c",
".",
"stan",
".",
"debug",
"{",
"c",
".",
"stan",
".",
"log",
".",
"Debugf",
"(",
"\"Channel %q delete timer stopped\"",
",",
"c",
".",
"name",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // Stops the delete timer.
// The channelStore's mutex must be held on entry. | [
"Stops",
"the",
"delete",
"timer",
".",
"The",
"channelStore",
"s",
"mutex",
"must",
"be",
"held",
"on",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L460-L468 | train |
nats-io/nats-streaming-server | server/server.go | resetDeleteTimer | func (c *channel) resetDeleteTimer(newDuration time.Duration) {
a := c.activity
if a.timer == nil {
a.timer = time.AfterFunc(newDuration, func() {
c.stan.sendDeleteChannelRequest(c)
})
} else {
a.timer.Reset(newDuration)
}
if c.stan.debug {
c.stan.log.Debugf("Channel %q delete timer set to fire in %v", c.name, newDuration)
}
a.timerSet = true
} | go | func (c *channel) resetDeleteTimer(newDuration time.Duration) {
a := c.activity
if a.timer == nil {
a.timer = time.AfterFunc(newDuration, func() {
c.stan.sendDeleteChannelRequest(c)
})
} else {
a.timer.Reset(newDuration)
}
if c.stan.debug {
c.stan.log.Debugf("Channel %q delete timer set to fire in %v", c.name, newDuration)
}
a.timerSet = true
} | [
"func",
"(",
"c",
"*",
"channel",
")",
"resetDeleteTimer",
"(",
"newDuration",
"time",
".",
"Duration",
")",
"{",
"a",
":=",
"c",
".",
"activity",
"\n",
"if",
"a",
".",
"timer",
"==",
"nil",
"{",
"a",
".",
"timer",
"=",
"time",
".",
"AfterFunc",
"(",
"newDuration",
",",
"func",
"(",
")",
"{",
"c",
".",
"stan",
".",
"sendDeleteChannelRequest",
"(",
"c",
")",
"\n",
"}",
")",
"\n",
"}",
"else",
"{",
"a",
".",
"timer",
".",
"Reset",
"(",
"newDuration",
")",
"\n",
"}",
"\n",
"if",
"c",
".",
"stan",
".",
"debug",
"{",
"c",
".",
"stan",
".",
"log",
".",
"Debugf",
"(",
"\"Channel %q delete timer set to fire in %v\"",
",",
"c",
".",
"name",
",",
"newDuration",
")",
"\n",
"}",
"\n",
"a",
".",
"timerSet",
"=",
"true",
"\n",
"}"
] | // Resets the delete timer to the given duration.
// If the timer was not created, this call will create it.
// The channelStore's mutex must be held on entry. | [
"Resets",
"the",
"delete",
"timer",
"to",
"the",
"given",
"duration",
".",
"If",
"the",
"timer",
"was",
"not",
"created",
"this",
"call",
"will",
"create",
"it",
".",
"The",
"channelStore",
"s",
"mutex",
"must",
"be",
"held",
"on",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L473-L486 | train |
nats-io/nats-streaming-server | server/server.go | pubMsgToMsgProto | func (c *channel) pubMsgToMsgProto(pm *pb.PubMsg, seq uint64) *pb.MsgProto {
m := &pb.MsgProto{
Sequence: seq,
Subject: pm.Subject,
Reply: pm.Reply,
Data: pm.Data,
Timestamp: time.Now().UnixNano(),
}
if c.lTimestamp > 0 && m.Timestamp < c.lTimestamp {
m.Timestamp = c.lTimestamp
}
c.lTimestamp = m.Timestamp
return m
} | go | func (c *channel) pubMsgToMsgProto(pm *pb.PubMsg, seq uint64) *pb.MsgProto {
m := &pb.MsgProto{
Sequence: seq,
Subject: pm.Subject,
Reply: pm.Reply,
Data: pm.Data,
Timestamp: time.Now().UnixNano(),
}
if c.lTimestamp > 0 && m.Timestamp < c.lTimestamp {
m.Timestamp = c.lTimestamp
}
c.lTimestamp = m.Timestamp
return m
} | [
"func",
"(",
"c",
"*",
"channel",
")",
"pubMsgToMsgProto",
"(",
"pm",
"*",
"pb",
".",
"PubMsg",
",",
"seq",
"uint64",
")",
"*",
"pb",
".",
"MsgProto",
"{",
"m",
":=",
"&",
"pb",
".",
"MsgProto",
"{",
"Sequence",
":",
"seq",
",",
"Subject",
":",
"pm",
".",
"Subject",
",",
"Reply",
":",
"pm",
".",
"Reply",
",",
"Data",
":",
"pm",
".",
"Data",
",",
"Timestamp",
":",
"time",
".",
"Now",
"(",
")",
".",
"UnixNano",
"(",
")",
",",
"}",
"\n",
"if",
"c",
".",
"lTimestamp",
">",
"0",
"&&",
"m",
".",
"Timestamp",
"<",
"c",
".",
"lTimestamp",
"{",
"m",
".",
"Timestamp",
"=",
"c",
".",
"lTimestamp",
"\n",
"}",
"\n",
"c",
".",
"lTimestamp",
"=",
"m",
".",
"Timestamp",
"\n",
"return",
"m",
"\n",
"}"
] | // pubMsgToMsgProto converts a PubMsg to a MsgProto and assigns a timestamp
// which is monotonic with respect to the channel. | [
"pubMsgToMsgProto",
"converts",
"a",
"PubMsg",
"to",
"a",
"MsgProto",
"and",
"assigns",
"a",
"timestamp",
"which",
"is",
"monotonic",
"with",
"respect",
"to",
"the",
"channel",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L490-L503 | train |
nats-io/nats-streaming-server | server/server.go | subToSnapshotRestoreRequests | func (s *StanServer) subToSnapshotRestoreRequests() error {
var (
msgBuf []byte
buf []byte
snapshotRestorePrefix = fmt.Sprintf("%s.%s.", defaultSnapshotPrefix, s.info.ClusterID)
prefixLen = len(snapshotRestorePrefix)
)
sub, err := s.ncsr.Subscribe(snapshotRestorePrefix+">", func(m *nats.Msg) {
if len(m.Data) != 16 {
s.log.Errorf("Invalid snapshot request, data len=%v", len(m.Data))
return
}
cname := m.Subject[prefixLen:]
c := s.channels.getIfNotAboutToBeDeleted(cname)
if c == nil {
s.ncsr.Publish(m.Reply, nil)
return
}
start := util.ByteOrder.Uint64(m.Data[:8])
end := util.ByteOrder.Uint64(m.Data[8:])
for seq := start; seq <= end; seq++ {
msg, err := c.store.Msgs.Lookup(seq)
if err != nil {
s.log.Errorf("Snapshot restore request error for channel %q, error looking up message %v: %v", c.name, seq, err)
return
}
if msg == nil {
// We don't have this message because of channel limits.
// Return nil to caller to signal this state.
buf = nil
} else {
msgBuf = util.EnsureBufBigEnough(msgBuf, msg.Size())
n, err := msg.MarshalTo(msgBuf)
if err != nil {
panic(err)
}
buf = msgBuf[:n]
}
if err := s.ncsr.Publish(m.Reply, buf); err != nil {
s.log.Errorf("Snapshot restore request error for channel %q, unable to send response for seq %v: %v", c.name, seq, err)
}
if buf == nil {
return
}
select {
case <-s.shutdownCh:
return
default:
}
}
})
if err != nil {
return err
}
sub.SetPendingLimits(-1, -1)
s.snapReqSub = sub
return nil
} | go | func (s *StanServer) subToSnapshotRestoreRequests() error {
var (
msgBuf []byte
buf []byte
snapshotRestorePrefix = fmt.Sprintf("%s.%s.", defaultSnapshotPrefix, s.info.ClusterID)
prefixLen = len(snapshotRestorePrefix)
)
sub, err := s.ncsr.Subscribe(snapshotRestorePrefix+">", func(m *nats.Msg) {
if len(m.Data) != 16 {
s.log.Errorf("Invalid snapshot request, data len=%v", len(m.Data))
return
}
cname := m.Subject[prefixLen:]
c := s.channels.getIfNotAboutToBeDeleted(cname)
if c == nil {
s.ncsr.Publish(m.Reply, nil)
return
}
start := util.ByteOrder.Uint64(m.Data[:8])
end := util.ByteOrder.Uint64(m.Data[8:])
for seq := start; seq <= end; seq++ {
msg, err := c.store.Msgs.Lookup(seq)
if err != nil {
s.log.Errorf("Snapshot restore request error for channel %q, error looking up message %v: %v", c.name, seq, err)
return
}
if msg == nil {
// We don't have this message because of channel limits.
// Return nil to caller to signal this state.
buf = nil
} else {
msgBuf = util.EnsureBufBigEnough(msgBuf, msg.Size())
n, err := msg.MarshalTo(msgBuf)
if err != nil {
panic(err)
}
buf = msgBuf[:n]
}
if err := s.ncsr.Publish(m.Reply, buf); err != nil {
s.log.Errorf("Snapshot restore request error for channel %q, unable to send response for seq %v: %v", c.name, seq, err)
}
if buf == nil {
return
}
select {
case <-s.shutdownCh:
return
default:
}
}
})
if err != nil {
return err
}
sub.SetPendingLimits(-1, -1)
s.snapReqSub = sub
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"subToSnapshotRestoreRequests",
"(",
")",
"error",
"{",
"var",
"(",
"msgBuf",
"[",
"]",
"byte",
"\n",
"buf",
"[",
"]",
"byte",
"\n",
"snapshotRestorePrefix",
"=",
"fmt",
".",
"Sprintf",
"(",
"\"%s.%s.\"",
",",
"defaultSnapshotPrefix",
",",
"s",
".",
"info",
".",
"ClusterID",
")",
"\n",
"prefixLen",
"=",
"len",
"(",
"snapshotRestorePrefix",
")",
"\n",
")",
"\n",
"sub",
",",
"err",
":=",
"s",
".",
"ncsr",
".",
"Subscribe",
"(",
"snapshotRestorePrefix",
"+",
"\">\"",
",",
"func",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"if",
"len",
"(",
"m",
".",
"Data",
")",
"!=",
"16",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Invalid snapshot request, data len=%v\"",
",",
"len",
"(",
"m",
".",
"Data",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"cname",
":=",
"m",
".",
"Subject",
"[",
"prefixLen",
":",
"]",
"\n",
"c",
":=",
"s",
".",
"channels",
".",
"getIfNotAboutToBeDeleted",
"(",
"cname",
")",
"\n",
"if",
"c",
"==",
"nil",
"{",
"s",
".",
"ncsr",
".",
"Publish",
"(",
"m",
".",
"Reply",
",",
"nil",
")",
"\n",
"return",
"\n",
"}",
"\n",
"start",
":=",
"util",
".",
"ByteOrder",
".",
"Uint64",
"(",
"m",
".",
"Data",
"[",
":",
"8",
"]",
")",
"\n",
"end",
":=",
"util",
".",
"ByteOrder",
".",
"Uint64",
"(",
"m",
".",
"Data",
"[",
"8",
":",
"]",
")",
"\n",
"for",
"seq",
":=",
"start",
";",
"seq",
"<=",
"end",
";",
"seq",
"++",
"{",
"msg",
",",
"err",
":=",
"c",
".",
"store",
".",
"Msgs",
".",
"Lookup",
"(",
"seq",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Snapshot restore request error for channel %q, error looking up message %v: %v\"",
",",
"c",
".",
"name",
",",
"seq",
",",
"err",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"msg",
"==",
"nil",
"{",
"buf",
"=",
"nil",
"\n",
"}",
"else",
"{",
"msgBuf",
"=",
"util",
".",
"EnsureBufBigEnough",
"(",
"msgBuf",
",",
"msg",
".",
"Size",
"(",
")",
")",
"\n",
"n",
",",
"err",
":=",
"msg",
".",
"MarshalTo",
"(",
"msgBuf",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"buf",
"=",
"msgBuf",
"[",
":",
"n",
"]",
"\n",
"}",
"\n",
"if",
"err",
":=",
"s",
".",
"ncsr",
".",
"Publish",
"(",
"m",
".",
"Reply",
",",
"buf",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Snapshot restore request error for channel %q, unable to send response for seq %v: %v\"",
",",
"c",
".",
"name",
",",
"seq",
",",
"err",
")",
"\n",
"}",
"\n",
"if",
"buf",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"select",
"{",
"case",
"<-",
"s",
".",
"shutdownCh",
":",
"return",
"\n",
"default",
":",
"}",
"\n",
"}",
"\n",
"}",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"sub",
".",
"SetPendingLimits",
"(",
"-",
"1",
",",
"-",
"1",
")",
"\n",
"s",
".",
"snapReqSub",
"=",
"sub",
"\n",
"return",
"nil",
"\n",
"}"
] | // Sets a subscription that will handle snapshot restore requests from followers. | [
"Sets",
"a",
"subscription",
"that",
"will",
"handle",
"snapshot",
"restore",
"requests",
"from",
"followers",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L506-L564 | train |
nats-io/nats-streaming-server | server/server.go | lookupOrCreateChannel | func (s *StanServer) lookupOrCreateChannel(name string) (*channel, error) {
cs := s.channels
cs.RLock()
c := cs.channels[name]
if c != nil {
if c.activity != nil && c.activity.deleteInProgress {
cs.RUnlock()
return nil, ErrChanDelInProgress
}
cs.RUnlock()
return c, nil
}
cs.RUnlock()
return cs.createChannel(s, name)
} | go | func (s *StanServer) lookupOrCreateChannel(name string) (*channel, error) {
cs := s.channels
cs.RLock()
c := cs.channels[name]
if c != nil {
if c.activity != nil && c.activity.deleteInProgress {
cs.RUnlock()
return nil, ErrChanDelInProgress
}
cs.RUnlock()
return c, nil
}
cs.RUnlock()
return cs.createChannel(s, name)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"lookupOrCreateChannel",
"(",
"name",
"string",
")",
"(",
"*",
"channel",
",",
"error",
")",
"{",
"cs",
":=",
"s",
".",
"channels",
"\n",
"cs",
".",
"RLock",
"(",
")",
"\n",
"c",
":=",
"cs",
".",
"channels",
"[",
"name",
"]",
"\n",
"if",
"c",
"!=",
"nil",
"{",
"if",
"c",
".",
"activity",
"!=",
"nil",
"&&",
"c",
".",
"activity",
".",
"deleteInProgress",
"{",
"cs",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"nil",
",",
"ErrChanDelInProgress",
"\n",
"}",
"\n",
"cs",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"c",
",",
"nil",
"\n",
"}",
"\n",
"cs",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"cs",
".",
"createChannel",
"(",
"s",
",",
"name",
")",
"\n",
"}"
] | // Looks up, or create a new channel if it does not exist | [
"Looks",
"up",
"or",
"create",
"a",
"new",
"channel",
"if",
"it",
"does",
"not",
"exist"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L746-L760 | train |
nats-io/nats-streaming-server | server/server.go | createSubStore | func (s *StanServer) createSubStore() *subStore {
subs := &subStore{
psubs: make([]*subState, 0, 4),
qsubs: make(map[string]*queueState),
durables: make(map[string]*subState),
acks: make(map[string]*subState),
stan: s,
}
return subs
} | go | func (s *StanServer) createSubStore() *subStore {
subs := &subStore{
psubs: make([]*subState, 0, 4),
qsubs: make(map[string]*queueState),
durables: make(map[string]*subState),
acks: make(map[string]*subState),
stan: s,
}
return subs
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"createSubStore",
"(",
")",
"*",
"subStore",
"{",
"subs",
":=",
"&",
"subStore",
"{",
"psubs",
":",
"make",
"(",
"[",
"]",
"*",
"subState",
",",
"0",
",",
"4",
")",
",",
"qsubs",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"*",
"queueState",
")",
",",
"durables",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"*",
"subState",
")",
",",
"acks",
":",
"make",
"(",
"map",
"[",
"string",
"]",
"*",
"subState",
")",
",",
"stan",
":",
"s",
",",
"}",
"\n",
"return",
"subs",
"\n",
"}"
] | // createSubStore creates a new instance of `subStore`. | [
"createSubStore",
"creates",
"a",
"new",
"instance",
"of",
"subStore",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L788-L797 | train |
nats-io/nats-streaming-server | server/server.go | Store | func (ss *subStore) Store(sub *subState) error {
if sub == nil {
return nil
}
// Adds to storage.
// Use sub lock to avoid race with waitForAcks in some tests
sub.Lock()
err := sub.store.CreateSub(&sub.SubState)
sub.Unlock()
if err == nil {
err = sub.store.Flush()
}
if err != nil {
ss.stan.log.Errorf("Unable to store subscription [%v:%v] on [%s]: %v", sub.ClientID, sub.Inbox, sub.subject, err)
return err
}
ss.Lock()
ss.updateState(sub)
ss.Unlock()
return nil
} | go | func (ss *subStore) Store(sub *subState) error {
if sub == nil {
return nil
}
// Adds to storage.
// Use sub lock to avoid race with waitForAcks in some tests
sub.Lock()
err := sub.store.CreateSub(&sub.SubState)
sub.Unlock()
if err == nil {
err = sub.store.Flush()
}
if err != nil {
ss.stan.log.Errorf("Unable to store subscription [%v:%v] on [%s]: %v", sub.ClientID, sub.Inbox, sub.subject, err)
return err
}
ss.Lock()
ss.updateState(sub)
ss.Unlock()
return nil
} | [
"func",
"(",
"ss",
"*",
"subStore",
")",
"Store",
"(",
"sub",
"*",
"subState",
")",
"error",
"{",
"if",
"sub",
"==",
"nil",
"{",
"return",
"nil",
"\n",
"}",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"err",
":=",
"sub",
".",
"store",
".",
"CreateSub",
"(",
"&",
"sub",
".",
"SubState",
")",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"if",
"err",
"==",
"nil",
"{",
"err",
"=",
"sub",
".",
"store",
".",
"Flush",
"(",
")",
"\n",
"}",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"ss",
".",
"stan",
".",
"log",
".",
"Errorf",
"(",
"\"Unable to store subscription [%v:%v] on [%s]: %v\"",
",",
"sub",
".",
"ClientID",
",",
"sub",
".",
"Inbox",
",",
"sub",
".",
"subject",
",",
"err",
")",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"ss",
".",
"Lock",
"(",
")",
"\n",
"ss",
".",
"updateState",
"(",
"sub",
")",
"\n",
"ss",
".",
"Unlock",
"(",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // Store adds this subscription to the server's `subStore` and also in storage | [
"Store",
"adds",
"this",
"subscription",
"to",
"the",
"server",
"s",
"subStore",
"and",
"also",
"in",
"storage"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L800-L823 | train |
nats-io/nats-streaming-server | server/server.go | hasActiveSubs | func (ss *subStore) hasActiveSubs() bool {
ss.RLock()
defer ss.RUnlock()
if len(ss.psubs) > 0 {
return true
}
for _, qsub := range ss.qsubs {
// For a durable queue group, when the group is offline,
// qsub.shadow is not nil, but the qsub.subs array should be
// empty.
if len(qsub.subs) > 0 {
return true
}
}
return false
} | go | func (ss *subStore) hasActiveSubs() bool {
ss.RLock()
defer ss.RUnlock()
if len(ss.psubs) > 0 {
return true
}
for _, qsub := range ss.qsubs {
// For a durable queue group, when the group is offline,
// qsub.shadow is not nil, but the qsub.subs array should be
// empty.
if len(qsub.subs) > 0 {
return true
}
}
return false
} | [
"func",
"(",
"ss",
"*",
"subStore",
")",
"hasActiveSubs",
"(",
")",
"bool",
"{",
"ss",
".",
"RLock",
"(",
")",
"\n",
"defer",
"ss",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"len",
"(",
"ss",
".",
"psubs",
")",
">",
"0",
"{",
"return",
"true",
"\n",
"}",
"\n",
"for",
"_",
",",
"qsub",
":=",
"range",
"ss",
".",
"qsubs",
"{",
"if",
"len",
"(",
"qsub",
".",
"subs",
")",
">",
"0",
"{",
"return",
"true",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"false",
"\n",
"}"
] | // hasSubs returns true if there is any active subscription for this subStore.
// That is, offline durable subscriptions are ignored. | [
"hasSubs",
"returns",
"true",
"if",
"there",
"is",
"any",
"active",
"subscription",
"for",
"this",
"subStore",
".",
"That",
"is",
"offline",
"durable",
"subscriptions",
"are",
"ignored",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L907-L922 | train |
nats-io/nats-streaming-server | server/server.go | LookupByDurable | func (ss *subStore) LookupByDurable(durableName string) *subState {
ss.RLock()
sub := ss.durables[durableName]
ss.RUnlock()
return sub
} | go | func (ss *subStore) LookupByDurable(durableName string) *subState {
ss.RLock()
sub := ss.durables[durableName]
ss.RUnlock()
return sub
} | [
"func",
"(",
"ss",
"*",
"subStore",
")",
"LookupByDurable",
"(",
"durableName",
"string",
")",
"*",
"subState",
"{",
"ss",
".",
"RLock",
"(",
")",
"\n",
"sub",
":=",
"ss",
".",
"durables",
"[",
"durableName",
"]",
"\n",
"ss",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"sub",
"\n",
"}"
] | // Lookup by durable name. | [
"Lookup",
"by",
"durable",
"name",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1138-L1143 | train |
nats-io/nats-streaming-server | server/server.go | LookupByAckInbox | func (ss *subStore) LookupByAckInbox(ackInbox string) *subState {
ss.RLock()
sub := ss.acks[ackInbox]
ss.RUnlock()
return sub
} | go | func (ss *subStore) LookupByAckInbox(ackInbox string) *subState {
ss.RLock()
sub := ss.acks[ackInbox]
ss.RUnlock()
return sub
} | [
"func",
"(",
"ss",
"*",
"subStore",
")",
"LookupByAckInbox",
"(",
"ackInbox",
"string",
")",
"*",
"subState",
"{",
"ss",
".",
"RLock",
"(",
")",
"\n",
"sub",
":=",
"ss",
".",
"acks",
"[",
"ackInbox",
"]",
"\n",
"ss",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"sub",
"\n",
"}"
] | // Lookup by ackInbox name. | [
"Lookup",
"by",
"ackInbox",
"name",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1146-L1151 | train |
nats-io/nats-streaming-server | server/server.go | Clone | func (o *Options) Clone() *Options {
// A simple copy covers pretty much everything
clone := *o
// But we have the problem of the PerChannel map that needs
// to be copied.
clone.PerChannel = (&o.StoreLimits).ClonePerChannelMap()
// Make a copy of the clustering peers
if len(o.Clustering.Peers) > 0 {
clone.Clustering.Peers = make([]string, 0, len(o.Clustering.Peers))
clone.Clustering.Peers = append(clone.Clustering.Peers, o.Clustering.Peers...)
}
return &clone
} | go | func (o *Options) Clone() *Options {
// A simple copy covers pretty much everything
clone := *o
// But we have the problem of the PerChannel map that needs
// to be copied.
clone.PerChannel = (&o.StoreLimits).ClonePerChannelMap()
// Make a copy of the clustering peers
if len(o.Clustering.Peers) > 0 {
clone.Clustering.Peers = make([]string, 0, len(o.Clustering.Peers))
clone.Clustering.Peers = append(clone.Clustering.Peers, o.Clustering.Peers...)
}
return &clone
} | [
"func",
"(",
"o",
"*",
"Options",
")",
"Clone",
"(",
")",
"*",
"Options",
"{",
"clone",
":=",
"*",
"o",
"\n",
"clone",
".",
"PerChannel",
"=",
"(",
"&",
"o",
".",
"StoreLimits",
")",
".",
"ClonePerChannelMap",
"(",
")",
"\n",
"if",
"len",
"(",
"o",
".",
"Clustering",
".",
"Peers",
")",
">",
"0",
"{",
"clone",
".",
"Clustering",
".",
"Peers",
"=",
"make",
"(",
"[",
"]",
"string",
",",
"0",
",",
"len",
"(",
"o",
".",
"Clustering",
".",
"Peers",
")",
")",
"\n",
"clone",
".",
"Clustering",
".",
"Peers",
"=",
"append",
"(",
"clone",
".",
"Clustering",
".",
"Peers",
",",
"o",
".",
"Clustering",
".",
"Peers",
"...",
")",
"\n",
"}",
"\n",
"return",
"&",
"clone",
"\n",
"}"
] | // Clone returns a deep copy of the Options object. | [
"Clone",
"returns",
"a",
"deep",
"copy",
"of",
"the",
"Options",
"object",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1188-L1200 | train |
nats-io/nats-streaming-server | server/server.go | GetDefaultOptions | func GetDefaultOptions() (o *Options) {
opts := defaultOptions
opts.StoreLimits = stores.DefaultStoreLimits
return &opts
} | go | func GetDefaultOptions() (o *Options) {
opts := defaultOptions
opts.StoreLimits = stores.DefaultStoreLimits
return &opts
} | [
"func",
"GetDefaultOptions",
"(",
")",
"(",
"o",
"*",
"Options",
")",
"{",
"opts",
":=",
"defaultOptions",
"\n",
"opts",
".",
"StoreLimits",
"=",
"stores",
".",
"DefaultStoreLimits",
"\n",
"return",
"&",
"opts",
"\n",
"}"
] | // GetDefaultOptions returns default options for the NATS Streaming Server | [
"GetDefaultOptions",
"returns",
"default",
"options",
"for",
"the",
"NATS",
"Streaming",
"Server"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1217-L1221 | train |
nats-io/nats-streaming-server | server/server.go | RunServer | func RunServer(ID string) (*StanServer, error) {
sOpts := GetDefaultOptions()
sOpts.ID = ID
nOpts := DefaultNatsServerOptions
return RunServerWithOpts(sOpts, &nOpts)
} | go | func RunServer(ID string) (*StanServer, error) {
sOpts := GetDefaultOptions()
sOpts.ID = ID
nOpts := DefaultNatsServerOptions
return RunServerWithOpts(sOpts, &nOpts)
} | [
"func",
"RunServer",
"(",
"ID",
"string",
")",
"(",
"*",
"StanServer",
",",
"error",
")",
"{",
"sOpts",
":=",
"GetDefaultOptions",
"(",
")",
"\n",
"sOpts",
".",
"ID",
"=",
"ID",
"\n",
"nOpts",
":=",
"DefaultNatsServerOptions",
"\n",
"return",
"RunServerWithOpts",
"(",
"sOpts",
",",
"&",
"nOpts",
")",
"\n",
"}"
] | // RunServer will startup an embedded NATS Streaming Server and a nats-server to support it. | [
"RunServer",
"will",
"startup",
"an",
"embedded",
"NATS",
"Streaming",
"Server",
"and",
"a",
"nats",
"-",
"server",
"to",
"support",
"it",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1411-L1416 | train |
nats-io/nats-streaming-server | server/server.go | startRaftNode | func (s *StanServer) startRaftNode(hasStreamingState bool) error {
if err := s.createServerRaftNode(hasStreamingState); err != nil {
return err
}
node := s.raft
leaderWait := make(chan struct{}, 1)
leaderReady := func() {
select {
case leaderWait <- struct{}{}:
default:
}
}
if node.State() != raft.Leader {
leaderReady()
}
s.wg.Add(1)
go func() {
defer s.wg.Done()
for {
select {
case isLeader := <-node.notifyCh:
if isLeader {
err := s.leadershipAcquired()
leaderReady()
if err != nil {
s.log.Errorf("Error on leadership acquired: %v", err)
switch {
case err == raft.ErrRaftShutdown:
// Node shutdown, just return.
return
case err == raft.ErrLeadershipLost:
case err == raft.ErrNotLeader:
// Node lost leadership, continue loop.
continue
default:
// TODO: probably step down as leader?
panic(err)
}
}
} else {
s.leadershipLost()
}
case <-s.shutdownCh:
// Signal channel here to handle edge case where we might
// otherwise block forever on the channel when shutdown.
leaderReady()
return
}
}
}()
<-leaderWait
return nil
} | go | func (s *StanServer) startRaftNode(hasStreamingState bool) error {
if err := s.createServerRaftNode(hasStreamingState); err != nil {
return err
}
node := s.raft
leaderWait := make(chan struct{}, 1)
leaderReady := func() {
select {
case leaderWait <- struct{}{}:
default:
}
}
if node.State() != raft.Leader {
leaderReady()
}
s.wg.Add(1)
go func() {
defer s.wg.Done()
for {
select {
case isLeader := <-node.notifyCh:
if isLeader {
err := s.leadershipAcquired()
leaderReady()
if err != nil {
s.log.Errorf("Error on leadership acquired: %v", err)
switch {
case err == raft.ErrRaftShutdown:
// Node shutdown, just return.
return
case err == raft.ErrLeadershipLost:
case err == raft.ErrNotLeader:
// Node lost leadership, continue loop.
continue
default:
// TODO: probably step down as leader?
panic(err)
}
}
} else {
s.leadershipLost()
}
case <-s.shutdownCh:
// Signal channel here to handle edge case where we might
// otherwise block forever on the channel when shutdown.
leaderReady()
return
}
}
}()
<-leaderWait
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"startRaftNode",
"(",
"hasStreamingState",
"bool",
")",
"error",
"{",
"if",
"err",
":=",
"s",
".",
"createServerRaftNode",
"(",
"hasStreamingState",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"node",
":=",
"s",
".",
"raft",
"\n",
"leaderWait",
":=",
"make",
"(",
"chan",
"struct",
"{",
"}",
",",
"1",
")",
"\n",
"leaderReady",
":=",
"func",
"(",
")",
"{",
"select",
"{",
"case",
"leaderWait",
"<-",
"struct",
"{",
"}",
"{",
"}",
":",
"default",
":",
"}",
"\n",
"}",
"\n",
"if",
"node",
".",
"State",
"(",
")",
"!=",
"raft",
".",
"Leader",
"{",
"leaderReady",
"(",
")",
"\n",
"}",
"\n",
"s",
".",
"wg",
".",
"Add",
"(",
"1",
")",
"\n",
"go",
"func",
"(",
")",
"{",
"defer",
"s",
".",
"wg",
".",
"Done",
"(",
")",
"\n",
"for",
"{",
"select",
"{",
"case",
"isLeader",
":=",
"<-",
"node",
".",
"notifyCh",
":",
"if",
"isLeader",
"{",
"err",
":=",
"s",
".",
"leadershipAcquired",
"(",
")",
"\n",
"leaderReady",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Error on leadership acquired: %v\"",
",",
"err",
")",
"\n",
"switch",
"{",
"case",
"err",
"==",
"raft",
".",
"ErrRaftShutdown",
":",
"return",
"\n",
"case",
"err",
"==",
"raft",
".",
"ErrLeadershipLost",
":",
"case",
"err",
"==",
"raft",
".",
"ErrNotLeader",
":",
"continue",
"\n",
"default",
":",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"else",
"{",
"s",
".",
"leadershipLost",
"(",
")",
"\n",
"}",
"\n",
"case",
"<-",
"s",
".",
"shutdownCh",
":",
"leaderReady",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"(",
")",
"\n",
"<-",
"leaderWait",
"\n",
"return",
"nil",
"\n",
"}"
] | // startRaftNode creates and starts the Raft group.
// This should only be called if the server is running in clustered mode. | [
"startRaftNode",
"creates",
"and",
"starts",
"the",
"Raft",
"group",
".",
"This",
"should",
"only",
"be",
"called",
"if",
"the",
"server",
"is",
"running",
"in",
"clustered",
"mode",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1878-L1933 | train |
nats-io/nats-streaming-server | server/server.go | leadershipAcquired | func (s *StanServer) leadershipAcquired() error {
s.log.Noticef("server became leader, performing leader promotion actions")
defer s.log.Noticef("finished leader promotion actions")
// If we were not the leader, there should be nothing in the ioChannel
// (processing of client publishes). However, since a node could go
// from leader to follower to leader again, let's make sure that we
// synchronize with the ioLoop before we touch the channels' nextSequence.
sc, sdc := s.sendSynchronziationRequest()
// Wait for the ioLoop to reach that special iopm and notifies us (or
// give up if server is shutting down).
select {
case <-sc:
case <-s.ioChannelQuit:
close(sdc)
return nil
}
// Then, we will notify it back to unlock it when were are done here.
defer close(sdc)
// Start listening to snapshot restore requests here...
if err := s.subToSnapshotRestoreRequests(); err != nil {
return err
}
// Use a barrier to ensure all preceding operations are applied to the FSM
if err := s.raft.Barrier(0).Error(); err != nil {
return err
}
channels := s.channels.getAll()
for _, c := range channels {
// Update next sequence to assign.
lastSequence, err := c.store.Msgs.LastSequence()
if err != nil {
return err
}
// It is possible that nextSequence be set when restoring
// from snapshots. Set it to the max value.
if c.nextSequence <= lastSequence {
c.nextSequence = lastSequence + 1
}
}
// Setup client heartbeats and subscribe to acks for each sub.
for _, client := range s.clients.getClients() {
client.RLock()
cID := client.info.ID
for _, sub := range client.subs {
if err := sub.startAckSub(s.nca, s.processAckMsg); err != nil {
client.RUnlock()
return err
}
}
client.RUnlock()
s.clients.setClientHB(cID, s.opts.ClientHBInterval, func() {
s.checkClientHealth(cID)
})
}
// Start the internal subscriptions so we receive protocols from clients.
if err := s.initInternalSubs(true); err != nil {
return err
}
var allSubs []*subState
for _, c := range channels {
subs := c.ss.getAllSubs()
if len(subs) > 0 {
allSubs = append(allSubs, subs...)
}
if c.activity != nil {
s.channels.maybeStartChannelDeleteTimer(c.name, c)
}
}
if len(allSubs) > 0 {
s.startGoRoutine(func() {
s.performRedeliveryOnStartup(allSubs)
s.wg.Done()
})
}
if err := s.nc.Flush(); err != nil {
return err
}
if err := s.nca.Flush(); err != nil {
return err
}
atomic.StoreInt64(&s.raft.leader, 1)
return nil
} | go | func (s *StanServer) leadershipAcquired() error {
s.log.Noticef("server became leader, performing leader promotion actions")
defer s.log.Noticef("finished leader promotion actions")
// If we were not the leader, there should be nothing in the ioChannel
// (processing of client publishes). However, since a node could go
// from leader to follower to leader again, let's make sure that we
// synchronize with the ioLoop before we touch the channels' nextSequence.
sc, sdc := s.sendSynchronziationRequest()
// Wait for the ioLoop to reach that special iopm and notifies us (or
// give up if server is shutting down).
select {
case <-sc:
case <-s.ioChannelQuit:
close(sdc)
return nil
}
// Then, we will notify it back to unlock it when were are done here.
defer close(sdc)
// Start listening to snapshot restore requests here...
if err := s.subToSnapshotRestoreRequests(); err != nil {
return err
}
// Use a barrier to ensure all preceding operations are applied to the FSM
if err := s.raft.Barrier(0).Error(); err != nil {
return err
}
channels := s.channels.getAll()
for _, c := range channels {
// Update next sequence to assign.
lastSequence, err := c.store.Msgs.LastSequence()
if err != nil {
return err
}
// It is possible that nextSequence be set when restoring
// from snapshots. Set it to the max value.
if c.nextSequence <= lastSequence {
c.nextSequence = lastSequence + 1
}
}
// Setup client heartbeats and subscribe to acks for each sub.
for _, client := range s.clients.getClients() {
client.RLock()
cID := client.info.ID
for _, sub := range client.subs {
if err := sub.startAckSub(s.nca, s.processAckMsg); err != nil {
client.RUnlock()
return err
}
}
client.RUnlock()
s.clients.setClientHB(cID, s.opts.ClientHBInterval, func() {
s.checkClientHealth(cID)
})
}
// Start the internal subscriptions so we receive protocols from clients.
if err := s.initInternalSubs(true); err != nil {
return err
}
var allSubs []*subState
for _, c := range channels {
subs := c.ss.getAllSubs()
if len(subs) > 0 {
allSubs = append(allSubs, subs...)
}
if c.activity != nil {
s.channels.maybeStartChannelDeleteTimer(c.name, c)
}
}
if len(allSubs) > 0 {
s.startGoRoutine(func() {
s.performRedeliveryOnStartup(allSubs)
s.wg.Done()
})
}
if err := s.nc.Flush(); err != nil {
return err
}
if err := s.nca.Flush(); err != nil {
return err
}
atomic.StoreInt64(&s.raft.leader, 1)
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"leadershipAcquired",
"(",
")",
"error",
"{",
"s",
".",
"log",
".",
"Noticef",
"(",
"\"server became leader, performing leader promotion actions\"",
")",
"\n",
"defer",
"s",
".",
"log",
".",
"Noticef",
"(",
"\"finished leader promotion actions\"",
")",
"\n",
"sc",
",",
"sdc",
":=",
"s",
".",
"sendSynchronziationRequest",
"(",
")",
"\n",
"select",
"{",
"case",
"<-",
"sc",
":",
"case",
"<-",
"s",
".",
"ioChannelQuit",
":",
"close",
"(",
"sdc",
")",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"defer",
"close",
"(",
"sdc",
")",
"\n",
"if",
"err",
":=",
"s",
".",
"subToSnapshotRestoreRequests",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"err",
":=",
"s",
".",
"raft",
".",
"Barrier",
"(",
"0",
")",
".",
"Error",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"channels",
":=",
"s",
".",
"channels",
".",
"getAll",
"(",
")",
"\n",
"for",
"_",
",",
"c",
":=",
"range",
"channels",
"{",
"lastSequence",
",",
"err",
":=",
"c",
".",
"store",
".",
"Msgs",
".",
"LastSequence",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"c",
".",
"nextSequence",
"<=",
"lastSequence",
"{",
"c",
".",
"nextSequence",
"=",
"lastSequence",
"+",
"1",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"_",
",",
"client",
":=",
"range",
"s",
".",
"clients",
".",
"getClients",
"(",
")",
"{",
"client",
".",
"RLock",
"(",
")",
"\n",
"cID",
":=",
"client",
".",
"info",
".",
"ID",
"\n",
"for",
"_",
",",
"sub",
":=",
"range",
"client",
".",
"subs",
"{",
"if",
"err",
":=",
"sub",
".",
"startAckSub",
"(",
"s",
".",
"nca",
",",
"s",
".",
"processAckMsg",
")",
";",
"err",
"!=",
"nil",
"{",
"client",
".",
"RUnlock",
"(",
")",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n",
"s",
".",
"clients",
".",
"setClientHB",
"(",
"cID",
",",
"s",
".",
"opts",
".",
"ClientHBInterval",
",",
"func",
"(",
")",
"{",
"s",
".",
"checkClientHealth",
"(",
"cID",
")",
"\n",
"}",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"s",
".",
"initInternalSubs",
"(",
"true",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"var",
"allSubs",
"[",
"]",
"*",
"subState",
"\n",
"for",
"_",
",",
"c",
":=",
"range",
"channels",
"{",
"subs",
":=",
"c",
".",
"ss",
".",
"getAllSubs",
"(",
")",
"\n",
"if",
"len",
"(",
"subs",
")",
">",
"0",
"{",
"allSubs",
"=",
"append",
"(",
"allSubs",
",",
"subs",
"...",
")",
"\n",
"}",
"\n",
"if",
"c",
".",
"activity",
"!=",
"nil",
"{",
"s",
".",
"channels",
".",
"maybeStartChannelDeleteTimer",
"(",
"c",
".",
"name",
",",
"c",
")",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"len",
"(",
"allSubs",
")",
">",
"0",
"{",
"s",
".",
"startGoRoutine",
"(",
"func",
"(",
")",
"{",
"s",
".",
"performRedeliveryOnStartup",
"(",
"allSubs",
")",
"\n",
"s",
".",
"wg",
".",
"Done",
"(",
")",
"\n",
"}",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"s",
".",
"nc",
".",
"Flush",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"if",
"err",
":=",
"s",
".",
"nca",
".",
"Flush",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"atomic",
".",
"StoreInt64",
"(",
"&",
"s",
".",
"raft",
".",
"leader",
",",
"1",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // leadershipAcquired should be called when this node is elected leader.
// This should only be called when the server is running in clustered mode. | [
"leadershipAcquired",
"should",
"be",
"called",
"when",
"this",
"node",
"is",
"elected",
"leader",
".",
"This",
"should",
"only",
"be",
"called",
"when",
"the",
"server",
"is",
"running",
"in",
"clustered",
"mode",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L1945-L2037 | train |
nats-io/nats-streaming-server | server/server.go | leadershipLost | func (s *StanServer) leadershipLost() {
s.log.Noticef("server lost leadership, performing leader stepdown actions")
defer s.log.Noticef("finished leader stepdown actions")
// Cancel outstanding client heartbeats. We aren't concerned about races
// where new clients might be connecting because at this point, the server
// will no longer accept new client connections, but even if it did, the
// heartbeat would be automatically removed when it fires.
for _, client := range s.clients.getClients() {
s.clients.removeClientHB(client)
// Ensure subs ackTimer is stopped
subs := client.getSubsCopy()
for _, sub := range subs {
sub.Lock()
sub.stopAckSub()
sub.clearAckTimer()
s.clearSentAndAck(sub)
sub.Unlock()
}
}
// Unsubscribe to the snapshot request per channel since we are no longer
// leader.
for _, c := range s.channels.getAll() {
if c.activity != nil {
s.channels.stopDeleteTimer(c)
}
}
// Only the leader will receive protocols from clients
s.unsubscribeInternalSubs()
atomic.StoreInt64(&s.raft.leader, 0)
} | go | func (s *StanServer) leadershipLost() {
s.log.Noticef("server lost leadership, performing leader stepdown actions")
defer s.log.Noticef("finished leader stepdown actions")
// Cancel outstanding client heartbeats. We aren't concerned about races
// where new clients might be connecting because at this point, the server
// will no longer accept new client connections, but even if it did, the
// heartbeat would be automatically removed when it fires.
for _, client := range s.clients.getClients() {
s.clients.removeClientHB(client)
// Ensure subs ackTimer is stopped
subs := client.getSubsCopy()
for _, sub := range subs {
sub.Lock()
sub.stopAckSub()
sub.clearAckTimer()
s.clearSentAndAck(sub)
sub.Unlock()
}
}
// Unsubscribe to the snapshot request per channel since we are no longer
// leader.
for _, c := range s.channels.getAll() {
if c.activity != nil {
s.channels.stopDeleteTimer(c)
}
}
// Only the leader will receive protocols from clients
s.unsubscribeInternalSubs()
atomic.StoreInt64(&s.raft.leader, 0)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"leadershipLost",
"(",
")",
"{",
"s",
".",
"log",
".",
"Noticef",
"(",
"\"server lost leadership, performing leader stepdown actions\"",
")",
"\n",
"defer",
"s",
".",
"log",
".",
"Noticef",
"(",
"\"finished leader stepdown actions\"",
")",
"\n",
"for",
"_",
",",
"client",
":=",
"range",
"s",
".",
"clients",
".",
"getClients",
"(",
")",
"{",
"s",
".",
"clients",
".",
"removeClientHB",
"(",
"client",
")",
"\n",
"subs",
":=",
"client",
".",
"getSubsCopy",
"(",
")",
"\n",
"for",
"_",
",",
"sub",
":=",
"range",
"subs",
"{",
"sub",
".",
"Lock",
"(",
")",
"\n",
"sub",
".",
"stopAckSub",
"(",
")",
"\n",
"sub",
".",
"clearAckTimer",
"(",
")",
"\n",
"s",
".",
"clearSentAndAck",
"(",
"sub",
")",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"_",
",",
"c",
":=",
"range",
"s",
".",
"channels",
".",
"getAll",
"(",
")",
"{",
"if",
"c",
".",
"activity",
"!=",
"nil",
"{",
"s",
".",
"channels",
".",
"stopDeleteTimer",
"(",
"c",
")",
"\n",
"}",
"\n",
"}",
"\n",
"s",
".",
"unsubscribeInternalSubs",
"(",
")",
"\n",
"atomic",
".",
"StoreInt64",
"(",
"&",
"s",
".",
"raft",
".",
"leader",
",",
"0",
")",
"\n",
"}"
] | // leadershipLost should be called when this node loses leadership.
// This should only be called when the server is running in clustered mode. | [
"leadershipLost",
"should",
"be",
"called",
"when",
"this",
"node",
"loses",
"leadership",
".",
"This",
"should",
"only",
"be",
"called",
"when",
"the",
"server",
"is",
"running",
"in",
"clustered",
"mode",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2041-L2074 | train |
nats-io/nats-streaming-server | server/server.go | ensureRunningStandAlone | func (s *StanServer) ensureRunningStandAlone() error {
clusterID := s.info.ClusterID
hbInbox := nats.NewInbox()
timeout := time.Millisecond * 250
// We cannot use the client's API here as it will create a dependency
// cycle in the streaming client, so build our request and see if we
// get a response.
req := &pb.ConnectRequest{ClientID: clusterID, HeartbeatInbox: hbInbox}
b, _ := req.Marshal()
reply, err := s.nc.Request(s.info.Discovery, b, timeout)
if err == nats.ErrTimeout {
s.log.Debugf("Did not detect another server instance")
return nil
}
if err != nil {
return fmt.Errorf("request error detecting another server instance: %v", err)
}
// See if the response is valid and can be unmarshalled.
cr := &pb.ConnectResponse{}
err = cr.Unmarshal(reply.Data)
if err != nil {
// Something other than a compatible streaming server responded.
// This may cause other problems in the long run, so better fail
// the startup early.
return fmt.Errorf("unmarshall error while detecting another server instance: %v", err)
}
// Another streaming server was found, cleanup then return error.
clreq := &pb.CloseRequest{ClientID: clusterID}
b, _ = clreq.Marshal()
s.nc.Request(cr.CloseRequests, b, timeout)
return fmt.Errorf("discovered another streaming server with cluster ID %q", clusterID)
} | go | func (s *StanServer) ensureRunningStandAlone() error {
clusterID := s.info.ClusterID
hbInbox := nats.NewInbox()
timeout := time.Millisecond * 250
// We cannot use the client's API here as it will create a dependency
// cycle in the streaming client, so build our request and see if we
// get a response.
req := &pb.ConnectRequest{ClientID: clusterID, HeartbeatInbox: hbInbox}
b, _ := req.Marshal()
reply, err := s.nc.Request(s.info.Discovery, b, timeout)
if err == nats.ErrTimeout {
s.log.Debugf("Did not detect another server instance")
return nil
}
if err != nil {
return fmt.Errorf("request error detecting another server instance: %v", err)
}
// See if the response is valid and can be unmarshalled.
cr := &pb.ConnectResponse{}
err = cr.Unmarshal(reply.Data)
if err != nil {
// Something other than a compatible streaming server responded.
// This may cause other problems in the long run, so better fail
// the startup early.
return fmt.Errorf("unmarshall error while detecting another server instance: %v", err)
}
// Another streaming server was found, cleanup then return error.
clreq := &pb.CloseRequest{ClientID: clusterID}
b, _ = clreq.Marshal()
s.nc.Request(cr.CloseRequests, b, timeout)
return fmt.Errorf("discovered another streaming server with cluster ID %q", clusterID)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"ensureRunningStandAlone",
"(",
")",
"error",
"{",
"clusterID",
":=",
"s",
".",
"info",
".",
"ClusterID",
"\n",
"hbInbox",
":=",
"nats",
".",
"NewInbox",
"(",
")",
"\n",
"timeout",
":=",
"time",
".",
"Millisecond",
"*",
"250",
"\n",
"req",
":=",
"&",
"pb",
".",
"ConnectRequest",
"{",
"ClientID",
":",
"clusterID",
",",
"HeartbeatInbox",
":",
"hbInbox",
"}",
"\n",
"b",
",",
"_",
":=",
"req",
".",
"Marshal",
"(",
")",
"\n",
"reply",
",",
"err",
":=",
"s",
".",
"nc",
".",
"Request",
"(",
"s",
".",
"info",
".",
"Discovery",
",",
"b",
",",
"timeout",
")",
"\n",
"if",
"err",
"==",
"nats",
".",
"ErrTimeout",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Did not detect another server instance\"",
")",
"\n",
"return",
"nil",
"\n",
"}",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"request error detecting another server instance: %v\"",
",",
"err",
")",
"\n",
"}",
"\n",
"cr",
":=",
"&",
"pb",
".",
"ConnectResponse",
"{",
"}",
"\n",
"err",
"=",
"cr",
".",
"Unmarshal",
"(",
"reply",
".",
"Data",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"unmarshall error while detecting another server instance: %v\"",
",",
"err",
")",
"\n",
"}",
"\n",
"clreq",
":=",
"&",
"pb",
".",
"CloseRequest",
"{",
"ClientID",
":",
"clusterID",
"}",
"\n",
"b",
",",
"_",
"=",
"clreq",
".",
"Marshal",
"(",
")",
"\n",
"s",
".",
"nc",
".",
"Request",
"(",
"cr",
".",
"CloseRequests",
",",
"b",
",",
"timeout",
")",
"\n",
"return",
"fmt",
".",
"Errorf",
"(",
"\"discovered another streaming server with cluster ID %q\"",
",",
"clusterID",
")",
"\n",
"}"
] | // ensureRunningStandAlone prevents this streaming server from starting
// if another is found using the same cluster ID - a possibility when
// routing is enabled.
// This runs under server's lock so nothing should grab the server lock here. | [
"ensureRunningStandAlone",
"prevents",
"this",
"streaming",
"server",
"from",
"starting",
"if",
"another",
"is",
"found",
"using",
"the",
"same",
"cluster",
"ID",
"-",
"a",
"possibility",
"when",
"routing",
"is",
"enabled",
".",
"This",
"runs",
"under",
"server",
"s",
"lock",
"so",
"nothing",
"should",
"grab",
"the",
"server",
"lock",
"here",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2200-L2232 | train |
nats-io/nats-streaming-server | server/server.go | processRecoveredClients | func (s *StanServer) processRecoveredClients(clients []*stores.Client) {
if !s.isClustered {
s.clients.recoverClients(clients)
}
} | go | func (s *StanServer) processRecoveredClients(clients []*stores.Client) {
if !s.isClustered {
s.clients.recoverClients(clients)
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processRecoveredClients",
"(",
"clients",
"[",
"]",
"*",
"stores",
".",
"Client",
")",
"{",
"if",
"!",
"s",
".",
"isClustered",
"{",
"s",
".",
"clients",
".",
"recoverClients",
"(",
"clients",
")",
"\n",
"}",
"\n",
"}"
] | // Binds server's view of a client with stored Client objects. | [
"Binds",
"server",
"s",
"view",
"of",
"a",
"client",
"with",
"stored",
"Client",
"objects",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2235-L2239 | train |
nats-io/nats-streaming-server | server/server.go | processRecoveredChannels | func (s *StanServer) processRecoveredChannels(channels map[string]*stores.RecoveredChannel) ([]*subState, error) {
allSubs := make([]*subState, 0, 16)
for channelName, recoveredChannel := range channels {
channel, err := s.channels.create(s, channelName, recoveredChannel.Channel)
if err != nil {
return nil, err
}
if !s.isClustered {
// Get the recovered subscriptions for this channel.
for _, recSub := range recoveredChannel.Subscriptions {
sub := s.recoverOneSub(channel, recSub.Sub, recSub.Pending, nil)
if sub != nil {
// Subscribe to subscription ACKs
if err := sub.startAckSub(s.nca, s.processAckMsg); err != nil {
return nil, err
}
allSubs = append(allSubs, sub)
}
}
// Now that we have recovered possible subscriptions for this channel,
// check if we should start the delete timer.
if channel.activity != nil {
s.channels.maybeStartChannelDeleteTimer(channelName, channel)
}
}
}
return allSubs, nil
} | go | func (s *StanServer) processRecoveredChannels(channels map[string]*stores.RecoveredChannel) ([]*subState, error) {
allSubs := make([]*subState, 0, 16)
for channelName, recoveredChannel := range channels {
channel, err := s.channels.create(s, channelName, recoveredChannel.Channel)
if err != nil {
return nil, err
}
if !s.isClustered {
// Get the recovered subscriptions for this channel.
for _, recSub := range recoveredChannel.Subscriptions {
sub := s.recoverOneSub(channel, recSub.Sub, recSub.Pending, nil)
if sub != nil {
// Subscribe to subscription ACKs
if err := sub.startAckSub(s.nca, s.processAckMsg); err != nil {
return nil, err
}
allSubs = append(allSubs, sub)
}
}
// Now that we have recovered possible subscriptions for this channel,
// check if we should start the delete timer.
if channel.activity != nil {
s.channels.maybeStartChannelDeleteTimer(channelName, channel)
}
}
}
return allSubs, nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processRecoveredChannels",
"(",
"channels",
"map",
"[",
"string",
"]",
"*",
"stores",
".",
"RecoveredChannel",
")",
"(",
"[",
"]",
"*",
"subState",
",",
"error",
")",
"{",
"allSubs",
":=",
"make",
"(",
"[",
"]",
"*",
"subState",
",",
"0",
",",
"16",
")",
"\n",
"for",
"channelName",
",",
"recoveredChannel",
":=",
"range",
"channels",
"{",
"channel",
",",
"err",
":=",
"s",
".",
"channels",
".",
"create",
"(",
"s",
",",
"channelName",
",",
"recoveredChannel",
".",
"Channel",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"if",
"!",
"s",
".",
"isClustered",
"{",
"for",
"_",
",",
"recSub",
":=",
"range",
"recoveredChannel",
".",
"Subscriptions",
"{",
"sub",
":=",
"s",
".",
"recoverOneSub",
"(",
"channel",
",",
"recSub",
".",
"Sub",
",",
"recSub",
".",
"Pending",
",",
"nil",
")",
"\n",
"if",
"sub",
"!=",
"nil",
"{",
"if",
"err",
":=",
"sub",
".",
"startAckSub",
"(",
"s",
".",
"nca",
",",
"s",
".",
"processAckMsg",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"allSubs",
"=",
"append",
"(",
"allSubs",
",",
"sub",
")",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"channel",
".",
"activity",
"!=",
"nil",
"{",
"s",
".",
"channels",
".",
"maybeStartChannelDeleteTimer",
"(",
"channelName",
",",
"channel",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"allSubs",
",",
"nil",
"\n",
"}"
] | // Reconstruct the subscription state on restart. | [
"Reconstruct",
"the",
"subscription",
"state",
"on",
"restart",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2242-L2270 | train |
nats-io/nats-streaming-server | server/server.go | performRedeliveryOnStartup | func (s *StanServer) performRedeliveryOnStartup(recoveredSubs []*subState) {
queues := make(map[*queueState]*channel)
for _, sub := range recoveredSubs {
// Ignore subs that did not have any ack pendings on startup.
sub.Lock()
// Consider this subscription ready to receive messages
sub.initialized = true
// If this is a durable and it is offline, then skip the rest.
if sub.isOfflineDurableSubscriber() {
sub.newOnHold = false
sub.Unlock()
continue
}
// Unlock in order to call function below
sub.Unlock()
// Send old messages (lock is acquired in that function)
s.performAckExpirationRedelivery(sub, true)
// Regrab lock
sub.Lock()
// Allow new messages to be delivered
sub.newOnHold = false
subject := sub.subject
qs := sub.qstate
sub.Unlock()
c := s.channels.get(subject)
if c == nil {
continue
}
// Kick delivery of (possible) new messages
if qs != nil {
queues[qs] = c
} else {
s.sendAvailableMessages(c, sub)
}
}
// Kick delivery for queues that had members with newOnHold
for qs, c := range queues {
qs.Lock()
qs.newOnHold = false
qs.Unlock()
s.sendAvailableMessagesToQueue(c, qs)
}
} | go | func (s *StanServer) performRedeliveryOnStartup(recoveredSubs []*subState) {
queues := make(map[*queueState]*channel)
for _, sub := range recoveredSubs {
// Ignore subs that did not have any ack pendings on startup.
sub.Lock()
// Consider this subscription ready to receive messages
sub.initialized = true
// If this is a durable and it is offline, then skip the rest.
if sub.isOfflineDurableSubscriber() {
sub.newOnHold = false
sub.Unlock()
continue
}
// Unlock in order to call function below
sub.Unlock()
// Send old messages (lock is acquired in that function)
s.performAckExpirationRedelivery(sub, true)
// Regrab lock
sub.Lock()
// Allow new messages to be delivered
sub.newOnHold = false
subject := sub.subject
qs := sub.qstate
sub.Unlock()
c := s.channels.get(subject)
if c == nil {
continue
}
// Kick delivery of (possible) new messages
if qs != nil {
queues[qs] = c
} else {
s.sendAvailableMessages(c, sub)
}
}
// Kick delivery for queues that had members with newOnHold
for qs, c := range queues {
qs.Lock()
qs.newOnHold = false
qs.Unlock()
s.sendAvailableMessagesToQueue(c, qs)
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"performRedeliveryOnStartup",
"(",
"recoveredSubs",
"[",
"]",
"*",
"subState",
")",
"{",
"queues",
":=",
"make",
"(",
"map",
"[",
"*",
"queueState",
"]",
"*",
"channel",
")",
"\n",
"for",
"_",
",",
"sub",
":=",
"range",
"recoveredSubs",
"{",
"sub",
".",
"Lock",
"(",
")",
"\n",
"sub",
".",
"initialized",
"=",
"true",
"\n",
"if",
"sub",
".",
"isOfflineDurableSubscriber",
"(",
")",
"{",
"sub",
".",
"newOnHold",
"=",
"false",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"continue",
"\n",
"}",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"s",
".",
"performAckExpirationRedelivery",
"(",
"sub",
",",
"true",
")",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"sub",
".",
"newOnHold",
"=",
"false",
"\n",
"subject",
":=",
"sub",
".",
"subject",
"\n",
"qs",
":=",
"sub",
".",
"qstate",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"c",
":=",
"s",
".",
"channels",
".",
"get",
"(",
"subject",
")",
"\n",
"if",
"c",
"==",
"nil",
"{",
"continue",
"\n",
"}",
"\n",
"if",
"qs",
"!=",
"nil",
"{",
"queues",
"[",
"qs",
"]",
"=",
"c",
"\n",
"}",
"else",
"{",
"s",
".",
"sendAvailableMessages",
"(",
"c",
",",
"sub",
")",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"qs",
",",
"c",
":=",
"range",
"queues",
"{",
"qs",
".",
"Lock",
"(",
")",
"\n",
"qs",
".",
"newOnHold",
"=",
"false",
"\n",
"qs",
".",
"Unlock",
"(",
")",
"\n",
"s",
".",
"sendAvailableMessagesToQueue",
"(",
"c",
",",
"qs",
")",
"\n",
"}",
"\n",
"}"
] | // Redelivers unacknowledged messages, releases the hold for new messages delivery,
// and kicks delivery of available messages. | [
"Redelivers",
"unacknowledged",
"messages",
"releases",
"the",
"hold",
"for",
"new",
"messages",
"delivery",
"and",
"kicks",
"delivery",
"of",
"available",
"messages",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2389-L2432 | train |
nats-io/nats-streaming-server | server/server.go | initSubscriptions | func (s *StanServer) initSubscriptions() error {
// Do not create internal subscriptions in clustered mode,
// the leader will when it gets elected.
if !s.isClustered {
createSubOnClientPublish := true
if s.partitions != nil {
// Receive published messages from clients, but only on the list
// of static channels.
if err := s.partitions.initSubscriptions(); err != nil {
return err
}
// Since we create a subscription per channel, do not create
// the internal subscription on the > wildcard
createSubOnClientPublish = false
}
if err := s.initInternalSubs(createSubOnClientPublish); err != nil {
return err
}
}
s.log.Debugf("Discover subject: %s", s.info.Discovery)
// For partitions, we actually print the list of channels
// in the startup banner, so we don't need to repeat them here.
if s.partitions != nil {
s.log.Debugf("Publish subjects root: %s", s.info.Publish)
} else {
s.log.Debugf("Publish subject: %s.>", s.info.Publish)
}
s.log.Debugf("Subscribe subject: %s", s.info.Subscribe)
s.log.Debugf("Subscription Close subject: %s", s.info.SubClose)
s.log.Debugf("Unsubscribe subject: %s", s.info.Unsubscribe)
s.log.Debugf("Close subject: %s", s.info.Close)
return nil
} | go | func (s *StanServer) initSubscriptions() error {
// Do not create internal subscriptions in clustered mode,
// the leader will when it gets elected.
if !s.isClustered {
createSubOnClientPublish := true
if s.partitions != nil {
// Receive published messages from clients, but only on the list
// of static channels.
if err := s.partitions.initSubscriptions(); err != nil {
return err
}
// Since we create a subscription per channel, do not create
// the internal subscription on the > wildcard
createSubOnClientPublish = false
}
if err := s.initInternalSubs(createSubOnClientPublish); err != nil {
return err
}
}
s.log.Debugf("Discover subject: %s", s.info.Discovery)
// For partitions, we actually print the list of channels
// in the startup banner, so we don't need to repeat them here.
if s.partitions != nil {
s.log.Debugf("Publish subjects root: %s", s.info.Publish)
} else {
s.log.Debugf("Publish subject: %s.>", s.info.Publish)
}
s.log.Debugf("Subscribe subject: %s", s.info.Subscribe)
s.log.Debugf("Subscription Close subject: %s", s.info.SubClose)
s.log.Debugf("Unsubscribe subject: %s", s.info.Unsubscribe)
s.log.Debugf("Close subject: %s", s.info.Close)
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"initSubscriptions",
"(",
")",
"error",
"{",
"if",
"!",
"s",
".",
"isClustered",
"{",
"createSubOnClientPublish",
":=",
"true",
"\n",
"if",
"s",
".",
"partitions",
"!=",
"nil",
"{",
"if",
"err",
":=",
"s",
".",
"partitions",
".",
"initSubscriptions",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"createSubOnClientPublish",
"=",
"false",
"\n",
"}",
"\n",
"if",
"err",
":=",
"s",
".",
"initInternalSubs",
"(",
"createSubOnClientPublish",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"}",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Discover subject: %s\"",
",",
"s",
".",
"info",
".",
"Discovery",
")",
"\n",
"if",
"s",
".",
"partitions",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Publish subjects root: %s\"",
",",
"s",
".",
"info",
".",
"Publish",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Publish subject: %s.>\"",
",",
"s",
".",
"info",
".",
"Publish",
")",
"\n",
"}",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Subscribe subject: %s\"",
",",
"s",
".",
"info",
".",
"Subscribe",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Subscription Close subject: %s\"",
",",
"s",
".",
"info",
".",
"SubClose",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Unsubscribe subject: %s\"",
",",
"s",
".",
"info",
".",
"Unsubscribe",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Close subject: %s\"",
",",
"s",
".",
"info",
".",
"Close",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // initSubscriptions will setup initial subscriptions for discovery etc. | [
"initSubscriptions",
"will",
"setup",
"initial",
"subscriptions",
"for",
"discovery",
"etc",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2435-L2471 | train |
nats-io/nats-streaming-server | server/server.go | connectCB | func (s *StanServer) connectCB(m *nats.Msg) {
req := &pb.ConnectRequest{}
err := req.Unmarshal(m.Data)
if err != nil || req.HeartbeatInbox == "" {
s.log.Errorf("[Client:?] Invalid conn request: ClientID=%s, Inbox=%s, err=%v",
req.ClientID, req.HeartbeatInbox, err)
s.sendConnectErr(m.Reply, ErrInvalidConnReq.Error())
return
}
if !clientIDRegEx.MatchString(req.ClientID) {
s.log.Errorf("[Client:%s] Invalid ClientID, only alphanumeric and `-` or `_` characters allowed", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClientID.Error())
return
}
// If the client ID is already registered, check to see if it's the case
// that the client refreshed (e.g. it crashed and came back) or if the
// connection is a duplicate. If it refreshed, we will close the old
// client and open a new one.
client := s.clients.lookup(req.ClientID)
if client != nil {
// When detecting a duplicate, the processing of the connect request
// is going to be processed in a go-routine. We need however to keep
// track and fail another request on the same client ID until the
// current one has finished.
s.cliDupCIDsMu.Lock()
if _, exists := s.cliDipCIDsMap[req.ClientID]; exists {
s.cliDupCIDsMu.Unlock()
s.log.Debugf("[Client:%s] Connect failed; already connected", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClient.Error())
return
}
s.cliDipCIDsMap[req.ClientID] = struct{}{}
s.cliDupCIDsMu.Unlock()
s.startGoRoutine(func() {
defer s.wg.Done()
isDup := false
if s.isDuplicateConnect(client) {
s.log.Debugf("[Client:%s] Connect failed; already connected", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClient.Error())
isDup = true
}
s.cliDupCIDsMu.Lock()
if !isDup {
s.handleConnect(req, m, true)
}
delete(s.cliDipCIDsMap, req.ClientID)
s.cliDupCIDsMu.Unlock()
})
return
}
s.cliDupCIDsMu.Lock()
s.handleConnect(req, m, false)
s.cliDupCIDsMu.Unlock()
} | go | func (s *StanServer) connectCB(m *nats.Msg) {
req := &pb.ConnectRequest{}
err := req.Unmarshal(m.Data)
if err != nil || req.HeartbeatInbox == "" {
s.log.Errorf("[Client:?] Invalid conn request: ClientID=%s, Inbox=%s, err=%v",
req.ClientID, req.HeartbeatInbox, err)
s.sendConnectErr(m.Reply, ErrInvalidConnReq.Error())
return
}
if !clientIDRegEx.MatchString(req.ClientID) {
s.log.Errorf("[Client:%s] Invalid ClientID, only alphanumeric and `-` or `_` characters allowed", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClientID.Error())
return
}
// If the client ID is already registered, check to see if it's the case
// that the client refreshed (e.g. it crashed and came back) or if the
// connection is a duplicate. If it refreshed, we will close the old
// client and open a new one.
client := s.clients.lookup(req.ClientID)
if client != nil {
// When detecting a duplicate, the processing of the connect request
// is going to be processed in a go-routine. We need however to keep
// track and fail another request on the same client ID until the
// current one has finished.
s.cliDupCIDsMu.Lock()
if _, exists := s.cliDipCIDsMap[req.ClientID]; exists {
s.cliDupCIDsMu.Unlock()
s.log.Debugf("[Client:%s] Connect failed; already connected", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClient.Error())
return
}
s.cliDipCIDsMap[req.ClientID] = struct{}{}
s.cliDupCIDsMu.Unlock()
s.startGoRoutine(func() {
defer s.wg.Done()
isDup := false
if s.isDuplicateConnect(client) {
s.log.Debugf("[Client:%s] Connect failed; already connected", req.ClientID)
s.sendConnectErr(m.Reply, ErrInvalidClient.Error())
isDup = true
}
s.cliDupCIDsMu.Lock()
if !isDup {
s.handleConnect(req, m, true)
}
delete(s.cliDipCIDsMap, req.ClientID)
s.cliDupCIDsMu.Unlock()
})
return
}
s.cliDupCIDsMu.Lock()
s.handleConnect(req, m, false)
s.cliDupCIDsMu.Unlock()
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"connectCB",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"req",
":=",
"&",
"pb",
".",
"ConnectRequest",
"{",
"}",
"\n",
"err",
":=",
"req",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
"\n",
"if",
"err",
"!=",
"nil",
"||",
"req",
".",
"HeartbeatInbox",
"==",
"\"\"",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"[Client:?] Invalid conn request: ClientID=%s, Inbox=%s, err=%v\"",
",",
"req",
".",
"ClientID",
",",
"req",
".",
"HeartbeatInbox",
",",
"err",
")",
"\n",
"s",
".",
"sendConnectErr",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidConnReq",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"!",
"clientIDRegEx",
".",
"MatchString",
"(",
"req",
".",
"ClientID",
")",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"[Client:%s] Invalid ClientID, only alphanumeric and `-` or `_` characters allowed\"",
",",
"req",
".",
"ClientID",
")",
"\n",
"s",
".",
"sendConnectErr",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidClientID",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"client",
":=",
"s",
".",
"clients",
".",
"lookup",
"(",
"req",
".",
"ClientID",
")",
"\n",
"if",
"client",
"!=",
"nil",
"{",
"s",
".",
"cliDupCIDsMu",
".",
"Lock",
"(",
")",
"\n",
"if",
"_",
",",
"exists",
":=",
"s",
".",
"cliDipCIDsMap",
"[",
"req",
".",
"ClientID",
"]",
";",
"exists",
"{",
"s",
".",
"cliDupCIDsMu",
".",
"Unlock",
"(",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"[Client:%s] Connect failed; already connected\"",
",",
"req",
".",
"ClientID",
")",
"\n",
"s",
".",
"sendConnectErr",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidClient",
".",
"Error",
"(",
")",
")",
"\n",
"return",
"\n",
"}",
"\n",
"s",
".",
"cliDipCIDsMap",
"[",
"req",
".",
"ClientID",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"s",
".",
"cliDupCIDsMu",
".",
"Unlock",
"(",
")",
"\n",
"s",
".",
"startGoRoutine",
"(",
"func",
"(",
")",
"{",
"defer",
"s",
".",
"wg",
".",
"Done",
"(",
")",
"\n",
"isDup",
":=",
"false",
"\n",
"if",
"s",
".",
"isDuplicateConnect",
"(",
"client",
")",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"[Client:%s] Connect failed; already connected\"",
",",
"req",
".",
"ClientID",
")",
"\n",
"s",
".",
"sendConnectErr",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidClient",
".",
"Error",
"(",
")",
")",
"\n",
"isDup",
"=",
"true",
"\n",
"}",
"\n",
"s",
".",
"cliDupCIDsMu",
".",
"Lock",
"(",
")",
"\n",
"if",
"!",
"isDup",
"{",
"s",
".",
"handleConnect",
"(",
"req",
",",
"m",
",",
"true",
")",
"\n",
"}",
"\n",
"delete",
"(",
"s",
".",
"cliDipCIDsMap",
",",
"req",
".",
"ClientID",
")",
"\n",
"s",
".",
"cliDupCIDsMu",
".",
"Unlock",
"(",
")",
"\n",
"}",
")",
"\n",
"return",
"\n",
"}",
"\n",
"s",
".",
"cliDupCIDsMu",
".",
"Lock",
"(",
")",
"\n",
"s",
".",
"handleConnect",
"(",
"req",
",",
"m",
",",
"false",
")",
"\n",
"s",
".",
"cliDupCIDsMu",
".",
"Unlock",
"(",
")",
"\n",
"}"
] | // Process a client connect request | [
"Process",
"a",
"client",
"connect",
"request"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2558-L2613 | train |
nats-io/nats-streaming-server | server/server.go | isDuplicateConnect | func (s *StanServer) isDuplicateConnect(client *client) bool {
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
// This is the HbInbox from the "old" client. See if it is up and
// running by sending a ping to that inbox.
_, err := s.nc.Request(hbInbox, nil, s.dupCIDTimeout)
// If err is nil, the currently registered client responded, so this is a
// duplicate.
return err == nil
} | go | func (s *StanServer) isDuplicateConnect(client *client) bool {
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
// This is the HbInbox from the "old" client. See if it is up and
// running by sending a ping to that inbox.
_, err := s.nc.Request(hbInbox, nil, s.dupCIDTimeout)
// If err is nil, the currently registered client responded, so this is a
// duplicate.
return err == nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"isDuplicateConnect",
"(",
"client",
"*",
"client",
")",
"bool",
"{",
"client",
".",
"RLock",
"(",
")",
"\n",
"hbInbox",
":=",
"client",
".",
"info",
".",
"HbInbox",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n",
"_",
",",
"err",
":=",
"s",
".",
"nc",
".",
"Request",
"(",
"hbInbox",
",",
"nil",
",",
"s",
".",
"dupCIDTimeout",
")",
"\n",
"return",
"err",
"==",
"nil",
"\n",
"}"
] | // isDuplicateConnect determines if the given client ID is a duplicate
// connection by pinging the old client's heartbeat inbox and checking if it
// responds. If it does, it's a duplicate connection. | [
"isDuplicateConnect",
"determines",
"if",
"the",
"given",
"client",
"ID",
"is",
"a",
"duplicate",
"connection",
"by",
"pinging",
"the",
"old",
"client",
"s",
"heartbeat",
"inbox",
"and",
"checking",
"if",
"it",
"responds",
".",
"If",
"it",
"does",
"it",
"s",
"a",
"duplicate",
"connection",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2638-L2650 | train |
nats-io/nats-streaming-server | server/server.go | replicateDeleteChannel | func (s *StanServer) replicateDeleteChannel(channel string) {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_DeleteChannel,
Channel: channel,
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
// Wait on result of replication.
if err = s.raft.Apply(data, 0).Error(); err != nil {
// If we have lost leadership, clear the deleteInProgress flag.
cs := s.channels
cs.Lock()
c := cs.channels[channel]
if c != nil && c.activity != nil {
c.activity.deleteInProgress = false
}
cs.Unlock()
}
} | go | func (s *StanServer) replicateDeleteChannel(channel string) {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_DeleteChannel,
Channel: channel,
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
// Wait on result of replication.
if err = s.raft.Apply(data, 0).Error(); err != nil {
// If we have lost leadership, clear the deleteInProgress flag.
cs := s.channels
cs.Lock()
c := cs.channels[channel]
if c != nil && c.activity != nil {
c.activity.deleteInProgress = false
}
cs.Unlock()
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"replicateDeleteChannel",
"(",
"channel",
"string",
")",
"{",
"op",
":=",
"&",
"spb",
".",
"RaftOperation",
"{",
"OpType",
":",
"spb",
".",
"RaftOperation_DeleteChannel",
",",
"Channel",
":",
"channel",
",",
"}",
"\n",
"data",
",",
"err",
":=",
"op",
".",
"Marshal",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"if",
"err",
"=",
"s",
".",
"raft",
".",
"Apply",
"(",
"data",
",",
"0",
")",
".",
"Error",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"cs",
":=",
"s",
".",
"channels",
"\n",
"cs",
".",
"Lock",
"(",
")",
"\n",
"c",
":=",
"cs",
".",
"channels",
"[",
"channel",
"]",
"\n",
"if",
"c",
"!=",
"nil",
"&&",
"c",
".",
"activity",
"!=",
"nil",
"{",
"c",
".",
"activity",
".",
"deleteInProgress",
"=",
"false",
"\n",
"}",
"\n",
"cs",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // Leader invokes this to replicate the command to delete a channel. | [
"Leader",
"invokes",
"this",
"to",
"replicate",
"the",
"command",
"to",
"delete",
"a",
"channel",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2672-L2692 | train |
nats-io/nats-streaming-server | server/server.go | handleChannelDelete | func (s *StanServer) handleChannelDelete(c *channel) {
delete := false
cs := s.channels
cs.Lock()
a := c.activity
if a.preventDelete || a.deleteInProgress || c.ss.hasActiveSubs() {
if s.debug {
s.log.Debugf("Channel %q cannot be deleted: preventDelete=%v inProgress=%v hasActiveSubs=%v",
c.name, a.preventDelete, a.deleteInProgress, c.ss.hasActiveSubs())
}
c.stopDeleteTimer()
} else {
elapsed := time.Since(a.last)
if elapsed >= a.maxInactivity {
if s.debug {
s.log.Debugf("Channel %q is being deleted", c.name)
}
c.stopDeleteTimer()
// Leave in map for now, but mark as deleted. If we removed before
// completion of the removal, a new lookup could re-create while
// in the process of deleting it.
a.deleteInProgress = true
delete = true
} else {
var next time.Duration
if elapsed < 0 {
next = a.maxInactivity
} else {
// elapsed < a.maxInactivity
next = a.maxInactivity - elapsed
}
if s.debug {
s.log.Debugf("Channel %q cannot be deleted now, reset timer to fire in %v",
c.name, next)
}
c.resetDeleteTimer(next)
}
}
cs.Unlock()
if delete {
if testDeleteChannel {
time.Sleep(time.Second)
}
if s.isClustered {
s.replicateDeleteChannel(c.name)
} else {
s.processDeleteChannel(c.name)
}
}
} | go | func (s *StanServer) handleChannelDelete(c *channel) {
delete := false
cs := s.channels
cs.Lock()
a := c.activity
if a.preventDelete || a.deleteInProgress || c.ss.hasActiveSubs() {
if s.debug {
s.log.Debugf("Channel %q cannot be deleted: preventDelete=%v inProgress=%v hasActiveSubs=%v",
c.name, a.preventDelete, a.deleteInProgress, c.ss.hasActiveSubs())
}
c.stopDeleteTimer()
} else {
elapsed := time.Since(a.last)
if elapsed >= a.maxInactivity {
if s.debug {
s.log.Debugf("Channel %q is being deleted", c.name)
}
c.stopDeleteTimer()
// Leave in map for now, but mark as deleted. If we removed before
// completion of the removal, a new lookup could re-create while
// in the process of deleting it.
a.deleteInProgress = true
delete = true
} else {
var next time.Duration
if elapsed < 0 {
next = a.maxInactivity
} else {
// elapsed < a.maxInactivity
next = a.maxInactivity - elapsed
}
if s.debug {
s.log.Debugf("Channel %q cannot be deleted now, reset timer to fire in %v",
c.name, next)
}
c.resetDeleteTimer(next)
}
}
cs.Unlock()
if delete {
if testDeleteChannel {
time.Sleep(time.Second)
}
if s.isClustered {
s.replicateDeleteChannel(c.name)
} else {
s.processDeleteChannel(c.name)
}
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"handleChannelDelete",
"(",
"c",
"*",
"channel",
")",
"{",
"delete",
":=",
"false",
"\n",
"cs",
":=",
"s",
".",
"channels",
"\n",
"cs",
".",
"Lock",
"(",
")",
"\n",
"a",
":=",
"c",
".",
"activity",
"\n",
"if",
"a",
".",
"preventDelete",
"||",
"a",
".",
"deleteInProgress",
"||",
"c",
".",
"ss",
".",
"hasActiveSubs",
"(",
")",
"{",
"if",
"s",
".",
"debug",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Channel %q cannot be deleted: preventDelete=%v inProgress=%v hasActiveSubs=%v\"",
",",
"c",
".",
"name",
",",
"a",
".",
"preventDelete",
",",
"a",
".",
"deleteInProgress",
",",
"c",
".",
"ss",
".",
"hasActiveSubs",
"(",
")",
")",
"\n",
"}",
"\n",
"c",
".",
"stopDeleteTimer",
"(",
")",
"\n",
"}",
"else",
"{",
"elapsed",
":=",
"time",
".",
"Since",
"(",
"a",
".",
"last",
")",
"\n",
"if",
"elapsed",
">=",
"a",
".",
"maxInactivity",
"{",
"if",
"s",
".",
"debug",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Channel %q is being deleted\"",
",",
"c",
".",
"name",
")",
"\n",
"}",
"\n",
"c",
".",
"stopDeleteTimer",
"(",
")",
"\n",
"a",
".",
"deleteInProgress",
"=",
"true",
"\n",
"delete",
"=",
"true",
"\n",
"}",
"else",
"{",
"var",
"next",
"time",
".",
"Duration",
"\n",
"if",
"elapsed",
"<",
"0",
"{",
"next",
"=",
"a",
".",
"maxInactivity",
"\n",
"}",
"else",
"{",
"next",
"=",
"a",
".",
"maxInactivity",
"-",
"elapsed",
"\n",
"}",
"\n",
"if",
"s",
".",
"debug",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"Channel %q cannot be deleted now, reset timer to fire in %v\"",
",",
"c",
".",
"name",
",",
"next",
")",
"\n",
"}",
"\n",
"c",
".",
"resetDeleteTimer",
"(",
"next",
")",
"\n",
"}",
"\n",
"}",
"\n",
"cs",
".",
"Unlock",
"(",
")",
"\n",
"if",
"delete",
"{",
"if",
"testDeleteChannel",
"{",
"time",
".",
"Sleep",
"(",
"time",
".",
"Second",
")",
"\n",
"}",
"\n",
"if",
"s",
".",
"isClustered",
"{",
"s",
".",
"replicateDeleteChannel",
"(",
"c",
".",
"name",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"processDeleteChannel",
"(",
"c",
".",
"name",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // Check if the channel can be deleted. If so, do it in place.
// This is called from the ioLoop by the leader or a standlone server. | [
"Check",
"if",
"the",
"channel",
"can",
"be",
"deleted",
".",
"If",
"so",
"do",
"it",
"in",
"place",
".",
"This",
"is",
"called",
"from",
"the",
"ioLoop",
"by",
"the",
"leader",
"or",
"a",
"standlone",
"server",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2696-L2745 | train |
nats-io/nats-streaming-server | server/server.go | processDeleteChannel | func (s *StanServer) processDeleteChannel(channel string) {
cs := s.channels
cs.Lock()
defer cs.Unlock()
c := cs.channels[channel]
if c == nil {
s.log.Errorf("Error deleting channel %q: not found", channel)
return
}
if c.activity != nil && c.activity.preventDelete {
s.log.Errorf("The channel %q cannot be deleted at this time since a subscription has been created", channel)
return
}
// Delete from store
if err := cs.store.DeleteChannel(channel); err != nil {
s.log.Errorf("Error deleting channel %q: %v", channel, err)
if c.activity != nil {
c.activity.deleteInProgress = false
c.startDeleteTimer()
}
return
}
delete(s.channels.channels, channel)
s.log.Noticef("Channel %q has been deleted", channel)
} | go | func (s *StanServer) processDeleteChannel(channel string) {
cs := s.channels
cs.Lock()
defer cs.Unlock()
c := cs.channels[channel]
if c == nil {
s.log.Errorf("Error deleting channel %q: not found", channel)
return
}
if c.activity != nil && c.activity.preventDelete {
s.log.Errorf("The channel %q cannot be deleted at this time since a subscription has been created", channel)
return
}
// Delete from store
if err := cs.store.DeleteChannel(channel); err != nil {
s.log.Errorf("Error deleting channel %q: %v", channel, err)
if c.activity != nil {
c.activity.deleteInProgress = false
c.startDeleteTimer()
}
return
}
delete(s.channels.channels, channel)
s.log.Noticef("Channel %q has been deleted", channel)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processDeleteChannel",
"(",
"channel",
"string",
")",
"{",
"cs",
":=",
"s",
".",
"channels",
"\n",
"cs",
".",
"Lock",
"(",
")",
"\n",
"defer",
"cs",
".",
"Unlock",
"(",
")",
"\n",
"c",
":=",
"cs",
".",
"channels",
"[",
"channel",
"]",
"\n",
"if",
"c",
"==",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Error deleting channel %q: not found\"",
",",
"channel",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"c",
".",
"activity",
"!=",
"nil",
"&&",
"c",
".",
"activity",
".",
"preventDelete",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"The channel %q cannot be deleted at this time since a subscription has been created\"",
",",
"channel",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"err",
":=",
"cs",
".",
"store",
".",
"DeleteChannel",
"(",
"channel",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Error deleting channel %q: %v\"",
",",
"channel",
",",
"err",
")",
"\n",
"if",
"c",
".",
"activity",
"!=",
"nil",
"{",
"c",
".",
"activity",
".",
"deleteInProgress",
"=",
"false",
"\n",
"c",
".",
"startDeleteTimer",
"(",
")",
"\n",
"}",
"\n",
"return",
"\n",
"}",
"\n",
"delete",
"(",
"s",
".",
"channels",
".",
"channels",
",",
"channel",
")",
"\n",
"s",
".",
"log",
".",
"Noticef",
"(",
"\"Channel %q has been deleted\"",
",",
"channel",
")",
"\n",
"}"
] | // Actual deletetion of the channel. | [
"Actual",
"deletetion",
"of",
"the",
"channel",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2748-L2772 | train |
nats-io/nats-streaming-server | server/server.go | checkClientHealth | func (s *StanServer) checkClientHealth(clientID string) {
client := s.clients.lookup(clientID)
if client == nil {
return
}
// If clustered and we lost leadership, we should stop
// heartbeating as the new leader will take over.
if s.isClustered && !s.isLeader() {
// Do not remove client HB here. We do that in
// leadershipLost. We could be here because the
// callback fired while we are not yet finished
// acquiring leadership.
client.Lock()
if client.hbt != nil {
client.hbt.Reset(s.opts.ClientHBInterval)
}
client.Unlock()
return
}
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
// Sends the HB request. This call blocks for ClientHBTimeout,
// do not hold the lock for that long!
_, err := s.nc.Request(hbInbox, nil, s.opts.ClientHBTimeout)
// Grab the lock now.
client.Lock()
// Client could have been unregistered, in which case
// client.hbt will be nil.
if client.hbt == nil {
client.Unlock()
return
}
hadFailed := client.fhb > 0
// If we did not get the reply, increase the number of
// failed heartbeats.
if err != nil {
client.fhb++
// If we have reached the max number of failures
if client.fhb > s.opts.ClientHBFailCount {
s.log.Debugf("[Client:%s] Timed out on heartbeats", clientID)
// close the client (connection). This locks the
// client object internally so unlock here.
client.Unlock()
// If clustered, thread operations through Raft.
if s.isClustered {
s.barrier(func() {
if err := s.replicateConnClose(&pb.CloseRequest{ClientID: clientID}); err != nil {
s.log.Errorf("[Client:%s] Failed to replicate disconnect on heartbeat expiration: %v",
clientID, err)
}
})
} else {
s.closeClient(clientID)
}
return
}
} else {
// We got the reply, reset the number of failed heartbeats.
client.fhb = 0
}
// Reset the timer to fire again.
client.hbt.Reset(s.opts.ClientHBInterval)
var (
subs []*subState
hasFailedHB = client.fhb > 0
)
if (hadFailed && !hasFailedHB) || (!hadFailed && hasFailedHB) {
// Get a copy of subscribers and client.fhb while under lock
subs = client.getSubsCopy()
}
client.Unlock()
if len(subs) > 0 {
// Push the info about presence of failed heartbeats down to
// subscribers, so they have easier access to that info in
// the redelivery attempt code.
for _, sub := range subs {
sub.Lock()
sub.hasFailedHB = hasFailedHB
sub.Unlock()
}
}
} | go | func (s *StanServer) checkClientHealth(clientID string) {
client := s.clients.lookup(clientID)
if client == nil {
return
}
// If clustered and we lost leadership, we should stop
// heartbeating as the new leader will take over.
if s.isClustered && !s.isLeader() {
// Do not remove client HB here. We do that in
// leadershipLost. We could be here because the
// callback fired while we are not yet finished
// acquiring leadership.
client.Lock()
if client.hbt != nil {
client.hbt.Reset(s.opts.ClientHBInterval)
}
client.Unlock()
return
}
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
// Sends the HB request. This call blocks for ClientHBTimeout,
// do not hold the lock for that long!
_, err := s.nc.Request(hbInbox, nil, s.opts.ClientHBTimeout)
// Grab the lock now.
client.Lock()
// Client could have been unregistered, in which case
// client.hbt will be nil.
if client.hbt == nil {
client.Unlock()
return
}
hadFailed := client.fhb > 0
// If we did not get the reply, increase the number of
// failed heartbeats.
if err != nil {
client.fhb++
// If we have reached the max number of failures
if client.fhb > s.opts.ClientHBFailCount {
s.log.Debugf("[Client:%s] Timed out on heartbeats", clientID)
// close the client (connection). This locks the
// client object internally so unlock here.
client.Unlock()
// If clustered, thread operations through Raft.
if s.isClustered {
s.barrier(func() {
if err := s.replicateConnClose(&pb.CloseRequest{ClientID: clientID}); err != nil {
s.log.Errorf("[Client:%s] Failed to replicate disconnect on heartbeat expiration: %v",
clientID, err)
}
})
} else {
s.closeClient(clientID)
}
return
}
} else {
// We got the reply, reset the number of failed heartbeats.
client.fhb = 0
}
// Reset the timer to fire again.
client.hbt.Reset(s.opts.ClientHBInterval)
var (
subs []*subState
hasFailedHB = client.fhb > 0
)
if (hadFailed && !hasFailedHB) || (!hadFailed && hasFailedHB) {
// Get a copy of subscribers and client.fhb while under lock
subs = client.getSubsCopy()
}
client.Unlock()
if len(subs) > 0 {
// Push the info about presence of failed heartbeats down to
// subscribers, so they have easier access to that info in
// the redelivery attempt code.
for _, sub := range subs {
sub.Lock()
sub.hasFailedHB = hasFailedHB
sub.Unlock()
}
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"checkClientHealth",
"(",
"clientID",
"string",
")",
"{",
"client",
":=",
"s",
".",
"clients",
".",
"lookup",
"(",
"clientID",
")",
"\n",
"if",
"client",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"if",
"s",
".",
"isClustered",
"&&",
"!",
"s",
".",
"isLeader",
"(",
")",
"{",
"client",
".",
"Lock",
"(",
")",
"\n",
"if",
"client",
".",
"hbt",
"!=",
"nil",
"{",
"client",
".",
"hbt",
".",
"Reset",
"(",
"s",
".",
"opts",
".",
"ClientHBInterval",
")",
"\n",
"}",
"\n",
"client",
".",
"Unlock",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"client",
".",
"RLock",
"(",
")",
"\n",
"hbInbox",
":=",
"client",
".",
"info",
".",
"HbInbox",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n",
"_",
",",
"err",
":=",
"s",
".",
"nc",
".",
"Request",
"(",
"hbInbox",
",",
"nil",
",",
"s",
".",
"opts",
".",
"ClientHBTimeout",
")",
"\n",
"client",
".",
"Lock",
"(",
")",
"\n",
"if",
"client",
".",
"hbt",
"==",
"nil",
"{",
"client",
".",
"Unlock",
"(",
")",
"\n",
"return",
"\n",
"}",
"\n",
"hadFailed",
":=",
"client",
".",
"fhb",
">",
"0",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"client",
".",
"fhb",
"++",
"\n",
"if",
"client",
".",
"fhb",
">",
"s",
".",
"opts",
".",
"ClientHBFailCount",
"{",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"[Client:%s] Timed out on heartbeats\"",
",",
"clientID",
")",
"\n",
"client",
".",
"Unlock",
"(",
")",
"\n",
"if",
"s",
".",
"isClustered",
"{",
"s",
".",
"barrier",
"(",
"func",
"(",
")",
"{",
"if",
"err",
":=",
"s",
".",
"replicateConnClose",
"(",
"&",
"pb",
".",
"CloseRequest",
"{",
"ClientID",
":",
"clientID",
"}",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"[Client:%s] Failed to replicate disconnect on heartbeat expiration: %v\"",
",",
"clientID",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"closeClient",
"(",
"clientID",
")",
"\n",
"}",
"\n",
"return",
"\n",
"}",
"\n",
"}",
"else",
"{",
"client",
".",
"fhb",
"=",
"0",
"\n",
"}",
"\n",
"client",
".",
"hbt",
".",
"Reset",
"(",
"s",
".",
"opts",
".",
"ClientHBInterval",
")",
"\n",
"var",
"(",
"subs",
"[",
"]",
"*",
"subState",
"\n",
"hasFailedHB",
"=",
"client",
".",
"fhb",
">",
"0",
"\n",
")",
"\n",
"if",
"(",
"hadFailed",
"&&",
"!",
"hasFailedHB",
")",
"||",
"(",
"!",
"hadFailed",
"&&",
"hasFailedHB",
")",
"{",
"subs",
"=",
"client",
".",
"getSubsCopy",
"(",
")",
"\n",
"}",
"\n",
"client",
".",
"Unlock",
"(",
")",
"\n",
"if",
"len",
"(",
"subs",
")",
">",
"0",
"{",
"for",
"_",
",",
"sub",
":=",
"range",
"subs",
"{",
"sub",
".",
"Lock",
"(",
")",
"\n",
"sub",
".",
"hasFailedHB",
"=",
"hasFailedHB",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}"
] | // Send a heartbeat call to the client. | [
"Send",
"a",
"heartbeat",
"call",
"to",
"the",
"client",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2859-L2944 | train |
nats-io/nats-streaming-server | server/server.go | closeClient | func (s *StanServer) closeClient(clientID string) error {
s.closeMu.Lock()
defer s.closeMu.Unlock()
// Lookup client first, will unregister only after removing its subscriptions
client := s.clients.lookup(clientID)
if client == nil {
s.log.Errorf("Unknown client %q in close request", clientID)
return ErrUnknownClient
}
// Remove all non-durable subscribers.
s.removeAllNonDurableSubscribers(client)
// Remove from our clientStore.
if _, err := s.clients.unregister(clientID); err != nil {
s.log.Errorf("Error unregistering client %q: %v", clientID, err)
}
if s.debug {
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
s.log.Debugf("[Client:%s] Closed (Inbox=%v)", clientID, hbInbox)
}
return nil
} | go | func (s *StanServer) closeClient(clientID string) error {
s.closeMu.Lock()
defer s.closeMu.Unlock()
// Lookup client first, will unregister only after removing its subscriptions
client := s.clients.lookup(clientID)
if client == nil {
s.log.Errorf("Unknown client %q in close request", clientID)
return ErrUnknownClient
}
// Remove all non-durable subscribers.
s.removeAllNonDurableSubscribers(client)
// Remove from our clientStore.
if _, err := s.clients.unregister(clientID); err != nil {
s.log.Errorf("Error unregistering client %q: %v", clientID, err)
}
if s.debug {
client.RLock()
hbInbox := client.info.HbInbox
client.RUnlock()
s.log.Debugf("[Client:%s] Closed (Inbox=%v)", clientID, hbInbox)
}
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"closeClient",
"(",
"clientID",
"string",
")",
"error",
"{",
"s",
".",
"closeMu",
".",
"Lock",
"(",
")",
"\n",
"defer",
"s",
".",
"closeMu",
".",
"Unlock",
"(",
")",
"\n",
"client",
":=",
"s",
".",
"clients",
".",
"lookup",
"(",
"clientID",
")",
"\n",
"if",
"client",
"==",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Unknown client %q in close request\"",
",",
"clientID",
")",
"\n",
"return",
"ErrUnknownClient",
"\n",
"}",
"\n",
"s",
".",
"removeAllNonDurableSubscribers",
"(",
"client",
")",
"\n",
"if",
"_",
",",
"err",
":=",
"s",
".",
"clients",
".",
"unregister",
"(",
"clientID",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Error unregistering client %q: %v\"",
",",
"clientID",
",",
"err",
")",
"\n",
"}",
"\n",
"if",
"s",
".",
"debug",
"{",
"client",
".",
"RLock",
"(",
")",
"\n",
"hbInbox",
":=",
"client",
".",
"info",
".",
"HbInbox",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"[Client:%s] Closed (Inbox=%v)\"",
",",
"clientID",
",",
"hbInbox",
")",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // Close a client | [
"Close",
"a",
"client"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2947-L2972 | train |
nats-io/nats-streaming-server | server/server.go | processCloseRequest | func (s *StanServer) processCloseRequest(m *nats.Msg) {
req := &pb.CloseRequest{}
err := req.Unmarshal(m.Data)
if err != nil {
s.log.Errorf("Received invalid close request, subject=%s", m.Subject)
s.sendCloseResponse(m.Reply, ErrInvalidCloseReq)
return
}
s.barrier(func() {
var err error
// If clustered, thread operations through Raft.
if s.isClustered {
err = s.replicateConnClose(req)
} else {
err = s.closeClient(req.ClientID)
}
// If there was an error, it has been already logged.
// Send response, if err is nil, will be a success response.
s.sendCloseResponse(m.Reply, err)
})
} | go | func (s *StanServer) processCloseRequest(m *nats.Msg) {
req := &pb.CloseRequest{}
err := req.Unmarshal(m.Data)
if err != nil {
s.log.Errorf("Received invalid close request, subject=%s", m.Subject)
s.sendCloseResponse(m.Reply, ErrInvalidCloseReq)
return
}
s.barrier(func() {
var err error
// If clustered, thread operations through Raft.
if s.isClustered {
err = s.replicateConnClose(req)
} else {
err = s.closeClient(req.ClientID)
}
// If there was an error, it has been already logged.
// Send response, if err is nil, will be a success response.
s.sendCloseResponse(m.Reply, err)
})
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processCloseRequest",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"req",
":=",
"&",
"pb",
".",
"CloseRequest",
"{",
"}",
"\n",
"err",
":=",
"req",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Received invalid close request, subject=%s\"",
",",
"m",
".",
"Subject",
")",
"\n",
"s",
".",
"sendCloseResponse",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidCloseReq",
")",
"\n",
"return",
"\n",
"}",
"\n",
"s",
".",
"barrier",
"(",
"func",
"(",
")",
"{",
"var",
"err",
"error",
"\n",
"if",
"s",
".",
"isClustered",
"{",
"err",
"=",
"s",
".",
"replicateConnClose",
"(",
"req",
")",
"\n",
"}",
"else",
"{",
"err",
"=",
"s",
".",
"closeClient",
"(",
"req",
".",
"ClientID",
")",
"\n",
"}",
"\n",
"s",
".",
"sendCloseResponse",
"(",
"m",
".",
"Reply",
",",
"err",
")",
"\n",
"}",
")",
"\n",
"}"
] | // processCloseRequest will process connection close requests from clients. | [
"processCloseRequest",
"will",
"process",
"connection",
"close",
"requests",
"from",
"clients",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L2975-L2997 | train |
nats-io/nats-streaming-server | server/server.go | processClientPublish | func (s *StanServer) processClientPublish(m *nats.Msg) {
iopm := &ioPendingMsg{m: m}
pm := &iopm.pm
if pm.Unmarshal(m.Data) != nil {
if s.processCtrlMsg(m) {
return
}
// else we will report an error below...
}
// Make sure we have a guid and valid channel name.
if pm.Guid == "" || !util.IsChannelNameValid(pm.Subject, false) {
s.log.Errorf("Received invalid client publish message %v", pm)
s.sendPublishErr(m.Reply, pm.Guid, ErrInvalidPubReq)
return
}
if s.debug {
s.log.Tracef("[Client:%s] Received message from publisher subj=%s guid=%s", pm.ClientID, pm.Subject, pm.Guid)
}
// Check if the client is valid. We do this after the clustered check so
// that only the leader performs this check.
valid := false
if s.partitions != nil {
// In partitioning mode it is possible that we get there
// before the connect request is processed. If so, make sure we wait
// for conn request to be processed first. Check clientCheckTimeout
// doc for details.
valid = s.clients.isValidWithTimeout(pm.ClientID, pm.ConnID, clientCheckTimeout)
} else {
valid = s.clients.isValid(pm.ClientID, pm.ConnID)
}
if !valid {
s.log.Errorf("Received invalid client publish message %v", pm)
s.sendPublishErr(m.Reply, pm.Guid, ErrInvalidPubReq)
return
}
s.ioChannel <- iopm
} | go | func (s *StanServer) processClientPublish(m *nats.Msg) {
iopm := &ioPendingMsg{m: m}
pm := &iopm.pm
if pm.Unmarshal(m.Data) != nil {
if s.processCtrlMsg(m) {
return
}
// else we will report an error below...
}
// Make sure we have a guid and valid channel name.
if pm.Guid == "" || !util.IsChannelNameValid(pm.Subject, false) {
s.log.Errorf("Received invalid client publish message %v", pm)
s.sendPublishErr(m.Reply, pm.Guid, ErrInvalidPubReq)
return
}
if s.debug {
s.log.Tracef("[Client:%s] Received message from publisher subj=%s guid=%s", pm.ClientID, pm.Subject, pm.Guid)
}
// Check if the client is valid. We do this after the clustered check so
// that only the leader performs this check.
valid := false
if s.partitions != nil {
// In partitioning mode it is possible that we get there
// before the connect request is processed. If so, make sure we wait
// for conn request to be processed first. Check clientCheckTimeout
// doc for details.
valid = s.clients.isValidWithTimeout(pm.ClientID, pm.ConnID, clientCheckTimeout)
} else {
valid = s.clients.isValid(pm.ClientID, pm.ConnID)
}
if !valid {
s.log.Errorf("Received invalid client publish message %v", pm)
s.sendPublishErr(m.Reply, pm.Guid, ErrInvalidPubReq)
return
}
s.ioChannel <- iopm
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processClientPublish",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"iopm",
":=",
"&",
"ioPendingMsg",
"{",
"m",
":",
"m",
"}",
"\n",
"pm",
":=",
"&",
"iopm",
".",
"pm",
"\n",
"if",
"pm",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
"!=",
"nil",
"{",
"if",
"s",
".",
"processCtrlMsg",
"(",
"m",
")",
"{",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"pm",
".",
"Guid",
"==",
"\"\"",
"||",
"!",
"util",
".",
"IsChannelNameValid",
"(",
"pm",
".",
"Subject",
",",
"false",
")",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Received invalid client publish message %v\"",
",",
"pm",
")",
"\n",
"s",
".",
"sendPublishErr",
"(",
"m",
".",
"Reply",
",",
"pm",
".",
"Guid",
",",
"ErrInvalidPubReq",
")",
"\n",
"return",
"\n",
"}",
"\n",
"if",
"s",
".",
"debug",
"{",
"s",
".",
"log",
".",
"Tracef",
"(",
"\"[Client:%s] Received message from publisher subj=%s guid=%s\"",
",",
"pm",
".",
"ClientID",
",",
"pm",
".",
"Subject",
",",
"pm",
".",
"Guid",
")",
"\n",
"}",
"\n",
"valid",
":=",
"false",
"\n",
"if",
"s",
".",
"partitions",
"!=",
"nil",
"{",
"valid",
"=",
"s",
".",
"clients",
".",
"isValidWithTimeout",
"(",
"pm",
".",
"ClientID",
",",
"pm",
".",
"ConnID",
",",
"clientCheckTimeout",
")",
"\n",
"}",
"else",
"{",
"valid",
"=",
"s",
".",
"clients",
".",
"isValid",
"(",
"pm",
".",
"ClientID",
",",
"pm",
".",
"ConnID",
")",
"\n",
"}",
"\n",
"if",
"!",
"valid",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Received invalid client publish message %v\"",
",",
"pm",
")",
"\n",
"s",
".",
"sendPublishErr",
"(",
"m",
".",
"Reply",
",",
"pm",
".",
"Guid",
",",
"ErrInvalidPubReq",
")",
"\n",
"return",
"\n",
"}",
"\n",
"s",
".",
"ioChannel",
"<-",
"iopm",
"\n",
"}"
] | // processClientPublish process inbound messages from clients. | [
"processClientPublish",
"process",
"inbound",
"messages",
"from",
"clients",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3030-L3070 | train |
nats-io/nats-streaming-server | server/server.go | processClientPings | func (s *StanServer) processClientPings(m *nats.Msg) {
if len(m.Data) == 0 {
return
}
ping := &pb.Ping{}
if err := ping.Unmarshal(m.Data); err != nil {
return
}
var reply []byte
client := s.clients.lookupByConnID(ping.ConnID)
if client != nil {
// If the client has failed heartbeats and since the
// server just received a PING from the client, reset
// the server-to-client HB timer so that a PING is
// sent soon and the client's subscriptions failedHB
// is cleared.
client.RLock()
hasFailedHBs := client.fhb > 0
client.RUnlock()
if hasFailedHBs {
client.Lock()
client.hbt.Reset(time.Millisecond)
client.Unlock()
}
if s.pingResponseOKBytes == nil {
s.pingResponseOKBytes, _ = (&pb.PingResponse{}).Marshal()
}
reply = s.pingResponseOKBytes
} else {
if s.pingResponseInvalidClientBytes == nil {
pingError := &pb.PingResponse{
Error: "client has been replaced or is no longer registered",
}
s.pingResponseInvalidClientBytes, _ = pingError.Marshal()
}
reply = s.pingResponseInvalidClientBytes
}
s.ncs.Publish(m.Reply, reply)
} | go | func (s *StanServer) processClientPings(m *nats.Msg) {
if len(m.Data) == 0 {
return
}
ping := &pb.Ping{}
if err := ping.Unmarshal(m.Data); err != nil {
return
}
var reply []byte
client := s.clients.lookupByConnID(ping.ConnID)
if client != nil {
// If the client has failed heartbeats and since the
// server just received a PING from the client, reset
// the server-to-client HB timer so that a PING is
// sent soon and the client's subscriptions failedHB
// is cleared.
client.RLock()
hasFailedHBs := client.fhb > 0
client.RUnlock()
if hasFailedHBs {
client.Lock()
client.hbt.Reset(time.Millisecond)
client.Unlock()
}
if s.pingResponseOKBytes == nil {
s.pingResponseOKBytes, _ = (&pb.PingResponse{}).Marshal()
}
reply = s.pingResponseOKBytes
} else {
if s.pingResponseInvalidClientBytes == nil {
pingError := &pb.PingResponse{
Error: "client has been replaced or is no longer registered",
}
s.pingResponseInvalidClientBytes, _ = pingError.Marshal()
}
reply = s.pingResponseInvalidClientBytes
}
s.ncs.Publish(m.Reply, reply)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processClientPings",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"if",
"len",
"(",
"m",
".",
"Data",
")",
"==",
"0",
"{",
"return",
"\n",
"}",
"\n",
"ping",
":=",
"&",
"pb",
".",
"Ping",
"{",
"}",
"\n",
"if",
"err",
":=",
"ping",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"var",
"reply",
"[",
"]",
"byte",
"\n",
"client",
":=",
"s",
".",
"clients",
".",
"lookupByConnID",
"(",
"ping",
".",
"ConnID",
")",
"\n",
"if",
"client",
"!=",
"nil",
"{",
"client",
".",
"RLock",
"(",
")",
"\n",
"hasFailedHBs",
":=",
"client",
".",
"fhb",
">",
"0",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"hasFailedHBs",
"{",
"client",
".",
"Lock",
"(",
")",
"\n",
"client",
".",
"hbt",
".",
"Reset",
"(",
"time",
".",
"Millisecond",
")",
"\n",
"client",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"if",
"s",
".",
"pingResponseOKBytes",
"==",
"nil",
"{",
"s",
".",
"pingResponseOKBytes",
",",
"_",
"=",
"(",
"&",
"pb",
".",
"PingResponse",
"{",
"}",
")",
".",
"Marshal",
"(",
")",
"\n",
"}",
"\n",
"reply",
"=",
"s",
".",
"pingResponseOKBytes",
"\n",
"}",
"else",
"{",
"if",
"s",
".",
"pingResponseInvalidClientBytes",
"==",
"nil",
"{",
"pingError",
":=",
"&",
"pb",
".",
"PingResponse",
"{",
"Error",
":",
"\"client has been replaced or is no longer registered\"",
",",
"}",
"\n",
"s",
".",
"pingResponseInvalidClientBytes",
",",
"_",
"=",
"pingError",
".",
"Marshal",
"(",
")",
"\n",
"}",
"\n",
"reply",
"=",
"s",
".",
"pingResponseInvalidClientBytes",
"\n",
"}",
"\n",
"s",
".",
"ncs",
".",
"Publish",
"(",
"m",
".",
"Reply",
",",
"reply",
")",
"\n",
"}"
] | // processClientPings receives a PING from a client. The payload is the client's UID.
// If the client is present, a response with nil payload is sent back to indicate
// success, otherwise the payload contains an error message. | [
"processClientPings",
"receives",
"a",
"PING",
"from",
"a",
"client",
".",
"The",
"payload",
"is",
"the",
"client",
"s",
"UID",
".",
"If",
"the",
"client",
"is",
"present",
"a",
"response",
"with",
"nil",
"payload",
"is",
"sent",
"back",
"to",
"indicate",
"success",
"otherwise",
"the",
"payload",
"contains",
"an",
"error",
"message",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3075-L3113 | train |
nats-io/nats-streaming-server | server/server.go | sendMsgToQueueGroup | func (s *StanServer) sendMsgToQueueGroup(qs *queueState, m *pb.MsgProto, force bool) (*subState, bool, bool) {
sub := findBestQueueSub(qs.subs)
if sub == nil {
return nil, false, false
}
sub.Lock()
wasStalled := sub.stalled
didSend, sendMore := s.sendMsgToSub(sub, m, force)
// If this is not a redelivery and the sub was not stalled, but now is,
// bump the number of stalled members.
if !force && !wasStalled && sub.stalled {
qs.stalledSubCount++
}
if didSend && sub.LastSent > qs.lastSent {
qs.lastSent = sub.LastSent
}
sub.Unlock()
return sub, didSend, sendMore
} | go | func (s *StanServer) sendMsgToQueueGroup(qs *queueState, m *pb.MsgProto, force bool) (*subState, bool, bool) {
sub := findBestQueueSub(qs.subs)
if sub == nil {
return nil, false, false
}
sub.Lock()
wasStalled := sub.stalled
didSend, sendMore := s.sendMsgToSub(sub, m, force)
// If this is not a redelivery and the sub was not stalled, but now is,
// bump the number of stalled members.
if !force && !wasStalled && sub.stalled {
qs.stalledSubCount++
}
if didSend && sub.LastSent > qs.lastSent {
qs.lastSent = sub.LastSent
}
sub.Unlock()
return sub, didSend, sendMore
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"sendMsgToQueueGroup",
"(",
"qs",
"*",
"queueState",
",",
"m",
"*",
"pb",
".",
"MsgProto",
",",
"force",
"bool",
")",
"(",
"*",
"subState",
",",
"bool",
",",
"bool",
")",
"{",
"sub",
":=",
"findBestQueueSub",
"(",
"qs",
".",
"subs",
")",
"\n",
"if",
"sub",
"==",
"nil",
"{",
"return",
"nil",
",",
"false",
",",
"false",
"\n",
"}",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"wasStalled",
":=",
"sub",
".",
"stalled",
"\n",
"didSend",
",",
"sendMore",
":=",
"s",
".",
"sendMsgToSub",
"(",
"sub",
",",
"m",
",",
"force",
")",
"\n",
"if",
"!",
"force",
"&&",
"!",
"wasStalled",
"&&",
"sub",
".",
"stalled",
"{",
"qs",
".",
"stalledSubCount",
"++",
"\n",
"}",
"\n",
"if",
"didSend",
"&&",
"sub",
".",
"LastSent",
">",
"qs",
".",
"lastSent",
"{",
"qs",
".",
"lastSent",
"=",
"sub",
".",
"LastSent",
"\n",
"}",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"return",
"sub",
",",
"didSend",
",",
"sendMore",
"\n",
"}"
] | // Send a message to the queue group
// Assumes qs lock held for write | [
"Send",
"a",
"message",
"to",
"the",
"queue",
"group",
"Assumes",
"qs",
"lock",
"held",
"for",
"write"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3174-L3192 | train |
nats-io/nats-streaming-server | server/server.go | processMsg | func (s *StanServer) processMsg(c *channel) {
ss := c.ss
// Since we iterate through them all.
ss.RLock()
// Walk the plain subscribers and deliver to each one
for _, sub := range ss.psubs {
s.sendAvailableMessages(c, sub)
}
// Check the queue subscribers
for _, qs := range ss.qsubs {
s.sendAvailableMessagesToQueue(c, qs)
}
ss.RUnlock()
} | go | func (s *StanServer) processMsg(c *channel) {
ss := c.ss
// Since we iterate through them all.
ss.RLock()
// Walk the plain subscribers and deliver to each one
for _, sub := range ss.psubs {
s.sendAvailableMessages(c, sub)
}
// Check the queue subscribers
for _, qs := range ss.qsubs {
s.sendAvailableMessagesToQueue(c, qs)
}
ss.RUnlock()
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processMsg",
"(",
"c",
"*",
"channel",
")",
"{",
"ss",
":=",
"c",
".",
"ss",
"\n",
"ss",
".",
"RLock",
"(",
")",
"\n",
"for",
"_",
",",
"sub",
":=",
"range",
"ss",
".",
"psubs",
"{",
"s",
".",
"sendAvailableMessages",
"(",
"c",
",",
"sub",
")",
"\n",
"}",
"\n",
"for",
"_",
",",
"qs",
":=",
"range",
"ss",
".",
"qsubs",
"{",
"s",
".",
"sendAvailableMessagesToQueue",
"(",
"c",
",",
"qs",
")",
"\n",
"}",
"\n",
"ss",
".",
"RUnlock",
"(",
")",
"\n",
"}"
] | // processMsg will process a message, and possibly send to clients, etc. | [
"processMsg",
"will",
"process",
"a",
"message",
"and",
"possibly",
"send",
"to",
"clients",
"etc",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3195-L3210 | train |
nats-io/nats-streaming-server | server/server.go | makeSortedSequences | func makeSortedSequences(sequences map[uint64]int64) []uint64 {
results := make([]uint64, 0, len(sequences))
for seq := range sequences {
results = append(results, seq)
}
sort.Sort(bySeq(results))
return results
} | go | func makeSortedSequences(sequences map[uint64]int64) []uint64 {
results := make([]uint64, 0, len(sequences))
for seq := range sequences {
results = append(results, seq)
}
sort.Sort(bySeq(results))
return results
} | [
"func",
"makeSortedSequences",
"(",
"sequences",
"map",
"[",
"uint64",
"]",
"int64",
")",
"[",
"]",
"uint64",
"{",
"results",
":=",
"make",
"(",
"[",
"]",
"uint64",
",",
"0",
",",
"len",
"(",
"sequences",
")",
")",
"\n",
"for",
"seq",
":=",
"range",
"sequences",
"{",
"results",
"=",
"append",
"(",
"results",
",",
"seq",
")",
"\n",
"}",
"\n",
"sort",
".",
"Sort",
"(",
"bySeq",
"(",
"results",
")",
")",
"\n",
"return",
"results",
"\n",
"}"
] | // Returns an array of message sequence numbers ordered by sequence. | [
"Returns",
"an",
"array",
"of",
"message",
"sequence",
"numbers",
"ordered",
"by",
"sequence",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3220-L3227 | train |
nats-io/nats-streaming-server | server/server.go | performDurableRedelivery | func (s *StanServer) performDurableRedelivery(c *channel, sub *subState) {
// Sort our messages outstanding from acksPending, grab some state and unlock.
sub.RLock()
sortedSeqs := makeSortedSequences(sub.acksPending)
clientID := sub.ClientID
newOnHold := sub.newOnHold
subID := sub.ID
sub.RUnlock()
if s.debug && len(sortedSeqs) > 0 {
sub.RLock()
durName := sub.DurableName
if durName == "" {
durName = sub.QGroup
}
sub.RUnlock()
s.log.Debugf("[Client:%s] Redelivering to subid=%d, durable=%s", clientID, subID, durName)
}
// If we don't find the client, we are done.
if s.clients.lookup(clientID) != nil {
// Go through all messages
for _, seq := range sortedSeqs {
m := s.getMsgForRedelivery(c, sub, seq)
if m == nil {
continue
}
if s.trace {
s.log.Tracef("[Client:%s] Redelivering to subid=%d, seq=%d", clientID, subID, m.Sequence)
}
// Flag as redelivered.
m.Redelivered = true
sub.Lock()
// Force delivery
s.sendMsgToSub(sub, m, forceDelivery)
sub.Unlock()
}
}
// Release newOnHold if needed.
if newOnHold {
sub.Lock()
sub.newOnHold = false
sub.Unlock()
}
} | go | func (s *StanServer) performDurableRedelivery(c *channel, sub *subState) {
// Sort our messages outstanding from acksPending, grab some state and unlock.
sub.RLock()
sortedSeqs := makeSortedSequences(sub.acksPending)
clientID := sub.ClientID
newOnHold := sub.newOnHold
subID := sub.ID
sub.RUnlock()
if s.debug && len(sortedSeqs) > 0 {
sub.RLock()
durName := sub.DurableName
if durName == "" {
durName = sub.QGroup
}
sub.RUnlock()
s.log.Debugf("[Client:%s] Redelivering to subid=%d, durable=%s", clientID, subID, durName)
}
// If we don't find the client, we are done.
if s.clients.lookup(clientID) != nil {
// Go through all messages
for _, seq := range sortedSeqs {
m := s.getMsgForRedelivery(c, sub, seq)
if m == nil {
continue
}
if s.trace {
s.log.Tracef("[Client:%s] Redelivering to subid=%d, seq=%d", clientID, subID, m.Sequence)
}
// Flag as redelivered.
m.Redelivered = true
sub.Lock()
// Force delivery
s.sendMsgToSub(sub, m, forceDelivery)
sub.Unlock()
}
}
// Release newOnHold if needed.
if newOnHold {
sub.Lock()
sub.newOnHold = false
sub.Unlock()
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"performDurableRedelivery",
"(",
"c",
"*",
"channel",
",",
"sub",
"*",
"subState",
")",
"{",
"sub",
".",
"RLock",
"(",
")",
"\n",
"sortedSeqs",
":=",
"makeSortedSequences",
"(",
"sub",
".",
"acksPending",
")",
"\n",
"clientID",
":=",
"sub",
".",
"ClientID",
"\n",
"newOnHold",
":=",
"sub",
".",
"newOnHold",
"\n",
"subID",
":=",
"sub",
".",
"ID",
"\n",
"sub",
".",
"RUnlock",
"(",
")",
"\n",
"if",
"s",
".",
"debug",
"&&",
"len",
"(",
"sortedSeqs",
")",
">",
"0",
"{",
"sub",
".",
"RLock",
"(",
")",
"\n",
"durName",
":=",
"sub",
".",
"DurableName",
"\n",
"if",
"durName",
"==",
"\"\"",
"{",
"durName",
"=",
"sub",
".",
"QGroup",
"\n",
"}",
"\n",
"sub",
".",
"RUnlock",
"(",
")",
"\n",
"s",
".",
"log",
".",
"Debugf",
"(",
"\"[Client:%s] Redelivering to subid=%d, durable=%s\"",
",",
"clientID",
",",
"subID",
",",
"durName",
")",
"\n",
"}",
"\n",
"if",
"s",
".",
"clients",
".",
"lookup",
"(",
"clientID",
")",
"!=",
"nil",
"{",
"for",
"_",
",",
"seq",
":=",
"range",
"sortedSeqs",
"{",
"m",
":=",
"s",
".",
"getMsgForRedelivery",
"(",
"c",
",",
"sub",
",",
"seq",
")",
"\n",
"if",
"m",
"==",
"nil",
"{",
"continue",
"\n",
"}",
"\n",
"if",
"s",
".",
"trace",
"{",
"s",
".",
"log",
".",
"Tracef",
"(",
"\"[Client:%s] Redelivering to subid=%d, seq=%d\"",
",",
"clientID",
",",
"subID",
",",
"m",
".",
"Sequence",
")",
"\n",
"}",
"\n",
"m",
".",
"Redelivered",
"=",
"true",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"s",
".",
"sendMsgToSub",
"(",
"sub",
",",
"m",
",",
"forceDelivery",
")",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}",
"\n",
"if",
"newOnHold",
"{",
"sub",
".",
"Lock",
"(",
")",
"\n",
"sub",
".",
"newOnHold",
"=",
"false",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // Redeliver all outstanding messages to a durable subscriber, used on resubscribe. | [
"Redeliver",
"all",
"outstanding",
"messages",
"to",
"a",
"durable",
"subscriber",
"used",
"on",
"resubscribe",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3256-L3303 | train |
nats-io/nats-streaming-server | server/server.go | collectSentOrAck | func (s *StanServer) collectSentOrAck(sub *subState, sent bool, sequence uint64) {
sr := s.ssarepl
if sub.replicate == nil {
sub.replicate = &subSentAndAck{
sent: make([]uint64, 0, 100),
ack: make([]uint64, 0, 100),
}
}
r := sub.replicate
if sent {
r.sent = append(r.sent, sequence)
} else {
r.ack = append(r.ack, sequence)
}
// This function is called with exactly one event at a time.
// Use exact count to decide when to add to given map. This
// avoid the need for booleans to not add more than once.
l := len(r.sent) + len(r.ack)
if l == 1 {
sr.waiting.Store(sub, struct{}{})
} else if l == 100 {
sr.waiting.Delete(sub)
sr.ready.Store(sub, struct{}{})
signalCh(sr.notifyCh)
}
} | go | func (s *StanServer) collectSentOrAck(sub *subState, sent bool, sequence uint64) {
sr := s.ssarepl
if sub.replicate == nil {
sub.replicate = &subSentAndAck{
sent: make([]uint64, 0, 100),
ack: make([]uint64, 0, 100),
}
}
r := sub.replicate
if sent {
r.sent = append(r.sent, sequence)
} else {
r.ack = append(r.ack, sequence)
}
// This function is called with exactly one event at a time.
// Use exact count to decide when to add to given map. This
// avoid the need for booleans to not add more than once.
l := len(r.sent) + len(r.ack)
if l == 1 {
sr.waiting.Store(sub, struct{}{})
} else if l == 100 {
sr.waiting.Delete(sub)
sr.ready.Store(sub, struct{}{})
signalCh(sr.notifyCh)
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"collectSentOrAck",
"(",
"sub",
"*",
"subState",
",",
"sent",
"bool",
",",
"sequence",
"uint64",
")",
"{",
"sr",
":=",
"s",
".",
"ssarepl",
"\n",
"if",
"sub",
".",
"replicate",
"==",
"nil",
"{",
"sub",
".",
"replicate",
"=",
"&",
"subSentAndAck",
"{",
"sent",
":",
"make",
"(",
"[",
"]",
"uint64",
",",
"0",
",",
"100",
")",
",",
"ack",
":",
"make",
"(",
"[",
"]",
"uint64",
",",
"0",
",",
"100",
")",
",",
"}",
"\n",
"}",
"\n",
"r",
":=",
"sub",
".",
"replicate",
"\n",
"if",
"sent",
"{",
"r",
".",
"sent",
"=",
"append",
"(",
"r",
".",
"sent",
",",
"sequence",
")",
"\n",
"}",
"else",
"{",
"r",
".",
"ack",
"=",
"append",
"(",
"r",
".",
"ack",
",",
"sequence",
")",
"\n",
"}",
"\n",
"l",
":=",
"len",
"(",
"r",
".",
"sent",
")",
"+",
"len",
"(",
"r",
".",
"ack",
")",
"\n",
"if",
"l",
"==",
"1",
"{",
"sr",
".",
"waiting",
".",
"Store",
"(",
"sub",
",",
"struct",
"{",
"}",
"{",
"}",
")",
"\n",
"}",
"else",
"if",
"l",
"==",
"100",
"{",
"sr",
".",
"waiting",
".",
"Delete",
"(",
"sub",
")",
"\n",
"sr",
".",
"ready",
".",
"Store",
"(",
"sub",
",",
"struct",
"{",
"}",
"{",
"}",
")",
"\n",
"signalCh",
"(",
"sr",
".",
"notifyCh",
")",
"\n",
"}",
"\n",
"}"
] | // Keep track of sent or ack messages.
// If the number of operations reach a certain threshold,
// the sub is added to list of subs that should be flushed asap.
// This call does not do actual RAFT replication and should not block.
// Caller holds the sub's Lock. | [
"Keep",
"track",
"of",
"sent",
"or",
"ack",
"messages",
".",
"If",
"the",
"number",
"of",
"operations",
"reach",
"a",
"certain",
"threshold",
"the",
"sub",
"is",
"added",
"to",
"list",
"of",
"subs",
"that",
"should",
"be",
"flushed",
"asap",
".",
"This",
"call",
"does",
"not",
"do",
"actual",
"RAFT",
"replication",
"and",
"should",
"not",
"block",
".",
"Caller",
"holds",
"the",
"sub",
"s",
"Lock",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3466-L3491 | train |
nats-io/nats-streaming-server | server/server.go | replicateSubSentAndAck | func (s *StanServer) replicateSubSentAndAck(sub *subState) {
var data []byte
sr := s.ssarepl
sub.Lock()
r := sub.replicate
if r != nil && len(r.sent)+len(r.ack) > 0 {
data = createSubSentAndAckProto(sub, r)
r.sent = r.sent[:0]
r.ack = r.ack[:0]
r.applying = true
}
sub.Unlock()
if data != nil {
if testSubSentAndAckSlowApply {
time.Sleep(100 * time.Millisecond)
}
s.raft.Apply(data, 0)
sub.Lock()
r = sub.replicate
// If r is nil it means either that the leader lost leadrship,
// in which case we don't do anything, or the sub/conn is being
// closed and endSubSentAndAckReplication() is waiting on a
// channel stored in "gates" map. If we find it, signal.
if r == nil {
if c, ok := sr.gates.Load(sub); ok {
sr.gates.Delete(sub)
signalCh(c.(chan struct{}))
}
} else {
r.applying = false
}
sub.Unlock()
}
} | go | func (s *StanServer) replicateSubSentAndAck(sub *subState) {
var data []byte
sr := s.ssarepl
sub.Lock()
r := sub.replicate
if r != nil && len(r.sent)+len(r.ack) > 0 {
data = createSubSentAndAckProto(sub, r)
r.sent = r.sent[:0]
r.ack = r.ack[:0]
r.applying = true
}
sub.Unlock()
if data != nil {
if testSubSentAndAckSlowApply {
time.Sleep(100 * time.Millisecond)
}
s.raft.Apply(data, 0)
sub.Lock()
r = sub.replicate
// If r is nil it means either that the leader lost leadrship,
// in which case we don't do anything, or the sub/conn is being
// closed and endSubSentAndAckReplication() is waiting on a
// channel stored in "gates" map. If we find it, signal.
if r == nil {
if c, ok := sr.gates.Load(sub); ok {
sr.gates.Delete(sub)
signalCh(c.(chan struct{}))
}
} else {
r.applying = false
}
sub.Unlock()
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"replicateSubSentAndAck",
"(",
"sub",
"*",
"subState",
")",
"{",
"var",
"data",
"[",
"]",
"byte",
"\n",
"sr",
":=",
"s",
".",
"ssarepl",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"r",
":=",
"sub",
".",
"replicate",
"\n",
"if",
"r",
"!=",
"nil",
"&&",
"len",
"(",
"r",
".",
"sent",
")",
"+",
"len",
"(",
"r",
".",
"ack",
")",
">",
"0",
"{",
"data",
"=",
"createSubSentAndAckProto",
"(",
"sub",
",",
"r",
")",
"\n",
"r",
".",
"sent",
"=",
"r",
".",
"sent",
"[",
":",
"0",
"]",
"\n",
"r",
".",
"ack",
"=",
"r",
".",
"ack",
"[",
":",
"0",
"]",
"\n",
"r",
".",
"applying",
"=",
"true",
"\n",
"}",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"if",
"data",
"!=",
"nil",
"{",
"if",
"testSubSentAndAckSlowApply",
"{",
"time",
".",
"Sleep",
"(",
"100",
"*",
"time",
".",
"Millisecond",
")",
"\n",
"}",
"\n",
"s",
".",
"raft",
".",
"Apply",
"(",
"data",
",",
"0",
")",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"r",
"=",
"sub",
".",
"replicate",
"\n",
"if",
"r",
"==",
"nil",
"{",
"if",
"c",
",",
"ok",
":=",
"sr",
".",
"gates",
".",
"Load",
"(",
"sub",
")",
";",
"ok",
"{",
"sr",
".",
"gates",
".",
"Delete",
"(",
"sub",
")",
"\n",
"signalCh",
"(",
"c",
".",
"(",
"chan",
"struct",
"{",
"}",
")",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"r",
".",
"applying",
"=",
"false",
"\n",
"}",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // Replicates through RAFT | [
"Replicates",
"through",
"RAFT"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3494-L3530 | train |
nats-io/nats-streaming-server | server/server.go | createSubSentAndAckProto | func createSubSentAndAckProto(sub *subState, r *subSentAndAck) []byte {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_SendAndAck,
SubSentAck: &spb.SubSentAndAck{
Channel: sub.subject,
AckInbox: sub.AckInbox,
Sent: r.sent,
Ack: r.ack,
},
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
return data
} | go | func createSubSentAndAckProto(sub *subState, r *subSentAndAck) []byte {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_SendAndAck,
SubSentAck: &spb.SubSentAndAck{
Channel: sub.subject,
AckInbox: sub.AckInbox,
Sent: r.sent,
Ack: r.ack,
},
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
return data
} | [
"func",
"createSubSentAndAckProto",
"(",
"sub",
"*",
"subState",
",",
"r",
"*",
"subSentAndAck",
")",
"[",
"]",
"byte",
"{",
"op",
":=",
"&",
"spb",
".",
"RaftOperation",
"{",
"OpType",
":",
"spb",
".",
"RaftOperation_SendAndAck",
",",
"SubSentAck",
":",
"&",
"spb",
".",
"SubSentAndAck",
"{",
"Channel",
":",
"sub",
".",
"subject",
",",
"AckInbox",
":",
"sub",
".",
"AckInbox",
",",
"Sent",
":",
"r",
".",
"sent",
",",
"Ack",
":",
"r",
".",
"ack",
",",
"}",
",",
"}",
"\n",
"data",
",",
"err",
":=",
"op",
".",
"Marshal",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"return",
"data",
"\n",
"}"
] | // Little helper function to create a RaftOperation_SendAndAck protocol
// and serialize it. | [
"Little",
"helper",
"function",
"to",
"create",
"a",
"RaftOperation_SendAndAck",
"protocol",
"and",
"serialize",
"it",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3534-L3549 | train |
nats-io/nats-streaming-server | server/server.go | clearSentAndAck | func (s *StanServer) clearSentAndAck(sub *subState) {
sr := s.ssarepl
sr.waiting.Delete(sub)
sr.ready.Delete(sub)
sub.replicate = nil
} | go | func (s *StanServer) clearSentAndAck(sub *subState) {
sr := s.ssarepl
sr.waiting.Delete(sub)
sr.ready.Delete(sub)
sub.replicate = nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"clearSentAndAck",
"(",
"sub",
"*",
"subState",
")",
"{",
"sr",
":=",
"s",
".",
"ssarepl",
"\n",
"sr",
".",
"waiting",
".",
"Delete",
"(",
"sub",
")",
"\n",
"sr",
".",
"ready",
".",
"Delete",
"(",
"sub",
")",
"\n",
"sub",
".",
"replicate",
"=",
"nil",
"\n",
"}"
] | // Sub lock is held on entry | [
"Sub",
"lock",
"is",
"held",
"on",
"entry"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3588-L3593 | train |
nats-io/nats-streaming-server | server/server.go | sendMsgToSub | func (s *StanServer) sendMsgToSub(sub *subState, m *pb.MsgProto, force bool) (bool, bool) {
if sub == nil || m == nil || !sub.initialized || (sub.newOnHold && !m.Redelivered) {
return false, false
}
// Don't send if we have too many outstanding already, unless forced to send.
ap := int32(len(sub.acksPending))
if !force && (ap >= sub.MaxInFlight) {
sub.stalled = true
return false, false
}
if s.trace {
var action string
if m.Redelivered {
action = "Redelivering"
} else {
action = "Delivering"
}
s.log.Tracef("[Client:%s] %s msg to subid=%d, subject=%s, seq=%d",
sub.ClientID, action, sub.ID, m.Subject, m.Sequence)
}
// Marshal of a pb.MsgProto cannot fail
b, _ := m.Marshal()
// but protect against a store implementation that may incorrectly
// return an empty message.
if len(b) == 0 {
panic("store implementation returned an empty message")
}
if err := s.ncs.Publish(sub.Inbox, b); err != nil {
s.log.Errorf("[Client:%s] Failed sending to subid=%d, subject=%s, seq=%d, err=%v",
sub.ClientID, sub.ID, m.Subject, m.Sequence, err)
return false, false
}
// Setup the ackTimer as needed now. I don't want to use defer in this
// function, and want to make sure that if we exit before the end, the
// timer is set. It will be adjusted/stopped as needed.
if sub.ackTimer == nil {
s.setupAckTimer(sub, sub.ackWait)
}
// If this message is already pending, do not add it again to the store.
if expTime, present := sub.acksPending[m.Sequence]; present {
// However, update the next expiration time.
if expTime == 0 {
// That can happen after a server restart, so need to use
// the current time.
expTime = time.Now().UnixNano()
}
// bump the next expiration time with the sub's ackWait.
expTime += int64(sub.ackWait)
sub.acksPending[m.Sequence] = expTime
return true, true
}
// If in cluster mode, schedule replication of the sent event.
if s.isClustered {
s.collectSentOrAck(sub, replicateSent, m.Sequence)
}
// Store in storage
if err := sub.store.AddSeqPending(sub.ID, m.Sequence); err != nil {
s.log.Errorf("[Client:%s] Unable to add pending message to subid=%d, subject=%s, seq=%d, err=%v",
sub.ClientID, sub.ID, sub.subject, m.Sequence, err)
return false, false
}
// Update LastSent if applicable
if m.Sequence > sub.LastSent {
sub.LastSent = m.Sequence
}
// Store in ackPending.
// Use current time to compute expiration time instead of m.Timestamp.
// A message can be persisted in the log and send much later to a
// new subscriber. Basing expiration time on m.Timestamp would
// likely set the expiration time in the past!
sub.acksPending[m.Sequence] = time.Now().UnixNano() + int64(sub.ackWait)
// Now that we have added to acksPending, check again if we
// have reached the max and tell the caller that it should not
// be sending more at this time.
if !force && (ap+1 == sub.MaxInFlight) {
sub.stalled = true
return true, false
}
return true, true
} | go | func (s *StanServer) sendMsgToSub(sub *subState, m *pb.MsgProto, force bool) (bool, bool) {
if sub == nil || m == nil || !sub.initialized || (sub.newOnHold && !m.Redelivered) {
return false, false
}
// Don't send if we have too many outstanding already, unless forced to send.
ap := int32(len(sub.acksPending))
if !force && (ap >= sub.MaxInFlight) {
sub.stalled = true
return false, false
}
if s.trace {
var action string
if m.Redelivered {
action = "Redelivering"
} else {
action = "Delivering"
}
s.log.Tracef("[Client:%s] %s msg to subid=%d, subject=%s, seq=%d",
sub.ClientID, action, sub.ID, m.Subject, m.Sequence)
}
// Marshal of a pb.MsgProto cannot fail
b, _ := m.Marshal()
// but protect against a store implementation that may incorrectly
// return an empty message.
if len(b) == 0 {
panic("store implementation returned an empty message")
}
if err := s.ncs.Publish(sub.Inbox, b); err != nil {
s.log.Errorf("[Client:%s] Failed sending to subid=%d, subject=%s, seq=%d, err=%v",
sub.ClientID, sub.ID, m.Subject, m.Sequence, err)
return false, false
}
// Setup the ackTimer as needed now. I don't want to use defer in this
// function, and want to make sure that if we exit before the end, the
// timer is set. It will be adjusted/stopped as needed.
if sub.ackTimer == nil {
s.setupAckTimer(sub, sub.ackWait)
}
// If this message is already pending, do not add it again to the store.
if expTime, present := sub.acksPending[m.Sequence]; present {
// However, update the next expiration time.
if expTime == 0 {
// That can happen after a server restart, so need to use
// the current time.
expTime = time.Now().UnixNano()
}
// bump the next expiration time with the sub's ackWait.
expTime += int64(sub.ackWait)
sub.acksPending[m.Sequence] = expTime
return true, true
}
// If in cluster mode, schedule replication of the sent event.
if s.isClustered {
s.collectSentOrAck(sub, replicateSent, m.Sequence)
}
// Store in storage
if err := sub.store.AddSeqPending(sub.ID, m.Sequence); err != nil {
s.log.Errorf("[Client:%s] Unable to add pending message to subid=%d, subject=%s, seq=%d, err=%v",
sub.ClientID, sub.ID, sub.subject, m.Sequence, err)
return false, false
}
// Update LastSent if applicable
if m.Sequence > sub.LastSent {
sub.LastSent = m.Sequence
}
// Store in ackPending.
// Use current time to compute expiration time instead of m.Timestamp.
// A message can be persisted in the log and send much later to a
// new subscriber. Basing expiration time on m.Timestamp would
// likely set the expiration time in the past!
sub.acksPending[m.Sequence] = time.Now().UnixNano() + int64(sub.ackWait)
// Now that we have added to acksPending, check again if we
// have reached the max and tell the caller that it should not
// be sending more at this time.
if !force && (ap+1 == sub.MaxInFlight) {
sub.stalled = true
return true, false
}
return true, true
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"sendMsgToSub",
"(",
"sub",
"*",
"subState",
",",
"m",
"*",
"pb",
".",
"MsgProto",
",",
"force",
"bool",
")",
"(",
"bool",
",",
"bool",
")",
"{",
"if",
"sub",
"==",
"nil",
"||",
"m",
"==",
"nil",
"||",
"!",
"sub",
".",
"initialized",
"||",
"(",
"sub",
".",
"newOnHold",
"&&",
"!",
"m",
".",
"Redelivered",
")",
"{",
"return",
"false",
",",
"false",
"\n",
"}",
"\n",
"ap",
":=",
"int32",
"(",
"len",
"(",
"sub",
".",
"acksPending",
")",
")",
"\n",
"if",
"!",
"force",
"&&",
"(",
"ap",
">=",
"sub",
".",
"MaxInFlight",
")",
"{",
"sub",
".",
"stalled",
"=",
"true",
"\n",
"return",
"false",
",",
"false",
"\n",
"}",
"\n",
"if",
"s",
".",
"trace",
"{",
"var",
"action",
"string",
"\n",
"if",
"m",
".",
"Redelivered",
"{",
"action",
"=",
"\"Redelivering\"",
"\n",
"}",
"else",
"{",
"action",
"=",
"\"Delivering\"",
"\n",
"}",
"\n",
"s",
".",
"log",
".",
"Tracef",
"(",
"\"[Client:%s] %s msg to subid=%d, subject=%s, seq=%d\"",
",",
"sub",
".",
"ClientID",
",",
"action",
",",
"sub",
".",
"ID",
",",
"m",
".",
"Subject",
",",
"m",
".",
"Sequence",
")",
"\n",
"}",
"\n",
"b",
",",
"_",
":=",
"m",
".",
"Marshal",
"(",
")",
"\n",
"if",
"len",
"(",
"b",
")",
"==",
"0",
"{",
"panic",
"(",
"\"store implementation returned an empty message\"",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"s",
".",
"ncs",
".",
"Publish",
"(",
"sub",
".",
"Inbox",
",",
"b",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"[Client:%s] Failed sending to subid=%d, subject=%s, seq=%d, err=%v\"",
",",
"sub",
".",
"ClientID",
",",
"sub",
".",
"ID",
",",
"m",
".",
"Subject",
",",
"m",
".",
"Sequence",
",",
"err",
")",
"\n",
"return",
"false",
",",
"false",
"\n",
"}",
"\n",
"if",
"sub",
".",
"ackTimer",
"==",
"nil",
"{",
"s",
".",
"setupAckTimer",
"(",
"sub",
",",
"sub",
".",
"ackWait",
")",
"\n",
"}",
"\n",
"if",
"expTime",
",",
"present",
":=",
"sub",
".",
"acksPending",
"[",
"m",
".",
"Sequence",
"]",
";",
"present",
"{",
"if",
"expTime",
"==",
"0",
"{",
"expTime",
"=",
"time",
".",
"Now",
"(",
")",
".",
"UnixNano",
"(",
")",
"\n",
"}",
"\n",
"expTime",
"+=",
"int64",
"(",
"sub",
".",
"ackWait",
")",
"\n",
"sub",
".",
"acksPending",
"[",
"m",
".",
"Sequence",
"]",
"=",
"expTime",
"\n",
"return",
"true",
",",
"true",
"\n",
"}",
"\n",
"if",
"s",
".",
"isClustered",
"{",
"s",
".",
"collectSentOrAck",
"(",
"sub",
",",
"replicateSent",
",",
"m",
".",
"Sequence",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"sub",
".",
"store",
".",
"AddSeqPending",
"(",
"sub",
".",
"ID",
",",
"m",
".",
"Sequence",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"[Client:%s] Unable to add pending message to subid=%d, subject=%s, seq=%d, err=%v\"",
",",
"sub",
".",
"ClientID",
",",
"sub",
".",
"ID",
",",
"sub",
".",
"subject",
",",
"m",
".",
"Sequence",
",",
"err",
")",
"\n",
"return",
"false",
",",
"false",
"\n",
"}",
"\n",
"if",
"m",
".",
"Sequence",
">",
"sub",
".",
"LastSent",
"{",
"sub",
".",
"LastSent",
"=",
"m",
".",
"Sequence",
"\n",
"}",
"\n",
"sub",
".",
"acksPending",
"[",
"m",
".",
"Sequence",
"]",
"=",
"time",
".",
"Now",
"(",
")",
".",
"UnixNano",
"(",
")",
"+",
"int64",
"(",
"sub",
".",
"ackWait",
")",
"\n",
"if",
"!",
"force",
"&&",
"(",
"ap",
"+",
"1",
"==",
"sub",
".",
"MaxInFlight",
")",
"{",
"sub",
".",
"stalled",
"=",
"true",
"\n",
"return",
"true",
",",
"false",
"\n",
"}",
"\n",
"return",
"true",
",",
"true",
"\n",
"}"
] | // Sends the message to the subscriber
// Unless `force` is true, in which case message is always sent, if the number
// of acksPending is greater or equal to the sub's MaxInFlight limit, messages
// are not sent and subscriber is marked as stalled.
// Sub lock should be held before calling. | [
"Sends",
"the",
"message",
"to",
"the",
"subscriber",
"Unless",
"force",
"is",
"true",
"in",
"which",
"case",
"message",
"is",
"always",
"sent",
"if",
"the",
"number",
"of",
"acksPending",
"is",
"greater",
"or",
"equal",
"to",
"the",
"sub",
"s",
"MaxInFlight",
"limit",
"messages",
"are",
"not",
"sent",
"and",
"subscriber",
"is",
"marked",
"as",
"stalled",
".",
"Sub",
"lock",
"should",
"be",
"held",
"before",
"calling",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3688-L3778 | train |
nats-io/nats-streaming-server | server/server.go | setupAckTimer | func (s *StanServer) setupAckTimer(sub *subState, d time.Duration) {
sub.ackTimer = time.AfterFunc(d, func() {
s.performAckExpirationRedelivery(sub, false)
})
} | go | func (s *StanServer) setupAckTimer(sub *subState, d time.Duration) {
sub.ackTimer = time.AfterFunc(d, func() {
s.performAckExpirationRedelivery(sub, false)
})
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"setupAckTimer",
"(",
"sub",
"*",
"subState",
",",
"d",
"time",
".",
"Duration",
")",
"{",
"sub",
".",
"ackTimer",
"=",
"time",
".",
"AfterFunc",
"(",
"d",
",",
"func",
"(",
")",
"{",
"s",
".",
"performAckExpirationRedelivery",
"(",
"sub",
",",
"false",
")",
"\n",
"}",
")",
"\n",
"}"
] | // Sets up the ackTimer to fire at the given duration.
// sub's lock held on entry. | [
"Sets",
"up",
"the",
"ackTimer",
"to",
"fire",
"at",
"the",
"given",
"duration",
".",
"sub",
"s",
"lock",
"held",
"on",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L3782-L3786 | train |
nats-io/nats-streaming-server | server/server.go | sendDeleteChannelRequest | func (s *StanServer) sendDeleteChannelRequest(c *channel) {
iopm := &ioPendingMsg{c: c, dc: true}
s.ioChannel <- iopm
} | go | func (s *StanServer) sendDeleteChannelRequest(c *channel) {
iopm := &ioPendingMsg{c: c, dc: true}
s.ioChannel <- iopm
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"sendDeleteChannelRequest",
"(",
"c",
"*",
"channel",
")",
"{",
"iopm",
":=",
"&",
"ioPendingMsg",
"{",
"c",
":",
"c",
",",
"dc",
":",
"true",
"}",
"\n",
"s",
".",
"ioChannel",
"<-",
"iopm",
"\n",
"}"
] | // Sends a special ioPendingMsg to indicate that we should attempt
// to delete the given channel. | [
"Sends",
"a",
"special",
"ioPendingMsg",
"to",
"indicate",
"that",
"we",
"should",
"attempt",
"to",
"delete",
"the",
"given",
"channel",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4007-L4010 | train |
nats-io/nats-streaming-server | server/server.go | ackPublisher | func (s *StanServer) ackPublisher(iopm *ioPendingMsg) {
msgAck := &iopm.pa
msgAck.Guid = iopm.pm.Guid
needed := msgAck.Size()
s.tmpBuf = util.EnsureBufBigEnough(s.tmpBuf, needed)
n, _ := msgAck.MarshalTo(s.tmpBuf)
if s.trace {
pm := &iopm.pm
s.log.Tracef("[Client:%s] Acking Publisher subj=%s guid=%s", pm.ClientID, pm.Subject, pm.Guid)
}
s.ncs.Publish(iopm.m.Reply, s.tmpBuf[:n])
} | go | func (s *StanServer) ackPublisher(iopm *ioPendingMsg) {
msgAck := &iopm.pa
msgAck.Guid = iopm.pm.Guid
needed := msgAck.Size()
s.tmpBuf = util.EnsureBufBigEnough(s.tmpBuf, needed)
n, _ := msgAck.MarshalTo(s.tmpBuf)
if s.trace {
pm := &iopm.pm
s.log.Tracef("[Client:%s] Acking Publisher subj=%s guid=%s", pm.ClientID, pm.Subject, pm.Guid)
}
s.ncs.Publish(iopm.m.Reply, s.tmpBuf[:n])
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"ackPublisher",
"(",
"iopm",
"*",
"ioPendingMsg",
")",
"{",
"msgAck",
":=",
"&",
"iopm",
".",
"pa",
"\n",
"msgAck",
".",
"Guid",
"=",
"iopm",
".",
"pm",
".",
"Guid",
"\n",
"needed",
":=",
"msgAck",
".",
"Size",
"(",
")",
"\n",
"s",
".",
"tmpBuf",
"=",
"util",
".",
"EnsureBufBigEnough",
"(",
"s",
".",
"tmpBuf",
",",
"needed",
")",
"\n",
"n",
",",
"_",
":=",
"msgAck",
".",
"MarshalTo",
"(",
"s",
".",
"tmpBuf",
")",
"\n",
"if",
"s",
".",
"trace",
"{",
"pm",
":=",
"&",
"iopm",
".",
"pm",
"\n",
"s",
".",
"log",
".",
"Tracef",
"(",
"\"[Client:%s] Acking Publisher subj=%s guid=%s\"",
",",
"pm",
".",
"ClientID",
",",
"pm",
".",
"Subject",
",",
"pm",
".",
"Guid",
")",
"\n",
"}",
"\n",
"s",
".",
"ncs",
".",
"Publish",
"(",
"iopm",
".",
"m",
".",
"Reply",
",",
"s",
".",
"tmpBuf",
"[",
":",
"n",
"]",
")",
"\n",
"}"
] | // ackPublisher sends the ack for a message. | [
"ackPublisher",
"sends",
"the",
"ack",
"for",
"a",
"message",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4052-L4063 | train |
nats-io/nats-streaming-server | server/server.go | deleteFromList | func (sub *subState) deleteFromList(sl []*subState) ([]*subState, bool) {
for i := 0; i < len(sl); i++ {
if sl[i] == sub {
sl[i] = sl[len(sl)-1]
sl[len(sl)-1] = nil
sl = sl[:len(sl)-1]
return shrinkSubListIfNeeded(sl), true
}
}
return sl, false
} | go | func (sub *subState) deleteFromList(sl []*subState) ([]*subState, bool) {
for i := 0; i < len(sl); i++ {
if sl[i] == sub {
sl[i] = sl[len(sl)-1]
sl[len(sl)-1] = nil
sl = sl[:len(sl)-1]
return shrinkSubListIfNeeded(sl), true
}
}
return sl, false
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"deleteFromList",
"(",
"sl",
"[",
"]",
"*",
"subState",
")",
"(",
"[",
"]",
"*",
"subState",
",",
"bool",
")",
"{",
"for",
"i",
":=",
"0",
";",
"i",
"<",
"len",
"(",
"sl",
")",
";",
"i",
"++",
"{",
"if",
"sl",
"[",
"i",
"]",
"==",
"sub",
"{",
"sl",
"[",
"i",
"]",
"=",
"sl",
"[",
"len",
"(",
"sl",
")",
"-",
"1",
"]",
"\n",
"sl",
"[",
"len",
"(",
"sl",
")",
"-",
"1",
"]",
"=",
"nil",
"\n",
"sl",
"=",
"sl",
"[",
":",
"len",
"(",
"sl",
")",
"-",
"1",
"]",
"\n",
"return",
"shrinkSubListIfNeeded",
"(",
"sl",
")",
",",
"true",
"\n",
"}",
"\n",
"}",
"\n",
"return",
"sl",
",",
"false",
"\n",
"}"
] | // Delete a sub from a given list. | [
"Delete",
"a",
"sub",
"from",
"a",
"given",
"list",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4066-L4076 | train |
nats-io/nats-streaming-server | server/server.go | shrinkSubListIfNeeded | func shrinkSubListIfNeeded(sl []*subState) []*subState {
lsl := len(sl)
csl := cap(sl)
// Don't bother if list not too big
if csl <= 8 {
return sl
}
pFree := float32(csl-lsl) / float32(csl)
if pFree > 0.50 {
return append([]*subState(nil), sl...)
}
return sl
} | go | func shrinkSubListIfNeeded(sl []*subState) []*subState {
lsl := len(sl)
csl := cap(sl)
// Don't bother if list not too big
if csl <= 8 {
return sl
}
pFree := float32(csl-lsl) / float32(csl)
if pFree > 0.50 {
return append([]*subState(nil), sl...)
}
return sl
} | [
"func",
"shrinkSubListIfNeeded",
"(",
"sl",
"[",
"]",
"*",
"subState",
")",
"[",
"]",
"*",
"subState",
"{",
"lsl",
":=",
"len",
"(",
"sl",
")",
"\n",
"csl",
":=",
"cap",
"(",
"sl",
")",
"\n",
"if",
"csl",
"<=",
"8",
"{",
"return",
"sl",
"\n",
"}",
"\n",
"pFree",
":=",
"float32",
"(",
"csl",
"-",
"lsl",
")",
"/",
"float32",
"(",
"csl",
")",
"\n",
"if",
"pFree",
">",
"0.50",
"{",
"return",
"append",
"(",
"[",
"]",
"*",
"subState",
"(",
"nil",
")",
",",
"sl",
"...",
")",
"\n",
"}",
"\n",
"return",
"sl",
"\n",
"}"
] | // Checks if we need to do a resize. This is for very large growth then
// subsequent return to a more normal size. | [
"Checks",
"if",
"we",
"need",
"to",
"do",
"a",
"resize",
".",
"This",
"is",
"for",
"very",
"large",
"growth",
"then",
"subsequent",
"return",
"to",
"a",
"more",
"normal",
"size",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4080-L4092 | train |
nats-io/nats-streaming-server | server/server.go | removeAllNonDurableSubscribers | func (s *StanServer) removeAllNonDurableSubscribers(client *client) {
// client has been unregistered and no other routine can add/remove
// subscriptions, so it is safe to use the original.
client.RLock()
subs := client.subs
clientID := client.info.ID
client.RUnlock()
var (
storesToFlush = map[string]stores.SubStore{}
channels = map[string]struct{}{}
)
for _, sub := range subs {
sub.RLock()
subject := sub.subject
isDurable := sub.IsDurable
subStore := sub.store
sub.RUnlock()
// Get the channel
c := s.channels.get(subject)
if c == nil {
continue
}
// Don't remove durables
c.ss.Remove(c, sub, false)
// If the sub is a durable, there may have been an update to storage,
// so we will want to flush the store. In clustering, during replay,
// subStore may be nil.
if isDurable && subStore != nil {
storesToFlush[subject] = subStore
}
channels[subject] = struct{}{}
}
if len(storesToFlush) > 0 {
for subject, subStore := range storesToFlush {
if err := subStore.Flush(); err != nil {
s.log.Errorf("[Client:%s] Error flushing store while removing subscriptions: subject=%s, err=%v", clientID, subject, err)
}
}
}
for channel := range channels {
s.channels.maybeStartChannelDeleteTimer(channel, nil)
}
} | go | func (s *StanServer) removeAllNonDurableSubscribers(client *client) {
// client has been unregistered and no other routine can add/remove
// subscriptions, so it is safe to use the original.
client.RLock()
subs := client.subs
clientID := client.info.ID
client.RUnlock()
var (
storesToFlush = map[string]stores.SubStore{}
channels = map[string]struct{}{}
)
for _, sub := range subs {
sub.RLock()
subject := sub.subject
isDurable := sub.IsDurable
subStore := sub.store
sub.RUnlock()
// Get the channel
c := s.channels.get(subject)
if c == nil {
continue
}
// Don't remove durables
c.ss.Remove(c, sub, false)
// If the sub is a durable, there may have been an update to storage,
// so we will want to flush the store. In clustering, during replay,
// subStore may be nil.
if isDurable && subStore != nil {
storesToFlush[subject] = subStore
}
channels[subject] = struct{}{}
}
if len(storesToFlush) > 0 {
for subject, subStore := range storesToFlush {
if err := subStore.Flush(); err != nil {
s.log.Errorf("[Client:%s] Error flushing store while removing subscriptions: subject=%s, err=%v", clientID, subject, err)
}
}
}
for channel := range channels {
s.channels.maybeStartChannelDeleteTimer(channel, nil)
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"removeAllNonDurableSubscribers",
"(",
"client",
"*",
"client",
")",
"{",
"client",
".",
"RLock",
"(",
")",
"\n",
"subs",
":=",
"client",
".",
"subs",
"\n",
"clientID",
":=",
"client",
".",
"info",
".",
"ID",
"\n",
"client",
".",
"RUnlock",
"(",
")",
"\n",
"var",
"(",
"storesToFlush",
"=",
"map",
"[",
"string",
"]",
"stores",
".",
"SubStore",
"{",
"}",
"\n",
"channels",
"=",
"map",
"[",
"string",
"]",
"struct",
"{",
"}",
"{",
"}",
"\n",
")",
"\n",
"for",
"_",
",",
"sub",
":=",
"range",
"subs",
"{",
"sub",
".",
"RLock",
"(",
")",
"\n",
"subject",
":=",
"sub",
".",
"subject",
"\n",
"isDurable",
":=",
"sub",
".",
"IsDurable",
"\n",
"subStore",
":=",
"sub",
".",
"store",
"\n",
"sub",
".",
"RUnlock",
"(",
")",
"\n",
"c",
":=",
"s",
".",
"channels",
".",
"get",
"(",
"subject",
")",
"\n",
"if",
"c",
"==",
"nil",
"{",
"continue",
"\n",
"}",
"\n",
"c",
".",
"ss",
".",
"Remove",
"(",
"c",
",",
"sub",
",",
"false",
")",
"\n",
"if",
"isDurable",
"&&",
"subStore",
"!=",
"nil",
"{",
"storesToFlush",
"[",
"subject",
"]",
"=",
"subStore",
"\n",
"}",
"\n",
"channels",
"[",
"subject",
"]",
"=",
"struct",
"{",
"}",
"{",
"}",
"\n",
"}",
"\n",
"if",
"len",
"(",
"storesToFlush",
")",
">",
"0",
"{",
"for",
"subject",
",",
"subStore",
":=",
"range",
"storesToFlush",
"{",
"if",
"err",
":=",
"subStore",
".",
"Flush",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"[Client:%s] Error flushing store while removing subscriptions: subject=%s, err=%v\"",
",",
"clientID",
",",
"subject",
",",
"err",
")",
"\n",
"}",
"\n",
"}",
"\n",
"}",
"\n",
"for",
"channel",
":=",
"range",
"channels",
"{",
"s",
".",
"channels",
".",
"maybeStartChannelDeleteTimer",
"(",
"channel",
",",
"nil",
")",
"\n",
"}",
"\n",
"}"
] | // removeAllNonDurableSubscribers will remove all non-durable subscribers for the client. | [
"removeAllNonDurableSubscribers",
"will",
"remove",
"all",
"non",
"-",
"durable",
"subscribers",
"for",
"the",
"client",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4095-L4137 | train |
nats-io/nats-streaming-server | server/server.go | processUnsubscribeRequest | func (s *StanServer) processUnsubscribeRequest(m *nats.Msg) {
req := &pb.UnsubscribeRequest{}
err := req.Unmarshal(m.Data)
if err != nil {
s.log.Errorf("Invalid unsub request from %s", m.Subject)
s.sendSubscriptionResponseErr(m.Reply, ErrInvalidUnsubReq)
return
}
s.performmUnsubOrCloseSubscription(m, req, false)
} | go | func (s *StanServer) processUnsubscribeRequest(m *nats.Msg) {
req := &pb.UnsubscribeRequest{}
err := req.Unmarshal(m.Data)
if err != nil {
s.log.Errorf("Invalid unsub request from %s", m.Subject)
s.sendSubscriptionResponseErr(m.Reply, ErrInvalidUnsubReq)
return
}
s.performmUnsubOrCloseSubscription(m, req, false)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processUnsubscribeRequest",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"req",
":=",
"&",
"pb",
".",
"UnsubscribeRequest",
"{",
"}",
"\n",
"err",
":=",
"req",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Invalid unsub request from %s\"",
",",
"m",
".",
"Subject",
")",
"\n",
"s",
".",
"sendSubscriptionResponseErr",
"(",
"m",
".",
"Reply",
",",
"ErrInvalidUnsubReq",
")",
"\n",
"return",
"\n",
"}",
"\n",
"s",
".",
"performmUnsubOrCloseSubscription",
"(",
"m",
",",
"req",
",",
"false",
")",
"\n",
"}"
] | // processUnsubscribeRequest will process a unsubscribe request. | [
"processUnsubscribeRequest",
"will",
"process",
"a",
"unsubscribe",
"request",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4140-L4149 | train |
nats-io/nats-streaming-server | server/server.go | performmUnsubOrCloseSubscription | func (s *StanServer) performmUnsubOrCloseSubscription(m *nats.Msg, req *pb.UnsubscribeRequest, isSubClose bool) {
// With partitioning, first verify that this server is handling this
// channel. If not, do not return an error, since another server will
// handle it. If no other server is, the client will get a timeout.
if s.partitions != nil {
if r := s.partitions.sl.Match(req.Subject); len(r) == 0 {
return
}
}
s.barrier(func() {
var err error
if s.isClustered {
if isSubClose {
err = s.replicateCloseSubscription(req)
} else {
err = s.replicateRemoveSubscription(req)
}
} else {
s.closeMu.Lock()
err = s.unsubscribe(req, isSubClose)
s.closeMu.Unlock()
}
// If there was an error, it has been already logged.
if err == nil {
// This will check if the channel has MaxInactivity defined,
// if so and there is no active subscription, it will start the
// delete timer.
s.channels.maybeStartChannelDeleteTimer(req.Subject, nil)
}
// If err is nil, it will be a non-error response
s.sendSubscriptionResponseErr(m.Reply, err)
})
} | go | func (s *StanServer) performmUnsubOrCloseSubscription(m *nats.Msg, req *pb.UnsubscribeRequest, isSubClose bool) {
// With partitioning, first verify that this server is handling this
// channel. If not, do not return an error, since another server will
// handle it. If no other server is, the client will get a timeout.
if s.partitions != nil {
if r := s.partitions.sl.Match(req.Subject); len(r) == 0 {
return
}
}
s.barrier(func() {
var err error
if s.isClustered {
if isSubClose {
err = s.replicateCloseSubscription(req)
} else {
err = s.replicateRemoveSubscription(req)
}
} else {
s.closeMu.Lock()
err = s.unsubscribe(req, isSubClose)
s.closeMu.Unlock()
}
// If there was an error, it has been already logged.
if err == nil {
// This will check if the channel has MaxInactivity defined,
// if so and there is no active subscription, it will start the
// delete timer.
s.channels.maybeStartChannelDeleteTimer(req.Subject, nil)
}
// If err is nil, it will be a non-error response
s.sendSubscriptionResponseErr(m.Reply, err)
})
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"performmUnsubOrCloseSubscription",
"(",
"m",
"*",
"nats",
".",
"Msg",
",",
"req",
"*",
"pb",
".",
"UnsubscribeRequest",
",",
"isSubClose",
"bool",
")",
"{",
"if",
"s",
".",
"partitions",
"!=",
"nil",
"{",
"if",
"r",
":=",
"s",
".",
"partitions",
".",
"sl",
".",
"Match",
"(",
"req",
".",
"Subject",
")",
";",
"len",
"(",
"r",
")",
"==",
"0",
"{",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"s",
".",
"barrier",
"(",
"func",
"(",
")",
"{",
"var",
"err",
"error",
"\n",
"if",
"s",
".",
"isClustered",
"{",
"if",
"isSubClose",
"{",
"err",
"=",
"s",
".",
"replicateCloseSubscription",
"(",
"req",
")",
"\n",
"}",
"else",
"{",
"err",
"=",
"s",
".",
"replicateRemoveSubscription",
"(",
"req",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"s",
".",
"closeMu",
".",
"Lock",
"(",
")",
"\n",
"err",
"=",
"s",
".",
"unsubscribe",
"(",
"req",
",",
"isSubClose",
")",
"\n",
"s",
".",
"closeMu",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"if",
"err",
"==",
"nil",
"{",
"s",
".",
"channels",
".",
"maybeStartChannelDeleteTimer",
"(",
"req",
".",
"Subject",
",",
"nil",
")",
"\n",
"}",
"\n",
"s",
".",
"sendSubscriptionResponseErr",
"(",
"m",
".",
"Reply",
",",
"err",
")",
"\n",
"}",
")",
"\n",
"}"
] | // performmUnsubOrCloseSubscription processes the unsub or close subscription
// request. | [
"performmUnsubOrCloseSubscription",
"processes",
"the",
"unsub",
"or",
"close",
"subscription",
"request",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4184-L4219 | train |
nats-io/nats-streaming-server | server/server.go | clearAckTimer | func (sub *subState) clearAckTimer() {
if sub.ackTimer != nil {
sub.ackTimer.Stop()
sub.ackTimer = nil
}
} | go | func (sub *subState) clearAckTimer() {
if sub.ackTimer != nil {
sub.ackTimer.Stop()
sub.ackTimer = nil
}
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"clearAckTimer",
"(",
")",
"{",
"if",
"sub",
".",
"ackTimer",
"!=",
"nil",
"{",
"sub",
".",
"ackTimer",
".",
"Stop",
"(",
")",
"\n",
"sub",
".",
"ackTimer",
"=",
"nil",
"\n",
"}",
"\n",
"}"
] | // Clear the ackTimer.
// sub Lock held in entry. | [
"Clear",
"the",
"ackTimer",
".",
"sub",
"Lock",
"held",
"in",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4305-L4310 | train |
nats-io/nats-streaming-server | server/server.go | adjustAckTimer | func (sub *subState) adjustAckTimer(nextExpirationTime int64) {
sub.Lock()
defer sub.Unlock()
// Possible that the subscriber has been destroyed, and timer cleared
if sub.ackTimer == nil {
return
}
// Check if there are still pending acks
if len(sub.acksPending) > 0 {
// Capture time
now := time.Now().UnixNano()
// If the next expiration time is 0 or less than now,
// use the default ackWait
if nextExpirationTime <= now {
sub.ackTimer.Reset(sub.ackWait)
} else {
// Compute the time the ackTimer should fire, based
// on the given next expiration time and now.
fireIn := (nextExpirationTime - now)
sub.ackTimer.Reset(time.Duration(fireIn))
}
} else {
// No more pending acks, clear the timer.
sub.clearAckTimer()
}
} | go | func (sub *subState) adjustAckTimer(nextExpirationTime int64) {
sub.Lock()
defer sub.Unlock()
// Possible that the subscriber has been destroyed, and timer cleared
if sub.ackTimer == nil {
return
}
// Check if there are still pending acks
if len(sub.acksPending) > 0 {
// Capture time
now := time.Now().UnixNano()
// If the next expiration time is 0 or less than now,
// use the default ackWait
if nextExpirationTime <= now {
sub.ackTimer.Reset(sub.ackWait)
} else {
// Compute the time the ackTimer should fire, based
// on the given next expiration time and now.
fireIn := (nextExpirationTime - now)
sub.ackTimer.Reset(time.Duration(fireIn))
}
} else {
// No more pending acks, clear the timer.
sub.clearAckTimer()
}
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"adjustAckTimer",
"(",
"nextExpirationTime",
"int64",
")",
"{",
"sub",
".",
"Lock",
"(",
")",
"\n",
"defer",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"if",
"sub",
".",
"ackTimer",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"if",
"len",
"(",
"sub",
".",
"acksPending",
")",
">",
"0",
"{",
"now",
":=",
"time",
".",
"Now",
"(",
")",
".",
"UnixNano",
"(",
")",
"\n",
"if",
"nextExpirationTime",
"<=",
"now",
"{",
"sub",
".",
"ackTimer",
".",
"Reset",
"(",
"sub",
".",
"ackWait",
")",
"\n",
"}",
"else",
"{",
"fireIn",
":=",
"(",
"nextExpirationTime",
"-",
"now",
")",
"\n",
"sub",
".",
"ackTimer",
".",
"Reset",
"(",
"time",
".",
"Duration",
"(",
"fireIn",
")",
")",
"\n",
"}",
"\n",
"}",
"else",
"{",
"sub",
".",
"clearAckTimer",
"(",
")",
"\n",
"}",
"\n",
"}"
] | // adjustAckTimer adjusts the timer based on a given next
// expiration time.
// The timer will be stopped if there is no more pending ack.
// If there are pending acks, the timer will be reset to the
// default sub.ackWait value if the given expiration time is
// 0 or in the past. Otherwise, it is set to the remaining time
// between the given expiration time and now. | [
"adjustAckTimer",
"adjusts",
"the",
"timer",
"based",
"on",
"a",
"given",
"next",
"expiration",
"time",
".",
"The",
"timer",
"will",
"be",
"stopped",
"if",
"there",
"is",
"no",
"more",
"pending",
"ack",
".",
"If",
"there",
"are",
"pending",
"acks",
"the",
"timer",
"will",
"be",
"reset",
"to",
"the",
"default",
"sub",
".",
"ackWait",
"value",
"if",
"the",
"given",
"expiration",
"time",
"is",
"0",
"or",
"in",
"the",
"past",
".",
"Otherwise",
"it",
"is",
"set",
"to",
"the",
"remaining",
"time",
"between",
"the",
"given",
"expiration",
"time",
"and",
"now",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4319-L4347 | train |
nats-io/nats-streaming-server | server/server.go | startAckSub | func (sub *subState) startAckSub(nc *nats.Conn, cb nats.MsgHandler) error {
ackSub, err := nc.Subscribe(sub.AckInbox, cb)
if err != nil {
return err
}
sub.Lock()
// Should not occur, but if it was already set,
// unsubscribe old and replace.
sub.stopAckSub()
sub.ackSub = ackSub
sub.ackSub.SetPendingLimits(-1, -1)
sub.Unlock()
return nil
} | go | func (sub *subState) startAckSub(nc *nats.Conn, cb nats.MsgHandler) error {
ackSub, err := nc.Subscribe(sub.AckInbox, cb)
if err != nil {
return err
}
sub.Lock()
// Should not occur, but if it was already set,
// unsubscribe old and replace.
sub.stopAckSub()
sub.ackSub = ackSub
sub.ackSub.SetPendingLimits(-1, -1)
sub.Unlock()
return nil
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"startAckSub",
"(",
"nc",
"*",
"nats",
".",
"Conn",
",",
"cb",
"nats",
".",
"MsgHandler",
")",
"error",
"{",
"ackSub",
",",
"err",
":=",
"nc",
".",
"Subscribe",
"(",
"sub",
".",
"AckInbox",
",",
"cb",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"sub",
".",
"stopAckSub",
"(",
")",
"\n",
"sub",
".",
"ackSub",
"=",
"ackSub",
"\n",
"sub",
".",
"ackSub",
".",
"SetPendingLimits",
"(",
"-",
"1",
",",
"-",
"1",
")",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // Subscribes to the AckInbox subject in order to process subscription's acks
// if not already done.
// This function grabs and releases the sub's lock. | [
"Subscribes",
"to",
"the",
"AckInbox",
"subject",
"in",
"order",
"to",
"process",
"subscription",
"s",
"acks",
"if",
"not",
"already",
"done",
".",
"This",
"function",
"grabs",
"and",
"releases",
"the",
"sub",
"s",
"lock",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4352-L4365 | train |
nats-io/nats-streaming-server | server/server.go | stopAckSub | func (sub *subState) stopAckSub() {
if sub.ackSub != nil {
sub.ackSub.Unsubscribe()
sub.ackSub = nil
}
} | go | func (sub *subState) stopAckSub() {
if sub.ackSub != nil {
sub.ackSub.Unsubscribe()
sub.ackSub = nil
}
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"stopAckSub",
"(",
")",
"{",
"if",
"sub",
".",
"ackSub",
"!=",
"nil",
"{",
"sub",
".",
"ackSub",
".",
"Unsubscribe",
"(",
")",
"\n",
"sub",
".",
"ackSub",
"=",
"nil",
"\n",
"}",
"\n",
"}"
] | // Stops subscribing to AckInbox.
// Lock assumed held on entry. | [
"Stops",
"subscribing",
"to",
"AckInbox",
".",
"Lock",
"assumed",
"held",
"on",
"entry",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4369-L4374 | train |
nats-io/nats-streaming-server | server/server.go | isShadowQueueDurable | func (sub *subState) isShadowQueueDurable() bool {
return sub.IsDurable && sub.QGroup != "" && sub.ClientID == ""
} | go | func (sub *subState) isShadowQueueDurable() bool {
return sub.IsDurable && sub.QGroup != "" && sub.ClientID == ""
} | [
"func",
"(",
"sub",
"*",
"subState",
")",
"isShadowQueueDurable",
"(",
")",
"bool",
"{",
"return",
"sub",
".",
"IsDurable",
"&&",
"sub",
".",
"QGroup",
"!=",
"\"\"",
"&&",
"sub",
".",
"ClientID",
"==",
"\"\"",
"\n",
"}"
] | // Returns true if this is a "shadow" durable queue subscriber | [
"Returns",
"true",
"if",
"this",
"is",
"a",
"shadow",
"durable",
"queue",
"subscriber"
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4395-L4397 | train |
nats-io/nats-streaming-server | server/server.go | replicateSub | func (s *StanServer) replicateSub(sr *pb.SubscriptionRequest, ackInbox string, subID uint64) (*subState, error) {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_Subscribe,
Sub: &spb.AddSubscription{
Request: sr,
AckInbox: ackInbox,
ID: subID,
},
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
// Replicate operation and wait on result.
future := s.raft.Apply(data, 0)
if err := future.Error(); err != nil {
return nil, err
}
rs := future.Response().(*replicatedSub)
return rs.sub, rs.err
} | go | func (s *StanServer) replicateSub(sr *pb.SubscriptionRequest, ackInbox string, subID uint64) (*subState, error) {
op := &spb.RaftOperation{
OpType: spb.RaftOperation_Subscribe,
Sub: &spb.AddSubscription{
Request: sr,
AckInbox: ackInbox,
ID: subID,
},
}
data, err := op.Marshal()
if err != nil {
panic(err)
}
// Replicate operation and wait on result.
future := s.raft.Apply(data, 0)
if err := future.Error(); err != nil {
return nil, err
}
rs := future.Response().(*replicatedSub)
return rs.sub, rs.err
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"replicateSub",
"(",
"sr",
"*",
"pb",
".",
"SubscriptionRequest",
",",
"ackInbox",
"string",
",",
"subID",
"uint64",
")",
"(",
"*",
"subState",
",",
"error",
")",
"{",
"op",
":=",
"&",
"spb",
".",
"RaftOperation",
"{",
"OpType",
":",
"spb",
".",
"RaftOperation_Subscribe",
",",
"Sub",
":",
"&",
"spb",
".",
"AddSubscription",
"{",
"Request",
":",
"sr",
",",
"AckInbox",
":",
"ackInbox",
",",
"ID",
":",
"subID",
",",
"}",
",",
"}",
"\n",
"data",
",",
"err",
":=",
"op",
".",
"Marshal",
"(",
")",
"\n",
"if",
"err",
"!=",
"nil",
"{",
"panic",
"(",
"err",
")",
"\n",
"}",
"\n",
"future",
":=",
"s",
".",
"raft",
".",
"Apply",
"(",
"data",
",",
"0",
")",
"\n",
"if",
"err",
":=",
"future",
".",
"Error",
"(",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"nil",
",",
"err",
"\n",
"}",
"\n",
"rs",
":=",
"future",
".",
"Response",
"(",
")",
".",
"(",
"*",
"replicatedSub",
")",
"\n",
"return",
"rs",
".",
"sub",
",",
"rs",
".",
"err",
"\n",
"}"
] | // replicateSub replicates the SubscriptionRequest to nodes in the cluster via
// Raft. | [
"replicateSub",
"replicates",
"the",
"SubscriptionRequest",
"to",
"nodes",
"in",
"the",
"cluster",
"via",
"Raft",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4419-L4439 | train |
nats-io/nats-streaming-server | server/server.go | addSubscription | func (s *StanServer) addSubscription(ss *subStore, sub *subState) error {
// Store in client
if !s.clients.addSub(sub.ClientID, sub) {
return fmt.Errorf("can't find clientID: %v", sub.ClientID)
}
// Store this subscription in subStore
if err := ss.Store(sub); err != nil {
s.clients.removeSub(sub.ClientID, sub)
return err
}
return nil
} | go | func (s *StanServer) addSubscription(ss *subStore, sub *subState) error {
// Store in client
if !s.clients.addSub(sub.ClientID, sub) {
return fmt.Errorf("can't find clientID: %v", sub.ClientID)
}
// Store this subscription in subStore
if err := ss.Store(sub); err != nil {
s.clients.removeSub(sub.ClientID, sub)
return err
}
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"addSubscription",
"(",
"ss",
"*",
"subStore",
",",
"sub",
"*",
"subState",
")",
"error",
"{",
"if",
"!",
"s",
".",
"clients",
".",
"addSub",
"(",
"sub",
".",
"ClientID",
",",
"sub",
")",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"can't find clientID: %v\"",
",",
"sub",
".",
"ClientID",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"ss",
".",
"Store",
"(",
"sub",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"clients",
".",
"removeSub",
"(",
"sub",
".",
"ClientID",
",",
"sub",
")",
"\n",
"return",
"err",
"\n",
"}",
"\n",
"return",
"nil",
"\n",
"}"
] | // addSubscription adds `sub` to the client and store. | [
"addSubscription",
"adds",
"sub",
"to",
"the",
"client",
"and",
"store",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4442-L4453 | train |
nats-io/nats-streaming-server | server/server.go | updateDurable | func (s *StanServer) updateDurable(ss *subStore, sub *subState) error {
// Reset the hasFailedHB boolean since it may have been set
// if the client previously crashed and server set this
// flag to its subs.
sub.hasFailedHB = false
// Store in the client
if !s.clients.addSub(sub.ClientID, sub) {
return fmt.Errorf("can't find clientID: %v", sub.ClientID)
}
// Update this subscription in the store
if err := sub.store.UpdateSub(&sub.SubState); err != nil {
return err
}
ss.Lock()
// Do this only for durable subscribers (not durable queue subscribers).
if sub.isDurableSubscriber() {
// Add back into plain subscribers
ss.psubs = append(ss.psubs, sub)
}
// And in ackInbox lookup map.
ss.acks[sub.AckInbox] = sub
ss.Unlock()
return nil
} | go | func (s *StanServer) updateDurable(ss *subStore, sub *subState) error {
// Reset the hasFailedHB boolean since it may have been set
// if the client previously crashed and server set this
// flag to its subs.
sub.hasFailedHB = false
// Store in the client
if !s.clients.addSub(sub.ClientID, sub) {
return fmt.Errorf("can't find clientID: %v", sub.ClientID)
}
// Update this subscription in the store
if err := sub.store.UpdateSub(&sub.SubState); err != nil {
return err
}
ss.Lock()
// Do this only for durable subscribers (not durable queue subscribers).
if sub.isDurableSubscriber() {
// Add back into plain subscribers
ss.psubs = append(ss.psubs, sub)
}
// And in ackInbox lookup map.
ss.acks[sub.AckInbox] = sub
ss.Unlock()
return nil
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"updateDurable",
"(",
"ss",
"*",
"subStore",
",",
"sub",
"*",
"subState",
")",
"error",
"{",
"sub",
".",
"hasFailedHB",
"=",
"false",
"\n",
"if",
"!",
"s",
".",
"clients",
".",
"addSub",
"(",
"sub",
".",
"ClientID",
",",
"sub",
")",
"{",
"return",
"fmt",
".",
"Errorf",
"(",
"\"can't find clientID: %v\"",
",",
"sub",
".",
"ClientID",
")",
"\n",
"}",
"\n",
"if",
"err",
":=",
"sub",
".",
"store",
".",
"UpdateSub",
"(",
"&",
"sub",
".",
"SubState",
")",
";",
"err",
"!=",
"nil",
"{",
"return",
"err",
"\n",
"}",
"\n",
"ss",
".",
"Lock",
"(",
")",
"\n",
"if",
"sub",
".",
"isDurableSubscriber",
"(",
")",
"{",
"ss",
".",
"psubs",
"=",
"append",
"(",
"ss",
".",
"psubs",
",",
"sub",
")",
"\n",
"}",
"\n",
"ss",
".",
"acks",
"[",
"sub",
".",
"AckInbox",
"]",
"=",
"sub",
"\n",
"ss",
".",
"Unlock",
"(",
")",
"\n",
"return",
"nil",
"\n",
"}"
] | // updateDurable adds back `sub` to the client and updates the store.
// No lock is needed for `sub` since it has just been created. | [
"updateDurable",
"adds",
"back",
"sub",
"to",
"the",
"client",
"and",
"updates",
"the",
"store",
".",
"No",
"lock",
"is",
"needed",
"for",
"sub",
"since",
"it",
"has",
"just",
"been",
"created",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4457-L4481 | train |
nats-io/nats-streaming-server | server/server.go | processAckMsg | func (s *StanServer) processAckMsg(m *nats.Msg) {
ack := &pb.Ack{}
if ack.Unmarshal(m.Data) != nil {
if s.processCtrlMsg(m) {
return
}
}
c := s.channels.get(ack.Subject)
if c == nil {
s.log.Errorf("Unable to process ack seq=%d, channel %s not found", ack.Sequence, ack.Subject)
return
}
sub := c.ss.LookupByAckInbox(m.Subject)
if sub == nil {
return
}
s.processAck(c, sub, ack.Sequence, true)
} | go | func (s *StanServer) processAckMsg(m *nats.Msg) {
ack := &pb.Ack{}
if ack.Unmarshal(m.Data) != nil {
if s.processCtrlMsg(m) {
return
}
}
c := s.channels.get(ack.Subject)
if c == nil {
s.log.Errorf("Unable to process ack seq=%d, channel %s not found", ack.Sequence, ack.Subject)
return
}
sub := c.ss.LookupByAckInbox(m.Subject)
if sub == nil {
return
}
s.processAck(c, sub, ack.Sequence, true)
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processAckMsg",
"(",
"m",
"*",
"nats",
".",
"Msg",
")",
"{",
"ack",
":=",
"&",
"pb",
".",
"Ack",
"{",
"}",
"\n",
"if",
"ack",
".",
"Unmarshal",
"(",
"m",
".",
"Data",
")",
"!=",
"nil",
"{",
"if",
"s",
".",
"processCtrlMsg",
"(",
"m",
")",
"{",
"return",
"\n",
"}",
"\n",
"}",
"\n",
"c",
":=",
"s",
".",
"channels",
".",
"get",
"(",
"ack",
".",
"Subject",
")",
"\n",
"if",
"c",
"==",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"Unable to process ack seq=%d, channel %s not found\"",
",",
"ack",
".",
"Sequence",
",",
"ack",
".",
"Subject",
")",
"\n",
"return",
"\n",
"}",
"\n",
"sub",
":=",
"c",
".",
"ss",
".",
"LookupByAckInbox",
"(",
"m",
".",
"Subject",
")",
"\n",
"if",
"sub",
"==",
"nil",
"{",
"return",
"\n",
"}",
"\n",
"s",
".",
"processAck",
"(",
"c",
",",
"sub",
",",
"ack",
".",
"Sequence",
",",
"true",
")",
"\n",
"}"
] | // processAckMsg processes inbound acks from clients for delivered messages. | [
"processAckMsg",
"processes",
"inbound",
"acks",
"from",
"clients",
"for",
"delivered",
"messages",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4879-L4896 | train |
nats-io/nats-streaming-server | server/server.go | processAck | func (s *StanServer) processAck(c *channel, sub *subState, sequence uint64, fromUser bool) {
var stalled bool
// This is immutable, so can grab outside of sub's lock.
// If we have a queue group, we want to grab queue's lock before
// sub's lock.
qs := sub.qstate
if qs != nil {
qs.Lock()
}
sub.Lock()
persistAck := func(aSub *subState) bool {
if err := aSub.store.AckSeqPending(aSub.ID, sequence); err != nil {
s.log.Errorf("[Client:%s] Unable to persist ack for subid=%d, subject=%s, seq=%d, err=%v",
aSub.ClientID, aSub.ID, aSub.subject, sequence, err)
return false
}
return true
}
if _, found := sub.acksPending[sequence]; found {
// If in cluster mode, schedule replication of the ack.
if s.isClustered {
s.collectSentOrAck(sub, replicateAck, sequence)
}
if s.trace && fromUser {
s.log.Tracef("[Client:%s] Processing ack for subid=%d, subject=%s, seq=%d",
sub.ClientID, sub.ID, sub.subject, sequence)
}
if !persistAck(sub) {
sub.Unlock()
if qs != nil {
qs.Unlock()
}
return
}
delete(sub.acksPending, sequence)
} else if qs != nil && fromUser {
// For queue members, if this is not an internally generated ACK
// and we don't find the sequence in this sub's pending, we are
// going to look for it in other members and process it if found.
sub.Unlock()
for _, qsub := range qs.subs {
if qsub == sub {
continue
}
qsub.Lock()
if _, found := qsub.acksPending[sequence]; found {
delete(qsub.acksPending, sequence)
persistAck(qsub)
qsub.Unlock()
break
}
qsub.Unlock()
}
sub.Lock()
// Proceed with original sub (regardless if member was found
// or not) so that server sends more messages if needed.
}
if sub.stalled && int32(len(sub.acksPending)) < sub.MaxInFlight {
// For queue, we must not check the queue stalled count here. The queue
// as a whole may not be stalled, yet, if this sub was stalled, it is
// not now since the pending acks is below MaxInflight. The server should
// try to send available messages.
// It works also if the queue *was* stalled (all members were stalled),
// then this member is no longer stalled, which release the queue.
// Trigger send of available messages by setting this to true.
stalled = true
// Clear the stalled flag from this sub
sub.stalled = false
// .. and update the queue's stalled members count if this is a queue sub.
if qs != nil && qs.stalledSubCount > 0 {
qs.stalledSubCount--
}
}
sub.Unlock()
if qs != nil {
qs.Unlock()
}
// Leave the reset/cancel of the ackTimer to the redelivery cb.
if !stalled {
return
}
if sub.qstate != nil {
s.sendAvailableMessagesToQueue(c, sub.qstate)
} else {
s.sendAvailableMessages(c, sub)
}
} | go | func (s *StanServer) processAck(c *channel, sub *subState, sequence uint64, fromUser bool) {
var stalled bool
// This is immutable, so can grab outside of sub's lock.
// If we have a queue group, we want to grab queue's lock before
// sub's lock.
qs := sub.qstate
if qs != nil {
qs.Lock()
}
sub.Lock()
persistAck := func(aSub *subState) bool {
if err := aSub.store.AckSeqPending(aSub.ID, sequence); err != nil {
s.log.Errorf("[Client:%s] Unable to persist ack for subid=%d, subject=%s, seq=%d, err=%v",
aSub.ClientID, aSub.ID, aSub.subject, sequence, err)
return false
}
return true
}
if _, found := sub.acksPending[sequence]; found {
// If in cluster mode, schedule replication of the ack.
if s.isClustered {
s.collectSentOrAck(sub, replicateAck, sequence)
}
if s.trace && fromUser {
s.log.Tracef("[Client:%s] Processing ack for subid=%d, subject=%s, seq=%d",
sub.ClientID, sub.ID, sub.subject, sequence)
}
if !persistAck(sub) {
sub.Unlock()
if qs != nil {
qs.Unlock()
}
return
}
delete(sub.acksPending, sequence)
} else if qs != nil && fromUser {
// For queue members, if this is not an internally generated ACK
// and we don't find the sequence in this sub's pending, we are
// going to look for it in other members and process it if found.
sub.Unlock()
for _, qsub := range qs.subs {
if qsub == sub {
continue
}
qsub.Lock()
if _, found := qsub.acksPending[sequence]; found {
delete(qsub.acksPending, sequence)
persistAck(qsub)
qsub.Unlock()
break
}
qsub.Unlock()
}
sub.Lock()
// Proceed with original sub (regardless if member was found
// or not) so that server sends more messages if needed.
}
if sub.stalled && int32(len(sub.acksPending)) < sub.MaxInFlight {
// For queue, we must not check the queue stalled count here. The queue
// as a whole may not be stalled, yet, if this sub was stalled, it is
// not now since the pending acks is below MaxInflight. The server should
// try to send available messages.
// It works also if the queue *was* stalled (all members were stalled),
// then this member is no longer stalled, which release the queue.
// Trigger send of available messages by setting this to true.
stalled = true
// Clear the stalled flag from this sub
sub.stalled = false
// .. and update the queue's stalled members count if this is a queue sub.
if qs != nil && qs.stalledSubCount > 0 {
qs.stalledSubCount--
}
}
sub.Unlock()
if qs != nil {
qs.Unlock()
}
// Leave the reset/cancel of the ackTimer to the redelivery cb.
if !stalled {
return
}
if sub.qstate != nil {
s.sendAvailableMessagesToQueue(c, sub.qstate)
} else {
s.sendAvailableMessages(c, sub)
}
} | [
"func",
"(",
"s",
"*",
"StanServer",
")",
"processAck",
"(",
"c",
"*",
"channel",
",",
"sub",
"*",
"subState",
",",
"sequence",
"uint64",
",",
"fromUser",
"bool",
")",
"{",
"var",
"stalled",
"bool",
"\n",
"qs",
":=",
"sub",
".",
"qstate",
"\n",
"if",
"qs",
"!=",
"nil",
"{",
"qs",
".",
"Lock",
"(",
")",
"\n",
"}",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"persistAck",
":=",
"func",
"(",
"aSub",
"*",
"subState",
")",
"bool",
"{",
"if",
"err",
":=",
"aSub",
".",
"store",
".",
"AckSeqPending",
"(",
"aSub",
".",
"ID",
",",
"sequence",
")",
";",
"err",
"!=",
"nil",
"{",
"s",
".",
"log",
".",
"Errorf",
"(",
"\"[Client:%s] Unable to persist ack for subid=%d, subject=%s, seq=%d, err=%v\"",
",",
"aSub",
".",
"ClientID",
",",
"aSub",
".",
"ID",
",",
"aSub",
".",
"subject",
",",
"sequence",
",",
"err",
")",
"\n",
"return",
"false",
"\n",
"}",
"\n",
"return",
"true",
"\n",
"}",
"\n",
"if",
"_",
",",
"found",
":=",
"sub",
".",
"acksPending",
"[",
"sequence",
"]",
";",
"found",
"{",
"if",
"s",
".",
"isClustered",
"{",
"s",
".",
"collectSentOrAck",
"(",
"sub",
",",
"replicateAck",
",",
"sequence",
")",
"\n",
"}",
"\n",
"if",
"s",
".",
"trace",
"&&",
"fromUser",
"{",
"s",
".",
"log",
".",
"Tracef",
"(",
"\"[Client:%s] Processing ack for subid=%d, subject=%s, seq=%d\"",
",",
"sub",
".",
"ClientID",
",",
"sub",
".",
"ID",
",",
"sub",
".",
"subject",
",",
"sequence",
")",
"\n",
"}",
"\n",
"if",
"!",
"persistAck",
"(",
"sub",
")",
"{",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"if",
"qs",
"!=",
"nil",
"{",
"qs",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"return",
"\n",
"}",
"\n",
"delete",
"(",
"sub",
".",
"acksPending",
",",
"sequence",
")",
"\n",
"}",
"else",
"if",
"qs",
"!=",
"nil",
"&&",
"fromUser",
"{",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"for",
"_",
",",
"qsub",
":=",
"range",
"qs",
".",
"subs",
"{",
"if",
"qsub",
"==",
"sub",
"{",
"continue",
"\n",
"}",
"\n",
"qsub",
".",
"Lock",
"(",
")",
"\n",
"if",
"_",
",",
"found",
":=",
"qsub",
".",
"acksPending",
"[",
"sequence",
"]",
";",
"found",
"{",
"delete",
"(",
"qsub",
".",
"acksPending",
",",
"sequence",
")",
"\n",
"persistAck",
"(",
"qsub",
")",
"\n",
"qsub",
".",
"Unlock",
"(",
")",
"\n",
"break",
"\n",
"}",
"\n",
"qsub",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"sub",
".",
"Lock",
"(",
")",
"\n",
"}",
"\n",
"if",
"sub",
".",
"stalled",
"&&",
"int32",
"(",
"len",
"(",
"sub",
".",
"acksPending",
")",
")",
"<",
"sub",
".",
"MaxInFlight",
"{",
"stalled",
"=",
"true",
"\n",
"sub",
".",
"stalled",
"=",
"false",
"\n",
"if",
"qs",
"!=",
"nil",
"&&",
"qs",
".",
"stalledSubCount",
">",
"0",
"{",
"qs",
".",
"stalledSubCount",
"--",
"\n",
"}",
"\n",
"}",
"\n",
"sub",
".",
"Unlock",
"(",
")",
"\n",
"if",
"qs",
"!=",
"nil",
"{",
"qs",
".",
"Unlock",
"(",
")",
"\n",
"}",
"\n",
"if",
"!",
"stalled",
"{",
"return",
"\n",
"}",
"\n",
"if",
"sub",
".",
"qstate",
"!=",
"nil",
"{",
"s",
".",
"sendAvailableMessagesToQueue",
"(",
"c",
",",
"sub",
".",
"qstate",
")",
"\n",
"}",
"else",
"{",
"s",
".",
"sendAvailableMessages",
"(",
"c",
",",
"sub",
")",
"\n",
"}",
"\n",
"}"
] | // processAck processes an ack and if needed sends more messages. | [
"processAck",
"processes",
"an",
"ack",
"and",
"if",
"needed",
"sends",
"more",
"messages",
"."
] | 57c6c84265c0012a1efef365703c221329804d4c | https://github.com/nats-io/nats-streaming-server/blob/57c6c84265c0012a1efef365703c221329804d4c/server/server.go#L4899-L4994 | train |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.