repo
stringlengths
5
67
path
stringlengths
4
218
func_name
stringlengths
0
151
original_string
stringlengths
52
373k
language
stringclasses
6 values
code
stringlengths
52
373k
code_tokens
listlengths
10
512
docstring
stringlengths
3
47.2k
docstring_tokens
listlengths
3
234
sha
stringlengths
40
40
url
stringlengths
85
339
partition
stringclasses
3 values
grafana/metrictank
mdata/chunk/tsz/tsz.go
NewIterator4h
func NewIterator4h(b []byte, intervalHint uint32) (*Iter4h, error) { return bstreamIterator4h(newBReader(b), intervalHint) }
go
func NewIterator4h(b []byte, intervalHint uint32) (*Iter4h, error) { return bstreamIterator4h(newBReader(b), intervalHint) }
[ "func", "NewIterator4h", "(", "b", "[", "]", "byte", ",", "intervalHint", "uint32", ")", "(", "*", "Iter4h", ",", "error", ")", "{", "return", "bstreamIterator4h", "(", "newBReader", "(", "b", ")", ",", "intervalHint", ")", "\n", "}" ]
// NewIterator4h creates an Iter4h
[ "NewIterator4h", "creates", "an", "Iter4h" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tsz.go#L191-L193
train
grafana/metrictank
cluster/manager.go
clusterStats
func (c *MemberlistManager) clusterStats() { primReady := 0 primNotReady := 0 secReady := 0 secNotReady := 0 queryReady := 0 queryNotReady := 0 partitions := make(map[int32]int) for _, p := range c.members { if p.Primary { if p.IsReady() { primReady++ } else { primNotReady++ } } else if p.Mode != ModeQuery { if p.IsReady() { secReady++ } else { secNotReady++ } } else { if p.IsReady() { queryReady++ } else { queryNotReady++ } } for _, partition := range p.Partitions { partitions[partition]++ } } totalPrimaryReady.Set(primReady) totalPrimaryNotReady.Set(primNotReady) totalSecondaryReady.Set(secReady) totalSecondaryNotReady.Set(secNotReady) totalQueryReady.Set(queryReady) totalQueryNotReady.Set(queryNotReady) totalPartitions.Set(len(partitions)) }
go
func (c *MemberlistManager) clusterStats() { primReady := 0 primNotReady := 0 secReady := 0 secNotReady := 0 queryReady := 0 queryNotReady := 0 partitions := make(map[int32]int) for _, p := range c.members { if p.Primary { if p.IsReady() { primReady++ } else { primNotReady++ } } else if p.Mode != ModeQuery { if p.IsReady() { secReady++ } else { secNotReady++ } } else { if p.IsReady() { queryReady++ } else { queryNotReady++ } } for _, partition := range p.Partitions { partitions[partition]++ } } totalPrimaryReady.Set(primReady) totalPrimaryNotReady.Set(primNotReady) totalSecondaryReady.Set(secReady) totalSecondaryNotReady.Set(secNotReady) totalQueryReady.Set(queryReady) totalQueryNotReady.Set(queryNotReady) totalPartitions.Set(len(partitions)) }
[ "func", "(", "c", "*", "MemberlistManager", ")", "clusterStats", "(", ")", "{", "primReady", ":=", "0", "\n", "primNotReady", ":=", "0", "\n", "secReady", ":=", "0", "\n", "secNotReady", ":=", "0", "\n", "queryReady", ":=", "0", "\n", "queryNotReady", ":...
// report the cluster stats every time there is a change to the cluster state. // it is assumed that the lock is acquired before calling this method.
[ "report", "the", "cluster", "stats", "every", "time", "there", "is", "a", "change", "to", "the", "cluster", "state", ".", "it", "is", "assumed", "that", "the", "lock", "is", "acquired", "before", "calling", "this", "method", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L190-L231
train
grafana/metrictank
cluster/manager.go
NodeMeta
func (c *MemberlistManager) NodeMeta(limit int) []byte { c.RLock() meta, err := json.Marshal(c.members[c.nodeName]) c.RUnlock() if err != nil { log.Fatalf("CLU manager: %s", err.Error()) } return meta }
go
func (c *MemberlistManager) NodeMeta(limit int) []byte { c.RLock() meta, err := json.Marshal(c.members[c.nodeName]) c.RUnlock() if err != nil { log.Fatalf("CLU manager: %s", err.Error()) } return meta }
[ "func", "(", "c", "*", "MemberlistManager", ")", "NodeMeta", "(", "limit", "int", ")", "[", "]", "byte", "{", "c", ".", "RLock", "(", ")", "\n", "meta", ",", "err", ":=", "json", ".", "Marshal", "(", "c", ".", "members", "[", "c", ".", "nodeName"...
// NodeMeta is used to retrieve meta-data about the current node // when broadcasting an alive message. It's length is limited to // the given byte size. This metadata is available in the HTTPNode structure.
[ "NodeMeta", "is", "used", "to", "retrieve", "meta", "-", "data", "about", "the", "current", "node", "when", "broadcasting", "an", "alive", "message", ".", "It", "s", "length", "is", "limited", "to", "the", "given", "byte", "size", ".", "This", "metadata", ...
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L331-L339
train
grafana/metrictank
cluster/manager.go
IsReady
func (c *MemberlistManager) IsReady() bool { c.RLock() defer c.RUnlock() return c.members[c.nodeName].IsReady() }
go
func (c *MemberlistManager) IsReady() bool { c.RLock() defer c.RUnlock() return c.members[c.nodeName].IsReady() }
[ "func", "(", "c", "*", "MemberlistManager", ")", "IsReady", "(", ")", "bool", "{", "c", ".", "RLock", "(", ")", "\n", "defer", "c", ".", "RUnlock", "(", ")", "\n", "return", "c", ".", "members", "[", "c", ".", "nodeName", "]", ".", "IsReady", "("...
// Returns true if this node is a ready to accept requests // from users.
[ "Returns", "true", "if", "this", "node", "is", "a", "ready", "to", "accept", "requests", "from", "users", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L377-L381
train
grafana/metrictank
cluster/manager.go
SetState
func (c *MemberlistManager) SetState(state NodeState) { c.Lock() node := c.members[c.nodeName] if !node.SetState(state) { c.Unlock() return } c.members[c.nodeName] = node c.Unlock() nodeReady.Set(state == NodeReady) c.BroadcastUpdate() }
go
func (c *MemberlistManager) SetState(state NodeState) { c.Lock() node := c.members[c.nodeName] if !node.SetState(state) { c.Unlock() return } c.members[c.nodeName] = node c.Unlock() nodeReady.Set(state == NodeReady) c.BroadcastUpdate() }
[ "func", "(", "c", "*", "MemberlistManager", ")", "SetState", "(", "state", "NodeState", ")", "{", "c", ".", "Lock", "(", ")", "\n", "node", ":=", "c", ".", "members", "[", "c", ".", "nodeName", "]", "\n", "if", "!", "node", ".", "SetState", "(", ...
// Set the state of this node.
[ "Set", "the", "state", "of", "this", "node", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L389-L400
train
grafana/metrictank
cluster/manager.go
IsPrimary
func (c *MemberlistManager) IsPrimary() bool { c.RLock() defer c.RUnlock() return c.members[c.nodeName].Primary }
go
func (c *MemberlistManager) IsPrimary() bool { c.RLock() defer c.RUnlock() return c.members[c.nodeName].Primary }
[ "func", "(", "c", "*", "MemberlistManager", ")", "IsPrimary", "(", ")", "bool", "{", "c", ".", "RLock", "(", ")", "\n", "defer", "c", ".", "RUnlock", "(", ")", "\n", "return", "c", ".", "members", "[", "c", ".", "nodeName", "]", ".", "Primary", "...
// Returns true if the this node is a set as a primary node that should write data to cassandra.
[ "Returns", "true", "if", "the", "this", "node", "is", "a", "set", "as", "a", "primary", "node", "that", "should", "write", "data", "to", "cassandra", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L403-L407
train
grafana/metrictank
cluster/manager.go
SetPrimary
func (c *MemberlistManager) SetPrimary(primary bool) { c.Lock() node := c.members[c.nodeName] if !node.SetPrimary(primary) { c.Unlock() return } c.members[c.nodeName] = node c.Unlock() nodePrimary.Set(primary) c.BroadcastUpdate() }
go
func (c *MemberlistManager) SetPrimary(primary bool) { c.Lock() node := c.members[c.nodeName] if !node.SetPrimary(primary) { c.Unlock() return } c.members[c.nodeName] = node c.Unlock() nodePrimary.Set(primary) c.BroadcastUpdate() }
[ "func", "(", "c", "*", "MemberlistManager", ")", "SetPrimary", "(", "primary", "bool", ")", "{", "c", ".", "Lock", "(", ")", "\n", "node", ":=", "c", ".", "members", "[", "c", ".", "nodeName", "]", "\n", "if", "!", "node", ".", "SetPrimary", "(", ...
// SetPrimary sets the primary status of this node
[ "SetPrimary", "sets", "the", "primary", "status", "of", "this", "node" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cluster/manager.go#L410-L421
train
grafana/metrictank
expr/func_movingaverage.go
Signature
func (s *FuncMovingAverage) Signature() ([]Arg, []Arg) { return []Arg{ ArgSeriesList{val: &s.in}, // this could be an int OR a string. // we need to figure out the interval of the data we will consume // and request from -= interval * points // interestingly the from adjustment might mean the archive TTL is no longer sufficient and push the request into a different rollup archive, which we should probably // account for. let's solve all of this later. ArgInt{val: &s.window}, }, []Arg{ArgSeriesList{}} }
go
func (s *FuncMovingAverage) Signature() ([]Arg, []Arg) { return []Arg{ ArgSeriesList{val: &s.in}, // this could be an int OR a string. // we need to figure out the interval of the data we will consume // and request from -= interval * points // interestingly the from adjustment might mean the archive TTL is no longer sufficient and push the request into a different rollup archive, which we should probably // account for. let's solve all of this later. ArgInt{val: &s.window}, }, []Arg{ArgSeriesList{}} }
[ "func", "(", "s", "*", "FuncMovingAverage", ")", "Signature", "(", ")", "(", "[", "]", "Arg", ",", "[", "]", "Arg", ")", "{", "return", "[", "]", "Arg", "{", "ArgSeriesList", "{", "val", ":", "&", "s", ".", "in", "}", ",", "ArgInt", "{", "val",...
// note if input is 1 series, then output is too. not sure how to communicate that
[ "note", "if", "input", "is", "1", "series", "then", "output", "is", "too", ".", "not", "sure", "how", "to", "communicate", "that" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/expr/func_movingaverage.go#L18-L28
train
grafana/metrictank
expr/func_aggregate.go
NewAggregateConstructor
func NewAggregateConstructor(aggDescription string, aggFunc crossSeriesAggFunc) func() GraphiteFunc { return func() GraphiteFunc { return &FuncAggregate{agg: seriesAggregator{function: aggFunc, name: aggDescription}} } }
go
func NewAggregateConstructor(aggDescription string, aggFunc crossSeriesAggFunc) func() GraphiteFunc { return func() GraphiteFunc { return &FuncAggregate{agg: seriesAggregator{function: aggFunc, name: aggDescription}} } }
[ "func", "NewAggregateConstructor", "(", "aggDescription", "string", ",", "aggFunc", "crossSeriesAggFunc", ")", "func", "(", ")", "GraphiteFunc", "{", "return", "func", "(", ")", "GraphiteFunc", "{", "return", "&", "FuncAggregate", "{", "agg", ":", "seriesAggregato...
// NewAggregateConstructor takes an agg string and returns a constructor function
[ "NewAggregateConstructor", "takes", "an", "agg", "string", "and", "returns", "a", "constructor", "function" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/expr/func_aggregate.go#L16-L20
train
grafana/metrictank
input/kafkamdm/kafkamdm.go
tryGetOffset
func (k *KafkaMdm) tryGetOffset(topic string, partition int32, offset int64, attempts int, sleep time.Duration) (int64, error) { var val int64 var err error var offsetStr string switch offset { case sarama.OffsetNewest: offsetStr = "newest" case sarama.OffsetOldest: offsetStr = "oldest" default: offsetStr = strconv.FormatInt(offset, 10) } attempt := 1 for { val, err = k.client.GetOffset(topic, partition, offset) if err == nil { break } err = fmt.Errorf("failed to get offset %s of partition %s:%d. %s (attempt %d/%d)", offsetStr, topic, partition, err, attempt, attempts) if attempt == attempts { break } log.Warnf("kafkamdm: %s", err.Error()) attempt += 1 time.Sleep(sleep) } return val, err }
go
func (k *KafkaMdm) tryGetOffset(topic string, partition int32, offset int64, attempts int, sleep time.Duration) (int64, error) { var val int64 var err error var offsetStr string switch offset { case sarama.OffsetNewest: offsetStr = "newest" case sarama.OffsetOldest: offsetStr = "oldest" default: offsetStr = strconv.FormatInt(offset, 10) } attempt := 1 for { val, err = k.client.GetOffset(topic, partition, offset) if err == nil { break } err = fmt.Errorf("failed to get offset %s of partition %s:%d. %s (attempt %d/%d)", offsetStr, topic, partition, err, attempt, attempts) if attempt == attempts { break } log.Warnf("kafkamdm: %s", err.Error()) attempt += 1 time.Sleep(sleep) } return val, err }
[ "func", "(", "k", "*", "KafkaMdm", ")", "tryGetOffset", "(", "topic", "string", ",", "partition", "int32", ",", "offset", "int64", ",", "attempts", "int", ",", "sleep", "time", ".", "Duration", ")", "(", "int64", ",", "error", ")", "{", "var", "val", ...
// tryGetOffset will to query kafka repeatedly for the requested offset and give up after attempts unsuccesfull attempts // an error is returned when it had to give up
[ "tryGetOffset", "will", "to", "query", "kafka", "repeatedly", "for", "the", "requested", "offset", "and", "give", "up", "after", "attempts", "unsuccesfull", "attempts", "an", "error", "is", "returned", "when", "it", "had", "to", "give", "up" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/input/kafkamdm/kafkamdm.go#L218-L249
train
grafana/metrictank
input/kafkamdm/kafkamdm.go
consumePartition
func (k *KafkaMdm) consumePartition(topic string, partition int32, currentOffset int64) { defer k.wg.Done() // determine the pos of the topic and the initial offset of our consumer newest, err := k.tryGetOffset(topic, partition, sarama.OffsetNewest, 7, time.Second*10) if err != nil { log.Errorf("kafkamdm: %s", err.Error()) k.cancel() return } if currentOffset == sarama.OffsetNewest { currentOffset = newest } else if currentOffset == sarama.OffsetOldest { currentOffset, err = k.tryGetOffset(topic, partition, sarama.OffsetOldest, 7, time.Second*10) if err != nil { log.Errorf("kafkamdm: %s", err.Error()) k.cancel() return } } kafkaStats := kafkaStats[partition] kafkaStats.Offset.Set(int(currentOffset)) kafkaStats.LogSize.Set(int(newest)) kafkaStats.Lag.Set(int(newest - currentOffset)) go k.trackStats(topic, partition) log.Infof("kafkamdm: consuming from %s:%d from offset %d", topic, partition, currentOffset) pc, err := k.consumer.ConsumePartition(topic, partition, currentOffset) if err != nil { log.Errorf("kafkamdm: failed to start partitionConsumer for %s:%d. %s", topic, partition, err) k.cancel() return } messages := pc.Messages() for { select { case msg, ok := <-messages: // https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions#why-am-i-getting-a-nil-message-from-the-sarama-consumer if !ok { log.Errorf("kafkamdm: kafka consumer for %s:%d has shutdown. stop consuming", topic, partition) k.cancel() return } if log.IsLevelEnabled(log.DebugLevel) { log.Debugf("kafkamdm: received message: Topic %s, Partition: %d, Offset: %d, Key: %x", msg.Topic, msg.Partition, msg.Offset, msg.Key) } k.handleMsg(msg.Value, partition) kafkaStats.Offset.Set(int(msg.Offset)) case <-k.shutdown: pc.Close() log.Infof("kafkamdm: consumer for %s:%d ended.", topic, partition) return } } }
go
func (k *KafkaMdm) consumePartition(topic string, partition int32, currentOffset int64) { defer k.wg.Done() // determine the pos of the topic and the initial offset of our consumer newest, err := k.tryGetOffset(topic, partition, sarama.OffsetNewest, 7, time.Second*10) if err != nil { log.Errorf("kafkamdm: %s", err.Error()) k.cancel() return } if currentOffset == sarama.OffsetNewest { currentOffset = newest } else if currentOffset == sarama.OffsetOldest { currentOffset, err = k.tryGetOffset(topic, partition, sarama.OffsetOldest, 7, time.Second*10) if err != nil { log.Errorf("kafkamdm: %s", err.Error()) k.cancel() return } } kafkaStats := kafkaStats[partition] kafkaStats.Offset.Set(int(currentOffset)) kafkaStats.LogSize.Set(int(newest)) kafkaStats.Lag.Set(int(newest - currentOffset)) go k.trackStats(topic, partition) log.Infof("kafkamdm: consuming from %s:%d from offset %d", topic, partition, currentOffset) pc, err := k.consumer.ConsumePartition(topic, partition, currentOffset) if err != nil { log.Errorf("kafkamdm: failed to start partitionConsumer for %s:%d. %s", topic, partition, err) k.cancel() return } messages := pc.Messages() for { select { case msg, ok := <-messages: // https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions#why-am-i-getting-a-nil-message-from-the-sarama-consumer if !ok { log.Errorf("kafkamdm: kafka consumer for %s:%d has shutdown. stop consuming", topic, partition) k.cancel() return } if log.IsLevelEnabled(log.DebugLevel) { log.Debugf("kafkamdm: received message: Topic %s, Partition: %d, Offset: %d, Key: %x", msg.Topic, msg.Partition, msg.Offset, msg.Key) } k.handleMsg(msg.Value, partition) kafkaStats.Offset.Set(int(msg.Offset)) case <-k.shutdown: pc.Close() log.Infof("kafkamdm: consumer for %s:%d ended.", topic, partition) return } } }
[ "func", "(", "k", "*", "KafkaMdm", ")", "consumePartition", "(", "topic", "string", ",", "partition", "int32", ",", "currentOffset", "int64", ")", "{", "defer", "k", ".", "wg", ".", "Done", "(", ")", "\n", "newest", ",", "err", ":=", "k", ".", "tryGe...
// consumePartition consumes from the topic until k.shutdown is triggered.
[ "consumePartition", "consumes", "from", "the", "topic", "until", "k", ".", "shutdown", "is", "triggered", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/input/kafkamdm/kafkamdm.go#L252-L307
train
grafana/metrictank
idx/cassandra/cassandra.go
InitBare
func (c *CasIdx) InitBare() error { var err error tmpSession, err := c.cluster.CreateSession() if err != nil { return fmt.Errorf("failed to create cassandra session: %s", err) } // read templates schemaKeyspace := util.ReadEntry(c.cfg.schemaFile, "schema_keyspace").(string) schemaTable := util.ReadEntry(c.cfg.schemaFile, "schema_table").(string) // create the keyspace or ensure it exists if c.cfg.createKeyspace { log.Infof("cassandra-idx: ensuring that keyspace %s exists.", c.cfg.keyspace) err = tmpSession.Query(fmt.Sprintf(schemaKeyspace, c.cfg.keyspace)).Exec() if err != nil { return fmt.Errorf("failed to initialize cassandra keyspace: %s", err) } log.Info("cassandra-idx: ensuring that table metric_idx exists.") err = tmpSession.Query(fmt.Sprintf(schemaTable, c.cfg.keyspace)).Exec() if err != nil { return fmt.Errorf("failed to initialize cassandra table: %s", err) } c.EnsureArchiveTableExists(tmpSession) } else { var keyspaceMetadata *gocql.KeyspaceMetadata for attempt := 1; attempt > 0; attempt++ { keyspaceMetadata, err = tmpSession.KeyspaceMetadata(c.cfg.keyspace) if err != nil { if attempt >= 5 { return fmt.Errorf("cassandra keyspace not found. %d attempts", attempt) } log.Warnf("cassandra-idx: cassandra keyspace not found. retrying in 5s. attempt: %d", attempt) time.Sleep(5 * time.Second) } else { if _, ok := keyspaceMetadata.Tables["metric_idx"]; ok { break } else { if attempt >= 5 { return fmt.Errorf("cassandra table not found. %d attempts", attempt) } log.Warnf("cassandra-idx: cassandra table not found. retrying in 5s. attempt: %d", attempt) time.Sleep(5 * time.Second) } } } } tmpSession.Close() c.cluster.Keyspace = c.cfg.keyspace session, err := c.cluster.CreateSession() if err != nil { return fmt.Errorf("failed to create cassandra session: %s", err) } c.session = session return nil }
go
func (c *CasIdx) InitBare() error { var err error tmpSession, err := c.cluster.CreateSession() if err != nil { return fmt.Errorf("failed to create cassandra session: %s", err) } // read templates schemaKeyspace := util.ReadEntry(c.cfg.schemaFile, "schema_keyspace").(string) schemaTable := util.ReadEntry(c.cfg.schemaFile, "schema_table").(string) // create the keyspace or ensure it exists if c.cfg.createKeyspace { log.Infof("cassandra-idx: ensuring that keyspace %s exists.", c.cfg.keyspace) err = tmpSession.Query(fmt.Sprintf(schemaKeyspace, c.cfg.keyspace)).Exec() if err != nil { return fmt.Errorf("failed to initialize cassandra keyspace: %s", err) } log.Info("cassandra-idx: ensuring that table metric_idx exists.") err = tmpSession.Query(fmt.Sprintf(schemaTable, c.cfg.keyspace)).Exec() if err != nil { return fmt.Errorf("failed to initialize cassandra table: %s", err) } c.EnsureArchiveTableExists(tmpSession) } else { var keyspaceMetadata *gocql.KeyspaceMetadata for attempt := 1; attempt > 0; attempt++ { keyspaceMetadata, err = tmpSession.KeyspaceMetadata(c.cfg.keyspace) if err != nil { if attempt >= 5 { return fmt.Errorf("cassandra keyspace not found. %d attempts", attempt) } log.Warnf("cassandra-idx: cassandra keyspace not found. retrying in 5s. attempt: %d", attempt) time.Sleep(5 * time.Second) } else { if _, ok := keyspaceMetadata.Tables["metric_idx"]; ok { break } else { if attempt >= 5 { return fmt.Errorf("cassandra table not found. %d attempts", attempt) } log.Warnf("cassandra-idx: cassandra table not found. retrying in 5s. attempt: %d", attempt) time.Sleep(5 * time.Second) } } } } tmpSession.Close() c.cluster.Keyspace = c.cfg.keyspace session, err := c.cluster.CreateSession() if err != nil { return fmt.Errorf("failed to create cassandra session: %s", err) } c.session = session return nil }
[ "func", "(", "c", "*", "CasIdx", ")", "InitBare", "(", ")", "error", "{", "var", "err", "error", "\n", "tmpSession", ",", "err", ":=", "c", ".", "cluster", ".", "CreateSession", "(", ")", "\n", "if", "err", "!=", "nil", "{", "return", "fmt", ".", ...
// InitBare makes sure the keyspace, tables, and index exists in cassandra and creates a session
[ "InitBare", "makes", "sure", "the", "keyspace", "tables", "and", "index", "exists", "in", "cassandra", "and", "creates", "a", "session" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L113-L171
train
grafana/metrictank
idx/cassandra/cassandra.go
EnsureArchiveTableExists
func (c *CasIdx) EnsureArchiveTableExists(session *gocql.Session) error { var err error if session == nil { session, err = c.cluster.CreateSession() if err != nil { return fmt.Errorf("failed to create cassandra session: %s", err) } } schemaArchiveTable := util.ReadEntry(c.cfg.schemaFile, "schema_archive_table").(string) if c.cfg.createKeyspace { log.Info("cassandra-idx: ensuring that table metric_idx_archive exists.") err = session.Query(fmt.Sprintf(schemaArchiveTable, c.cfg.keyspace)).Exec() if err != nil { return fmt.Errorf("failed to initialize cassandra table: %s", err) } } else { var keyspaceMetadata *gocql.KeyspaceMetadata keyspaceMetadata, err = session.KeyspaceMetadata(c.cfg.keyspace) if err != nil { return fmt.Errorf("failed to read cassandra tables: %s", err) } if _, ok := keyspaceMetadata.Tables["metric_idx_archive"]; !ok { return fmt.Errorf("table metric_idx_archive does not exist") } } return nil }
go
func (c *CasIdx) EnsureArchiveTableExists(session *gocql.Session) error { var err error if session == nil { session, err = c.cluster.CreateSession() if err != nil { return fmt.Errorf("failed to create cassandra session: %s", err) } } schemaArchiveTable := util.ReadEntry(c.cfg.schemaFile, "schema_archive_table").(string) if c.cfg.createKeyspace { log.Info("cassandra-idx: ensuring that table metric_idx_archive exists.") err = session.Query(fmt.Sprintf(schemaArchiveTable, c.cfg.keyspace)).Exec() if err != nil { return fmt.Errorf("failed to initialize cassandra table: %s", err) } } else { var keyspaceMetadata *gocql.KeyspaceMetadata keyspaceMetadata, err = session.KeyspaceMetadata(c.cfg.keyspace) if err != nil { return fmt.Errorf("failed to read cassandra tables: %s", err) } if _, ok := keyspaceMetadata.Tables["metric_idx_archive"]; !ok { return fmt.Errorf("table metric_idx_archive does not exist") } } return nil }
[ "func", "(", "c", "*", "CasIdx", ")", "EnsureArchiveTableExists", "(", "session", "*", "gocql", ".", "Session", ")", "error", "{", "var", "err", "error", "\n", "if", "session", "==", "nil", "{", "session", ",", "err", "=", "c", ".", "cluster", ".", "...
// EnsureArchiveTableExists checks if the index archive table exists or not. If it does not exist and // the create-keyspace flag is true, then it will create it, if it doesn't exist and the create-keyspace // flag is false, then it will return an error. If the table exists then it just returns nil. // The index archive table is not required for Metrictank to run, it's only required by the // mt-index-prune utility to archive old metrics from the index.
[ "EnsureArchiveTableExists", "checks", "if", "the", "index", "archive", "table", "exists", "or", "not", ".", "If", "it", "does", "not", "exist", "and", "the", "create", "-", "keyspace", "flag", "is", "true", "then", "it", "will", "create", "it", "if", "it",...
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L178-L206
train
grafana/metrictank
idx/cassandra/cassandra.go
Init
func (c *CasIdx) Init() error { log.Infof("initializing cassandra-idx. Hosts=%s", c.cfg.hosts) if err := c.MemoryIndex.Init(); err != nil { return err } if err := c.InitBare(); err != nil { return err } if c.cfg.updateCassIdx { c.wg.Add(c.cfg.numConns) for i := 0; i < c.cfg.numConns; i++ { go c.processWriteQueue() } log.Infof("cassandra-idx: started %d writeQueue handlers", c.cfg.numConns) } //Rebuild the in-memory index. c.rebuildIndex() if memory.IndexRules.Prunable() { go c.prune() } return nil }
go
func (c *CasIdx) Init() error { log.Infof("initializing cassandra-idx. Hosts=%s", c.cfg.hosts) if err := c.MemoryIndex.Init(); err != nil { return err } if err := c.InitBare(); err != nil { return err } if c.cfg.updateCassIdx { c.wg.Add(c.cfg.numConns) for i := 0; i < c.cfg.numConns; i++ { go c.processWriteQueue() } log.Infof("cassandra-idx: started %d writeQueue handlers", c.cfg.numConns) } //Rebuild the in-memory index. c.rebuildIndex() if memory.IndexRules.Prunable() { go c.prune() } return nil }
[ "func", "(", "c", "*", "CasIdx", ")", "Init", "(", ")", "error", "{", "log", ".", "Infof", "(", "\"initializing cassandra-idx. Hosts=%s\"", ",", "c", ".", "cfg", ".", "hosts", ")", "\n", "if", "err", ":=", "c", ".", "MemoryIndex", ".", "Init", "(", "...
// Init makes sure the needed keyspace, table, index in cassandra exists, creates the session, // rebuilds the in-memory index, sets up write queues, metrics and pruning routines
[ "Init", "makes", "sure", "the", "needed", "keyspace", "table", "index", "in", "cassandra", "exists", "creates", "the", "session", "rebuilds", "the", "in", "-", "memory", "index", "sets", "up", "write", "queues", "metrics", "and", "pruning", "routines" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L210-L235
train
grafana/metrictank
idx/cassandra/cassandra.go
updateCassandra
func (c *CasIdx) updateCassandra(now uint32, inMemory bool, archive idx.Archive, partition int32) idx.Archive { // if the entry has not been saved for 1.5x updateInterval // then perform a blocking save. if archive.LastSave < (now - c.updateInterval32 - c.updateInterval32/2) { log.Debugf("cassandra-idx: updating def %s in index.", archive.MetricDefinition.Id) c.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition} archive.LastSave = now c.MemoryIndex.UpdateArchive(archive) } else { // perform a non-blocking write to the writeQueue. If the queue is full, then // this will fail and we won't update the LastSave timestamp. The next time // the metric is seen, the previous lastSave timestamp will still be in place and so // we will try and save again. This will continue until we are successful or the // lastSave timestamp become more then 1.5 x UpdateInterval, in which case we will // do a blocking write to the queue. select { case c.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition}: archive.LastSave = now c.MemoryIndex.UpdateArchive(archive) default: statSaveSkipped.Inc() log.Debugf("cassandra-idx: writeQueue is full, update of %s not saved this time.", archive.MetricDefinition.Id) } } return archive }
go
func (c *CasIdx) updateCassandra(now uint32, inMemory bool, archive idx.Archive, partition int32) idx.Archive { // if the entry has not been saved for 1.5x updateInterval // then perform a blocking save. if archive.LastSave < (now - c.updateInterval32 - c.updateInterval32/2) { log.Debugf("cassandra-idx: updating def %s in index.", archive.MetricDefinition.Id) c.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition} archive.LastSave = now c.MemoryIndex.UpdateArchive(archive) } else { // perform a non-blocking write to the writeQueue. If the queue is full, then // this will fail and we won't update the LastSave timestamp. The next time // the metric is seen, the previous lastSave timestamp will still be in place and so // we will try and save again. This will continue until we are successful or the // lastSave timestamp become more then 1.5 x UpdateInterval, in which case we will // do a blocking write to the queue. select { case c.writeQueue <- writeReq{recvTime: time.Now(), def: &archive.MetricDefinition}: archive.LastSave = now c.MemoryIndex.UpdateArchive(archive) default: statSaveSkipped.Inc() log.Debugf("cassandra-idx: writeQueue is full, update of %s not saved this time.", archive.MetricDefinition.Id) } } return archive }
[ "func", "(", "c", "*", "CasIdx", ")", "updateCassandra", "(", "now", "uint32", ",", "inMemory", "bool", ",", "archive", "idx", ".", "Archive", ",", "partition", "int32", ")", "idx", ".", "Archive", "{", "if", "archive", ".", "LastSave", "<", "(", "now"...
// updateCassandra saves the archive to cassandra and // updates the memory index with the updated fields.
[ "updateCassandra", "saves", "the", "archive", "to", "cassandra", "and", "updates", "the", "memory", "index", "with", "the", "updated", "fields", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L316-L342
train
grafana/metrictank
idx/cassandra/cassandra.go
LoadPartitions
func (c *CasIdx) LoadPartitions(partitions []int32, defs []schema.MetricDefinition, now time.Time) []schema.MetricDefinition { placeholders := make([]string, len(partitions)) for i, p := range partitions { placeholders[i] = strconv.Itoa(int(p)) } q := fmt.Sprintf("SELECT id, orgid, partition, name, interval, unit, mtype, tags, lastupdate from metric_idx where partition in (%s)", strings.Join(placeholders, ",")) iter := c.session.Query(q).Iter() return c.load(defs, iter, now) }
go
func (c *CasIdx) LoadPartitions(partitions []int32, defs []schema.MetricDefinition, now time.Time) []schema.MetricDefinition { placeholders := make([]string, len(partitions)) for i, p := range partitions { placeholders[i] = strconv.Itoa(int(p)) } q := fmt.Sprintf("SELECT id, orgid, partition, name, interval, unit, mtype, tags, lastupdate from metric_idx where partition in (%s)", strings.Join(placeholders, ",")) iter := c.session.Query(q).Iter() return c.load(defs, iter, now) }
[ "func", "(", "c", "*", "CasIdx", ")", "LoadPartitions", "(", "partitions", "[", "]", "int32", ",", "defs", "[", "]", "schema", ".", "MetricDefinition", ",", "now", "time", ".", "Time", ")", "[", "]", "schema", ".", "MetricDefinition", "{", "placeholders"...
// LoadPartitions appends MetricDefinitions from the given partitions to defs and returns the modified defs, honoring pruning settings relative to now
[ "LoadPartitions", "appends", "MetricDefinitions", "from", "the", "given", "partitions", "to", "defs", "and", "returns", "the", "modified", "defs", "honoring", "pruning", "settings", "relative", "to", "now" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L379-L387
train
grafana/metrictank
idx/cassandra/cassandra.go
load
func (c *CasIdx) load(defs []schema.MetricDefinition, iter cqlIterator, now time.Time) []schema.MetricDefinition { defsByNames := make(map[string][]*schema.MetricDefinition) var id, name, unit, mtype string var orgId, interval int var partition int32 var lastupdate int64 var tags []string for iter.Scan(&id, &orgId, &partition, &name, &interval, &unit, &mtype, &tags, &lastupdate) { mkey, err := schema.MKeyFromString(id) if err != nil { log.Errorf("cassandra-idx: load() could not parse ID %q: %s -> skipping", id, err) continue } if orgId < 0 { orgId = int(idx.OrgIdPublic) } mdef := &schema.MetricDefinition{ Id: mkey, OrgId: uint32(orgId), Partition: partition, Name: name, Interval: interval, Unit: unit, Mtype: mtype, Tags: tags, LastUpdate: lastupdate, } nameWithTags := mdef.NameWithTags() defsByNames[nameWithTags] = append(defsByNames[nameWithTags], mdef) } if err := iter.Close(); err != nil { log.Fatalf("Could not close iterator: %s", err.Error()) } // getting all cutoffs once saves having to recompute everytime we have a match cutoffs := memory.IndexRules.Cutoffs(now) NAMES: for nameWithTags, defsByName := range defsByNames { irId, _ := memory.IndexRules.Match(nameWithTags) cutoff := cutoffs[irId] for _, def := range defsByName { if def.LastUpdate >= cutoff { // if any of the defs for a given nameWithTags is not stale, then we need to load // all the defs for that nameWithTags. for _, defToAdd := range defsByNames[nameWithTags] { defs = append(defs, *defToAdd) } continue NAMES } } } return defs }
go
func (c *CasIdx) load(defs []schema.MetricDefinition, iter cqlIterator, now time.Time) []schema.MetricDefinition { defsByNames := make(map[string][]*schema.MetricDefinition) var id, name, unit, mtype string var orgId, interval int var partition int32 var lastupdate int64 var tags []string for iter.Scan(&id, &orgId, &partition, &name, &interval, &unit, &mtype, &tags, &lastupdate) { mkey, err := schema.MKeyFromString(id) if err != nil { log.Errorf("cassandra-idx: load() could not parse ID %q: %s -> skipping", id, err) continue } if orgId < 0 { orgId = int(idx.OrgIdPublic) } mdef := &schema.MetricDefinition{ Id: mkey, OrgId: uint32(orgId), Partition: partition, Name: name, Interval: interval, Unit: unit, Mtype: mtype, Tags: tags, LastUpdate: lastupdate, } nameWithTags := mdef.NameWithTags() defsByNames[nameWithTags] = append(defsByNames[nameWithTags], mdef) } if err := iter.Close(); err != nil { log.Fatalf("Could not close iterator: %s", err.Error()) } // getting all cutoffs once saves having to recompute everytime we have a match cutoffs := memory.IndexRules.Cutoffs(now) NAMES: for nameWithTags, defsByName := range defsByNames { irId, _ := memory.IndexRules.Match(nameWithTags) cutoff := cutoffs[irId] for _, def := range defsByName { if def.LastUpdate >= cutoff { // if any of the defs for a given nameWithTags is not stale, then we need to load // all the defs for that nameWithTags. for _, defToAdd := range defsByNames[nameWithTags] { defs = append(defs, *defToAdd) } continue NAMES } } } return defs }
[ "func", "(", "c", "*", "CasIdx", ")", "load", "(", "defs", "[", "]", "schema", ".", "MetricDefinition", ",", "iter", "cqlIterator", ",", "now", "time", ".", "Time", ")", "[", "]", "schema", ".", "MetricDefinition", "{", "defsByNames", ":=", "make", "("...
// load appends MetricDefinitions from the iterator to defs and returns the modified defs, honoring pruning settings relative to now
[ "load", "appends", "MetricDefinitions", "from", "the", "iterator", "to", "defs", "and", "returns", "the", "modified", "defs", "honoring", "pruning", "settings", "relative", "to", "now" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L390-L445
train
grafana/metrictank
idx/cassandra/cassandra.go
ArchiveDefs
func (c *CasIdx) ArchiveDefs(defs []schema.MetricDefinition) (int, error) { defChan := make(chan *schema.MetricDefinition, c.cfg.numConns) g, ctx := errgroup.WithContext(context.Background()) // keep track of how many defs were successfully archived. success := make([]int, c.cfg.numConns) for i := 0; i < c.cfg.numConns; i++ { i := i g.Go(func() error { for { select { case def, ok := <-defChan: if !ok { return nil } err := c.addDefToArchive(*def) if err != nil { // If we failed to add the def to the archive table then just continue on to the next def. // As we havnet yet removed the this def from the metric_idx table yet, the next time archiving // is performed the this def will be processed again. As no action is needed by an operator, we // just log this as a warning. log.Warnf("cassandra-idx: Failed add def to archive table. error=%s. def=%+v", err, *def) continue } err = c.deleteDef(def.Id, def.Partition) if err != nil { // The next time archiving is performed this def will be processed again. Re-adding the def to the archive // table will just be treated like an update with only the archived_at field changing. As no action is needed // by an operator, we just log this as a warning. log.Warnf("cassandra-idx: Failed to remove archived def from metric_idx table. error=%s. def=%+v", err, *def) continue } // increment counter of defs successfully archived success[i] = success[i] + 1 case <-ctx.Done(): return ctx.Err() } } }) } for i := range defs { defChan <- &defs[i] } close(defChan) // wait for all goroutines to complete. err := g.Wait() // get the count of defs successfully archived. total := 0 for _, count := range success { total = total + count } return total, err }
go
func (c *CasIdx) ArchiveDefs(defs []schema.MetricDefinition) (int, error) { defChan := make(chan *schema.MetricDefinition, c.cfg.numConns) g, ctx := errgroup.WithContext(context.Background()) // keep track of how many defs were successfully archived. success := make([]int, c.cfg.numConns) for i := 0; i < c.cfg.numConns; i++ { i := i g.Go(func() error { for { select { case def, ok := <-defChan: if !ok { return nil } err := c.addDefToArchive(*def) if err != nil { // If we failed to add the def to the archive table then just continue on to the next def. // As we havnet yet removed the this def from the metric_idx table yet, the next time archiving // is performed the this def will be processed again. As no action is needed by an operator, we // just log this as a warning. log.Warnf("cassandra-idx: Failed add def to archive table. error=%s. def=%+v", err, *def) continue } err = c.deleteDef(def.Id, def.Partition) if err != nil { // The next time archiving is performed this def will be processed again. Re-adding the def to the archive // table will just be treated like an update with only the archived_at field changing. As no action is needed // by an operator, we just log this as a warning. log.Warnf("cassandra-idx: Failed to remove archived def from metric_idx table. error=%s. def=%+v", err, *def) continue } // increment counter of defs successfully archived success[i] = success[i] + 1 case <-ctx.Done(): return ctx.Err() } } }) } for i := range defs { defChan <- &defs[i] } close(defChan) // wait for all goroutines to complete. err := g.Wait() // get the count of defs successfully archived. total := 0 for _, count := range success { total = total + count } return total, err }
[ "func", "(", "c", "*", "CasIdx", ")", "ArchiveDefs", "(", "defs", "[", "]", "schema", ".", "MetricDefinition", ")", "(", "int", ",", "error", ")", "{", "defChan", ":=", "make", "(", "chan", "*", "schema", ".", "MetricDefinition", ",", "c", ".", "cfg"...
// ArchiveDefs writes each of the provided defs to the archive table and // then deletes the defs from the metric_idx table.
[ "ArchiveDefs", "writes", "each", "of", "the", "provided", "defs", "to", "the", "archive", "table", "and", "then", "deletes", "the", "defs", "from", "the", "metric_idx", "table", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/cassandra.go#L449-L507
train
grafana/metrictank
mdata/notifierKafka/notifierKafka.go
flush
func (c *NotifierKafka) flush() { if len(c.buf) == 0 { return } // In order to correctly route the saveMessages to the correct partition, // we can't send them in batches anymore. payload := make([]*sarama.ProducerMessage, 0, len(c.buf)) var pMsg mdata.PersistMessageBatch for i, msg := range c.buf { amkey, err := schema.AMKeyFromString(msg.Key) if err != nil { log.Errorf("kafka-cluster: failed to parse key %q", msg.Key) continue } partition, ok := c.handler.PartitionOf(amkey.MKey) if !ok { log.Errorf("kafka-cluster: failed to lookup metricDef with id %s", msg.Key) continue } buf := bytes.NewBuffer(c.bPool.Get()) binary.Write(buf, binary.LittleEndian, uint8(mdata.PersistMessageBatchV1)) encoder := json.NewEncoder(buf) pMsg = mdata.PersistMessageBatch{Instance: c.instance, SavedChunks: c.buf[i : i+1]} err = encoder.Encode(&pMsg) if err != nil { log.Fatalf("kafka-cluster: failed to marshal persistMessage to json.") } messagesSize.Value(buf.Len()) kafkaMsg := &sarama.ProducerMessage{ Topic: topic, Value: sarama.ByteEncoder(buf.Bytes()), Partition: partition, } payload = append(payload, kafkaMsg) } c.buf = nil go func() { log.Debugf("kafka-cluster: sending %d batch metricPersist messages", len(payload)) sent := false for !sent { err := c.producer.SendMessages(payload) if err != nil { log.Warnf("kafka-cluster: publisher %s", err) } else { sent = true } time.Sleep(time.Second) } messagesPublished.Add(len(payload)) // put our buffers back in the bufferPool for _, msg := range payload { c.bPool.Put([]byte(msg.Value.(sarama.ByteEncoder))) } }() }
go
func (c *NotifierKafka) flush() { if len(c.buf) == 0 { return } // In order to correctly route the saveMessages to the correct partition, // we can't send them in batches anymore. payload := make([]*sarama.ProducerMessage, 0, len(c.buf)) var pMsg mdata.PersistMessageBatch for i, msg := range c.buf { amkey, err := schema.AMKeyFromString(msg.Key) if err != nil { log.Errorf("kafka-cluster: failed to parse key %q", msg.Key) continue } partition, ok := c.handler.PartitionOf(amkey.MKey) if !ok { log.Errorf("kafka-cluster: failed to lookup metricDef with id %s", msg.Key) continue } buf := bytes.NewBuffer(c.bPool.Get()) binary.Write(buf, binary.LittleEndian, uint8(mdata.PersistMessageBatchV1)) encoder := json.NewEncoder(buf) pMsg = mdata.PersistMessageBatch{Instance: c.instance, SavedChunks: c.buf[i : i+1]} err = encoder.Encode(&pMsg) if err != nil { log.Fatalf("kafka-cluster: failed to marshal persistMessage to json.") } messagesSize.Value(buf.Len()) kafkaMsg := &sarama.ProducerMessage{ Topic: topic, Value: sarama.ByteEncoder(buf.Bytes()), Partition: partition, } payload = append(payload, kafkaMsg) } c.buf = nil go func() { log.Debugf("kafka-cluster: sending %d batch metricPersist messages", len(payload)) sent := false for !sent { err := c.producer.SendMessages(payload) if err != nil { log.Warnf("kafka-cluster: publisher %s", err) } else { sent = true } time.Sleep(time.Second) } messagesPublished.Add(len(payload)) // put our buffers back in the bufferPool for _, msg := range payload { c.bPool.Put([]byte(msg.Value.(sarama.ByteEncoder))) } }() }
[ "func", "(", "c", "*", "NotifierKafka", ")", "flush", "(", ")", "{", "if", "len", "(", "c", ".", "buf", ")", "==", "0", "{", "return", "\n", "}", "\n", "payload", ":=", "make", "(", "[", "]", "*", "sarama", ".", "ProducerMessage", ",", "0", ","...
// flush makes sure the batch gets sent, asynchronously.
[ "flush", "makes", "sure", "the", "batch", "gets", "sent", "asynchronously", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/notifierKafka/notifierKafka.go#L201-L259
train
grafana/metrictank
api/cluster.go
indexFind
func (s *Server) indexFind(ctx *middleware.Context, req models.IndexFind) { resp := models.NewIndexFindResp() // query nodes don't own any data if s.MetricIndex == nil { response.Write(ctx, response.NewMsgp(200, resp)) return } for _, pattern := range req.Patterns { nodes, err := s.MetricIndex.Find(req.OrgId, pattern, req.From) if err != nil { response.Write(ctx, response.WrapError(err)) return } resp.Nodes[pattern] = nodes } response.Write(ctx, response.NewMsgp(200, resp)) }
go
func (s *Server) indexFind(ctx *middleware.Context, req models.IndexFind) { resp := models.NewIndexFindResp() // query nodes don't own any data if s.MetricIndex == nil { response.Write(ctx, response.NewMsgp(200, resp)) return } for _, pattern := range req.Patterns { nodes, err := s.MetricIndex.Find(req.OrgId, pattern, req.From) if err != nil { response.Write(ctx, response.WrapError(err)) return } resp.Nodes[pattern] = nodes } response.Write(ctx, response.NewMsgp(200, resp)) }
[ "func", "(", "s", "*", "Server", ")", "indexFind", "(", "ctx", "*", "middleware", ".", "Context", ",", "req", "models", ".", "IndexFind", ")", "{", "resp", ":=", "models", ".", "NewIndexFindResp", "(", ")", "\n", "if", "s", ".", "MetricIndex", "==", ...
// IndexFind returns a sequence of msgp encoded idx.Node's
[ "IndexFind", "returns", "a", "sequence", "of", "msgp", "encoded", "idx", ".", "Node", "s" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/cluster.go#L114-L132
train
grafana/metrictank
api/cluster.go
indexGet
func (s *Server) indexGet(ctx *middleware.Context, req models.IndexGet) { // query nodes don't own any data. if s.MetricIndex == nil { response.Write(ctx, response.NewMsgp(404, nil)) return } def, ok := s.MetricIndex.Get(req.MKey) if !ok { response.Write(ctx, response.NewError(http.StatusNotFound, "Not Found")) return } response.Write(ctx, response.NewMsgp(200, &def)) }
go
func (s *Server) indexGet(ctx *middleware.Context, req models.IndexGet) { // query nodes don't own any data. if s.MetricIndex == nil { response.Write(ctx, response.NewMsgp(404, nil)) return } def, ok := s.MetricIndex.Get(req.MKey) if !ok { response.Write(ctx, response.NewError(http.StatusNotFound, "Not Found")) return } response.Write(ctx, response.NewMsgp(200, &def)) }
[ "func", "(", "s", "*", "Server", ")", "indexGet", "(", "ctx", "*", "middleware", ".", "Context", ",", "req", "models", ".", "IndexGet", ")", "{", "if", "s", ".", "MetricIndex", "==", "nil", "{", "response", ".", "Write", "(", "ctx", ",", "response", ...
// IndexGet returns a msgp encoded schema.MetricDefinition
[ "IndexGet", "returns", "a", "msgp", "encoded", "schema", ".", "MetricDefinition" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/cluster.go#L236-L251
train
grafana/metrictank
api/cluster.go
indexList
func (s *Server) indexList(ctx *middleware.Context, req models.IndexList) { // query nodes don't own any data. if s.MetricIndex == nil { response.Write(ctx, response.NewMsgpArray(200, nil)) return } defs := s.MetricIndex.List(req.OrgId) resp := make([]msgp.Marshaler, len(defs)) for i := range defs { d := defs[i] resp[i] = &d } response.Write(ctx, response.NewMsgpArray(200, resp)) }
go
func (s *Server) indexList(ctx *middleware.Context, req models.IndexList) { // query nodes don't own any data. if s.MetricIndex == nil { response.Write(ctx, response.NewMsgpArray(200, nil)) return } defs := s.MetricIndex.List(req.OrgId) resp := make([]msgp.Marshaler, len(defs)) for i := range defs { d := defs[i] resp[i] = &d } response.Write(ctx, response.NewMsgpArray(200, resp)) }
[ "func", "(", "s", "*", "Server", ")", "indexList", "(", "ctx", "*", "middleware", ".", "Context", ",", "req", "models", ".", "IndexList", ")", "{", "if", "s", ".", "MetricIndex", "==", "nil", "{", "response", ".", "Write", "(", "ctx", ",", "response"...
// IndexList returns msgp encoded schema.MetricDefinition's
[ "IndexList", "returns", "msgp", "encoded", "schema", ".", "MetricDefinition", "s" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/cluster.go#L254-L269
train
grafana/metrictank
tracing/tracing.go
Error
func Error(span opentracing.Span, err error) { span.LogFields(log.Error(err)) }
go
func Error(span opentracing.Span, err error) { span.LogFields(log.Error(err)) }
[ "func", "Error", "(", "span", "opentracing", ".", "Span", ",", "err", "error", ")", "{", "span", ".", "LogFields", "(", "log", ".", "Error", "(", "err", ")", ")", "\n", "}" ]
// Error logs error
[ "Error", "logs", "error" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/tracing/tracing.go#L26-L28
train
grafana/metrictank
tracing/tracing.go
Errorf
func Errorf(span opentracing.Span, format string, a ...interface{}) { span.LogFields(log.Error(fmt.Errorf(format, a...))) }
go
func Errorf(span opentracing.Span, format string, a ...interface{}) { span.LogFields(log.Error(fmt.Errorf(format, a...))) }
[ "func", "Errorf", "(", "span", "opentracing", ".", "Span", ",", "format", "string", ",", "a", "...", "interface", "{", "}", ")", "{", "span", ".", "LogFields", "(", "log", ".", "Error", "(", "fmt", ".", "Errorf", "(", "format", ",", "a", "...", ")"...
// Errorf logs error
[ "Errorf", "logs", "error" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/tracing/tracing.go#L31-L33
train
grafana/metrictank
clock/clock.go
AlignedTick
func AlignedTick(period time.Duration) <-chan time.Time { // note that time.Ticker is not an interface, // and that if we instantiate one, we can't write to its channel // hence we can't leverage that type. c := make(chan time.Time) go func() { for { unix := time.Now().UnixNano() diff := time.Duration(period - (time.Duration(unix) % period)) time.Sleep(diff) select { case c <- time.Now(): default: } } }() return c }
go
func AlignedTick(period time.Duration) <-chan time.Time { // note that time.Ticker is not an interface, // and that if we instantiate one, we can't write to its channel // hence we can't leverage that type. c := make(chan time.Time) go func() { for { unix := time.Now().UnixNano() diff := time.Duration(period - (time.Duration(unix) % period)) time.Sleep(diff) select { case c <- time.Now(): default: } } }() return c }
[ "func", "AlignedTick", "(", "period", "time", ".", "Duration", ")", "<-", "chan", "time", ".", "Time", "{", "c", ":=", "make", "(", "chan", "time", ".", "Time", ")", "\n", "go", "func", "(", ")", "{", "for", "{", "unix", ":=", "time", ".", "Now",...
// AlignedTick returns a tick channel so that, let's say interval is a second // then it will tick at every whole second, or if it's 60s than it's every whole // minute. Note that in my testing this is about .0001 to 0.0002 seconds later due // to scheduling etc.
[ "AlignedTick", "returns", "a", "tick", "channel", "so", "that", "let", "s", "say", "interval", "is", "a", "second", "then", "it", "will", "tick", "at", "every", "whole", "second", "or", "if", "it", "s", "60s", "than", "it", "s", "every", "whole", "minu...
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/clock/clock.go#L9-L26
train
grafana/metrictank
api/middleware/tracer.go
Tracer
func Tracer(tracer opentracing.Tracer) macaron.Handler { return func(macCtx *macaron.Context) { path := pathSlug(macCtx.Req.URL.Path) // graphite cluster requests use local=1 // this way we can differentiate "full" render requests from client to MT (encompassing data processing, proxing to graphite, etc) // from "subrequests" where metrictank is called by graphite and graphite does the processing and returns to the client if macCtx.Req.Request.Form.Get("local") == "1" { path += "-local" } spanCtx, _ := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(macCtx.Req.Header)) span := tracer.StartSpan("HTTP "+macCtx.Req.Method+" "+path, ext.RPCServerOption(spanCtx)) ext.HTTPMethod.Set(span, macCtx.Req.Method) ext.HTTPUrl.Set(span, macCtx.Req.URL.String()) ext.Component.Set(span, "metrictank/api") macCtx.Req = macaron.Request{macCtx.Req.WithContext(opentracing.ContextWithSpan(macCtx.Req.Context(), span))} macCtx.Resp = &TracingResponseWriter{ ResponseWriter: macCtx.Resp, } macCtx.MapTo(macCtx.Resp, (*http.ResponseWriter)(nil)) rw := macCtx.Resp.(*TracingResponseWriter) // if tracing is enabled (context is not a opentracing.noopSpanContext) // store traceID in output headers if spanCtx, ok := span.Context().(jaeger.SpanContext); ok { traceID := spanCtx.TraceID().String() headers := macCtx.Resp.Header() headers["Trace-Id"] = []string{traceID} } // call next handler. This will return after all handlers // have completed and the request has been sent. macCtx.Next() // if tracing has been disabled we return directly without calling // span.Finish() if noTrace, ok := macCtx.Data["noTrace"]; ok && noTrace.(bool) { return } status := rw.Status() ext.HTTPStatusCode.Set(span, uint16(status)) if status >= 200 && status < 300 { span.SetTag("http.size", rw.Size()) } if status >= 400 { tracing.Error(span, errors.New(string(rw.errBody))) if status >= http.StatusInternalServerError { tracing.Failure(span) } } span.Finish() } }
go
func Tracer(tracer opentracing.Tracer) macaron.Handler { return func(macCtx *macaron.Context) { path := pathSlug(macCtx.Req.URL.Path) // graphite cluster requests use local=1 // this way we can differentiate "full" render requests from client to MT (encompassing data processing, proxing to graphite, etc) // from "subrequests" where metrictank is called by graphite and graphite does the processing and returns to the client if macCtx.Req.Request.Form.Get("local") == "1" { path += "-local" } spanCtx, _ := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(macCtx.Req.Header)) span := tracer.StartSpan("HTTP "+macCtx.Req.Method+" "+path, ext.RPCServerOption(spanCtx)) ext.HTTPMethod.Set(span, macCtx.Req.Method) ext.HTTPUrl.Set(span, macCtx.Req.URL.String()) ext.Component.Set(span, "metrictank/api") macCtx.Req = macaron.Request{macCtx.Req.WithContext(opentracing.ContextWithSpan(macCtx.Req.Context(), span))} macCtx.Resp = &TracingResponseWriter{ ResponseWriter: macCtx.Resp, } macCtx.MapTo(macCtx.Resp, (*http.ResponseWriter)(nil)) rw := macCtx.Resp.(*TracingResponseWriter) // if tracing is enabled (context is not a opentracing.noopSpanContext) // store traceID in output headers if spanCtx, ok := span.Context().(jaeger.SpanContext); ok { traceID := spanCtx.TraceID().String() headers := macCtx.Resp.Header() headers["Trace-Id"] = []string{traceID} } // call next handler. This will return after all handlers // have completed and the request has been sent. macCtx.Next() // if tracing has been disabled we return directly without calling // span.Finish() if noTrace, ok := macCtx.Data["noTrace"]; ok && noTrace.(bool) { return } status := rw.Status() ext.HTTPStatusCode.Set(span, uint16(status)) if status >= 200 && status < 300 { span.SetTag("http.size", rw.Size()) } if status >= 400 { tracing.Error(span, errors.New(string(rw.errBody))) if status >= http.StatusInternalServerError { tracing.Failure(span) } } span.Finish() } }
[ "func", "Tracer", "(", "tracer", "opentracing", ".", "Tracer", ")", "macaron", ".", "Handler", "{", "return", "func", "(", "macCtx", "*", "macaron", ".", "Context", ")", "{", "path", ":=", "pathSlug", "(", "macCtx", ".", "Req", ".", "URL", ".", "Path",...
// Tracer returns a middleware that traces requests
[ "Tracer", "returns", "a", "middleware", "that", "traces", "requests" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/middleware/tracer.go#L32-L88
train
grafana/metrictank
conf/schemas.go
Get
func (s Schemas) Get(i uint16) Schema { if i+1 > uint16(len(s.index)) { return s.DefaultSchema } return s.index[i] }
go
func (s Schemas) Get(i uint16) Schema { if i+1 > uint16(len(s.index)) { return s.DefaultSchema } return s.index[i] }
[ "func", "(", "s", "Schemas", ")", "Get", "(", "i", "uint16", ")", "Schema", "{", "if", "i", "+", "1", ">", "uint16", "(", "len", "(", "s", ".", "index", ")", ")", "{", "return", "s", ".", "DefaultSchema", "\n", "}", "\n", "return", "s", ".", ...
// Get returns the schema setting corresponding to the given index
[ "Get", "returns", "the", "schema", "setting", "corresponding", "to", "the", "given", "index" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/schemas.go#L215-L220
train
grafana/metrictank
conf/schemas.go
TTLs
func (schemas Schemas) TTLs() []uint32 { ttls := make(map[uint32]struct{}) for _, s := range schemas.raw { for _, r := range s.Retentions { ttls[uint32(r.MaxRetention())] = struct{}{} } } for _, r := range schemas.DefaultSchema.Retentions { ttls[uint32(r.MaxRetention())] = struct{}{} } var ttlSlice []uint32 for ttl := range ttls { ttlSlice = append(ttlSlice, ttl) } return ttlSlice }
go
func (schemas Schemas) TTLs() []uint32 { ttls := make(map[uint32]struct{}) for _, s := range schemas.raw { for _, r := range s.Retentions { ttls[uint32(r.MaxRetention())] = struct{}{} } } for _, r := range schemas.DefaultSchema.Retentions { ttls[uint32(r.MaxRetention())] = struct{}{} } var ttlSlice []uint32 for ttl := range ttls { ttlSlice = append(ttlSlice, ttl) } return ttlSlice }
[ "func", "(", "schemas", "Schemas", ")", "TTLs", "(", ")", "[", "]", "uint32", "{", "ttls", ":=", "make", "(", "map", "[", "uint32", "]", "struct", "{", "}", ")", "\n", "for", "_", ",", "s", ":=", "range", "schemas", ".", "raw", "{", "for", "_",...
// TTLs returns a slice of all TTL's seen amongst all archives of all schemas
[ "TTLs", "returns", "a", "slice", "of", "all", "TTL", "s", "seen", "amongst", "all", "archives", "of", "all", "schemas" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/schemas.go#L223-L238
train
grafana/metrictank
conf/schemas.go
MaxChunkSpan
func (schemas Schemas) MaxChunkSpan() uint32 { max := uint32(0) for _, s := range schemas.raw { for _, r := range s.Retentions { max = util.Max(max, r.ChunkSpan) } } for _, r := range schemas.DefaultSchema.Retentions { max = util.Max(max, r.ChunkSpan) } return max }
go
func (schemas Schemas) MaxChunkSpan() uint32 { max := uint32(0) for _, s := range schemas.raw { for _, r := range s.Retentions { max = util.Max(max, r.ChunkSpan) } } for _, r := range schemas.DefaultSchema.Retentions { max = util.Max(max, r.ChunkSpan) } return max }
[ "func", "(", "schemas", "Schemas", ")", "MaxChunkSpan", "(", ")", "uint32", "{", "max", ":=", "uint32", "(", "0", ")", "\n", "for", "_", ",", "s", ":=", "range", "schemas", ".", "raw", "{", "for", "_", ",", "r", ":=", "range", "s", ".", "Retentio...
// MaxChunkSpan returns the largest chunkspan seen amongst all archives of all schemas
[ "MaxChunkSpan", "returns", "the", "largest", "chunkspan", "seen", "amongst", "all", "archives", "of", "all", "schemas" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/schemas.go#L241-L252
train
grafana/metrictank
cmd/mt-index-cat/out/tpl_pattern_custom.go
patternCustom
func patternCustom(in ...interface{}) string { usage := func() { PatternCustomUsage("") os.Exit(-1) } // one or more of "<chance> <operation>" followed by an input string at the end. if len(in) < 3 || len(in)%2 != 1 { usage() } input, ok := in[len(in)-1].(string) if !ok { usage() } var buckets []bucket var sum int for i := 0; i < len(in)-2; i += 2 { chance, ok := in[i].(int) if !ok { usage() } patt, ok := in[i+1].(string) if !ok { usage() } if patt == "pass" { sum += chance buckets = append(buckets, bucket{ chance: chance, fn: Passthrough, }) continue } if patt[0] < '0' || patt[0] > '9' { usage() } num := int(patt[0] - '0') // parse ascii number to int if patt[1:] != "rcnw" && patt[1:] != "rccw" { usage() } var fn func(in string) string if patt[1:] == "rcnw" { fn = ReplaceRandomConsecutiveNodesWildcard(num) } else { fn = ReplaceRandomConsecutiveCharsWildcard(num) } sum += chance buckets = append(buckets, bucket{ chance: chance, fn: fn, }) } if sum != 100 { usage() } pos := rand.Intn(100) sum = 0 for _, b := range buckets { if pos < sum+b.chance { return b.fn(input) } sum += b.chance } panic("should never happen") return "foo" }
go
func patternCustom(in ...interface{}) string { usage := func() { PatternCustomUsage("") os.Exit(-1) } // one or more of "<chance> <operation>" followed by an input string at the end. if len(in) < 3 || len(in)%2 != 1 { usage() } input, ok := in[len(in)-1].(string) if !ok { usage() } var buckets []bucket var sum int for i := 0; i < len(in)-2; i += 2 { chance, ok := in[i].(int) if !ok { usage() } patt, ok := in[i+1].(string) if !ok { usage() } if patt == "pass" { sum += chance buckets = append(buckets, bucket{ chance: chance, fn: Passthrough, }) continue } if patt[0] < '0' || patt[0] > '9' { usage() } num := int(patt[0] - '0') // parse ascii number to int if patt[1:] != "rcnw" && patt[1:] != "rccw" { usage() } var fn func(in string) string if patt[1:] == "rcnw" { fn = ReplaceRandomConsecutiveNodesWildcard(num) } else { fn = ReplaceRandomConsecutiveCharsWildcard(num) } sum += chance buckets = append(buckets, bucket{ chance: chance, fn: fn, }) } if sum != 100 { usage() } pos := rand.Intn(100) sum = 0 for _, b := range buckets { if pos < sum+b.chance { return b.fn(input) } sum += b.chance } panic("should never happen") return "foo" }
[ "func", "patternCustom", "(", "in", "...", "interface", "{", "}", ")", "string", "{", "usage", ":=", "func", "(", ")", "{", "PatternCustomUsage", "(", "\"\"", ")", "\n", "os", ".", "Exit", "(", "-", "1", ")", "\n", "}", "\n", "if", "len", "(", "i...
// percentage chance, and function
[ "percentage", "chance", "and", "function" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-index-cat/out/tpl_pattern_custom.go#L28-L95
train
grafana/metrictank
cmd/mt-index-cat/out/tpl_pattern_custom.go
ReplaceRandomConsecutiveNodesWildcard
func ReplaceRandomConsecutiveNodesWildcard(num int) func(in string) string { return func(in string) string { parts := strings.Split(in, ".") if len(parts) < num { log.Fatalf("metric %q has not enough nodes to replace %d nodes", in, num) } pos := rand.Intn(len(parts) - num + 1) for i := pos; i < pos+num; i++ { parts[pos] = "*" } return strings.Join(parts, ".") } }
go
func ReplaceRandomConsecutiveNodesWildcard(num int) func(in string) string { return func(in string) string { parts := strings.Split(in, ".") if len(parts) < num { log.Fatalf("metric %q has not enough nodes to replace %d nodes", in, num) } pos := rand.Intn(len(parts) - num + 1) for i := pos; i < pos+num; i++ { parts[pos] = "*" } return strings.Join(parts, ".") } }
[ "func", "ReplaceRandomConsecutiveNodesWildcard", "(", "num", "int", ")", "func", "(", "in", "string", ")", "string", "{", "return", "func", "(", "in", "string", ")", "string", "{", "parts", ":=", "strings", ".", "Split", "(", "in", ",", "\".\"", ")", "\n...
// ReplaceRandomConsecutiveNodesWildcard returns a function that will replace num consecutive random nodes with wildcards // the implementation is rather naive and can be optimized
[ "ReplaceRandomConsecutiveNodesWildcard", "returns", "a", "function", "that", "will", "replace", "num", "consecutive", "random", "nodes", "with", "wildcards", "the", "implementation", "is", "rather", "naive", "and", "can", "be", "optimized" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-index-cat/out/tpl_pattern_custom.go#L103-L115
train
grafana/metrictank
cmd/mt-store-cat/series.go
printPointSummary
func printPointSummary(ctx context.Context, store *cassandra.CassandraStore, tables []cassandra.Table, metrics []Metric, fromUnix, toUnix, fix uint32) { for _, metric := range metrics { fmt.Println("## Metric", metric) for _, table := range tables { fmt.Println("### Table", table.Name) if fix != 0 { points := getSeries(ctx, store, table, metric.AMKey, fromUnix, toUnix, fix) printPointsSummary(points, fromUnix, toUnix) } else { igens, err := store.SearchTable(ctx, metric.AMKey, table, fromUnix, toUnix) if err != nil { panic(err) } printSummary(igens, fromUnix, toUnix) } } } }
go
func printPointSummary(ctx context.Context, store *cassandra.CassandraStore, tables []cassandra.Table, metrics []Metric, fromUnix, toUnix, fix uint32) { for _, metric := range metrics { fmt.Println("## Metric", metric) for _, table := range tables { fmt.Println("### Table", table.Name) if fix != 0 { points := getSeries(ctx, store, table, metric.AMKey, fromUnix, toUnix, fix) printPointsSummary(points, fromUnix, toUnix) } else { igens, err := store.SearchTable(ctx, metric.AMKey, table, fromUnix, toUnix) if err != nil { panic(err) } printSummary(igens, fromUnix, toUnix) } } } }
[ "func", "printPointSummary", "(", "ctx", "context", ".", "Context", ",", "store", "*", "cassandra", ".", "CassandraStore", ",", "tables", "[", "]", "cassandra", ".", "Table", ",", "metrics", "[", "]", "Metric", ",", "fromUnix", ",", "toUnix", ",", "fix", ...
// printPointSummary prints a summarized view of the points in the store corresponding to the given requirements
[ "printPointSummary", "prints", "a", "summarized", "view", "of", "the", "points", "in", "the", "store", "corresponding", "to", "the", "given", "requirements" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/series.go#L36-L53
train
grafana/metrictank
cmd/mt-index-cat/out/template_functions.go
pattern
// pattern turns a metric name into a query pattern: a random choice between
// replacing a node with a wildcard, replacing chars with a wildcard, and passthrough.
func pattern(in string) string {
	mode := rand.Intn(3)
	if mode == 0 {
		// in this mode, replaces a node with a wildcard
		parts := strings.Split(in, ".")
		parts[rand.Intn(len(parts))] = "*"
		return strings.Join(parts, ".")
	} else if mode == 1 {
		// randomly replace chars with a *
		// note that when chars is 0 no characters are removed, a "*" is simply inserted
		// and otherwise, sometimes valid patterns are produced,
		// but it's also possible to produce patterns that won't match anything (if '.' was taken out)
		if len(in) < 5 {
			log.Fatalf("metric %q too short for pattern replacement", in)
		}
		chars := rand.Intn(5)
		pos := rand.Intn(len(in) - chars)
		return in[0:pos] + "*" + in[pos+chars:]
	}
	// mode 2: do nothing :)
	return in
}
go
// pattern turns a metric name into a query pattern: a random choice between
// replacing a node with a wildcard, replacing chars with a wildcard, and passthrough.
func pattern(in string) string {
	mode := rand.Intn(3)
	if mode == 0 {
		// in this mode, replaces a node with a wildcard
		parts := strings.Split(in, ".")
		parts[rand.Intn(len(parts))] = "*"
		return strings.Join(parts, ".")
	} else if mode == 1 {
		// randomly replace chars with a *
		// note that when chars is 0 no characters are removed, a "*" is simply inserted
		// and otherwise, sometimes valid patterns are produced,
		// but it's also possible to produce patterns that won't match anything (if '.' was taken out)
		if len(in) < 5 {
			log.Fatalf("metric %q too short for pattern replacement", in)
		}
		chars := rand.Intn(5)
		pos := rand.Intn(len(in) - chars)
		return in[0:pos] + "*" + in[pos+chars:]
	}
	// mode 2: do nothing :)
	return in
}
[ "func", "pattern", "(", "in", "string", ")", "string", "{", "mode", ":=", "rand", ".", "Intn", "(", "3", ")", "\n", "if", "mode", "==", "0", "{", "parts", ":=", "strings", ".", "Split", "(", "in", ",", "\".\"", ")", "\n", "parts", "[", "rand", ...
// random choice between replacing a node with a wildcard, a char with a wildcard, and passthrough
[ "random", "choice", "between", "replacing", "a", "node", "with", "a", "wildcard", "a", "char", "with", "a", "wildcard", "and", "passthrough" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-index-cat/out/template_functions.go#L10-L31
train
grafana/metrictank
cmd/mt-index-cat/out/template_functions.go
roundDuration
func roundDuration(in int64) int64 { abs := in if abs < 0 { abs = -abs } if abs <= 10 { // 10s -> don't round return in } else if abs <= 60 { // 1min -> round to 10s return round(in, 10) } else if abs <= 600 { // 10min -> round to 1min return round(in, 60) } else if abs <= 3600 { // 1h -> round to 10min return round(in, 600) } else if abs <= 3600*24 { // 24h -> round to 1h return round(in, 3600) } else if abs <= 3600*24*7 { // 7d -> round to 1d return round(in, 3600*24) } else if abs <= 3600*24*30 { // 30d -> round to 7d return round(in, 3600*24*7) } // default to rounding to months return round(in, 3600*24*30) }
go
func roundDuration(in int64) int64 { abs := in if abs < 0 { abs = -abs } if abs <= 10 { // 10s -> don't round return in } else if abs <= 60 { // 1min -> round to 10s return round(in, 10) } else if abs <= 600 { // 10min -> round to 1min return round(in, 60) } else if abs <= 3600 { // 1h -> round to 10min return round(in, 600) } else if abs <= 3600*24 { // 24h -> round to 1h return round(in, 3600) } else if abs <= 3600*24*7 { // 7d -> round to 1d return round(in, 3600*24) } else if abs <= 3600*24*30 { // 30d -> round to 7d return round(in, 3600*24*7) } // default to rounding to months return round(in, 3600*24*30) }
[ "func", "roundDuration", "(", "in", "int64", ")", "int64", "{", "abs", ":=", "in", "\n", "if", "abs", "<", "0", "{", "abs", "=", "-", "abs", "\n", "}", "\n", "if", "abs", "<=", "10", "{", "return", "in", "\n", "}", "else", "if", "abs", "<=", ...
// roundDuration rounds a second-specified duration for rough classification
[ "roundDuration", "rounds", "a", "second", "-", "specified", "duration", "for", "rough", "classification" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-index-cat/out/template_functions.go#L39-L61
train
grafana/metrictank
cmd/mt-index-cat/out/template_functions.go
round
// round rounds number d to the nearest r-boundary; ties round away from zero.
func round(d, r int64) int64 {
	// handle negatives by reflecting around zero.
	if d < 0 {
		return -round(-d, r)
	}
	m := d % r
	if m+m < r {
		return d - m
	}
	return d + r - m
}
go
// round rounds number d to the nearest r-boundary; ties round away from zero.
func round(d, r int64) int64 {
	// handle negatives by reflecting around zero.
	if d < 0 {
		return -round(-d, r)
	}
	m := d % r
	if m+m < r {
		return d - m
	}
	return d + r - m
}
[ "func", "round", "(", "d", ",", "r", "int64", ")", "int64", "{", "neg", ":=", "d", "<", "0", "\n", "if", "neg", "{", "d", "=", "-", "d", "\n", "}", "\n", "if", "m", ":=", "d", "%", "r", ";", "m", "+", "m", "<", "r", "{", "d", "=", "d"...
// round rounds number d to the nearest r-boundary
[ "round", "rounds", "number", "d", "to", "the", "nearest", "r", "-", "boundary" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-index-cat/out/template_functions.go#L64-L78
train
grafana/metrictank
cmd/mt-store-cat/chunk_by_ttl.go
showKeyTTL
func showKeyTTL(iter *gocql.Iter, groupTTL string) { roundTTL := 1 switch groupTTL { case "m": roundTTL = 60 case "h": roundTTL = 60 * 60 case "d": roundTTL = 60 * 60 * 24 } var b bucket bucketMap := make(map[bucket]int) for iter.Scan(&b.key, &b.ttl) { b.ttl /= roundTTL bucketMap[b] += 1 } var bucketList []bucketWithCount for b, count := range bucketMap { bucketList = append(bucketList, bucketWithCount{ b.key, b.ttl, count, }) } sort.Sort(byTTL(bucketList)) for _, b := range bucketList { fmt.Printf("%s %d%s %d\n", b.key, b.ttl, groupTTL, b.c) } err := iter.Close() if err != nil { log.Errorf("cassandra query error. %s", err) } }
go
func showKeyTTL(iter *gocql.Iter, groupTTL string) { roundTTL := 1 switch groupTTL { case "m": roundTTL = 60 case "h": roundTTL = 60 * 60 case "d": roundTTL = 60 * 60 * 24 } var b bucket bucketMap := make(map[bucket]int) for iter.Scan(&b.key, &b.ttl) { b.ttl /= roundTTL bucketMap[b] += 1 } var bucketList []bucketWithCount for b, count := range bucketMap { bucketList = append(bucketList, bucketWithCount{ b.key, b.ttl, count, }) } sort.Sort(byTTL(bucketList)) for _, b := range bucketList { fmt.Printf("%s %d%s %d\n", b.key, b.ttl, groupTTL, b.c) } err := iter.Close() if err != nil { log.Errorf("cassandra query error. %s", err) } }
[ "func", "showKeyTTL", "(", "iter", "*", "gocql", ".", "Iter", ",", "groupTTL", "string", ")", "{", "roundTTL", ":=", "1", "\n", "switch", "groupTTL", "{", "case", "\"m\"", ":", "roundTTL", "=", "60", "\n", "case", "\"h\"", ":", "roundTTL", "=", "60", ...
// shows an overview of all keys and their ttls and closes the iter // iter must return rows of key and ttl.
[ "shows", "an", "overview", "of", "all", "keys", "and", "their", "ttls", "and", "closes", "the", "iter", "iter", "must", "return", "rows", "of", "key", "and", "ttl", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/chunk_by_ttl.go#L30-L65
train
grafana/metrictank
conf/retention.go
ParseRetentions
// ParseRetentions parses a comma-separated list of retention definitions into
// a Retentions structure and validates the result.
// Each definition is either the legacy "secondsPerPoint:numberOfPoints" form
// or the newer "interval:ttl[:chunkspan[:numchunks[:ready]]]" form.
func ParseRetentions(defs string) (Retentions, error) {
	retentions := make(Retentions, 0)
	for i, def := range strings.Split(defs, ",") {
		def = strings.TrimSpace(def)
		parts := strings.Split(def, ":")
		if len(parts) < 2 || len(parts) > 5 {
			return nil, fmt.Errorf("bad retentions spec %q", def)
		}

		// try old format
		val1, err1 := strconv.ParseInt(parts[0], 10, 0)
		val2, err2 := strconv.ParseInt(parts[1], 10, 0)

		var retention Retention
		var err error
		if err1 == nil && err2 == nil {
			retention = NewRetention(int(val1), int(val2))
		} else {
			// try new format
			retention, err = ParseRetentionNew(def)
			if err != nil {
				return nil, err
			}
		}
		// all rollup archives (i != 0) must have an encodable span.
		if i != 0 && !schema.IsSpanValid(uint32(retention.SecondsPerPoint)) {
			return nil, fmt.Errorf("invalid retention: can't encode span of %d", retention.SecondsPerPoint)
		}

		if len(parts) >= 3 {
			// explicit chunkspan given: it must divide a "month" evenly and
			// be one of the known valid chunk spans.
			retention.ChunkSpan, err = dur.ParseNDuration(parts[2])
			if err != nil {
				return nil, err
			}
			if (Month_sec % retention.ChunkSpan) != 0 {
				return nil, errors.New("chunkSpan must fit without remainders into month_sec (28*24*60*60)")
			}
			_, ok := chunk.RevChunkSpans[retention.ChunkSpan]
			if !ok {
				return nil, fmt.Errorf("chunkSpan %s is not a valid value (https://github.com/grafana/metrictank/blob/master/docs/memory-server.md#valid-chunk-spans)", parts[2])
			}
		} else {
			// default to a valid chunkspan that can hold at least 100 points, or select the largest one otherwise.
			approxSpan := uint32(retention.SecondsPerPoint * 100)
			var span uint32
			for _, span = range chunk.ChunkSpans {
				if span >= approxSpan {
					break
				}
			}
			retention.ChunkSpan = span
		}

		retention.NumChunks = 2
		if len(parts) >= 4 {
			i, err := strconv.Atoi(parts[3])
			if err != nil {
				return nil, err
			}
			retention.NumChunks = uint32(i)
		}

		if len(parts) == 5 {
			// user is allowed to specify both a bool or a timestamp.
			// internally we map both to timestamp.
			// 0 (default) is effectively the same as 'true'
			// math.MaxUint32 is effectively the same as 'false'
			readyInt, err := strconv.ParseUint(parts[4], 10, 32)
			if err == nil {
				retention.Ready = uint32(readyInt)
			} else {
				readyBool, err := strconv.ParseBool(parts[4])
				if err != nil {
					return nil, errReadyFormat
				}
				if !readyBool {
					retention.Ready = math.MaxUint32
				}
			}
		}
		retentions = append(retentions, retention)
	}
	return retentions, retentions.Validate()
}
go
// ParseRetentions parses a comma-separated list of retention definitions into
// a Retentions structure and validates the result.
// Each definition is either the legacy "secondsPerPoint:numberOfPoints" form
// or the newer "interval:ttl[:chunkspan[:numchunks[:ready]]]" form.
func ParseRetentions(defs string) (Retentions, error) {
	retentions := make(Retentions, 0)
	for i, def := range strings.Split(defs, ",") {
		def = strings.TrimSpace(def)
		parts := strings.Split(def, ":")
		if len(parts) < 2 || len(parts) > 5 {
			return nil, fmt.Errorf("bad retentions spec %q", def)
		}

		// try old format
		val1, err1 := strconv.ParseInt(parts[0], 10, 0)
		val2, err2 := strconv.ParseInt(parts[1], 10, 0)

		var retention Retention
		var err error
		if err1 == nil && err2 == nil {
			retention = NewRetention(int(val1), int(val2))
		} else {
			// try new format
			retention, err = ParseRetentionNew(def)
			if err != nil {
				return nil, err
			}
		}
		// all rollup archives (i != 0) must have an encodable span.
		if i != 0 && !schema.IsSpanValid(uint32(retention.SecondsPerPoint)) {
			return nil, fmt.Errorf("invalid retention: can't encode span of %d", retention.SecondsPerPoint)
		}

		if len(parts) >= 3 {
			// explicit chunkspan given: it must divide a "month" evenly and
			// be one of the known valid chunk spans.
			retention.ChunkSpan, err = dur.ParseNDuration(parts[2])
			if err != nil {
				return nil, err
			}
			if (Month_sec % retention.ChunkSpan) != 0 {
				return nil, errors.New("chunkSpan must fit without remainders into month_sec (28*24*60*60)")
			}
			_, ok := chunk.RevChunkSpans[retention.ChunkSpan]
			if !ok {
				return nil, fmt.Errorf("chunkSpan %s is not a valid value (https://github.com/grafana/metrictank/blob/master/docs/memory-server.md#valid-chunk-spans)", parts[2])
			}
		} else {
			// default to a valid chunkspan that can hold at least 100 points, or select the largest one otherwise.
			approxSpan := uint32(retention.SecondsPerPoint * 100)
			var span uint32
			for _, span = range chunk.ChunkSpans {
				if span >= approxSpan {
					break
				}
			}
			retention.ChunkSpan = span
		}

		retention.NumChunks = 2
		if len(parts) >= 4 {
			i, err := strconv.Atoi(parts[3])
			if err != nil {
				return nil, err
			}
			retention.NumChunks = uint32(i)
		}

		if len(parts) == 5 {
			// user is allowed to specify both a bool or a timestamp.
			// internally we map both to timestamp.
			// 0 (default) is effectively the same as 'true'
			// math.MaxUint32 is effectively the same as 'false'
			readyInt, err := strconv.ParseUint(parts[4], 10, 32)
			if err == nil {
				retention.Ready = uint32(readyInt)
			} else {
				readyBool, err := strconv.ParseBool(parts[4])
				if err != nil {
					return nil, errReadyFormat
				}
				if !readyBool {
					retention.Ready = math.MaxUint32
				}
			}
		}
		retentions = append(retentions, retention)
	}
	return retentions, retentions.Validate()
}
[ "func", "ParseRetentions", "(", "defs", "string", ")", "(", "Retentions", ",", "error", ")", "{", "retentions", ":=", "make", "(", "Retentions", ",", "0", ")", "\n", "for", "i", ",", "def", ":=", "range", "strings", ".", "Split", "(", "defs", ",", "\...
// ParseRetentions parses retention definitions into a Retentions structure
[ "ParseRetentions", "parses", "retention", "definitions", "into", "a", "Retentions", "structure" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/retention.go#L92-L173
train
grafana/metrictank
cmd/mt-store-cat/metrics.go
getMetrics
func getMetrics(store *cassandra.CassandraStore, prefix, substr, glob string, archive schema.Archive) ([]Metric, error) { var metrics []Metric iter := store.Session.Query("select id, name from metric_idx").Iter() var m Metric var idString string for iter.Scan(&idString, &m.name) { if match(prefix, substr, glob, m) { mkey, err := schema.MKeyFromString(idString) if err != nil { panic(err) } m.AMKey = schema.AMKey{ MKey: mkey, Archive: archive, } metrics = append(metrics, m) } } err := iter.Close() if err != nil { return metrics, err } sort.Sort(MetricsByName(metrics)) return metrics, nil }
go
func getMetrics(store *cassandra.CassandraStore, prefix, substr, glob string, archive schema.Archive) ([]Metric, error) { var metrics []Metric iter := store.Session.Query("select id, name from metric_idx").Iter() var m Metric var idString string for iter.Scan(&idString, &m.name) { if match(prefix, substr, glob, m) { mkey, err := schema.MKeyFromString(idString) if err != nil { panic(err) } m.AMKey = schema.AMKey{ MKey: mkey, Archive: archive, } metrics = append(metrics, m) } } err := iter.Close() if err != nil { return metrics, err } sort.Sort(MetricsByName(metrics)) return metrics, nil }
[ "func", "getMetrics", "(", "store", "*", "cassandra", ".", "CassandraStore", ",", "prefix", ",", "substr", ",", "glob", "string", ",", "archive", "schema", ".", "Archive", ")", "(", "[", "]", "Metric", ",", "error", ")", "{", "var", "metrics", "[", "]"...
// getMetrics lists all metrics from the store matching the given condition.
[ "getMetrics", "lists", "all", "metrics", "from", "the", "store", "matching", "the", "given", "condition", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/metrics.go#L53-L77
train
grafana/metrictank
cmd/mt-store-cat/metrics.go
getMetric
func getMetric(store *cassandra.CassandraStore, amkey schema.AMKey) ([]Metric, error) { var metrics []Metric // index only stores MKey's, not AMKey's. iter := store.Session.Query("select name from metric_idx where id=? ALLOW FILTERING", amkey.MKey.String()).Iter() var m Metric for iter.Scan(&m.name) { m.AMKey = amkey metrics = append(metrics, m) } if len(metrics) > 1 { panic(fmt.Sprintf("wtf. found more than one entry for id %s: %v", amkey.String(), metrics)) } err := iter.Close() if err != nil { return metrics, err } return metrics, nil }
go
func getMetric(store *cassandra.CassandraStore, amkey schema.AMKey) ([]Metric, error) { var metrics []Metric // index only stores MKey's, not AMKey's. iter := store.Session.Query("select name from metric_idx where id=? ALLOW FILTERING", amkey.MKey.String()).Iter() var m Metric for iter.Scan(&m.name) { m.AMKey = amkey metrics = append(metrics, m) } if len(metrics) > 1 { panic(fmt.Sprintf("wtf. found more than one entry for id %s: %v", amkey.String(), metrics)) } err := iter.Close() if err != nil { return metrics, err } return metrics, nil }
[ "func", "getMetric", "(", "store", "*", "cassandra", ".", "CassandraStore", ",", "amkey", "schema", ".", "AMKey", ")", "(", "[", "]", "Metric", ",", "error", ")", "{", "var", "metrics", "[", "]", "Metric", "\n", "iter", ":=", "store", ".", "Session", ...
// getMetric returns the metric for the given AMKey
[ "getMetric", "returns", "the", "metric", "for", "the", "given", "AMKey" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/metrics.go#L80-L97
train
grafana/metrictank
consolidation/consolidate.go
ConsolidateStable
// ConsolidateStable consolidates points in a "stable" way: if you run the same
// function again so that the input receives new points at the end and old
// points get removed at the beginning, we keep picking the same points to
// consolidate together.
// interval is the interval between the input points; it returns the
// consolidated points along with the post-consolidation interval.
func ConsolidateStable(points []schema.Point, interval, maxDataPoints uint32, consolidator Consolidator) ([]schema.Point, uint32) {
	aggNum := AggEvery(uint32(len(points)), maxDataPoints)
	// note that the amount of points to strip is always < 1 postAggInterval's worth.
	// there's 2 important considerations here:
	// 1) we shouldn't make any too drastic alterations of the timerange returned compared to the requested time range
	// 2) the stripping effort shouldn't significantly alter the output otherwise things get confusing
	// these 2 remarks boil down to "the amount of points stripped should be a small fraction of the amount of input points"
	// we use this simple heuristic:
	// only nudge if we have points > 2 * postAggInterval's worth where "postAggInterval's worth is aggNum points"
	//
	// this also assures that in the special case where people request MaxDataPoints=1 we will always consolidate
	// all points together and don't trim a significant amount of the points
	// that are expected to go into the aggregation
	// e.g. consider a case where we have points with ts 140,150,160,170
	// aggNum = aggEvery(4/1) = 4, postAggInterval is thus 40.
	// strict application of the logic would return 1 point with ts=200 (aggregation of all points 170-200 which is 1 point)
	// and strip the first 3 points,
	// which is not what we want. since we only have a small set of points, better to incorporate all points into 1 bucket with ts 170.
	// note that in this case (where we don't nudge) the timestamps in output are not cleanly divisible by postAggInterval
	// we only start stripping if we have more than 2*4=8 points
	// see the unit tests which explore cases like this (TestConsolidateStableNoTrimDueToNotManyPoints)
	if len(points) > int(2*aggNum) {
		_, num := nudge(points[0].Ts, interval, aggNum)
		points = points[num:]
	}
	points = Consolidate(points, aggNum, consolidator)
	interval *= aggNum
	return points, interval
}
go
// ConsolidateStable consolidates points in a "stable" way: if you run the same
// function again so that the input receives new points at the end and old
// points get removed at the beginning, we keep picking the same points to
// consolidate together.
// interval is the interval between the input points; it returns the
// consolidated points along with the post-consolidation interval.
func ConsolidateStable(points []schema.Point, interval, maxDataPoints uint32, consolidator Consolidator) ([]schema.Point, uint32) {
	aggNum := AggEvery(uint32(len(points)), maxDataPoints)
	// note that the amount of points to strip is always < 1 postAggInterval's worth.
	// there's 2 important considerations here:
	// 1) we shouldn't make any too drastic alterations of the timerange returned compared to the requested time range
	// 2) the stripping effort shouldn't significantly alter the output otherwise things get confusing
	// these 2 remarks boil down to "the amount of points stripped should be a small fraction of the amount of input points"
	// we use this simple heuristic:
	// only nudge if we have points > 2 * postAggInterval's worth where "postAggInterval's worth is aggNum points"
	//
	// this also assures that in the special case where people request MaxDataPoints=1 we will always consolidate
	// all points together and don't trim a significant amount of the points
	// that are expected to go into the aggregation
	// e.g. consider a case where we have points with ts 140,150,160,170
	// aggNum = aggEvery(4/1) = 4, postAggInterval is thus 40.
	// strict application of the logic would return 1 point with ts=200 (aggregation of all points 170-200 which is 1 point)
	// and strip the first 3 points,
	// which is not what we want. since we only have a small set of points, better to incorporate all points into 1 bucket with ts 170.
	// note that in this case (where we don't nudge) the timestamps in output are not cleanly divisible by postAggInterval
	// we only start stripping if we have more than 2*4=8 points
	// see the unit tests which explore cases like this (TestConsolidateStableNoTrimDueToNotManyPoints)
	if len(points) > int(2*aggNum) {
		_, num := nudge(points[0].Ts, interval, aggNum)
		points = points[num:]
	}
	points = Consolidate(points, aggNum, consolidator)
	interval *= aggNum
	return points, interval
}
[ "func", "ConsolidateStable", "(", "points", "[", "]", "schema", ".", "Point", ",", "interval", ",", "maxDataPoints", "uint32", ",", "consolidator", "Consolidator", ")", "(", "[", "]", "schema", ".", "Point", ",", "uint32", ")", "{", "aggNum", ":=", "AggEve...
// ConsolidateStable consolidates points in a "stable" way, meaning if you run the same function again so that the input // receives new points at the end and old points get removed at the beginning, we keep picking the same points to consolidate together // interval is the interval between the input points
[ "ConsolidateStable", "consolidates", "points", "in", "a", "stable", "way", "meaning", "if", "you", "run", "the", "same", "function", "again", "so", "that", "the", "input", "receives", "new", "points", "at", "the", "end", "and", "old", "points", "get", "remov...
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/consolidation/consolidate.go#L79-L108
train
grafana/metrictank
api/dataprocessor.go
doRecover
// doRecover is the handler that turns panics into returns from the top level
// of getTarget. It must be invoked via defer. Runtime errors (index out of
// range, nil dereference, ...) are considered programming bugs and are
// re-panicked; any other panic value is converted to an error and stored in
// *errp.
func doRecover(errp *error) {
	switch e := recover().(type) {
	case nil:
		// no panic in flight: nothing to do
	case runtime.Error:
		// genuine runtime errors should still crash loudly
		panic(e)
	case error:
		*errp = e
	case string:
		*errp = errors.New(e)
	default:
		*errp = fmt.Errorf("%v", e)
	}
}
go
// doRecover is the handler that turns panics into returns from the top level
// of getTarget. It must be invoked via defer. Runtime errors (index out of
// range, nil dereference, ...) are considered programming bugs and are
// re-panicked; any other panic value is converted to an error and stored in
// *errp.
func doRecover(errp *error) {
	switch e := recover().(type) {
	case nil:
		// no panic in flight: nothing to do
	case runtime.Error:
		// genuine runtime errors should still crash loudly
		panic(e)
	case error:
		*errp = e
	case string:
		*errp = errors.New(e)
	default:
		*errp = fmt.Errorf("%v", e)
	}
}
[ "func", "doRecover", "(", "errp", "*", "error", ")", "{", "e", ":=", "recover", "(", ")", "\n", "if", "e", "!=", "nil", "{", "if", "_", ",", "ok", ":=", "e", ".", "(", "runtime", ".", "Error", ")", ";", "ok", "{", "panic", "(", "e", ")", "\...
// doRecover is the handler that turns panics into returns from the top level of getTarget.
[ "doRecover", "is", "the", "handler", "that", "turns", "panics", "into", "returns", "from", "the", "top", "level", "of", "getTarget", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/dataprocessor.go#L25-L40
train
grafana/metrictank
api/dataprocessor.go
getTargetsRemote
// getTargetsRemote issues the requests on other nodes and merges the results.
// it's nothing more than a thin network wrapper around getTargetsLocal of a peer:
// each peer gets one concurrent POST to /getdata with all requests destined
// for it; the first failure cancels all outstanding peer requests and is
// returned to the caller.
func (s *Server) getTargetsRemote(ctx context.Context, remoteReqs map[string][]models.Req) ([]models.Series, error) {
	// buffered for one response per peer, so goroutines can always send
	// and terminate even after we have returned early on an error.
	responses := make(chan getTargetsResp, len(remoteReqs))
	rCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	wg := sync.WaitGroup{}
	wg.Add(len(remoteReqs))
	for _, nodeReqs := range remoteReqs {
		log.Debugf("DP getTargetsRemote: handling %d reqs from %s", len(nodeReqs), nodeReqs[0].Node.GetName())
		go func(reqs []models.Req) {
			defer wg.Done()
			// all reqs in this batch target the same node.
			node := reqs[0].Node
			buf, err := node.Post(rCtx, "getTargetsRemote", "/getdata", models.GetData{Requests: reqs})
			if err != nil {
				cancel()
				responses <- getTargetsResp{nil, err}
				return
			}
			var resp models.GetDataResp
			_, err = resp.UnmarshalMsg(buf)
			if err != nil {
				cancel()
				log.Errorf("DP getTargetsRemote: error unmarshaling body from %s/getdata: %q", node.GetName(), err.Error())
				responses <- getTargetsResp{nil, err}
				return
			}
			log.Debugf("DP getTargetsRemote: %s returned %d series", node.GetName(), len(resp.Series))
			responses <- getTargetsResp{resp.Series, nil}
		}(nodeReqs)
	}

	// wait for all getTargetsRemote goroutines to end, then close our responses channel
	go func() {
		wg.Wait()
		close(responses)
	}()
	out := make([]models.Series, 0)
	for resp := range responses {
		if resp.err != nil {
			return nil, resp.err
		}
		out = append(out, resp.series...)
	}
	log.Debugf("DP getTargetsRemote: total of %d series found on peers", len(out))
	return out, nil
}
go
// getTargetsRemote issues the requests on other nodes and merges the results.
// it's nothing more than a thin network wrapper around getTargetsLocal of a peer:
// each peer gets one concurrent POST to /getdata with all requests destined
// for it; the first failure cancels all outstanding peer requests and is
// returned to the caller.
func (s *Server) getTargetsRemote(ctx context.Context, remoteReqs map[string][]models.Req) ([]models.Series, error) {
	// buffered for one response per peer, so goroutines can always send
	// and terminate even after we have returned early on an error.
	responses := make(chan getTargetsResp, len(remoteReqs))
	rCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	wg := sync.WaitGroup{}
	wg.Add(len(remoteReqs))
	for _, nodeReqs := range remoteReqs {
		log.Debugf("DP getTargetsRemote: handling %d reqs from %s", len(nodeReqs), nodeReqs[0].Node.GetName())
		go func(reqs []models.Req) {
			defer wg.Done()
			// all reqs in this batch target the same node.
			node := reqs[0].Node
			buf, err := node.Post(rCtx, "getTargetsRemote", "/getdata", models.GetData{Requests: reqs})
			if err != nil {
				cancel()
				responses <- getTargetsResp{nil, err}
				return
			}
			var resp models.GetDataResp
			_, err = resp.UnmarshalMsg(buf)
			if err != nil {
				cancel()
				log.Errorf("DP getTargetsRemote: error unmarshaling body from %s/getdata: %q", node.GetName(), err.Error())
				responses <- getTargetsResp{nil, err}
				return
			}
			log.Debugf("DP getTargetsRemote: %s returned %d series", node.GetName(), len(resp.Series))
			responses <- getTargetsResp{resp.Series, nil}
		}(nodeReqs)
	}

	// wait for all getTargetsRemote goroutines to end, then close our responses channel
	go func() {
		wg.Wait()
		close(responses)
	}()
	out := make([]models.Series, 0)
	for resp := range responses {
		if resp.err != nil {
			return nil, resp.err
		}
		out = append(out, resp.series...)
	}
	log.Debugf("DP getTargetsRemote: total of %d series found on peers", len(out))
	return out, nil
}
[ "func", "(", "s", "*", "Server", ")", "getTargetsRemote", "(", "ctx", "context", ".", "Context", ",", "remoteReqs", "map", "[", "string", "]", "[", "]", "models", ".", "Req", ")", "(", "[", "]", "models", ".", "Series", ",", "error", ")", "{", "res...
// getTargetsRemote issues the requests on other nodes // it's nothing more than a thin network wrapper around getTargetsLocal of a peer.
[ "getTargetsRemote", "issues", "the", "requests", "on", "other", "nodes", "it", "s", "nothing", "more", "than", "a", "thin", "network", "wrapper", "around", "getTargetsLocal", "of", "a", "peer", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/dataprocessor.go#L194-L239
train
grafana/metrictank
api/dataprocessor.go
getTargetsLocal
// getTargetsLocal executes the given requests on this node, running at most
// getTargetsConcurrency of them concurrently.
// The returned error is the error of the first failing target request; a
// failure (or context cancellation) cancels all in-flight sibling requests.
func (s *Server) getTargetsLocal(ctx context.Context, reqs []models.Req) ([]models.Series, error) {
	log.Debugf("DP getTargetsLocal: handling %d reqs locally", len(reqs))
	// buffered for one response per req, so worker goroutines can always send
	// and terminate even after we return early on an error.
	responses := make(chan getTargetsResp, len(reqs))

	var wg sync.WaitGroup
	reqLimiter := util.NewLimiter(getTargetsConcurrency)

	rCtx, cancel := context.WithCancel(ctx)
	defer cancel()
LOOP:
	for _, req := range reqs {
		// if there are already getDataConcurrency goroutines running, then block
		// until a slot becomes free or our context is canceled.
		if !reqLimiter.Acquire(rCtx) {
			//request canceled
			break LOOP
		}
		wg.Add(1)
		go func(req models.Req) {
			rCtx, span := tracing.NewSpan(rCtx, s.Tracer, "getTargetsLocal")
			req.Trace(span)
			pre := time.Now()
			points, interval, err := s.getTarget(rCtx, req)
			if err != nil {
				tags.Error.Set(span, true)
				cancel() // cancel all other requests.
				responses <- getTargetsResp{nil, err}
			} else {
				getTargetDuration.Value(time.Now().Sub(pre))
				responses <- getTargetsResp{[]models.Series{{
					Target:       req.Target, // always simply the metric name from index
					Datapoints:   points,
					Interval:     interval,
					QueryPatt:    req.Pattern, // foo.* or foo.bar whatever the etName arg was
					QueryFrom:    req.From,
					QueryTo:      req.To,
					QueryCons:    req.ConsReq,
					Consolidator: req.Consolidator,
				}}, nil}
			}
			wg.Done()
			// pop an item of our limiter so that other requests can be processed.
			reqLimiter.Release()
			span.Finish()
		}(req)
	}

	// close the responses channel once all workers have finished.
	go func() {
		wg.Wait()
		close(responses)
	}()

	out := make([]models.Series, 0, len(reqs))
	for resp := range responses {
		if resp.err != nil {
			return nil, resp.err
		}
		out = append(out, resp.series...)
	}
	log.Debugf("DP getTargetsLocal: %d series found locally", len(out))
	return out, nil
}
go
// getTargetsLocal executes the given requests on this node, running at most
// getTargetsConcurrency of them concurrently.
// The returned error is the error of the first failing target request; a
// failure (or context cancellation) cancels all in-flight sibling requests.
func (s *Server) getTargetsLocal(ctx context.Context, reqs []models.Req) ([]models.Series, error) {
	log.Debugf("DP getTargetsLocal: handling %d reqs locally", len(reqs))
	// buffered for one response per req, so worker goroutines can always send
	// and terminate even after we return early on an error.
	responses := make(chan getTargetsResp, len(reqs))

	var wg sync.WaitGroup
	reqLimiter := util.NewLimiter(getTargetsConcurrency)

	rCtx, cancel := context.WithCancel(ctx)
	defer cancel()
LOOP:
	for _, req := range reqs {
		// if there are already getDataConcurrency goroutines running, then block
		// until a slot becomes free or our context is canceled.
		if !reqLimiter.Acquire(rCtx) {
			//request canceled
			break LOOP
		}
		wg.Add(1)
		go func(req models.Req) {
			rCtx, span := tracing.NewSpan(rCtx, s.Tracer, "getTargetsLocal")
			req.Trace(span)
			pre := time.Now()
			points, interval, err := s.getTarget(rCtx, req)
			if err != nil {
				tags.Error.Set(span, true)
				cancel() // cancel all other requests.
				responses <- getTargetsResp{nil, err}
			} else {
				getTargetDuration.Value(time.Now().Sub(pre))
				responses <- getTargetsResp{[]models.Series{{
					Target:       req.Target, // always simply the metric name from index
					Datapoints:   points,
					Interval:     interval,
					QueryPatt:    req.Pattern, // foo.* or foo.bar whatever the etName arg was
					QueryFrom:    req.From,
					QueryTo:      req.To,
					QueryCons:    req.ConsReq,
					Consolidator: req.Consolidator,
				}}, nil}
			}
			wg.Done()
			// pop an item of our limiter so that other requests can be processed.
			reqLimiter.Release()
			span.Finish()
		}(req)
	}

	// close the responses channel once all workers have finished.
	go func() {
		wg.Wait()
		close(responses)
	}()

	out := make([]models.Series, 0, len(reqs))
	for resp := range responses {
		if resp.err != nil {
			return nil, resp.err
		}
		out = append(out, resp.series...)
	}
	log.Debugf("DP getTargetsLocal: %d series found locally", len(out))
	return out, nil
}
[ "func", "(", "s", "*", "Server", ")", "getTargetsLocal", "(", "ctx", "context", ".", "Context", ",", "reqs", "[", "]", "models", ".", "Req", ")", "(", "[", "]", "models", ".", "Series", ",", "error", ")", "{", "log", ".", "Debugf", "(", "\"DP getTa...
// error is the error of the first failing target request
[ "error", "is", "the", "error", "of", "the", "first", "failing", "target", "request" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/dataprocessor.go#L242-L302
train
grafana/metrictank
api/dataprocessor.go
mergeSeries
func mergeSeries(in []models.Series) []models.Series { type segment struct { target string query string from uint32 to uint32 con consolidation.Consolidator } seriesByTarget := make(map[segment][]models.Series) for _, series := range in { s := segment{ series.Target, series.QueryPatt, series.QueryFrom, series.QueryTo, series.Consolidator, } seriesByTarget[s] = append(seriesByTarget[s], series) } merged := make([]models.Series, len(seriesByTarget)) i := 0 for _, series := range seriesByTarget { if len(series) == 1 { merged[i] = series[0] } else { //we use the first series in the list as our result. We check over every // point and if it is null, we then check the other series for a non null // value to use instead. log.Debugf("DP mergeSeries: %s has multiple series.", series[0].Target) for i := range series[0].Datapoints { for j := 0; j < len(series); j++ { if !math.IsNaN(series[j].Datapoints[i].Val) { series[0].Datapoints[i].Val = series[j].Datapoints[i].Val break } } } merged[i] = series[0] } i++ } return merged }
go
func mergeSeries(in []models.Series) []models.Series { type segment struct { target string query string from uint32 to uint32 con consolidation.Consolidator } seriesByTarget := make(map[segment][]models.Series) for _, series := range in { s := segment{ series.Target, series.QueryPatt, series.QueryFrom, series.QueryTo, series.Consolidator, } seriesByTarget[s] = append(seriesByTarget[s], series) } merged := make([]models.Series, len(seriesByTarget)) i := 0 for _, series := range seriesByTarget { if len(series) == 1 { merged[i] = series[0] } else { //we use the first series in the list as our result. We check over every // point and if it is null, we then check the other series for a non null // value to use instead. log.Debugf("DP mergeSeries: %s has multiple series.", series[0].Target) for i := range series[0].Datapoints { for j := 0; j < len(series); j++ { if !math.IsNaN(series[j].Datapoints[i].Val) { series[0].Datapoints[i].Val = series[j].Datapoints[i].Val break } } } merged[i] = series[0] } i++ } return merged }
[ "func", "mergeSeries", "(", "in", "[", "]", "models", ".", "Series", ")", "[", "]", "models", ".", "Series", "{", "type", "segment", "struct", "{", "target", "string", "\n", "query", "string", "\n", "from", "uint32", "\n", "to", "uint32", "\n", "con", ...
// check for duplicate series names for the same query. If found merge the results.
[ "check", "for", "duplicate", "series", "names", "for", "the", "same", "query", ".", "If", "found", "merge", "the", "results", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/dataprocessor.go#L592-L634
train
grafana/metrictank
mdata/cwr.go
NewChunkWriteRequest
func NewChunkWriteRequest(metric *AggMetric, key schema.AMKey, chunk *chunk.Chunk, ttl, span uint32, ts time.Time) ChunkWriteRequest { return ChunkWriteRequest{metric, key, chunk, ttl, span, ts} }
go
func NewChunkWriteRequest(metric *AggMetric, key schema.AMKey, chunk *chunk.Chunk, ttl, span uint32, ts time.Time) ChunkWriteRequest { return ChunkWriteRequest{metric, key, chunk, ttl, span, ts} }
[ "func", "NewChunkWriteRequest", "(", "metric", "*", "AggMetric", ",", "key", "schema", ".", "AMKey", ",", "chunk", "*", "chunk", ".", "Chunk", ",", "ttl", ",", "span", "uint32", ",", "ts", "time", ".", "Time", ")", "ChunkWriteRequest", "{", "return", "Ch...
// NewChunkWriteRequest creates a new ChunkWriteRequest
[ "NewChunkWriteRequest", "creates", "a", "new", "ChunkWriteRequest" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/cwr.go#L21-L23
train
grafana/metrictank
mdata/chunk/tsz/tszlong.go
Iter
func (s *SeriesLong) Iter() *IterLong { s.Lock() w := s.bw.clone() s.Unlock() finishV2(w) iter, _ := bstreamIteratorLong(s.T0, w) return iter }
go
func (s *SeriesLong) Iter() *IterLong { s.Lock() w := s.bw.clone() s.Unlock() finishV2(w) iter, _ := bstreamIteratorLong(s.T0, w) return iter }
[ "func", "(", "s", "*", "SeriesLong", ")", "Iter", "(", ")", "*", "IterLong", "{", "s", ".", "Lock", "(", ")", "\n", "w", ":=", "s", ".", "bw", ".", "clone", "(", ")", "\n", "s", ".", "Unlock", "(", ")", "\n", "finishV2", "(", "w", ")", "\n"...
// IterLong lets you iterate over a series. It is not concurrency-safe.
[ "IterLong", "lets", "you", "iterate", "over", "a", "series", ".", "It", "is", "not", "concurrency", "-", "safe", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tszlong.go#L143-L151
train
grafana/metrictank
mdata/chunk/tsz/tszlong.go
NewIteratorLong
func NewIteratorLong(t0 uint32, b []byte) (*IterLong, error) { return bstreamIteratorLong(t0, newBReader(b)) }
go
func NewIteratorLong(t0 uint32, b []byte) (*IterLong, error) { return bstreamIteratorLong(t0, newBReader(b)) }
[ "func", "NewIteratorLong", "(", "t0", "uint32", ",", "b", "[", "]", "byte", ")", "(", "*", "IterLong", ",", "error", ")", "{", "return", "bstreamIteratorLong", "(", "t0", ",", "newBReader", "(", "b", ")", ")", "\n", "}" ]
// NewIteratorLong for the series
[ "NewIteratorLong", "for", "the", "series" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tszlong.go#L182-L184
train
grafana/metrictank
mdata/chunk/tsz/tszlong.go
Next
func (it *IterLong) Next() bool { if it.err != nil || it.finished { return false } var first bool if it.t == 0 { it.t = it.T0 first = true } // read delta-of-delta dod, ok := it.dod() if !ok { return false } it.tDelta += uint32(dod) it.t = it.t + it.tDelta if first { // first point. read the float raw v, err := it.br.readBits(64) if err != nil { it.err = err return false } it.val = math.Float64frombits(v) return true } // read compressed value bit, err := it.br.readBit() if err != nil { it.err = err return false } if bit == zero { // it.val = it.val } else { bit, itErr := it.br.readBit() if itErr != nil { it.err = err return false } if bit == zero { // reuse leading/trailing zero bits // it.leading, it.trailing = it.leading, it.trailing } else { bits, err := it.br.readBits(5) if err != nil { it.err = err return false } it.leading = uint8(bits) bits, err = it.br.readBits(6) if err != nil { it.err = err return false } mbits := uint8(bits) // 0 significant bits here means we overflowed and we actually need 64; see comment in encoder if mbits == 0 { mbits = 64 } it.trailing = 64 - it.leading - mbits } mbits := int(64 - it.leading - it.trailing) bits, err := it.br.readBits(mbits) if err != nil { it.err = err return false } vbits := math.Float64bits(it.val) vbits ^= (bits << it.trailing) it.val = math.Float64frombits(vbits) } return true }
go
func (it *IterLong) Next() bool { if it.err != nil || it.finished { return false } var first bool if it.t == 0 { it.t = it.T0 first = true } // read delta-of-delta dod, ok := it.dod() if !ok { return false } it.tDelta += uint32(dod) it.t = it.t + it.tDelta if first { // first point. read the float raw v, err := it.br.readBits(64) if err != nil { it.err = err return false } it.val = math.Float64frombits(v) return true } // read compressed value bit, err := it.br.readBit() if err != nil { it.err = err return false } if bit == zero { // it.val = it.val } else { bit, itErr := it.br.readBit() if itErr != nil { it.err = err return false } if bit == zero { // reuse leading/trailing zero bits // it.leading, it.trailing = it.leading, it.trailing } else { bits, err := it.br.readBits(5) if err != nil { it.err = err return false } it.leading = uint8(bits) bits, err = it.br.readBits(6) if err != nil { it.err = err return false } mbits := uint8(bits) // 0 significant bits here means we overflowed and we actually need 64; see comment in encoder if mbits == 0 { mbits = 64 } it.trailing = 64 - it.leading - mbits } mbits := int(64 - it.leading - it.trailing) bits, err := it.br.readBits(mbits) if err != nil { it.err = err return false } vbits := math.Float64bits(it.val) vbits ^= (bits << it.trailing) it.val = math.Float64frombits(vbits) } return true }
[ "func", "(", "it", "*", "IterLong", ")", "Next", "(", ")", "bool", "{", "if", "it", ".", "err", "!=", "nil", "||", "it", ".", "finished", "{", "return", "false", "\n", "}", "\n", "var", "first", "bool", "\n", "if", "it", ".", "t", "==", "0", ...
// Next iteration of the series iterator
[ "Next", "iteration", "of", "the", "series", "iterator" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/chunk/tsz/tszlong.go#L241-L325
train
grafana/metrictank
stats/tick.go
tick
func tick(period time.Duration) chan time.Time { ch := make(chan time.Time) go func() { for { now := time.Now() nowUnix := now.UnixNano() diff := period - (time.Duration(nowUnix) % period) ideal := now.Add(diff) time.Sleep(diff) // try to write, if it blocks, skip the tick select { case ch <- ideal: default: } } }() return ch }
go
func tick(period time.Duration) chan time.Time { ch := make(chan time.Time) go func() { for { now := time.Now() nowUnix := now.UnixNano() diff := period - (time.Duration(nowUnix) % period) ideal := now.Add(diff) time.Sleep(diff) // try to write, if it blocks, skip the tick select { case ch <- ideal: default: } } }() return ch }
[ "func", "tick", "(", "period", "time", ".", "Duration", ")", "chan", "time", ".", "Time", "{", "ch", ":=", "make", "(", "chan", "time", ".", "Time", ")", "\n", "go", "func", "(", ")", "{", "for", "{", "now", ":=", "time", ".", "Now", "(", ")", ...
// provides "clean" ticks at precise intervals, and delivers them shortly after
[ "provides", "clean", "ticks", "at", "precise", "intervals", "and", "delivers", "them", "shortly", "after" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/stats/tick.go#L6-L24
train
grafana/metrictank
api/models/request.go
Trace
func (r Req) Trace(span opentracing.Span) { span.SetTag("key", r.MKey) span.SetTag("target", r.Target) span.SetTag("pattern", r.Pattern) span.SetTag("from", r.From) span.SetTag("to", r.To) span.SetTag("span", r.To-r.From-1) span.SetTag("mdp", r.MaxPoints) span.SetTag("rawInterval", r.RawInterval) span.SetTag("cons", r.Consolidator) span.SetTag("consReq", r.ConsReq) span.SetTag("schemaId", r.SchemaId) span.SetTag("aggId", r.AggId) span.SetTag("archive", r.Archive) span.SetTag("archInterval", r.ArchInterval) span.SetTag("TTL", r.TTL) span.SetTag("outInterval", r.OutInterval) span.SetTag("aggNum", r.AggNum) }
go
func (r Req) Trace(span opentracing.Span) { span.SetTag("key", r.MKey) span.SetTag("target", r.Target) span.SetTag("pattern", r.Pattern) span.SetTag("from", r.From) span.SetTag("to", r.To) span.SetTag("span", r.To-r.From-1) span.SetTag("mdp", r.MaxPoints) span.SetTag("rawInterval", r.RawInterval) span.SetTag("cons", r.Consolidator) span.SetTag("consReq", r.ConsReq) span.SetTag("schemaId", r.SchemaId) span.SetTag("aggId", r.AggId) span.SetTag("archive", r.Archive) span.SetTag("archInterval", r.ArchInterval) span.SetTag("TTL", r.TTL) span.SetTag("outInterval", r.OutInterval) span.SetTag("aggNum", r.AggNum) }
[ "func", "(", "r", "Req", ")", "Trace", "(", "span", "opentracing", ".", "Span", ")", "{", "span", ".", "SetTag", "(", "\"key\"", ",", "r", ".", "MKey", ")", "\n", "span", ".", "SetTag", "(", "\"target\"", ",", "r", ".", "Target", ")", "\n", "span...
// Trace puts all request properties as tags in a span // good for when a span deals with 1 request
[ "Trace", "puts", "all", "request", "properties", "as", "tags", "in", "a", "span", "good", "for", "when", "a", "span", "deals", "with", "1", "request" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/models/request.go#L75-L93
train
grafana/metrictank
api/models/request.go
TraceLog
func (r Req) TraceLog(span opentracing.Span) { span.LogFields( log.Object("key", r.MKey), log.String("target", r.Target), log.String("pattern", r.Pattern), log.Int("from", int(r.From)), log.Int("to", int(r.To)), log.Int("span", int(r.To-r.From-1)), log.Int("mdp", int(r.MaxPoints)), log.Int("rawInterval", int(r.RawInterval)), log.String("cons", r.Consolidator.String()), log.String("consReq", r.ConsReq.String()), log.Int("schemaId", int(r.SchemaId)), log.Int("aggId", int(r.AggId)), log.Int("archive", r.Archive), log.Int("archInterval", int(r.ArchInterval)), log.Int("TTL", int(r.TTL)), log.Int("outInterval", int(r.OutInterval)), log.Int("aggNum", int(r.AggNum)), ) }
go
func (r Req) TraceLog(span opentracing.Span) { span.LogFields( log.Object("key", r.MKey), log.String("target", r.Target), log.String("pattern", r.Pattern), log.Int("from", int(r.From)), log.Int("to", int(r.To)), log.Int("span", int(r.To-r.From-1)), log.Int("mdp", int(r.MaxPoints)), log.Int("rawInterval", int(r.RawInterval)), log.String("cons", r.Consolidator.String()), log.String("consReq", r.ConsReq.String()), log.Int("schemaId", int(r.SchemaId)), log.Int("aggId", int(r.AggId)), log.Int("archive", r.Archive), log.Int("archInterval", int(r.ArchInterval)), log.Int("TTL", int(r.TTL)), log.Int("outInterval", int(r.OutInterval)), log.Int("aggNum", int(r.AggNum)), ) }
[ "func", "(", "r", "Req", ")", "TraceLog", "(", "span", "opentracing", ".", "Span", ")", "{", "span", ".", "LogFields", "(", "log", ".", "Object", "(", "\"key\"", ",", "r", ".", "MKey", ")", ",", "log", ".", "String", "(", "\"target\"", ",", "r", ...
// TraceLog puts all request properties in a span log entry // good for when a span deals with multiple requests // note that the amount of data generated here can be up to // 1000~1500 bytes
[ "TraceLog", "puts", "all", "request", "properties", "in", "a", "span", "log", "entry", "good", "for", "when", "a", "span", "deals", "with", "multiple", "requests", "note", "that", "the", "amount", "of", "data", "generated", "here", "can", "be", "up", "to",...
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/models/request.go#L99-L119
train
grafana/metrictank
conf/aggregations.go
NewAggregations
func NewAggregations() Aggregations { return Aggregations{ Data: make([]Aggregation, 0), DefaultAggregation: Aggregation{ Name: "default", Pattern: regexp.MustCompile(".*"), XFilesFactor: 0.5, AggregationMethod: []Method{Avg}, }, } }
go
func NewAggregations() Aggregations { return Aggregations{ Data: make([]Aggregation, 0), DefaultAggregation: Aggregation{ Name: "default", Pattern: regexp.MustCompile(".*"), XFilesFactor: 0.5, AggregationMethod: []Method{Avg}, }, } }
[ "func", "NewAggregations", "(", ")", "Aggregations", "{", "return", "Aggregations", "{", "Data", ":", "make", "(", "[", "]", "Aggregation", ",", "0", ")", ",", "DefaultAggregation", ":", "Aggregation", "{", "Name", ":", "\"default\"", ",", "Pattern", ":", ...
// NewAggregations create instance of Aggregations
[ "NewAggregations", "create", "instance", "of", "Aggregations" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/aggregations.go#L26-L36
train
grafana/metrictank
conf/aggregations.go
ReadAggregations
func ReadAggregations(file string) (Aggregations, error) { config, err := configparser.Read(file) if err != nil { return Aggregations{}, err } sections, err := config.AllSections() if err != nil { return Aggregations{}, err } result := NewAggregations() for _, s := range sections { item := Aggregation{} item.Name = strings.Trim(strings.SplitN(s.String(), "\n", 2)[0], " []") if item.Name == "" || strings.HasPrefix(item.Name, "#") { continue } item.Pattern, err = regexp.Compile(s.ValueOf("pattern")) if err != nil { return Aggregations{}, fmt.Errorf("[%s]: failed to parse pattern %q: %s", item.Name, s.ValueOf("pattern"), err.Error()) } item.XFilesFactor, err = strconv.ParseFloat(s.ValueOf("xFilesFactor"), 64) if err != nil { return Aggregations{}, fmt.Errorf("[%s]: failed to parse xFilesFactor %q: %s", item.Name, s.ValueOf("xFilesFactor"), err.Error()) } aggregationMethodStr := s.ValueOf("aggregationMethod") methodStrs := strings.Split(aggregationMethodStr, ",") for _, methodStr := range methodStrs { switch methodStr { case "average", "avg": item.AggregationMethod = append(item.AggregationMethod, Avg) case "sum": item.AggregationMethod = append(item.AggregationMethod, Sum) case "last": item.AggregationMethod = append(item.AggregationMethod, Lst) case "max": item.AggregationMethod = append(item.AggregationMethod, Max) case "min": item.AggregationMethod = append(item.AggregationMethod, Min) default: return result, fmt.Errorf("[%s]: unknown aggregation method %q", item.Name, methodStr) } } result.Data = append(result.Data, item) } return result, nil }
go
func ReadAggregations(file string) (Aggregations, error) { config, err := configparser.Read(file) if err != nil { return Aggregations{}, err } sections, err := config.AllSections() if err != nil { return Aggregations{}, err } result := NewAggregations() for _, s := range sections { item := Aggregation{} item.Name = strings.Trim(strings.SplitN(s.String(), "\n", 2)[0], " []") if item.Name == "" || strings.HasPrefix(item.Name, "#") { continue } item.Pattern, err = regexp.Compile(s.ValueOf("pattern")) if err != nil { return Aggregations{}, fmt.Errorf("[%s]: failed to parse pattern %q: %s", item.Name, s.ValueOf("pattern"), err.Error()) } item.XFilesFactor, err = strconv.ParseFloat(s.ValueOf("xFilesFactor"), 64) if err != nil { return Aggregations{}, fmt.Errorf("[%s]: failed to parse xFilesFactor %q: %s", item.Name, s.ValueOf("xFilesFactor"), err.Error()) } aggregationMethodStr := s.ValueOf("aggregationMethod") methodStrs := strings.Split(aggregationMethodStr, ",") for _, methodStr := range methodStrs { switch methodStr { case "average", "avg": item.AggregationMethod = append(item.AggregationMethod, Avg) case "sum": item.AggregationMethod = append(item.AggregationMethod, Sum) case "last": item.AggregationMethod = append(item.AggregationMethod, Lst) case "max": item.AggregationMethod = append(item.AggregationMethod, Max) case "min": item.AggregationMethod = append(item.AggregationMethod, Min) default: return result, fmt.Errorf("[%s]: unknown aggregation method %q", item.Name, methodStr) } } result.Data = append(result.Data, item) } return result, nil }
[ "func", "ReadAggregations", "(", "file", "string", ")", "(", "Aggregations", ",", "error", ")", "{", "config", ",", "err", ":=", "configparser", ".", "Read", "(", "file", ")", "\n", "if", "err", "!=", "nil", "{", "return", "Aggregations", "{", "}", ","...
// ReadAggregations returns the defined aggregations from a storage-aggregation.conf file // and adds the default
[ "ReadAggregations", "returns", "the", "defined", "aggregations", "from", "a", "storage", "-", "aggregation", ".", "conf", "file", "and", "adds", "the", "default" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/aggregations.go#L40-L92
train
grafana/metrictank
conf/aggregations.go
Match
func (a Aggregations) Match(metric string) (uint16, Aggregation) { for i, s := range a.Data { if s.Pattern.MatchString(metric) { return uint16(i), s } } return uint16(len(a.Data)), a.DefaultAggregation }
go
func (a Aggregations) Match(metric string) (uint16, Aggregation) { for i, s := range a.Data { if s.Pattern.MatchString(metric) { return uint16(i), s } } return uint16(len(a.Data)), a.DefaultAggregation }
[ "func", "(", "a", "Aggregations", ")", "Match", "(", "metric", "string", ")", "(", "uint16", ",", "Aggregation", ")", "{", "for", "i", ",", "s", ":=", "range", "a", ".", "Data", "{", "if", "s", ".", "Pattern", ".", "MatchString", "(", "metric", ")"...
// Match returns the correct aggregation setting for the given metric // it can always find a valid setting, because there's a default catch all // also returns the index of the setting, to efficiently reference it
[ "Match", "returns", "the", "correct", "aggregation", "setting", "for", "the", "given", "metric", "it", "can", "always", "find", "a", "valid", "setting", "because", "there", "s", "a", "default", "catch", "all", "also", "returns", "the", "index", "of", "the", ...
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/aggregations.go#L97-L104
train
grafana/metrictank
conf/aggregations.go
Get
func (a Aggregations) Get(i uint16) Aggregation { if i+1 > uint16(len(a.Data)) { return a.DefaultAggregation } return a.Data[i] }
go
func (a Aggregations) Get(i uint16) Aggregation { if i+1 > uint16(len(a.Data)) { return a.DefaultAggregation } return a.Data[i] }
[ "func", "(", "a", "Aggregations", ")", "Get", "(", "i", "uint16", ")", "Aggregation", "{", "if", "i", "+", "1", ">", "uint16", "(", "len", "(", "a", ".", "Data", ")", ")", "{", "return", "a", ".", "DefaultAggregation", "\n", "}", "\n", "return", ...
// Get returns the aggregation setting corresponding to the given index
[ "Get", "returns", "the", "aggregation", "setting", "corresponding", "to", "the", "given", "index" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/conf/aggregations.go#L107-L112
train
grafana/metrictank
idx/idx.go
NewArchiveBare
func NewArchiveBare(name string) Archive { return Archive{ MetricDefinition: schema.MetricDefinition{ Name: name, }, } }
go
func NewArchiveBare(name string) Archive { return Archive{ MetricDefinition: schema.MetricDefinition{ Name: name, }, } }
[ "func", "NewArchiveBare", "(", "name", "string", ")", "Archive", "{", "return", "Archive", "{", "MetricDefinition", ":", "schema", ".", "MetricDefinition", "{", "Name", ":", "name", ",", "}", ",", "}", "\n", "}" ]
// used primarily by tests, for convenience
[ "used", "primarily", "by", "tests", "for", "convenience" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/idx.go#L30-L36
train
grafana/metrictank
api/middleware/stats.go
RequestStats
func RequestStats() macaron.Handler { stats := requestStats{ responseCounts: make(map[string]map[int]*stats.Counter32), latencyHistograms: make(map[string]*stats.LatencyHistogram15s32), sizeMeters: make(map[string]*stats.Meter32), } return func(ctx *macaron.Context) { start := time.Now() rw := ctx.Resp.(macaron.ResponseWriter) // call next handler. This will return after all handlers // have completed and the request has been sent. ctx.Next() status := rw.Status() path := pathSlug(ctx.Req.URL.Path) // graphite cluster requests use local=1 // this way we can differentiate "full" render requests from client to MT (encompassing data processing, proxing to graphite, etc) // from "subrequests" where metrictank is called by graphite and graphite does the processing and returns to the client if ctx.Req.Request.Form.Get("local") == "1" { path += "-local" } stats.PathStatusCount(path, status) stats.PathLatency(path, time.Since(start)) // only record the request size if the request succeeded. if status < 300 { stats.PathSize(path, rw.Size()) } } }
go
func RequestStats() macaron.Handler { stats := requestStats{ responseCounts: make(map[string]map[int]*stats.Counter32), latencyHistograms: make(map[string]*stats.LatencyHistogram15s32), sizeMeters: make(map[string]*stats.Meter32), } return func(ctx *macaron.Context) { start := time.Now() rw := ctx.Resp.(macaron.ResponseWriter) // call next handler. This will return after all handlers // have completed and the request has been sent. ctx.Next() status := rw.Status() path := pathSlug(ctx.Req.URL.Path) // graphite cluster requests use local=1 // this way we can differentiate "full" render requests from client to MT (encompassing data processing, proxing to graphite, etc) // from "subrequests" where metrictank is called by graphite and graphite does the processing and returns to the client if ctx.Req.Request.Form.Get("local") == "1" { path += "-local" } stats.PathStatusCount(path, status) stats.PathLatency(path, time.Since(start)) // only record the request size if the request succeeded. if status < 300 { stats.PathSize(path, rw.Size()) } } }
[ "func", "RequestStats", "(", ")", "macaron", ".", "Handler", "{", "stats", ":=", "requestStats", "{", "responseCounts", ":", "make", "(", "map", "[", "string", "]", "map", "[", "int", "]", "*", "stats", ".", "Counter32", ")", ",", "latencyHistograms", ":...
// RequestStats returns a middleware that tracks request metrics.
[ "RequestStats", "returns", "a", "middleware", "that", "tracks", "request", "metrics", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/middleware/stats.go#L64-L92
train
grafana/metrictank
api/response/error.go
WrapErrorForTagDB
func WrapErrorForTagDB(e error) *ErrorResp { b, err := json.Marshal(TagDBError{Error: e.Error()}) if err != nil { return &ErrorResp{ err: "{\"error\": \"failed to encode error message\"}", code: http.StatusInternalServerError, } } resp := &ErrorResp{ err: string(b), code: http.StatusInternalServerError, } if _, ok := e.(Error); ok { resp.code = e.(Error).Code() } resp.ValidateAndFixCode() return resp }
go
func WrapErrorForTagDB(e error) *ErrorResp { b, err := json.Marshal(TagDBError{Error: e.Error()}) if err != nil { return &ErrorResp{ err: "{\"error\": \"failed to encode error message\"}", code: http.StatusInternalServerError, } } resp := &ErrorResp{ err: string(b), code: http.StatusInternalServerError, } if _, ok := e.(Error); ok { resp.code = e.(Error).Code() } resp.ValidateAndFixCode() return resp }
[ "func", "WrapErrorForTagDB", "(", "e", "error", ")", "*", "ErrorResp", "{", "b", ",", "err", ":=", "json", ".", "Marshal", "(", "TagDBError", "{", "Error", ":", "e", ".", "Error", "(", ")", "}", ")", "\n", "if", "err", "!=", "nil", "{", "return", ...
// graphite's http tagdb client requires a specific error format
[ "graphite", "s", "http", "tagdb", "client", "requires", "a", "specific", "error", "format" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/response/error.go#L43-L63
train
grafana/metrictank
idx/cassandra/config.go
NewIdxConfig
func NewIdxConfig() *IdxConfig { return &IdxConfig{ Enabled: true, hosts: "localhost:9042", keyspace: "metrictank", consistency: "one", timeout: time.Second, numConns: 10, writeQueueSize: 100000, updateCassIdx: true, updateInterval: time.Hour * 3, pruneInterval: time.Hour * 3, protoVer: 4, createKeyspace: true, schemaFile: "/etc/metrictank/schema-idx-cassandra.toml", disableInitialHostLookup: false, ssl: false, capath: "/etc/metrictank/ca.pem", hostverification: true, auth: false, username: "cassandra", password: "cassandra", initLoadConcurrency: 1, } }
go
func NewIdxConfig() *IdxConfig { return &IdxConfig{ Enabled: true, hosts: "localhost:9042", keyspace: "metrictank", consistency: "one", timeout: time.Second, numConns: 10, writeQueueSize: 100000, updateCassIdx: true, updateInterval: time.Hour * 3, pruneInterval: time.Hour * 3, protoVer: 4, createKeyspace: true, schemaFile: "/etc/metrictank/schema-idx-cassandra.toml", disableInitialHostLookup: false, ssl: false, capath: "/etc/metrictank/ca.pem", hostverification: true, auth: false, username: "cassandra", password: "cassandra", initLoadConcurrency: 1, } }
[ "func", "NewIdxConfig", "(", ")", "*", "IdxConfig", "{", "return", "&", "IdxConfig", "{", "Enabled", ":", "true", ",", "hosts", ":", "\"localhost:9042\"", ",", "keyspace", ":", "\"metrictank\"", ",", "consistency", ":", "\"one\"", ",", "timeout", ":", "time"...
// NewIdxConfig returns IdxConfig with default values set.
[ "NewIdxConfig", "returns", "IdxConfig", "with", "default", "values", "set", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/config.go#L46-L70
train
grafana/metrictank
idx/cassandra/config.go
Validate
func (cfg *IdxConfig) Validate() error { if cfg.pruneInterval == 0 { return errors.New("pruneInterval must be greater then 0. " + timeUnits) } if cfg.timeout == 0 { return errors.New("timeout must be greater than 0. " + timeUnits) } return nil }
go
func (cfg *IdxConfig) Validate() error { if cfg.pruneInterval == 0 { return errors.New("pruneInterval must be greater then 0. " + timeUnits) } if cfg.timeout == 0 { return errors.New("timeout must be greater than 0. " + timeUnits) } return nil }
[ "func", "(", "cfg", "*", "IdxConfig", ")", "Validate", "(", ")", "error", "{", "if", "cfg", ".", "pruneInterval", "==", "0", "{", "return", "errors", ".", "New", "(", "\"pruneInterval must be greater then 0. \"", "+", "timeUnits", ")", "\n", "}", "\n", "if...
// Validate validates IdxConfig settings
[ "Validate", "validates", "IdxConfig", "settings" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/cassandra/config.go#L73-L81
train
grafana/metrictank
mdata/aggmetric.go
NewAggMetric
func NewAggMetric(store Store, cachePusher cache.CachePusher, key schema.AMKey, retentions conf.Retentions, reorderWindow, interval uint32, agg *conf.Aggregation, dropFirstChunk bool) *AggMetric { // note: during parsing of retentions, we assure there's at least 1. ret := retentions[0] m := AggMetric{ cachePusher: cachePusher, store: store, key: key, chunkSpan: ret.ChunkSpan, numChunks: ret.NumChunks, chunks: make([]*chunk.Chunk, 0, ret.NumChunks), dropFirstChunk: dropFirstChunk, ttl: uint32(ret.MaxRetention()), // we set LastWrite here to make sure a new Chunk doesn't get immediately // garbage collected right after creating it, before we can push to it. lastWrite: uint32(time.Now().Unix()), } if reorderWindow != 0 { m.rob = NewReorderBuffer(reorderWindow, interval) } for _, ret := range retentions[1:] { m.aggregators = append(m.aggregators, NewAggregator(store, cachePusher, key, ret, *agg, dropFirstChunk)) } return &m }
go
func NewAggMetric(store Store, cachePusher cache.CachePusher, key schema.AMKey, retentions conf.Retentions, reorderWindow, interval uint32, agg *conf.Aggregation, dropFirstChunk bool) *AggMetric { // note: during parsing of retentions, we assure there's at least 1. ret := retentions[0] m := AggMetric{ cachePusher: cachePusher, store: store, key: key, chunkSpan: ret.ChunkSpan, numChunks: ret.NumChunks, chunks: make([]*chunk.Chunk, 0, ret.NumChunks), dropFirstChunk: dropFirstChunk, ttl: uint32(ret.MaxRetention()), // we set LastWrite here to make sure a new Chunk doesn't get immediately // garbage collected right after creating it, before we can push to it. lastWrite: uint32(time.Now().Unix()), } if reorderWindow != 0 { m.rob = NewReorderBuffer(reorderWindow, interval) } for _, ret := range retentions[1:] { m.aggregators = append(m.aggregators, NewAggregator(store, cachePusher, key, ret, *agg, dropFirstChunk)) } return &m }
[ "func", "NewAggMetric", "(", "store", "Store", ",", "cachePusher", "cache", ".", "CachePusher", ",", "key", "schema", ".", "AMKey", ",", "retentions", "conf", ".", "Retentions", ",", "reorderWindow", ",", "interval", "uint32", ",", "agg", "*", "conf", ".", ...
// NewAggMetric creates a metric with given key, it retains the given number of chunks each chunkSpan seconds long // it optionally also creates aggregations with the given settings // the 0th retention is the native archive of this metric. if there's several others, we create aggregators, using agg. // it's the callers responsibility to make sure agg is not nil in that case!
[ "NewAggMetric", "creates", "a", "metric", "with", "given", "key", "it", "retains", "the", "given", "number", "of", "chunks", "each", "chunkSpan", "seconds", "long", "it", "optionally", "also", "creates", "aggregations", "with", "the", "given", "settings", "the",...
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L53-L80
train
grafana/metrictank
mdata/aggmetric.go
addAggregators
func (a *AggMetric) addAggregators(ts uint32, val float64) { for _, agg := range a.aggregators { log.Debugf("AM: %s pushing %d,%f to aggregator %d", a.key, ts, val, agg.span) agg.Add(ts, val) } }
go
func (a *AggMetric) addAggregators(ts uint32, val float64) { for _, agg := range a.aggregators { log.Debugf("AM: %s pushing %d,%f to aggregator %d", a.key, ts, val, agg.span) agg.Add(ts, val) } }
[ "func", "(", "a", "*", "AggMetric", ")", "addAggregators", "(", "ts", "uint32", ",", "val", "float64", ")", "{", "for", "_", ",", "agg", ":=", "range", "a", ".", "aggregators", "{", "log", ".", "Debugf", "(", "\"AM: %s pushing %d,%f to aggregator %d\"", ",...
// caller must hold lock
[ "caller", "must", "hold", "lock" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L315-L320
train
grafana/metrictank
mdata/aggmetric.go
pushToCache
func (a *AggMetric) pushToCache(c *chunk.Chunk) { if a.cachePusher == nil { return } // push into cache intervalHint := a.key.Archive.Span() itergen, err := chunk.NewIterGen(c.Series.T0, intervalHint, c.Encode(a.chunkSpan)) if err != nil { log.Errorf("AM: %s failed to generate IterGen. this should never happen: %s", a.key, err) } go a.cachePusher.AddIfHot(a.key, 0, itergen) }
go
func (a *AggMetric) pushToCache(c *chunk.Chunk) { if a.cachePusher == nil { return } // push into cache intervalHint := a.key.Archive.Span() itergen, err := chunk.NewIterGen(c.Series.T0, intervalHint, c.Encode(a.chunkSpan)) if err != nil { log.Errorf("AM: %s failed to generate IterGen. this should never happen: %s", a.key, err) } go a.cachePusher.AddIfHot(a.key, 0, itergen) }
[ "func", "(", "a", "*", "AggMetric", ")", "pushToCache", "(", "c", "*", "chunk", ".", "Chunk", ")", "{", "if", "a", ".", "cachePusher", "==", "nil", "{", "return", "\n", "}", "\n", "intervalHint", ":=", "a", ".", "key", ".", "Archive", ".", "Span", ...
// pushToCache adds the chunk into the cache if it is hot // caller must hold lock
[ "pushToCache", "adds", "the", "chunk", "into", "the", "cache", "if", "it", "is", "hot", "caller", "must", "hold", "lock" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L324-L336
train
grafana/metrictank
mdata/aggmetric.go
Add
func (a *AggMetric) Add(ts uint32, val float64) { a.Lock() defer a.Unlock() if a.rob == nil { // write directly a.add(ts, val) } else { // write through reorder buffer res, err := a.rob.Add(ts, val) if err == nil { if len(res) == 0 { a.lastWrite = uint32(time.Now().Unix()) } else { for _, p := range res { a.add(p.Ts, p.Val) } } } else { log.Debugf("AM: failed to add metric to reorder buffer for %s. %s", a.key, err) a.discardedMetricsInc(err) } } }
go
func (a *AggMetric) Add(ts uint32, val float64) { a.Lock() defer a.Unlock() if a.rob == nil { // write directly a.add(ts, val) } else { // write through reorder buffer res, err := a.rob.Add(ts, val) if err == nil { if len(res) == 0 { a.lastWrite = uint32(time.Now().Unix()) } else { for _, p := range res { a.add(p.Ts, p.Val) } } } else { log.Debugf("AM: failed to add metric to reorder buffer for %s. %s", a.key, err) a.discardedMetricsInc(err) } } }
[ "func", "(", "a", "*", "AggMetric", ")", "Add", "(", "ts", "uint32", ",", "val", "float64", ")", "{", "a", ".", "Lock", "(", ")", "\n", "defer", "a", ".", "Unlock", "(", ")", "\n", "if", "a", ".", "rob", "==", "nil", "{", "a", ".", "add", "...
// don't ever call with a ts of 0, cause we use 0 to mean not initialized!
[ "don", "t", "ever", "call", "with", "a", "ts", "of", "0", "cause", "we", "use", "0", "to", "mean", "not", "initialized!" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L405-L429
train
grafana/metrictank
mdata/aggmetric.go
GC
func (a *AggMetric) GC(now, chunkMinTs, metricMinTs uint32) (uint32, bool) { a.Lock() defer a.Unlock() // unless it looks like the AggMetric is collectable, abort and mark as not stale if !a.collectable(now, chunkMinTs) { return 0, false } // make sure any points in the reorderBuffer are moved into our chunks so we can save the data if a.rob != nil { tmpLastWrite := a.lastWrite pts := a.rob.Flush() for _, p := range pts { a.add(p.Ts, p.Val) } // adding points will cause our lastWrite to be updated, but we want to keep the old value a.lastWrite = tmpLastWrite } // this aggMetric has never had metrics written to it. if len(a.chunks) == 0 { return a.gcAggregators(now, chunkMinTs, metricMinTs) } currentChunk := a.chunks[a.currentChunkPos] // we must check collectable again. Imagine this scenario: // * we didn't have any chunks when calling collectable() the first time so it returned true // * data from the ROB is flushed and moved into a new chunk // * this new chunk is active so we're not collectable, even though earlier we thought we were. if !a.collectable(now, chunkMinTs) { return 0, false } if !currentChunk.Series.Finished { // chunk hasn't been written to in a while, and is not yet closed. // Let's close it and persist it if we are a primary log.Debugf("AM: Found stale Chunk, adding end-of-stream bytes. key: %v T0: %d", a.key, currentChunk.Series.T0) currentChunk.Finish() a.pushToCache(currentChunk) if cluster.Manager.IsPrimary() { log.Debugf("AM: persist(): node is primary, saving chunk. %v T0: %d", a.key, currentChunk.Series.T0) // persist the chunk. If the writeQueue is full, then this will block. a.persist(a.currentChunkPos) } } var points uint32 for _, chunk := range a.chunks { points += chunk.NumPoints } p, stale := a.gcAggregators(now, chunkMinTs, metricMinTs) points += p return points, stale && a.lastWrite < metricMinTs }
go
// GC returns whether or not this AggMetric is stale and can be removed, and its
// point count if so.
// chunkMinTs  -> min timestamp of a chunk before it is considered stale and is persisted to the store
// metricMinTs -> min timestamp for a metric before it is considered stale and is purged from the tank
func (a *AggMetric) GC(now, chunkMinTs, metricMinTs uint32) (uint32, bool) {
	a.Lock()
	defer a.Unlock()

	// unless it looks like the AggMetric is collectable, abort and mark as not stale
	if !a.collectable(now, chunkMinTs) {
		return 0, false
	}

	// make sure any points in the reorderBuffer are moved into our chunks so we can save the data
	if a.rob != nil {
		tmpLastWrite := a.lastWrite
		pts := a.rob.Flush()
		for _, p := range pts {
			a.add(p.Ts, p.Val)
		}

		// adding points will cause our lastWrite to be updated, but we want to keep the old value
		a.lastWrite = tmpLastWrite
	}

	// this aggMetric has never had metrics written to it.
	if len(a.chunks) == 0 {
		return a.gcAggregators(now, chunkMinTs, metricMinTs)
	}

	currentChunk := a.chunks[a.currentChunkPos]

	// we must check collectable again. Imagine this scenario:
	// * we didn't have any chunks when calling collectable() the first time so it returned true
	// * data from the ROB is flushed and moved into a new chunk
	// * this new chunk is active so we're not collectable, even though earlier we thought we were.
	if !a.collectable(now, chunkMinTs) {
		return 0, false
	}

	if !currentChunk.Series.Finished {
		// chunk hasn't been written to in a while, and is not yet closed.
		// Let's close it and persist it if we are a primary
		log.Debugf("AM: Found stale Chunk, adding end-of-stream bytes. key: %v T0: %d", a.key, currentChunk.Series.T0)
		currentChunk.Finish()
		a.pushToCache(currentChunk)
		if cluster.Manager.IsPrimary() {
			log.Debugf("AM: persist(): node is primary, saving chunk. %v T0: %d", a.key, currentChunk.Series.T0)
			// persist the chunk. If the writeQueue is full, then this will block.
			a.persist(a.currentChunkPos)
		}
	}

	// tally the point count across all raw chunks plus the aggregators
	var points uint32
	for _, chunk := range a.chunks {
		points += chunk.NumPoints
	}
	p, stale := a.gcAggregators(now, chunkMinTs, metricMinTs)
	points += p
	// only report stale if the aggregators are stale AND we haven't been written to recently
	return points, stale && a.lastWrite < metricMinTs
}
[ "func", "(", "a", "*", "AggMetric", ")", "GC", "(", "now", ",", "chunkMinTs", ",", "metricMinTs", "uint32", ")", "(", "uint32", ",", "bool", ")", "{", "a", ".", "Lock", "(", ")", "\n", "defer", "a", ".", "Unlock", "(", ")", "\n", "if", "!", "a"...
// GC returns whether or not this AggMetric is stale and can be removed, and its pointcount if so // chunkMinTs -> min timestamp of a chunk before to be considered stale and to be persisted to Cassandra // metricMinTs -> min timestamp for a metric before to be considered stale and to be purged from the tank
[ "GC", "returns", "whether", "or", "not", "this", "AggMetric", "is", "stale", "and", "can", "be", "removed", "and", "its", "pointcount", "if", "so", "chunkMinTs", "-", ">", "min", "timestamp", "of", "a", "chunk", "before", "to", "be", "considered", "stale",...
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L558-L614
train
grafana/metrictank
mdata/aggmetric.go
gcAggregators
func (a *AggMetric) gcAggregators(now, chunkMinTs, metricMinTs uint32) (uint32, bool) { var points uint32 stale := true for _, agg := range a.aggregators { p, s := agg.GC(now, chunkMinTs, metricMinTs, a.lastWrite) points += p stale = stale && s } return points, stale }
go
func (a *AggMetric) gcAggregators(now, chunkMinTs, metricMinTs uint32) (uint32, bool) { var points uint32 stale := true for _, agg := range a.aggregators { p, s := agg.GC(now, chunkMinTs, metricMinTs, a.lastWrite) points += p stale = stale && s } return points, stale }
[ "func", "(", "a", "*", "AggMetric", ")", "gcAggregators", "(", "now", ",", "chunkMinTs", ",", "metricMinTs", "uint32", ")", "(", "uint32", ",", "bool", ")", "{", "var", "points", "uint32", "\n", "stale", ":=", "true", "\n", "for", "_", ",", "agg", ":...
// gcAggregators returns whether all aggregators are stale and can be removed, and their pointcount if so
[ "gcAggregators", "returns", "whether", "all", "aggregators", "are", "stale", "and", "can", "be", "removed", "and", "their", "pointcount", "if", "so" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/mdata/aggmetric.go#L617-L626
train
grafana/metrictank
idx/memory/find_cache.go
Purge
func (c *FindCache) Purge(orgId uint32) { c.RLock() cache, ok := c.cache[orgId] c.RUnlock() if !ok { return } cache.Purge() }
go
func (c *FindCache) Purge(orgId uint32) { c.RLock() cache, ok := c.cache[orgId] c.RUnlock() if !ok { return } cache.Purge() }
[ "func", "(", "c", "*", "FindCache", ")", "Purge", "(", "orgId", "uint32", ")", "{", "c", ".", "RLock", "(", ")", "\n", "cache", ",", "ok", ":=", "c", ".", "cache", "[", "orgId", "]", "\n", "c", ".", "RUnlock", "(", ")", "\n", "if", "!", "ok",...
// Purge clears the cache for the specified orgId
[ "Purge", "clears", "the", "cache", "for", "the", "specified", "orgId" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L129-L137
train
grafana/metrictank
idx/memory/find_cache.go
PurgeAll
func (c *FindCache) PurgeAll() { c.RLock() orgs := make([]uint32, len(c.cache)) i := 0 for k := range c.cache { orgs[i] = k i++ } c.RUnlock() for _, org := range orgs { c.Purge(org) } }
go
func (c *FindCache) PurgeAll() { c.RLock() orgs := make([]uint32, len(c.cache)) i := 0 for k := range c.cache { orgs[i] = k i++ } c.RUnlock() for _, org := range orgs { c.Purge(org) } }
[ "func", "(", "c", "*", "FindCache", ")", "PurgeAll", "(", ")", "{", "c", ".", "RLock", "(", ")", "\n", "orgs", ":=", "make", "(", "[", "]", "uint32", ",", "len", "(", "c", ".", "cache", ")", ")", "\n", "i", ":=", "0", "\n", "for", "k", ":="...
// PurgeAll clears the caches for all orgIds
[ "PurgeAll", "clears", "the", "caches", "for", "all", "orgIds" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L140-L152
train
grafana/metrictank
idx/memory/find_cache.go
InvalidateFor
func (c *FindCache) InvalidateFor(orgId uint32, path string) { c.Lock() findCacheInvalidationsReceived.Inc() defer c.Unlock() if c.backoff { findCacheInvalidationsDropped.Inc() return } cache, ok := c.cache[orgId] if !ok || cache.Len() < 1 { findCacheInvalidationsDropped.Inc() return } req := invalidateRequest{ orgId: orgId, path: path, } select { case c.invalidateReqs <- req: default: c.triggerBackoff() } }
go
func (c *FindCache) InvalidateFor(orgId uint32, path string) { c.Lock() findCacheInvalidationsReceived.Inc() defer c.Unlock() if c.backoff { findCacheInvalidationsDropped.Inc() return } cache, ok := c.cache[orgId] if !ok || cache.Len() < 1 { findCacheInvalidationsDropped.Inc() return } req := invalidateRequest{ orgId: orgId, path: path, } select { case c.invalidateReqs <- req: default: c.triggerBackoff() } }
[ "func", "(", "c", "*", "FindCache", ")", "InvalidateFor", "(", "orgId", "uint32", ",", "path", "string", ")", "{", "c", ".", "Lock", "(", ")", "\n", "findCacheInvalidationsReceived", ".", "Inc", "(", ")", "\n", "defer", "c", ".", "Unlock", "(", ")", ...
// InvalidateFor removes entries from the cache for 'orgId' // that match the provided path. If lots of InvalidateFor calls // are made at once and we end up with `invalidateQueueSize` concurrent // goroutines processing the invalidations, we purge the cache and // disable it for `backoffTime`. Future InvalidateFor calls made during // the backoff time will then return immediately.
[ "InvalidateFor", "removes", "entries", "from", "the", "cache", "for", "orgId", "that", "match", "the", "provided", "path", ".", "If", "lots", "of", "InvalidateFor", "calls", "are", "made", "at", "once", "and", "we", "end", "up", "with", "invalidateQueueSize", ...
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L160-L184
train
grafana/metrictank
idx/memory/find_cache.go
triggerBackoff
func (c *FindCache) triggerBackoff() { log.Infof("memory-idx: findCache invalidate-queue full. Disabling cache for %s", c.backoffTime.String()) findCacheBackoff.Inc() c.backoff = true time.AfterFunc(c.backoffTime, func() { findCacheBackoff.Dec() c.Lock() c.backoff = false c.Unlock() }) c.cache = make(map[uint32]*lru.Cache) // drain queue L: for { select { case <-c.invalidateReqs: default: break L } } }
go
// triggerBackoff disables the cache for backoffTime, drops all cached entries
// and drains the pending invalidation queue.
// caller must hold lock!
func (c *FindCache) triggerBackoff() {
	log.Infof("memory-idx: findCache invalidate-queue full. Disabling cache for %s", c.backoffTime.String())
	findCacheBackoff.Inc()
	c.backoff = true
	// re-enable the cache once the backoff period has elapsed
	time.AfterFunc(c.backoffTime, func() {
		findCacheBackoff.Dec()
		c.Lock()
		c.backoff = false
		c.Unlock()
	})
	// purge by dropping all per-org caches at once
	c.cache = make(map[uint32]*lru.Cache)
	// drain queue
L:
	for {
		select {
		case <-c.invalidateReqs:
		default:
			break L
		}
	}
}
[ "func", "(", "c", "*", "FindCache", ")", "triggerBackoff", "(", ")", "{", "log", ".", "Infof", "(", "\"memory-idx: findCache invalidate-queue full. Disabling cache for %s\"", ",", "c", ".", "backoffTime", ".", "String", "(", ")", ")", "\n", "findCacheBackoff", "."...
// caller must hold lock!
[ "caller", "must", "hold", "lock!" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L187-L207
train
grafana/metrictank
idx/memory/find_cache.go
PurgeFindCache
func (p *PartitionedMemoryIdx) PurgeFindCache() { for _, m := range p.Partition { if m.findCache != nil { m.findCache.PurgeAll() } } }
go
// PurgeFindCache purges the findCaches for all orgIds across all partitions.
func (p *PartitionedMemoryIdx) PurgeFindCache() {
	for _, partition := range p.Partition {
		if partition.findCache == nil {
			continue
		}
		partition.findCache.PurgeAll()
	}
}
[ "func", "(", "p", "*", "PartitionedMemoryIdx", ")", "PurgeFindCache", "(", ")", "{", "for", "_", ",", "m", ":=", "range", "p", ".", "Partition", "{", "if", "m", ".", "findCache", "!=", "nil", "{", "m", ".", "findCache", ".", "PurgeAll", "(", ")", "...
// PurgeFindCache purges the findCaches for all orgIds // across all partitions
[ "PurgeFindCache", "purges", "the", "findCaches", "for", "all", "orgIds", "across", "all", "partitions" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L351-L357
train
grafana/metrictank
idx/memory/find_cache.go
ForceInvalidationFindCache
func (p *PartitionedMemoryIdx) ForceInvalidationFindCache() { for _, m := range p.Partition { if m.findCache != nil { m.findCache.forceInvalidation() } } }
go
// ForceInvalidationFindCache forces a full invalidation cycle of the find
// cache in every partition.
func (p *PartitionedMemoryIdx) ForceInvalidationFindCache() {
	for _, partition := range p.Partition {
		if partition.findCache == nil {
			continue
		}
		partition.findCache.forceInvalidation()
	}
}
[ "func", "(", "p", "*", "PartitionedMemoryIdx", ")", "ForceInvalidationFindCache", "(", ")", "{", "for", "_", ",", "m", ":=", "range", "p", ".", "Partition", "{", "if", "m", ".", "findCache", "!=", "nil", "{", "m", ".", "findCache", ".", "forceInvalidatio...
// ForceInvalidationFindCache forces a full invalidation cycle of the find cache
[ "ForceInvalidationFindCache", "forces", "a", "full", "invalidation", "cycle", "of", "the", "find", "cache" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/idx/memory/find_cache.go#L360-L366
train
grafana/metrictank
cmd/mt-store-cat/tables.go
getTables
func getTables(store *cassandra.CassandraStore, match string) ([]cassandra.Table, error) { var tables []cassandra.Table if match == "*" || match == "" { for _, table := range store.TTLTables { if table.Name == "metric_idx" || !strings.HasPrefix(table.Name, "metric_") { continue } tables = append(tables, table) } sort.Sort(TablesByTTL(tables)) } else { for _, table := range store.TTLTables { if table.Name == match { tables = append(tables, table) return tables, nil } } return nil, fmt.Errorf("table %q not found", match) } return tables, nil }
go
func getTables(store *cassandra.CassandraStore, match string) ([]cassandra.Table, error) { var tables []cassandra.Table if match == "*" || match == "" { for _, table := range store.TTLTables { if table.Name == "metric_idx" || !strings.HasPrefix(table.Name, "metric_") { continue } tables = append(tables, table) } sort.Sort(TablesByTTL(tables)) } else { for _, table := range store.TTLTables { if table.Name == match { tables = append(tables, table) return tables, nil } } return nil, fmt.Errorf("table %q not found", match) } return tables, nil }
[ "func", "getTables", "(", "store", "*", "cassandra", ".", "CassandraStore", ",", "match", "string", ")", "(", "[", "]", "cassandra", ".", "Table", ",", "error", ")", "{", "var", "tables", "[", "]", "cassandra", ".", "Table", "\n", "if", "match", "==", ...
// getTables returns the requested cassandra store tables in TTL asc order based on match string
[ "getTables", "returns", "the", "requested", "cassandra", "store", "tables", "in", "TTL", "asc", "order", "based", "on", "match", "string" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/tables.go#L19-L39
train
grafana/metrictank
cmd/mt-store-cat/tables.go
printTables
func printTables(store *cassandra.CassandraStore) { tables, err := getTables(store, "") if err != nil { log.Fatal(err.Error()) } for _, table := range tables { fmt.Printf("%s (%d hours <= ttl < %d hours)\n", table.Name, table.TTL, table.TTL*2) } }
go
func printTables(store *cassandra.CassandraStore) { tables, err := getTables(store, "") if err != nil { log.Fatal(err.Error()) } for _, table := range tables { fmt.Printf("%s (%d hours <= ttl < %d hours)\n", table.Name, table.TTL, table.TTL*2) } }
[ "func", "printTables", "(", "store", "*", "cassandra", ".", "CassandraStore", ")", "{", "tables", ",", "err", ":=", "getTables", "(", "store", ",", "\"\"", ")", "\n", "if", "err", "!=", "nil", "{", "log", ".", "Fatal", "(", "err", ".", "Error", "(", ...
//printTables prints all tables in the store
[ "printTables", "prints", "all", "tables", "in", "the", "store" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/cmd/mt-store-cat/tables.go#L42-L50
train
grafana/metrictank
api/prometheus_querier.go
Querier
func (s *Server) Querier(ctx context.Context, min, max int64) (storage.Querier, error) { from := uint32(min / 1000) to := uint32(max / 1000) return NewQuerier(ctx, s, from, to, ctx.Value(orgID("org-id")).(uint32), false), nil }
go
func (s *Server) Querier(ctx context.Context, min, max int64) (storage.Querier, error) { from := uint32(min / 1000) to := uint32(max / 1000) return NewQuerier(ctx, s, from, to, ctx.Value(orgID("org-id")).(uint32), false), nil }
[ "func", "(", "s", "*", "Server", ")", "Querier", "(", "ctx", "context", ".", "Context", ",", "min", ",", "max", "int64", ")", "(", "storage", ".", "Querier", ",", "error", ")", "{", "from", ":=", "uint32", "(", "min", "/", "1000", ")", "\n", "to"...
// Querier creates a new querier that will operate on the subject server // it needs the org-id stored in a context value
[ "Querier", "creates", "a", "new", "querier", "that", "will", "operate", "on", "the", "subject", "server", "it", "needs", "the", "org", "-", "id", "stored", "in", "a", "context", "value" ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/prometheus_querier.go#L21-L25
train
grafana/metrictank
api/prometheus_querier.go
Select
func (q *querier) Select(matchers ...*labels.Matcher) (storage.SeriesSet, error) { minFrom := uint32(math.MaxUint32) var maxTo uint32 var target string var reqs []models.Req expressions := []string{} for _, matcher := range matchers { if matcher.Name == model.MetricNameLabel { matcher.Name = "name" } if matcher.Type == labels.MatchNotRegexp { expressions = append(expressions, fmt.Sprintf("%s!=~%s", matcher.Name, matcher.Value)) } else { expressions = append(expressions, fmt.Sprintf("%s%s%s", matcher.Name, matcher.Type, matcher.Value)) } } series, err := q.clusterFindByTag(q.ctx, q.OrgID, expressions, 0, maxSeriesPerReq) if err != nil { return nil, err } if q.metadataOnly { return BuildMetadataSeriesSet(series) } minFrom = util.Min(minFrom, q.from) maxTo = util.Max(maxTo, q.to) for _, s := range series { for _, metric := range s.Series { for _, archive := range metric.Defs { consReq := consolidation.None fn := mdata.Aggregations.Get(archive.AggId).AggregationMethod[0] cons := consolidation.Consolidator(fn) newReq := models.NewReq(archive.Id, archive.NameWithTags(), target, q.from, q.to, math.MaxUint32, uint32(archive.Interval), cons, consReq, s.Node, archive.SchemaId, archive.AggId) reqs = append(reqs, newReq) } } } select { case <-q.ctx.Done(): //request canceled return nil, fmt.Errorf("request canceled") default: } reqRenderSeriesCount.Value(len(reqs)) if len(reqs) == 0 { return nil, fmt.Errorf("no series found") } // note: if 1 series has a movingAvg that requires a long time range extension, it may push other reqs into another archive. can be optimized later reqs, _, _, err = alignRequests(uint32(time.Now().Unix()), minFrom, maxTo, reqs) if err != nil { log.Errorf("HTTP Render alignReq error: %s", err.Error()) return nil, err } out, err := q.getTargets(q.ctx, reqs) if err != nil { log.Errorf("HTTP Render %s", err.Error()) return nil, err } return SeriesToSeriesSet(out) }
go
// Select returns a set of series that matches the given label matchers.
// The matchers are translated to metrictank tag query expressions and resolved
// across the cluster; unless the querier is metadata-only, the matching series
// are then fetched and returned as a SeriesSet.
func (q *querier) Select(matchers ...*labels.Matcher) (storage.SeriesSet, error) {
	minFrom := uint32(math.MaxUint32)
	var maxTo uint32
	var target string
	var reqs []models.Req

	// translate the prometheus matchers into tag query expressions
	expressions := []string{}
	for _, matcher := range matchers {
		if matcher.Name == model.MetricNameLabel {
			// the prometheus metric name label maps to metrictank's "name" tag
			matcher.Name = "name"
		}
		if matcher.Type == labels.MatchNotRegexp {
			expressions = append(expressions, fmt.Sprintf("%s!=~%s", matcher.Name, matcher.Value))
		} else {
			expressions = append(expressions, fmt.Sprintf("%s%s%s", matcher.Name, matcher.Type, matcher.Value))
		}
	}

	series, err := q.clusterFindByTag(q.ctx, q.OrgID, expressions, 0, maxSeriesPerReq)
	if err != nil {
		return nil, err
	}
	if q.metadataOnly {
		return BuildMetadataSeriesSet(series)
	}

	minFrom = util.Min(minFrom, q.from)
	maxTo = util.Max(maxTo, q.to)

	// build one fetch request per archive of every matching metric
	for _, s := range series {
		for _, metric := range s.Series {
			for _, archive := range metric.Defs {
				consReq := consolidation.None
				// use the first configured aggregation method for this archive
				fn := mdata.Aggregations.Get(archive.AggId).AggregationMethod[0]
				cons := consolidation.Consolidator(fn)

				newReq := models.NewReq(archive.Id, archive.NameWithTags(), target, q.from, q.to, math.MaxUint32, uint32(archive.Interval), cons, consReq, s.Node, archive.SchemaId, archive.AggId)
				reqs = append(reqs, newReq)
			}
		}
	}

	// bail out early if the request got canceled while we were planning
	select {
	case <-q.ctx.Done():
		//request canceled
		return nil, fmt.Errorf("request canceled")
	default:
	}

	reqRenderSeriesCount.Value(len(reqs))
	if len(reqs) == 0 {
		return nil, fmt.Errorf("no series found")
	}

	// note: if 1 series has a movingAvg that requires a long time range extension, it may push other reqs into another archive. can be optimized later
	reqs, _, _, err = alignRequests(uint32(time.Now().Unix()), minFrom, maxTo, reqs)
	if err != nil {
		log.Errorf("HTTP Render alignReq error: %s", err.Error())
		return nil, err
	}

	out, err := q.getTargets(q.ctx, reqs)
	if err != nil {
		log.Errorf("HTTP Render %s", err.Error())
		return nil, err
	}

	return SeriesToSeriesSet(out)
}
[ "func", "(", "q", "*", "querier", ")", "Select", "(", "matchers", "...", "*", "labels", ".", "Matcher", ")", "(", "storage", ".", "SeriesSet", ",", "error", ")", "{", "minFrom", ":=", "uint32", "(", "math", ".", "MaxUint32", ")", "\n", "var", "maxTo"...
// Select returns a set of series that matches the given label matchers.
[ "Select", "returns", "a", "set", "of", "series", "that", "matches", "the", "given", "label", "matchers", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/prometheus_querier.go#L49-L117
train
grafana/metrictank
api/prometheus_querier.go
LabelValues
func (q *querier) LabelValues(name string) ([]string, error) { expressions := []string{"name=~[a-zA-Z_][a-zA-Z0-9_]*$"} if name == model.MetricNameLabel { name = "name" expressions = append(expressions, "name=~[a-zA-Z_:][a-zA-Z0-9_:]*$") } return q.MetricIndex.FindTagValues(q.OrgID, name, "", expressions, 0, 100000) }
go
func (q *querier) LabelValues(name string) ([]string, error) { expressions := []string{"name=~[a-zA-Z_][a-zA-Z0-9_]*$"} if name == model.MetricNameLabel { name = "name" expressions = append(expressions, "name=~[a-zA-Z_:][a-zA-Z0-9_:]*$") } return q.MetricIndex.FindTagValues(q.OrgID, name, "", expressions, 0, 100000) }
[ "func", "(", "q", "*", "querier", ")", "LabelValues", "(", "name", "string", ")", "(", "[", "]", "string", ",", "error", ")", "{", "expressions", ":=", "[", "]", "string", "{", "\"name=~[a-zA-Z_][a-zA-Z0-9_]*$\"", "}", "\n", "if", "name", "==", "model", ...
// LabelValues returns all potential values for a label name.
[ "LabelValues", "returns", "all", "potential", "values", "for", "a", "label", "name", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/prometheus_querier.go#L120-L127
train
grafana/metrictank
api/graphite.go
closestAggMethod
func closestAggMethod(requested consolidation.Consolidator, available []conf.Method) consolidation.Consolidator { // if there is only 1 consolidation method available, then that is all we can return. if len(available) == 1 { return consolidation.Consolidator(available[0]) } avail := map[consolidation.Consolidator]struct{}{} for _, a := range available { avail[consolidation.Consolidator(a)] = struct{}{} } var orderOfPreference []consolidation.Consolidator orderOfPreference, ok := rollupPreference[requested] if !ok { return consolidation.Consolidator(available[0]) } for _, p := range orderOfPreference { if _, ok := avail[p]; ok { return p } } // fall back to the default aggregation method. return consolidation.Consolidator(available[0]) }
go
func closestAggMethod(requested consolidation.Consolidator, available []conf.Method) consolidation.Consolidator { // if there is only 1 consolidation method available, then that is all we can return. if len(available) == 1 { return consolidation.Consolidator(available[0]) } avail := map[consolidation.Consolidator]struct{}{} for _, a := range available { avail[consolidation.Consolidator(a)] = struct{}{} } var orderOfPreference []consolidation.Consolidator orderOfPreference, ok := rollupPreference[requested] if !ok { return consolidation.Consolidator(available[0]) } for _, p := range orderOfPreference { if _, ok := avail[p]; ok { return p } } // fall back to the default aggregation method. return consolidation.Consolidator(available[0]) }
[ "func", "closestAggMethod", "(", "requested", "consolidation", ".", "Consolidator", ",", "available", "[", "]", "conf", ".", "Method", ")", "consolidation", ".", "Consolidator", "{", "if", "len", "(", "available", ")", "==", "1", "{", "return", "consolidation"...
// find the best consolidation method based on what was requested and what aggregations are available.
[ "find", "the", "best", "consolidation", "method", "based", "on", "what", "was", "requested", "and", "what", "aggregations", "are", "available", "." ]
dd9b92db72d27553d9a8214bff5f01d2531f63b0
https://github.com/grafana/metrictank/blob/dd9b92db72d27553d9a8214bff5f01d2531f63b0/api/graphite.go#L760-L782
train
lightninglabs/neutrino
mock_store.go
newMockBlockHeaderStore
func newMockBlockHeaderStore() headerfs.BlockHeaderStore { return &mockBlockHeaderStore{ headers: make(map[chainhash.Hash]wire.BlockHeader), } }
go
// newMockBlockHeaderStore returns a BlockHeaderStore backed by an in-memory
// map. It is meant for unit testing components that depend on the
// BlockHeaderStore interface.
func newMockBlockHeaderStore() headerfs.BlockHeaderStore {
	store := &mockBlockHeaderStore{
		headers: make(map[chainhash.Hash]wire.BlockHeader),
	}
	return store
}
[ "func", "newMockBlockHeaderStore", "(", ")", "headerfs", ".", "BlockHeaderStore", "{", "return", "&", "mockBlockHeaderStore", "{", "headers", ":", "make", "(", "map", "[", "chainhash", ".", "Hash", "]", "wire", ".", "BlockHeader", ")", ",", "}", "\n", "}" ]
// NewMockBlockHeaderStore returns a version of the BlockHeaderStore that's // backed by an in-memory map. This instance is meant to be used by callers // outside the package to unit test components that require a BlockHeaderStore // interface.
[ "NewMockBlockHeaderStore", "returns", "a", "version", "of", "the", "BlockHeaderStore", "that", "s", "backed", "by", "an", "in", "-", "memory", "map", ".", "This", "instance", "is", "meant", "to", "be", "used", "by", "callers", "outside", "the", "package", "t...
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/mock_store.go#L27-L31
train
lightninglabs/neutrino
cache/cacheable_block.go
Size
func (c *CacheableBlock) Size() (uint64, error) { return uint64(c.Block.MsgBlock().SerializeSize()), nil }
go
// Size returns the serialized size of this block in bytes.
func (c *CacheableBlock) Size() (uint64, error) {
	size := c.Block.MsgBlock().SerializeSize()
	return uint64(size), nil
}
[ "func", "(", "c", "*", "CacheableBlock", ")", "Size", "(", ")", "(", "uint64", ",", "error", ")", "{", "return", "uint64", "(", "c", ".", "Block", ".", "MsgBlock", "(", ")", ".", "SerializeSize", "(", ")", ")", ",", "nil", "\n", "}" ]
// Size returns size of this block in bytes.
[ "Size", "returns", "size", "of", "this", "block", "in", "bytes", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/cache/cacheable_block.go#L12-L14
train
lightninglabs/neutrino
headerfs/index.go
newHeaderIndex
func newHeaderIndex(db walletdb.DB, indexType HeaderType) (*headerIndex, error) { // As an initially step, we'll attempt to create all the buckets // necessary for functioning of the index. If these buckets has already // been created, then we can exit early. err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) error { _, err := tx.CreateTopLevelBucket(indexBucket) return err }) if err != nil && err != walletdb.ErrBucketExists { return nil, err } return &headerIndex{ db: db, indexType: indexType, }, nil }
go
func newHeaderIndex(db walletdb.DB, indexType HeaderType) (*headerIndex, error) { // As an initially step, we'll attempt to create all the buckets // necessary for functioning of the index. If these buckets has already // been created, then we can exit early. err := walletdb.Update(db, func(tx walletdb.ReadWriteTx) error { _, err := tx.CreateTopLevelBucket(indexBucket) return err }) if err != nil && err != walletdb.ErrBucketExists { return nil, err } return &headerIndex{ db: db, indexType: indexType, }, nil }
[ "func", "newHeaderIndex", "(", "db", "walletdb", ".", "DB", ",", "indexType", "HeaderType", ")", "(", "*", "headerIndex", ",", "error", ")", "{", "err", ":=", "walletdb", ".", "Update", "(", "db", ",", "func", "(", "tx", "walletdb", ".", "ReadWriteTx", ...
// newHeaderIndex creates a new headerIndex given an already open database, and // a particular header type.
[ "newHeaderIndex", "creates", "a", "new", "headerIndex", "given", "an", "already", "open", "database", "and", "a", "particular", "header", "type", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/index.go#L83-L100
train
lightninglabs/neutrino
headerfs/index.go
addHeaders
func (h *headerIndex) addHeaders(batch headerBatch) error { // If we're writing a 0-length batch, make no changes and return. if len(batch) == 0 { return nil } // In order to ensure optimal write performance, we'll ensure that the // items are sorted by their hash before insertion into the database. sort.Sort(batch) return walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error { rootBucket := tx.ReadWriteBucket(indexBucket) var tipKey []byte // Based on the specified index type of this instance of the // index, we'll grab the key that tracks the tip of the chain // so we can update the index once all the header entries have // been updated. // TODO(roasbeef): only need block tip? switch h.indexType { case Block: tipKey = bitcoinTip case RegularFilter: tipKey = regFilterTip default: return fmt.Errorf("unknown index type: %v", h.indexType) } var ( chainTipHash chainhash.Hash chainTipHeight uint32 ) for _, header := range batch { var heightBytes [4]byte binary.BigEndian.PutUint32(heightBytes[:], header.height) err := rootBucket.Put(header.hash[:], heightBytes[:]) if err != nil { return err } // TODO(roasbeef): need to remedy if side-chain // tracking added if header.height >= chainTipHeight { chainTipHash = header.hash chainTipHeight = header.height } } return rootBucket.Put(tipKey, chainTipHash[:]) }) }
go
func (h *headerIndex) addHeaders(batch headerBatch) error { // If we're writing a 0-length batch, make no changes and return. if len(batch) == 0 { return nil } // In order to ensure optimal write performance, we'll ensure that the // items are sorted by their hash before insertion into the database. sort.Sort(batch) return walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error { rootBucket := tx.ReadWriteBucket(indexBucket) var tipKey []byte // Based on the specified index type of this instance of the // index, we'll grab the key that tracks the tip of the chain // so we can update the index once all the header entries have // been updated. // TODO(roasbeef): only need block tip? switch h.indexType { case Block: tipKey = bitcoinTip case RegularFilter: tipKey = regFilterTip default: return fmt.Errorf("unknown index type: %v", h.indexType) } var ( chainTipHash chainhash.Hash chainTipHeight uint32 ) for _, header := range batch { var heightBytes [4]byte binary.BigEndian.PutUint32(heightBytes[:], header.height) err := rootBucket.Put(header.hash[:], heightBytes[:]) if err != nil { return err } // TODO(roasbeef): need to remedy if side-chain // tracking added if header.height >= chainTipHeight { chainTipHash = header.hash chainTipHeight = header.height } } return rootBucket.Put(tipKey, chainTipHash[:]) }) }
[ "func", "(", "h", "*", "headerIndex", ")", "addHeaders", "(", "batch", "headerBatch", ")", "error", "{", "if", "len", "(", "batch", ")", "==", "0", "{", "return", "nil", "\n", "}", "\n", "sort", ".", "Sort", "(", "batch", ")", "\n", "return", "wall...
// addHeaders writes a batch of header entries in a single atomic batch
[ "addHeaders", "writes", "a", "batch", "of", "header", "entries", "in", "a", "single", "atomic", "batch" ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/index.go#L139-L191
train
lightninglabs/neutrino
headerfs/index.go
heightFromHash
func (h *headerIndex) heightFromHash(hash *chainhash.Hash) (uint32, error) { var height uint32 err := walletdb.View(h.db, func(tx walletdb.ReadTx) error { rootBucket := tx.ReadBucket(indexBucket) heightBytes := rootBucket.Get(hash[:]) if heightBytes == nil { // If the hash wasn't found, then we don't know of this // hash within the index. return ErrHashNotFound } height = binary.BigEndian.Uint32(heightBytes) return nil }) if err != nil { return 0, err } return height, nil }
go
func (h *headerIndex) heightFromHash(hash *chainhash.Hash) (uint32, error) { var height uint32 err := walletdb.View(h.db, func(tx walletdb.ReadTx) error { rootBucket := tx.ReadBucket(indexBucket) heightBytes := rootBucket.Get(hash[:]) if heightBytes == nil { // If the hash wasn't found, then we don't know of this // hash within the index. return ErrHashNotFound } height = binary.BigEndian.Uint32(heightBytes) return nil }) if err != nil { return 0, err } return height, nil }
[ "func", "(", "h", "*", "headerIndex", ")", "heightFromHash", "(", "hash", "*", "chainhash", ".", "Hash", ")", "(", "uint32", ",", "error", ")", "{", "var", "height", "uint32", "\n", "err", ":=", "walletdb", ".", "View", "(", "h", ".", "db", ",", "f...
// heightFromHash returns the height of the entry that matches the specified // height. With this height, the caller is then able to seek to the appropriate // spot in the flat files in order to extract the true header.
[ "heightFromHash", "returns", "the", "height", "of", "the", "entry", "that", "matches", "the", "specified", "height", ".", "With", "this", "height", "the", "caller", "is", "then", "able", "to", "seek", "to", "the", "appropriate", "spot", "in", "the", "flat", ...
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/index.go#L196-L216
train
lightninglabs/neutrino
headerfs/index.go
chainTip
func (h *headerIndex) chainTip() (*chainhash.Hash, uint32, error) { var ( tipHeight uint32 tipHash *chainhash.Hash ) err := walletdb.View(h.db, func(tx walletdb.ReadTx) error { rootBucket := tx.ReadBucket(indexBucket) var tipKey []byte // Based on the specified index type of this instance of the // index, we'll grab the particular key that tracks the chain // tip. switch h.indexType { case Block: tipKey = bitcoinTip case RegularFilter: tipKey = regFilterTip default: return fmt.Errorf("unknown chain tip index type: %v", h.indexType) } // Now that we have the particular tip key for this header // type, we'll fetch the hash for this tip, then using that // we'll fetch the height that corresponds to that hash. tipHashBytes := rootBucket.Get(tipKey) tipHeightBytes := rootBucket.Get(tipHashBytes) if len(tipHeightBytes) != 4 { return ErrHeightNotFound } // With the height fetched, we can now populate our return // parameters. h, err := chainhash.NewHash(tipHashBytes) if err != nil { return err } tipHash = h tipHeight = binary.BigEndian.Uint32(tipHeightBytes) return nil }) if err != nil { return nil, 0, err } return tipHash, tipHeight, nil }
go
func (h *headerIndex) chainTip() (*chainhash.Hash, uint32, error) { var ( tipHeight uint32 tipHash *chainhash.Hash ) err := walletdb.View(h.db, func(tx walletdb.ReadTx) error { rootBucket := tx.ReadBucket(indexBucket) var tipKey []byte // Based on the specified index type of this instance of the // index, we'll grab the particular key that tracks the chain // tip. switch h.indexType { case Block: tipKey = bitcoinTip case RegularFilter: tipKey = regFilterTip default: return fmt.Errorf("unknown chain tip index type: %v", h.indexType) } // Now that we have the particular tip key for this header // type, we'll fetch the hash for this tip, then using that // we'll fetch the height that corresponds to that hash. tipHashBytes := rootBucket.Get(tipKey) tipHeightBytes := rootBucket.Get(tipHashBytes) if len(tipHeightBytes) != 4 { return ErrHeightNotFound } // With the height fetched, we can now populate our return // parameters. h, err := chainhash.NewHash(tipHashBytes) if err != nil { return err } tipHash = h tipHeight = binary.BigEndian.Uint32(tipHeightBytes) return nil }) if err != nil { return nil, 0, err } return tipHash, tipHeight, nil }
[ "func", "(", "h", "*", "headerIndex", ")", "chainTip", "(", ")", "(", "*", "chainhash", ".", "Hash", ",", "uint32", ",", "error", ")", "{", "var", "(", "tipHeight", "uint32", "\n", "tipHash", "*", "chainhash", ".", "Hash", "\n", ")", "\n", "err", "...
// chainTip returns the best hash and height that the index knows of.
[ "chainTip", "returns", "the", "best", "hash", "and", "height", "that", "the", "index", "knows", "of", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/index.go#L219-L267
train
lightninglabs/neutrino
headerfs/index.go
truncateIndex
func (h *headerIndex) truncateIndex(newTip *chainhash.Hash, delete bool) error { return walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error { rootBucket := tx.ReadWriteBucket(indexBucket) var tipKey []byte // Based on the specified index type of this instance of the // index, we'll grab the key that tracks the tip of the chain // we need to update. switch h.indexType { case Block: tipKey = bitcoinTip case RegularFilter: tipKey = regFilterTip default: return fmt.Errorf("unknown index type: %v", h.indexType) } // If the delete flag is set, then we'll also delete this entry // from the database as the primary index (block headers) is // being rolled back. if delete { prevTipHash := rootBucket.Get(tipKey) if err := rootBucket.Delete(prevTipHash); err != nil { return err } } // With the now stale entry deleted, we'll update the chain tip // to point to the new hash. return rootBucket.Put(tipKey, newTip[:]) }) }
go
func (h *headerIndex) truncateIndex(newTip *chainhash.Hash, delete bool) error { return walletdb.Update(h.db, func(tx walletdb.ReadWriteTx) error { rootBucket := tx.ReadWriteBucket(indexBucket) var tipKey []byte // Based on the specified index type of this instance of the // index, we'll grab the key that tracks the tip of the chain // we need to update. switch h.indexType { case Block: tipKey = bitcoinTip case RegularFilter: tipKey = regFilterTip default: return fmt.Errorf("unknown index type: %v", h.indexType) } // If the delete flag is set, then we'll also delete this entry // from the database as the primary index (block headers) is // being rolled back. if delete { prevTipHash := rootBucket.Get(tipKey) if err := rootBucket.Delete(prevTipHash); err != nil { return err } } // With the now stale entry deleted, we'll update the chain tip // to point to the new hash. return rootBucket.Put(tipKey, newTip[:]) }) }
[ "func", "(", "h", "*", "headerIndex", ")", "truncateIndex", "(", "newTip", "*", "chainhash", ".", "Hash", ",", "delete", "bool", ")", "error", "{", "return", "walletdb", ".", "Update", "(", "h", ".", "db", ",", "func", "(", "tx", "walletdb", ".", "Re...
// truncateIndex truncates the index for a particluar header type by a single // header entry. The passed newTip pointer should point to the hash of the new // chain tip. Optionally, if the entry is to be deleted as well, then the // delete flag should be set to true.
[ "truncateIndex", "truncates", "the", "index", "for", "a", "particluar", "header", "type", "by", "a", "single", "header", "entry", ".", "The", "passed", "newTip", "pointer", "should", "point", "to", "the", "hash", "of", "the", "new", "chain", "tip", ".", "O...
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/index.go#L273-L305
train
lightninglabs/neutrino
batch_spend_reporter.go
newBatchSpendReporter
func newBatchSpendReporter() *batchSpendReporter { return &batchSpendReporter{ requests: make(map[wire.OutPoint][]*GetUtxoRequest), initialTxns: make(map[wire.OutPoint]*SpendReport), outpoints: make(map[wire.OutPoint][]byte), } }
go
func newBatchSpendReporter() *batchSpendReporter { return &batchSpendReporter{ requests: make(map[wire.OutPoint][]*GetUtxoRequest), initialTxns: make(map[wire.OutPoint]*SpendReport), outpoints: make(map[wire.OutPoint][]byte), } }
[ "func", "newBatchSpendReporter", "(", ")", "*", "batchSpendReporter", "{", "return", "&", "batchSpendReporter", "{", "requests", ":", "make", "(", "map", "[", "wire", ".", "OutPoint", "]", "[", "]", "*", "GetUtxoRequest", ")", ",", "initialTxns", ":", "make"...
// newBatchSpendReporter instantiates a fresh batchSpendReporter.
[ "newBatchSpendReporter", "instantiates", "a", "fresh", "batchSpendReporter", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L37-L43
train
lightninglabs/neutrino
batch_spend_reporter.go
NotifyUnspentAndUnfound
func (b *batchSpendReporter) NotifyUnspentAndUnfound() { log.Debugf("Finished batch, %d unspent outpoints", len(b.requests)) for outpoint, requests := range b.requests { // A nil SpendReport indicates the output was not found. tx, ok := b.initialTxns[outpoint] if !ok { log.Warnf("Unknown initial txn for getuxo request %v", outpoint) } b.notifyRequests(&outpoint, requests, tx, nil) } }
go
func (b *batchSpendReporter) NotifyUnspentAndUnfound() { log.Debugf("Finished batch, %d unspent outpoints", len(b.requests)) for outpoint, requests := range b.requests { // A nil SpendReport indicates the output was not found. tx, ok := b.initialTxns[outpoint] if !ok { log.Warnf("Unknown initial txn for getuxo request %v", outpoint) } b.notifyRequests(&outpoint, requests, tx, nil) } }
[ "func", "(", "b", "*", "batchSpendReporter", ")", "NotifyUnspentAndUnfound", "(", ")", "{", "log", ".", "Debugf", "(", "\"Finished batch, %d unspent outpoints\"", ",", "len", "(", "b", ".", "requests", ")", ")", "\n", "for", "outpoint", ",", "requests", ":=", ...
// NotifyUnspentAndUnfound iterates through any requests for which no spends // were detected. If we were able to find the initial output, this will be // delivered signaling that no spend was detected. If the original output could // not be found, a nil spend report is returned.
[ "NotifyUnspentAndUnfound", "iterates", "through", "any", "requests", "for", "which", "no", "spends", "were", "detected", ".", "If", "we", "were", "able", "to", "find", "the", "initial", "output", "this", "will", "be", "delivered", "signaling", "that", "no", "s...
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L49-L62
train
lightninglabs/neutrino
batch_spend_reporter.go
ProcessBlock
func (b *batchSpendReporter) ProcessBlock(blk *wire.MsgBlock, newReqs []*GetUtxoRequest, height uint32) { // If any requests want the UTXOs at this height, scan the block to find // the original outputs that might be spent from. if len(newReqs) > 0 { b.addNewRequests(newReqs) b.findInitialTransactions(blk, newReqs, height) } // Next, filter the block for any spends using the current set of // watched outpoints. This will include any new requests added above. spends := b.notifySpends(blk, height) // Finally, rebuild filter entries from cached entries remaining in // outpoints map. This will provide an updated watchlist used to scan // the subsequent filters. rebuildWatchlist := len(newReqs) > 0 || len(spends) > 0 if rebuildWatchlist { b.filterEntries = b.filterEntries[:0] for _, entry := range b.outpoints { b.filterEntries = append(b.filterEntries, entry) } } }
go
func (b *batchSpendReporter) ProcessBlock(blk *wire.MsgBlock, newReqs []*GetUtxoRequest, height uint32) { // If any requests want the UTXOs at this height, scan the block to find // the original outputs that might be spent from. if len(newReqs) > 0 { b.addNewRequests(newReqs) b.findInitialTransactions(blk, newReqs, height) } // Next, filter the block for any spends using the current set of // watched outpoints. This will include any new requests added above. spends := b.notifySpends(blk, height) // Finally, rebuild filter entries from cached entries remaining in // outpoints map. This will provide an updated watchlist used to scan // the subsequent filters. rebuildWatchlist := len(newReqs) > 0 || len(spends) > 0 if rebuildWatchlist { b.filterEntries = b.filterEntries[:0] for _, entry := range b.outpoints { b.filterEntries = append(b.filterEntries, entry) } } }
[ "func", "(", "b", "*", "batchSpendReporter", ")", "ProcessBlock", "(", "blk", "*", "wire", ".", "MsgBlock", ",", "newReqs", "[", "]", "*", "GetUtxoRequest", ",", "height", "uint32", ")", "{", "if", "len", "(", "newReqs", ")", ">", "0", "{", "b", ".",...
// ProcessBlock accepts a block, block height, and any new requests whose start // height matches the provided height. If a non-zero number of new requests are // presented, the block will first be checked for the initial outputs from which // spends may occur. Afterwards, any spends detected in the block are // immediately dispatched, and the watchlist updated in preparation of filtering // the next block.
[ "ProcessBlock", "accepts", "a", "block", "block", "height", "and", "any", "new", "requests", "whose", "start", "height", "matches", "the", "provided", "height", ".", "If", "a", "non", "-", "zero", "number", "of", "new", "requests", "are", "presented", "the",...
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L100-L124
train
lightninglabs/neutrino
batch_spend_reporter.go
addNewRequests
func (b *batchSpendReporter) addNewRequests(reqs []*GetUtxoRequest) { for _, req := range reqs { outpoint := req.Input.OutPoint log.Debugf("Adding outpoint=%s height=%d to watchlist", outpoint, req.BirthHeight) b.requests[outpoint] = append(b.requests[outpoint], req) // Build the filter entry only if it is the first time seeing // the outpoint. if _, ok := b.outpoints[outpoint]; !ok { entry := req.Input.PkScript b.outpoints[outpoint] = entry b.filterEntries = append(b.filterEntries, entry) } } }
go
func (b *batchSpendReporter) addNewRequests(reqs []*GetUtxoRequest) { for _, req := range reqs { outpoint := req.Input.OutPoint log.Debugf("Adding outpoint=%s height=%d to watchlist", outpoint, req.BirthHeight) b.requests[outpoint] = append(b.requests[outpoint], req) // Build the filter entry only if it is the first time seeing // the outpoint. if _, ok := b.outpoints[outpoint]; !ok { entry := req.Input.PkScript b.outpoints[outpoint] = entry b.filterEntries = append(b.filterEntries, entry) } } }
[ "func", "(", "b", "*", "batchSpendReporter", ")", "addNewRequests", "(", "reqs", "[", "]", "*", "GetUtxoRequest", ")", "{", "for", "_", ",", "req", ":=", "range", "reqs", "{", "outpoint", ":=", "req", ".", "Input", ".", "OutPoint", "\n", "log", ".", ...
// addNewRequests adds a set of new GetUtxoRequests to the spend reporter's // state. This method immediately adds the request's outpoints to the reporter's // watchlist.
[ "addNewRequests", "adds", "a", "set", "of", "new", "GetUtxoRequests", "to", "the", "spend", "reporter", "s", "state", ".", "This", "method", "immediately", "adds", "the", "request", "s", "outpoints", "to", "the", "reporter", "s", "watchlist", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L129-L146
train
lightninglabs/neutrino
batch_spend_reporter.go
findInitialTransactions
func (b *batchSpendReporter) findInitialTransactions(block *wire.MsgBlock, newReqs []*GetUtxoRequest, height uint32) map[wire.OutPoint]*SpendReport { // First, construct a reverse index from txid to all a list of requests // whose outputs share the same txid. txidReverseIndex := make(map[chainhash.Hash][]*GetUtxoRequest) for _, req := range newReqs { txidReverseIndex[req.Input.OutPoint.Hash] = append( txidReverseIndex[req.Input.OutPoint.Hash], req, ) } // Iterate over the transactions in this block, hashing each and // querying our reverse index to see if any requests depend on the txn. initialTxns := make(map[wire.OutPoint]*SpendReport) for _, tx := range block.Transactions { // If our reverse index has been cleared, we are done. if len(txidReverseIndex) == 0 { break } hash := tx.TxHash() txidReqs, ok := txidReverseIndex[hash] if !ok { continue } delete(txidReverseIndex, hash) // For all requests that are watching this txid, use the output // index of each to grab the initial output. txOuts := tx.TxOut for _, req := range txidReqs { op := req.Input.OutPoint // Ensure that the outpoint's index references an actual // output on the transaction. If not, we will be unable // to find the initial output. if op.Index >= uint32(len(txOuts)) { log.Errorf("Failed to find outpoint %s -- "+ "invalid output index", op) initialTxns[op] = nil continue } initialTxns[op] = &SpendReport{ Output: txOuts[op.Index], } } } // Finally, we must reconcile any requests for which the txid did not // exist in this block. A nil spend report is saved for every initial // txn that could not be found, otherwise the result is copied from scan // above. The copied values can include valid initial txns, as well as // nil spend report if the output index was invalid. 
for _, req := range newReqs { tx, ok := initialTxns[req.Input.OutPoint] switch { case !ok: log.Errorf("Failed to find outpoint %s -- "+ "txid not found in block", req.Input.OutPoint) initialTxns[req.Input.OutPoint] = nil case tx != nil: log.Tracef("Block %d creates output %s", height, req.Input.OutPoint) default: } b.initialTxns[req.Input.OutPoint] = tx } return initialTxns }
go
func (b *batchSpendReporter) findInitialTransactions(block *wire.MsgBlock, newReqs []*GetUtxoRequest, height uint32) map[wire.OutPoint]*SpendReport { // First, construct a reverse index from txid to all a list of requests // whose outputs share the same txid. txidReverseIndex := make(map[chainhash.Hash][]*GetUtxoRequest) for _, req := range newReqs { txidReverseIndex[req.Input.OutPoint.Hash] = append( txidReverseIndex[req.Input.OutPoint.Hash], req, ) } // Iterate over the transactions in this block, hashing each and // querying our reverse index to see if any requests depend on the txn. initialTxns := make(map[wire.OutPoint]*SpendReport) for _, tx := range block.Transactions { // If our reverse index has been cleared, we are done. if len(txidReverseIndex) == 0 { break } hash := tx.TxHash() txidReqs, ok := txidReverseIndex[hash] if !ok { continue } delete(txidReverseIndex, hash) // For all requests that are watching this txid, use the output // index of each to grab the initial output. txOuts := tx.TxOut for _, req := range txidReqs { op := req.Input.OutPoint // Ensure that the outpoint's index references an actual // output on the transaction. If not, we will be unable // to find the initial output. if op.Index >= uint32(len(txOuts)) { log.Errorf("Failed to find outpoint %s -- "+ "invalid output index", op) initialTxns[op] = nil continue } initialTxns[op] = &SpendReport{ Output: txOuts[op.Index], } } } // Finally, we must reconcile any requests for which the txid did not // exist in this block. A nil spend report is saved for every initial // txn that could not be found, otherwise the result is copied from scan // above. The copied values can include valid initial txns, as well as // nil spend report if the output index was invalid. 
for _, req := range newReqs { tx, ok := initialTxns[req.Input.OutPoint] switch { case !ok: log.Errorf("Failed to find outpoint %s -- "+ "txid not found in block", req.Input.OutPoint) initialTxns[req.Input.OutPoint] = nil case tx != nil: log.Tracef("Block %d creates output %s", height, req.Input.OutPoint) default: } b.initialTxns[req.Input.OutPoint] = tx } return initialTxns }
[ "func", "(", "b", "*", "batchSpendReporter", ")", "findInitialTransactions", "(", "block", "*", "wire", ".", "MsgBlock", ",", "newReqs", "[", "]", "*", "GetUtxoRequest", ",", "height", "uint32", ")", "map", "[", "wire", ".", "OutPoint", "]", "*", "SpendRep...
// findInitialTransactions searches the given block for the creation of the // UTXOs that are supposed to be birthed in this block. If any are found, a // spend report containing the initial outpoint will be saved in case the // outpoint is not spent later on. Requests corresponding to outpoints that are // not found in the block will return a nil spend report to indicate that the // UTXO was not found.
[ "findInitialTransactions", "searches", "the", "given", "block", "for", "the", "creation", "of", "the", "UTXOs", "that", "are", "supposed", "to", "be", "birthed", "in", "this", "block", ".", "If", "any", "are", "found", "a", "spend", "report", "containing", "...
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L154-L226
train
lightninglabs/neutrino
batch_spend_reporter.go
notifySpends
func (b *batchSpendReporter) notifySpends(block *wire.MsgBlock, height uint32) map[wire.OutPoint]*SpendReport { spends := make(map[wire.OutPoint]*SpendReport) for _, tx := range block.Transactions { // Check each input to see if this transaction spends one of our // watched outpoints. for i, ti := range tx.TxIn { outpoint := ti.PreviousOutPoint // Find the requests this spend relates to. requests, ok := b.requests[outpoint] if !ok { continue } log.Debugf("UTXO %v spent by txn %v", outpoint, tx.TxHash()) spend := &SpendReport{ SpendingTx: tx, SpendingInputIndex: uint32(i), SpendingTxHeight: height, } spends[outpoint] = spend // With the requests located, we remove this outpoint // from both the requests, outpoints, and initial txns // map. This will ensures we don't continue watching // this outpoint. b.notifyRequests(&outpoint, requests, spend, nil) } } return spends }
go
func (b *batchSpendReporter) notifySpends(block *wire.MsgBlock, height uint32) map[wire.OutPoint]*SpendReport { spends := make(map[wire.OutPoint]*SpendReport) for _, tx := range block.Transactions { // Check each input to see if this transaction spends one of our // watched outpoints. for i, ti := range tx.TxIn { outpoint := ti.PreviousOutPoint // Find the requests this spend relates to. requests, ok := b.requests[outpoint] if !ok { continue } log.Debugf("UTXO %v spent by txn %v", outpoint, tx.TxHash()) spend := &SpendReport{ SpendingTx: tx, SpendingInputIndex: uint32(i), SpendingTxHeight: height, } spends[outpoint] = spend // With the requests located, we remove this outpoint // from both the requests, outpoints, and initial txns // map. This will ensures we don't continue watching // this outpoint. b.notifyRequests(&outpoint, requests, spend, nil) } } return spends }
[ "func", "(", "b", "*", "batchSpendReporter", ")", "notifySpends", "(", "block", "*", "wire", ".", "MsgBlock", ",", "height", "uint32", ")", "map", "[", "wire", ".", "OutPoint", "]", "*", "SpendReport", "{", "spends", ":=", "make", "(", "map", "[", "wir...
// notifySpends finds any transactions in the block that spend from our watched // outpoints. If a spend is detected, it is immediately delivered and cleaned up // from the reporter's internal state.
[ "notifySpends", "finds", "any", "transactions", "in", "the", "block", "that", "spend", "from", "our", "watched", "outpoints", ".", "If", "a", "spend", "is", "detected", "it", "is", "immediately", "delivered", "and", "cleaned", "up", "from", "the", "reporter", ...
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/batch_spend_reporter.go#L231-L267
train
lightninglabs/neutrino
headerfs/file.go
appendRaw
func (h *headerStore) appendRaw(header []byte) error { if _, err := h.file.Write(header); err != nil { return err } return nil }
go
func (h *headerStore) appendRaw(header []byte) error { if _, err := h.file.Write(header); err != nil { return err } return nil }
[ "func", "(", "h", "*", "headerStore", ")", "appendRaw", "(", "header", "[", "]", "byte", ")", "error", "{", "if", "_", ",", "err", ":=", "h", ".", "file", ".", "Write", "(", "header", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}...
// appendRaw appends a new raw header to the end of the flat file.
[ "appendRaw", "appends", "a", "new", "raw", "header", "to", "the", "end", "of", "the", "flat", "file", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/file.go#L13-L19
train
lightninglabs/neutrino
headerfs/file.go
readRaw
func (h *headerStore) readRaw(seekDist uint64) ([]byte, error) { var headerSize uint32 // Based on the defined header type, we'll determine the number of // bytes that we need to read past the sync point. switch h.indexType { case Block: headerSize = 80 case RegularFilter: headerSize = 32 default: return nil, fmt.Errorf("unknown index type: %v", h.indexType) } // TODO(roasbeef): add buffer pool // With the number of bytes to read determined, we'll create a slice // for that number of bytes, and read directly from the file into the // buffer. rawHeader := make([]byte, headerSize) if _, err := h.file.ReadAt(rawHeader[:], int64(seekDist)); err != nil { return nil, err } return rawHeader[:], nil }
go
func (h *headerStore) readRaw(seekDist uint64) ([]byte, error) { var headerSize uint32 // Based on the defined header type, we'll determine the number of // bytes that we need to read past the sync point. switch h.indexType { case Block: headerSize = 80 case RegularFilter: headerSize = 32 default: return nil, fmt.Errorf("unknown index type: %v", h.indexType) } // TODO(roasbeef): add buffer pool // With the number of bytes to read determined, we'll create a slice // for that number of bytes, and read directly from the file into the // buffer. rawHeader := make([]byte, headerSize) if _, err := h.file.ReadAt(rawHeader[:], int64(seekDist)); err != nil { return nil, err } return rawHeader[:], nil }
[ "func", "(", "h", "*", "headerStore", ")", "readRaw", "(", "seekDist", "uint64", ")", "(", "[", "]", "byte", ",", "error", ")", "{", "var", "headerSize", "uint32", "\n", "switch", "h", ".", "indexType", "{", "case", "Block", ":", "headerSize", "=", "...
// readRaw reads a raw header from disk from a particular seek distance. The // amount of bytes read past the seek distance is determined by the specified // header type.
[ "readRaw", "reads", "a", "raw", "header", "from", "disk", "from", "a", "particular", "seek", "distance", ".", "The", "amount", "of", "bytes", "read", "past", "the", "seek", "distance", "is", "determined", "by", "the", "specified", "header", "type", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/file.go#L24-L51
train
lightninglabs/neutrino
headerfs/file.go
readHeader
func (h *blockHeaderStore) readHeader(height uint32) (wire.BlockHeader, error) { var header wire.BlockHeader // Each header is 80 bytes, so using this information, we'll seek a // distance to cover that height based on the size of block headers. seekDistance := uint64(height) * 80 // With the distance calculated, we'll raw a raw header start from that // offset. rawHeader, err := h.readRaw(seekDistance) if err != nil { return header, err } headerReader := bytes.NewReader(rawHeader) // Finally, decode the raw bytes into a proper bitcoin header. if err := header.Deserialize(headerReader); err != nil { return header, err } return header, nil }
go
func (h *blockHeaderStore) readHeader(height uint32) (wire.BlockHeader, error) { var header wire.BlockHeader // Each header is 80 bytes, so using this information, we'll seek a // distance to cover that height based on the size of block headers. seekDistance := uint64(height) * 80 // With the distance calculated, we'll raw a raw header start from that // offset. rawHeader, err := h.readRaw(seekDistance) if err != nil { return header, err } headerReader := bytes.NewReader(rawHeader) // Finally, decode the raw bytes into a proper bitcoin header. if err := header.Deserialize(headerReader); err != nil { return header, err } return header, nil }
[ "func", "(", "h", "*", "blockHeaderStore", ")", "readHeader", "(", "height", "uint32", ")", "(", "wire", ".", "BlockHeader", ",", "error", ")", "{", "var", "header", "wire", ".", "BlockHeader", "\n", "seekDistance", ":=", "uint64", "(", "height", ")", "*...
// readHeader reads a full block header from the flat-file. The header read is // determined by the hight value.
[ "readHeader", "reads", "a", "full", "block", "header", "from", "the", "flat", "-", "file", ".", "The", "header", "read", "is", "determined", "by", "the", "hight", "value", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/file.go#L90-L111
train
lightninglabs/neutrino
headerfs/file.go
readHeader
func (f *FilterHeaderStore) readHeader(height uint32) (*chainhash.Hash, error) { seekDistance := uint64(height) * 32 rawHeader, err := f.readRaw(seekDistance) if err != nil { return nil, err } return chainhash.NewHash(rawHeader) }
go
func (f *FilterHeaderStore) readHeader(height uint32) (*chainhash.Hash, error) { seekDistance := uint64(height) * 32 rawHeader, err := f.readRaw(seekDistance) if err != nil { return nil, err } return chainhash.NewHash(rawHeader) }
[ "func", "(", "f", "*", "FilterHeaderStore", ")", "readHeader", "(", "height", "uint32", ")", "(", "*", "chainhash", ".", "Hash", ",", "error", ")", "{", "seekDistance", ":=", "uint64", "(", "height", ")", "*", "32", "\n", "rawHeader", ",", "err", ":=",...
// readHeader reads a single filter header at the specified height from the // flat files on disk.
[ "readHeader", "reads", "a", "single", "filter", "header", "at", "the", "specified", "height", "from", "the", "flat", "files", "on", "disk", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/file.go#L115-L124
train
lightninglabs/neutrino
headerfs/file.go
readHeadersFromFile
func readHeadersFromFile(f *os.File, headerSize, startHeight, endHeight uint32) (*bytes.Reader, error) { // Each header is headerSize bytes, so using this information, we'll // seek a distance to cover that height based on the size the headers. seekDistance := uint64(startHeight) * uint64(headerSize) // Based on the number of headers in the range, we'll allocate a single // slice that's able to hold the entire range of headers. numHeaders := endHeight - startHeight + 1 rawHeaderBytes := make([]byte, headerSize*numHeaders) // Now that we have our slice allocated, we'll read out the entire // range of headers with a single system call. _, err := f.ReadAt(rawHeaderBytes, int64(seekDistance)) if err != nil { return nil, err } return bytes.NewReader(rawHeaderBytes), nil }
go
func readHeadersFromFile(f *os.File, headerSize, startHeight, endHeight uint32) (*bytes.Reader, error) { // Each header is headerSize bytes, so using this information, we'll // seek a distance to cover that height based on the size the headers. seekDistance := uint64(startHeight) * uint64(headerSize) // Based on the number of headers in the range, we'll allocate a single // slice that's able to hold the entire range of headers. numHeaders := endHeight - startHeight + 1 rawHeaderBytes := make([]byte, headerSize*numHeaders) // Now that we have our slice allocated, we'll read out the entire // range of headers with a single system call. _, err := f.ReadAt(rawHeaderBytes, int64(seekDistance)) if err != nil { return nil, err } return bytes.NewReader(rawHeaderBytes), nil }
[ "func", "readHeadersFromFile", "(", "f", "*", "os", ".", "File", ",", "headerSize", ",", "startHeight", ",", "endHeight", "uint32", ")", "(", "*", "bytes", ".", "Reader", ",", "error", ")", "{", "seekDistance", ":=", "uint64", "(", "startHeight", ")", "*...
// readHeadersFromFile reads a chunk of headers, each of size headerSize, from // the given file, from startHeight to endHeight.
[ "readHeadersFromFile", "reads", "a", "chunk", "of", "headers", "each", "of", "size", "headerSize", "from", "the", "given", "file", "from", "startHeight", "to", "endHeight", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/headerfs/file.go#L163-L183
train
lightninglabs/neutrino
neutrino.go
Count
func (ps *peerState) Count() int { return len(ps.outboundPeers) + len(ps.persistentPeers) }
go
func (ps *peerState) Count() int { return len(ps.outboundPeers) + len(ps.persistentPeers) }
[ "func", "(", "ps", "*", "peerState", ")", "Count", "(", ")", "int", "{", "return", "len", "(", "ps", ".", "outboundPeers", ")", "+", "len", "(", "ps", ".", "persistentPeers", ")", "\n", "}" ]
// Count returns the count of all known peers.
[ "Count", "returns", "the", "count", "of", "all", "known", "peers", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L108-L110
train
lightninglabs/neutrino
neutrino.go
forAllOutboundPeers
func (ps *peerState) forAllOutboundPeers(closure func(sp *ServerPeer)) { for _, e := range ps.outboundPeers { closure(e) } for _, e := range ps.persistentPeers { closure(e) } }
go
func (ps *peerState) forAllOutboundPeers(closure func(sp *ServerPeer)) { for _, e := range ps.outboundPeers { closure(e) } for _, e := range ps.persistentPeers { closure(e) } }
[ "func", "(", "ps", "*", "peerState", ")", "forAllOutboundPeers", "(", "closure", "func", "(", "sp", "*", "ServerPeer", ")", ")", "{", "for", "_", ",", "e", ":=", "range", "ps", ".", "outboundPeers", "{", "closure", "(", "e", ")", "\n", "}", "\n", "...
// forAllOutboundPeers is a helper function that runs closure on all outbound // peers known to peerState.
[ "forAllOutboundPeers", "is", "a", "helper", "function", "that", "runs", "closure", "on", "all", "outbound", "peers", "known", "to", "peerState", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L114-L121
train
lightninglabs/neutrino
neutrino.go
newServerPeer
func newServerPeer(s *ChainService, isPersistent bool) *ServerPeer { return &ServerPeer{ server: s, persistent: isPersistent, knownAddresses: make(map[string]struct{}), quit: make(chan struct{}), recvSubscribers: make(map[spMsgSubscription]struct{}), } }
go
func newServerPeer(s *ChainService, isPersistent bool) *ServerPeer { return &ServerPeer{ server: s, persistent: isPersistent, knownAddresses: make(map[string]struct{}), quit: make(chan struct{}), recvSubscribers: make(map[spMsgSubscription]struct{}), } }
[ "func", "newServerPeer", "(", "s", "*", "ChainService", ",", "isPersistent", "bool", ")", "*", "ServerPeer", "{", "return", "&", "ServerPeer", "{", "server", ":", "s", ",", "persistent", ":", "isPersistent", ",", "knownAddresses", ":", "make", "(", "map", ...
// newServerPeer returns a new ServerPeer instance. The peer needs to be set by // the caller.
[ "newServerPeer", "returns", "a", "new", "ServerPeer", "instance", ".", "The", "peer", "needs", "to", "be", "set", "by", "the", "caller", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L171-L179
train
lightninglabs/neutrino
neutrino.go
addKnownAddresses
func (sp *ServerPeer) addKnownAddresses(addresses []*wire.NetAddress) { for _, na := range addresses { sp.knownAddresses[addrmgr.NetAddressKey(na)] = struct{}{} } }
go
func (sp *ServerPeer) addKnownAddresses(addresses []*wire.NetAddress) { for _, na := range addresses { sp.knownAddresses[addrmgr.NetAddressKey(na)] = struct{}{} } }
[ "func", "(", "sp", "*", "ServerPeer", ")", "addKnownAddresses", "(", "addresses", "[", "]", "*", "wire", ".", "NetAddress", ")", "{", "for", "_", ",", "na", ":=", "range", "addresses", "{", "sp", ".", "knownAddresses", "[", "addrmgr", ".", "NetAddressKey...
// addKnownAddresses adds the given addresses to the set of known addresses to // the peer to prevent sending duplicate addresses.
[ "addKnownAddresses", "adds", "the", "given", "addresses", "to", "the", "set", "of", "known", "addresses", "to", "the", "peer", "to", "prevent", "sending", "duplicate", "addresses", "." ]
a655679fe131a5d1b4417872cc834fc3862ac70e
https://github.com/lightninglabs/neutrino/blob/a655679fe131a5d1b4417872cc834fc3862ac70e/neutrino.go#L194-L198
train