repo: string (length 5–67)
path: string (length 4–218)
func_name: string (length 0–151)
original_string: string (length 52–373k)
language: string (6 classes)
code: string (length 52–373k)
code_tokens: list (length 10–512)
docstring: string (length 3–47.2k)
docstring_tokens: list (length 3–234)
sha: string (length 40–40)
url: string (length 85–339)
partition: string (3 classes)
prometheus/tsdb
index/postings.go
EnsureOrder
func (p *MemPostings) EnsureOrder() {
	p.mtx.Lock()
	defer p.mtx.Unlock()

	if p.ordered {
		return
	}

	n := runtime.GOMAXPROCS(0)
	workc := make(chan []uint64)

	var wg sync.WaitGroup
	wg.Add(n)

	for i := 0; i < n; i++ {
		go func() {
			for l := range workc {
				sort.Slice(l, func(i, j int) bool { return l[i] < l[j] })
			}
			wg.Done()
		}()
	}

	for _, e := range p.m {
		for _, l := range e {
			workc <- l
		}
	}
	close(workc)
	wg.Wait()

	p.ordered = true
}
go
[ "func", "(", "p", "*", "MemPostings", ")", "EnsureOrder", "(", ")", "{", "p", ".", "mtx", ".", "Lock", "(", ")", "\n", "defer", "p", ".", "mtx", ".", "Unlock", "(", ")", "\n", "if", "p", ".", "ordered", "{", "return", "\n", "}", "\n", "n", ":...
// EnsureOrder ensures that all postings lists are sorted. After it returns all further
// calls to add and addFor will insert new IDs in a sorted manner.
[ "EnsureOrder", "ensures", "that", "all", "postings", "lists", "are", "sorted", ".", "After", "it", "returns", "all", "further", "calls", "to", "add", "and", "addFor", "will", "insert", "new", "IDs", "in", "a", "sorted", "manner", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/postings.go#L105-L137
train
prometheus/tsdb
index/postings.go
Delete
func (p *MemPostings) Delete(deleted map[uint64]struct{}) {
	var keys, vals []string

	// Collect all keys relevant for deletion once. New keys added afterwards
	// can by definition not be affected by any of the given deletes.
	p.mtx.RLock()
	for n := range p.m {
		keys = append(keys, n)
	}
	p.mtx.RUnlock()

	for _, n := range keys {
		p.mtx.RLock()
		vals = vals[:0]
		for v := range p.m[n] {
			vals = append(vals, v)
		}
		p.mtx.RUnlock()

		// For each posting we first analyse whether the postings list is affected by the deletes.
		// If yes, we actually reallocate a new postings list.
		for _, l := range vals {
			// Only lock for processing one postings list so we don't block reads for too long.
			p.mtx.Lock()

			found := false
			for _, id := range p.m[n][l] {
				if _, ok := deleted[id]; ok {
					found = true
					break
				}
			}
			if !found {
				p.mtx.Unlock()
				continue
			}
			repl := make([]uint64, 0, len(p.m[n][l]))

			for _, id := range p.m[n][l] {
				if _, ok := deleted[id]; !ok {
					repl = append(repl, id)
				}
			}
			if len(repl) > 0 {
				p.m[n][l] = repl
			} else {
				delete(p.m[n], l)
			}
			p.mtx.Unlock()
		}
		p.mtx.Lock()
		if len(p.m[n]) == 0 {
			delete(p.m, n)
		}
		p.mtx.Unlock()
	}
}
go
[ "func", "(", "p", "*", "MemPostings", ")", "Delete", "(", "deleted", "map", "[", "uint64", "]", "struct", "{", "}", ")", "{", "var", "keys", ",", "vals", "[", "]", "string", "\n", "p", ".", "mtx", ".", "RLock", "(", ")", "\n", "for", "n", ":=",...
// Delete removes all ids in the given map from the postings lists.
[ "Delete", "removes", "all", "ids", "in", "the", "given", "map", "from", "the", "postings", "lists", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/postings.go#L140-L196
train
prometheus/tsdb
index/postings.go
Iter
func (p *MemPostings) Iter(f func(labels.Label, Postings) error) error {
	p.mtx.RLock()
	defer p.mtx.RUnlock()

	for n, e := range p.m {
		for v, p := range e {
			if err := f(labels.Label{Name: n, Value: v}, newListPostings(p...)); err != nil {
				return err
			}
		}
	}
	return nil
}
go
[ "func", "(", "p", "*", "MemPostings", ")", "Iter", "(", "f", "func", "(", "labels", ".", "Label", ",", "Postings", ")", "error", ")", "error", "{", "p", ".", "mtx", ".", "RLock", "(", ")", "\n", "defer", "p", ".", "mtx", ".", "RUnlock", "(", ")...
// Iter calls f for each postings list. It aborts if f returns an error and returns it.
[ "Iter", "calls", "f", "for", "each", "postings", "list", ".", "It", "aborts", "if", "f", "returns", "an", "error", "and", "returns", "it", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/postings.go#L199-L211
train
prometheus/tsdb
index/postings.go
Add
func (p *MemPostings) Add(id uint64, lset labels.Labels) {
	p.mtx.Lock()

	for _, l := range lset {
		p.addFor(id, l)
	}
	p.addFor(id, allPostingsKey)

	p.mtx.Unlock()
}
go
[ "func", "(", "p", "*", "MemPostings", ")", "Add", "(", "id", "uint64", ",", "lset", "labels", ".", "Labels", ")", "{", "p", ".", "mtx", ".", "Lock", "(", ")", "\n", "for", "_", ",", "l", ":=", "range", "lset", "{", "p", ".", "addFor", "(", "i...
// Add a label set to the postings index.
[ "Add", "a", "label", "set", "to", "the", "postings", "index", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/postings.go#L214-L223
train
prometheus/tsdb
index/postings.go
ExpandPostings
func ExpandPostings(p Postings) (res []uint64, err error) {
	for p.Next() {
		res = append(res, p.At())
	}
	return res, p.Err()
}
go
[ "func", "ExpandPostings", "(", "p", "Postings", ")", "(", "res", "[", "]", "uint64", ",", "err", "error", ")", "{", "for", "p", ".", "Next", "(", ")", "{", "res", "=", "append", "(", "res", ",", "p", ".", "At", "(", ")", ")", "\n", "}", "\n",...
// ExpandPostings returns the postings expanded as a slice.
[ "ExpandPostings", "returns", "the", "postings", "expanded", "as", "a", "slice", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/postings.go#L250-L255
train
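To make the MemPostings rows above concrete, here is a minimal usage sketch that builds a small in-memory index, sorts it, and expands one postings list. It assumes the exported index.NewMemPostings constructor, the MemPostings.Get accessor, and labels.FromStrings from the same repository, none of which appear in the rows themselves.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/index"
	"github.com/prometheus/tsdb/labels"
)

func main() {
	// Assumed constructor: an empty, initially unordered in-memory postings index.
	p := index.NewMemPostings()

	// Series IDs may arrive out of order.
	p.Add(2, labels.FromStrings("job", "api", "instance", "a"))
	p.Add(1, labels.FromStrings("job", "api", "instance", "b"))

	// EnsureOrder sorts every postings list, so readers see sorted IDs from here on.
	p.EnsureOrder()

	// Get (assumed accessor) returns the postings list for one label pair;
	// ExpandPostings materializes it as a slice.
	ids, err := index.ExpandPostings(p.Get("job", "api"))
	if err != nil {
		panic(err)
	}
	fmt.Println(ids) // [1 2]
}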
prometheus/tsdb
index/postings.go
Intersect
func Intersect(its ...Postings) Postings {
	if len(its) == 0 {
		return EmptyPostings()
	}
	if len(its) == 1 {
		return its[0]
	}

	l := len(its) / 2
	a := Intersect(its[:l]...)
	b := Intersect(its[l:]...)

	if a == EmptyPostings() || b == EmptyPostings() {
		return EmptyPostings()
	}
	return newIntersectPostings(a, b)
}
go
[ "func", "Intersect", "(", "its", "...", "Postings", ")", "Postings", "{", "if", "len", "(", "its", ")", "==", "0", "{", "return", "EmptyPostings", "(", ")", "\n", "}", "\n", "if", "len", "(", "its", ")", "==", "1", "{", "return", "its", "[", "0",...
// Intersect returns a new postings list over the intersection of the
// input postings.
[ "Intersect", "returns", "a", "new", "postings", "list", "over", "the", "intersection", "of", "the", "input", "postings", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/postings.go#L299-L315
train
prometheus/tsdb
index/postings.go
Merge
func Merge(its ...Postings) Postings {
	if len(its) == 0 {
		return EmptyPostings()
	}
	if len(its) == 1 {
		return its[0]
	}

	p, ok := newMergedPostings(its)
	if !ok {
		return EmptyPostings()
	}
	return p
}
go
[ "func", "Merge", "(", "its", "...", "Postings", ")", "Postings", "{", "if", "len", "(", "its", ")", "==", "0", "{", "return", "EmptyPostings", "(", ")", "\n", "}", "\n", "if", "len", "(", "its", ")", "==", "1", "{", "return", "its", "[", "0", "...
// Merge returns a new iterator over the union of the input iterators.
[ "Merge", "returns", "a", "new", "iterator", "over", "the", "union", "of", "the", "input", "iterators", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/postings.go#L371-L384
train
prometheus/tsdb
index/postings.go
Without
func Without(full, drop Postings) Postings {
	if full == EmptyPostings() {
		return EmptyPostings()
	}

	if drop == EmptyPostings() {
		return full
	}
	return newRemovedPostings(full, drop)
}
go
[ "func", "Without", "(", "full", ",", "drop", "Postings", ")", "Postings", "{", "if", "full", "==", "EmptyPostings", "(", ")", "{", "return", "EmptyPostings", "(", ")", "\n", "}", "\n", "if", "drop", "==", "EmptyPostings", "(", ")", "{", "return", "full...
// Without returns a new postings list that contains all elements from the full list that
// are not in the drop list.
[ "Without", "returns", "a", "new", "postings", "list", "that", "contains", "all", "elements", "from", "the", "full", "list", "that", "are", "not", "in", "the", "drop", "list", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/postings.go#L520-L529
train
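Intersect, Merge, and Without all return the Postings interface, so they compose freely. A hedged sketch of that composition follows; as before, the MemPostings.Get accessor and labels.FromStrings are assumptions drawn from the same package rather than from the rows above.

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/index"
	"github.com/prometheus/tsdb/labels"
)

func main() {
	p := index.NewMemPostings() // assumed constructor
	p.Add(1, labels.FromStrings("job", "api", "env", "prod"))
	p.Add(2, labels.FromStrings("job", "api", "env", "dev"))
	p.Add(3, labels.FromStrings("job", "web", "env", "prod"))
	p.EnsureOrder()

	// Series matching job=api AND env=prod.
	both := index.Intersect(p.Get("job", "api"), p.Get("env", "prod"))

	// Series matching job=api OR job=web, minus those with env=dev.
	either := index.Merge(p.Get("job", "api"), p.Get("job", "web"))
	kept := index.Without(either, p.Get("env", "dev"))

	ids, _ := index.ExpandPostings(both)
	fmt.Println(ids) // [1]
	ids, _ = index.ExpandPostings(kept)
	fmt.Println(ids) // [1 3]
}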
prometheus/tsdb
fileutil/flock.go
Flock
func Flock(fileName string) (r Releaser, existed bool, err error) {
	if err = os.MkdirAll(filepath.Dir(fileName), 0755); err != nil {
		return nil, false, err
	}

	_, err = os.Stat(fileName)
	existed = err == nil

	r, err = newLock(fileName)
	return r, existed, err
}
go
[ "func", "Flock", "(", "fileName", "string", ")", "(", "r", "Releaser", ",", "existed", "bool", ",", "err", "error", ")", "{", "if", "err", "=", "os", ".", "MkdirAll", "(", "filepath", ".", "Dir", "(", "fileName", ")", ",", "0755", ")", ";", "err", ...
// Flock locks the file with the provided name. If the file does not exist, it is
// created. The returned Releaser is used to release the lock. existed is true
// if the file to lock already existed. A non-nil error is returned if the
// locking has failed. Neither this function nor the returned Releaser is
// goroutine-safe.
[ "Flock", "locks", "the", "file", "with", "the", "provided", "name", ".", "If", "the", "file", "does", "not", "exist", "it", "is", "created", ".", "The", "returned", "Releaser", "is", "used", "to", "release", "the", "lock", ".", "existed", "is", "true", ...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/fileutil/flock.go#L31-L41
train
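A short usage sketch for Flock. It assumes Releaser exposes a single Release() error method, which is not spelled out in the row above.

package main

import (
	"log"

	"github.com/prometheus/tsdb/fileutil"
)

func main() {
	// Acquire (and create, if needed) a lock file guarding a data directory.
	releaser, existed, err := fileutil.Flock("data/lock")
	if err != nil {
		log.Fatalf("acquire lock: %v", err)
	}
	// Release is assumed to be the single method on Releaser.
	defer releaser.Release()

	if existed {
		log.Println("lock file already present; a previous run may not have shut down cleanly")
	}
	// ... exclusive work on the directory ...
}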
prometheus/tsdb
tsdbutil/chunks.go
PopulatedChunk
func PopulatedChunk(numSamples int, minTime int64) chunks.Meta {
	samples := make([]Sample, numSamples)
	for i := 0; i < numSamples; i++ {
		samples[i] = sample{minTime + int64(i*1000), 1.0}
	}
	return ChunkFromSamples(samples)
}
go
[ "func", "PopulatedChunk", "(", "numSamples", "int", ",", "minTime", "int64", ")", "chunks", ".", "Meta", "{", "samples", ":=", "make", "(", "[", "]", "Sample", ",", "numSamples", ")", "\n", "for", "i", ":=", "0", ";", "i", "<", "numSamples", ";", "i"...
// PopulatedChunk creates a chunk populated with samples every second starting at minTime
[ "PopulatedChunk", "creates", "a", "chunk", "populated", "with", "samples", "every", "second", "starting", "at", "minTime" ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/tsdbutil/chunks.go#L47-L53
train
prometheus/tsdb
encoding/encoding.go
PutHash
func (e *Encbuf) PutHash(h hash.Hash) {
	h.Reset()
	_, err := h.Write(e.B)
	if err != nil {
		panic(err) // The CRC32 implementation does not error
	}
	e.B = h.Sum(e.B)
}
go
[ "func", "(", "e", "*", "Encbuf", ")", "PutHash", "(", "h", "hash", ".", "Hash", ")", "{", "h", ".", "Reset", "(", ")", "\n", "_", ",", "err", ":=", "h", ".", "Write", "(", "e", ".", "B", ")", "\n", "if", "err", "!=", "nil", "{", "panic", ...
// PutHash appends a hash over the buffers current contents to the buffer.
[ "PutHash", "appends", "a", "hash", "over", "the", "buffers", "current", "contents", "to", "the", "buffer", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/encoding/encoding.go#L76-L83
train
prometheus/tsdb
encoding/encoding.go
NewDecbufAt
func NewDecbufAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf {
	if bs.Len() < off+4 {
		return Decbuf{E: ErrInvalidSize}
	}
	b := bs.Range(off, off+4)
	l := int(binary.BigEndian.Uint32(b))

	if bs.Len() < off+4+l+4 {
		return Decbuf{E: ErrInvalidSize}
	}

	// Load bytes holding the contents plus a CRC32 checksum.
	b = bs.Range(off+4, off+4+l+4)
	dec := Decbuf{B: b[:len(b)-4]}

	if exp := binary.BigEndian.Uint32(b[len(b)-4:]); dec.Crc32(castagnoliTable) != exp {
		return Decbuf{E: ErrInvalidChecksum}
	}
	return dec
}
go
[ "func", "NewDecbufAt", "(", "bs", "ByteSlice", ",", "off", "int", ",", "castagnoliTable", "*", "crc32", ".", "Table", ")", "Decbuf", "{", "if", "bs", ".", "Len", "(", ")", "<", "off", "+", "4", "{", "return", "Decbuf", "{", "E", ":", "ErrInvalidSize"...
// NewDecbufAt returns a new decoding buffer. It expects the first 4 bytes
// after offset to hold the big endian encoded content length, followed by the contents and the expected
// checksum.
[ "NewDecbufAt", "returns", "a", "new", "decoding", "buffer", ".", "It", "expects", "the", "first", "4", "bytes", "after", "offset", "to", "hold", "the", "big", "endian", "encoded", "content", "length", "followed", "by", "the", "contents", "and", "the", "expec...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/encoding/encoding.go#L97-L116
train
prometheus/tsdb
encoding/encoding.go
NewDecbufUvarintAt
func NewDecbufUvarintAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Decbuf {
	// We never have to access this method at the far end of the byte slice. Thus just checking
	// against the MaxVarintLen32 is sufficient.
	if bs.Len() < off+binary.MaxVarintLen32 {
		return Decbuf{E: ErrInvalidSize}
	}
	b := bs.Range(off, off+binary.MaxVarintLen32)

	l, n := binary.Uvarint(b)
	if n <= 0 || n > binary.MaxVarintLen32 {
		return Decbuf{E: errors.Errorf("invalid uvarint %d", n)}
	}

	if bs.Len() < off+n+int(l)+4 {
		return Decbuf{E: ErrInvalidSize}
	}

	// Load bytes holding the contents plus a CRC32 checksum.
	b = bs.Range(off+n, off+n+int(l)+4)
	dec := Decbuf{B: b[:len(b)-4]}

	if dec.Crc32(castagnoliTable) != binary.BigEndian.Uint32(b[len(b)-4:]) {
		return Decbuf{E: ErrInvalidChecksum}
	}
	return dec
}
go
[ "func", "NewDecbufUvarintAt", "(", "bs", "ByteSlice", ",", "off", "int", ",", "castagnoliTable", "*", "crc32", ".", "Table", ")", "Decbuf", "{", "if", "bs", ".", "Len", "(", ")", "<", "off", "+", "binary", ".", "MaxVarintLen32", "{", "return", "Decbuf", ...
// NewDecbufUvarintAt returns a new decoding buffer. It expects the first bytes
// after offset to hold the uvarint-encoded buffers length, followed by the contents and the expected
// checksum.
[ "NewDecbufUvarintAt", "returns", "a", "new", "decoding", "buffer", ".", "It", "expects", "the", "first", "bytes", "after", "offset", "to", "hold", "the", "uvarint", "-", "encoded", "buffers", "length", "followed", "by", "the", "contents", "and", "the", "expect...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/encoding/encoding.go#L121-L146
train
prometheus/tsdb
encoding/encoding.go
Crc32
func (d *Decbuf) Crc32(castagnoliTable *crc32.Table) uint32 {
	return crc32.Checksum(d.B, castagnoliTable)
}
go
[ "func", "(", "d", "*", "Decbuf", ")", "Crc32", "(", "castagnoliTable", "*", "crc32", ".", "Table", ")", "uint32", "{", "return", "crc32", ".", "Checksum", "(", "d", ".", "B", ",", "castagnoliTable", ")", "\n", "}" ]
// Crc32 returns a CRC32 checksum over the remaining bytes.
[ "Crc32", "returns", "a", "CRC32", "checksum", "over", "the", "remaining", "bytes", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/encoding/encoding.go#L153-L155
train
prometheus/tsdb
block.go
OpenBlock
func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	var closers []io.Closer
	defer func() {
		if err != nil {
			var merr tsdb_errors.MultiError
			merr.Add(err)
			merr.Add(closeAll(closers))
			err = merr.Err()
		}
	}()
	meta, err := readMetaFile(dir)
	if err != nil {
		return nil, err
	}

	cr, err := chunks.NewDirReader(chunkDir(dir), pool)
	if err != nil {
		return nil, err
	}
	closers = append(closers, cr)

	ir, err := index.NewFileReader(filepath.Join(dir, indexFilename))
	if err != nil {
		return nil, err
	}
	closers = append(closers, ir)

	tr, tsr, err := readTombstones(dir)
	if err != nil {
		return nil, err
	}
	closers = append(closers, tr)

	// TODO refactor to set this at block creation time as
	// that would be the logical place for a block size to be calculated.
	bs := blockSize(cr, ir, tsr)
	meta.Stats.NumBytes = bs
	err = writeMetaFile(logger, dir, meta)
	if err != nil {
		level.Warn(logger).Log("msg", "couldn't write the meta file for the block size", "block", dir, "err", err)
	}

	pb = &Block{
		dir:             dir,
		meta:            *meta,
		chunkr:          cr,
		indexr:          ir,
		tombstones:      tr,
		symbolTableSize: ir.SymbolTableSize(),
		logger:          logger,
	}
	return pb, nil
}
go
[ "func", "OpenBlock", "(", "logger", "log", ".", "Logger", ",", "dir", "string", ",", "pool", "chunkenc", ".", "Pool", ")", "(", "pb", "*", "Block", ",", "err", "error", ")", "{", "if", "logger", "==", "nil", "{", "logger", "=", "log", ".", "NewNopL...
// OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used
// to instantiate chunk structs.
[ "OpenBlock", "opens", "the", "block", "in", "the", "directory", ".", "It", "can", "be", "passed", "a", "chunk", "pool", "which", "is", "used", "to", "instantiate", "chunk", "structs", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/block.go#L292-L347
train
prometheus/tsdb
block.go
Close
func (pb *Block) Close() error {
	pb.mtx.Lock()
	pb.closing = true
	pb.mtx.Unlock()

	pb.pendingReaders.Wait()

	var merr tsdb_errors.MultiError

	merr.Add(pb.chunkr.Close())
	merr.Add(pb.indexr.Close())
	merr.Add(pb.tombstones.Close())

	return merr.Err()
}
go
[ "func", "(", "pb", "*", "Block", ")", "Close", "(", ")", "error", "{", "pb", ".", "mtx", ".", "Lock", "(", ")", "\n", "pb", ".", "closing", "=", "true", "\n", "pb", ".", "mtx", ".", "Unlock", "(", ")", "\n", "pb", ".", "pendingReaders", ".", ...
// Close closes the on-disk block. It blocks as long as there are readers reading from the block.
[ "Close", "closes", "the", "on", "-", "disk", "block", ".", "It", "blocks", "as", "long", "as", "there", "are", "readers", "reading", "from", "the", "block", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/block.go#L360-L374
train
prometheus/tsdb
block.go
Index
func (pb *Block) Index() (IndexReader, error) {
	if err := pb.startRead(); err != nil {
		return nil, err
	}
	return blockIndexReader{ir: pb.indexr, b: pb}, nil
}
go
[ "func", "(", "pb", "*", "Block", ")", "Index", "(", ")", "(", "IndexReader", ",", "error", ")", "{", "if", "err", ":=", "pb", ".", "startRead", "(", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "return", "blo...
// Index returns a new IndexReader against the block data.
[ "Index", "returns", "a", "new", "IndexReader", "against", "the", "block", "data", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/block.go#L410-L415
train
prometheus/tsdb
block.go
Chunks
func (pb *Block) Chunks() (ChunkReader, error) {
	if err := pb.startRead(); err != nil {
		return nil, err
	}
	return blockChunkReader{ChunkReader: pb.chunkr, b: pb}, nil
}
go
[ "func", "(", "pb", "*", "Block", ")", "Chunks", "(", ")", "(", "ChunkReader", ",", "error", ")", "{", "if", "err", ":=", "pb", ".", "startRead", "(", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "return", "bl...
// Chunks returns a new ChunkReader against the block data.
[ "Chunks", "returns", "a", "new", "ChunkReader", "against", "the", "block", "data", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/block.go#L418-L423
train
prometheus/tsdb
block.go
Tombstones
func (pb *Block) Tombstones() (TombstoneReader, error) {
	if err := pb.startRead(); err != nil {
		return nil, err
	}
	return blockTombstoneReader{TombstoneReader: pb.tombstones, b: pb}, nil
}
go
[ "func", "(", "pb", "*", "Block", ")", "Tombstones", "(", ")", "(", "TombstoneReader", ",", "error", ")", "{", "if", "err", ":=", "pb", ".", "startRead", "(", ")", ";", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", "\n", "return...
// Tombstones returns a new TombstoneReader against the block data.
[ "Tombstones", "returns", "a", "new", "TombstoneReader", "against", "the", "block", "data", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/block.go#L426-L431
train
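Taken together, OpenBlock and the Index/Chunks/Tombstones accessors above follow a plain open/read/close pattern, with Close blocking until every reader has been released. A hedged sketch of that lifecycle (the block directory name is purely illustrative):

package main

import (
	"log"

	"github.com/prometheus/tsdb"
	"github.com/prometheus/tsdb/chunkenc"
)

func main() {
	// A nil logger is allowed; OpenBlock substitutes a no-op logger.
	block, err := tsdb.OpenBlock(nil, "data/01D0000000000000000000000", chunkenc.NewPool())
	if err != nil {
		log.Fatalf("open block: %v", err)
	}
	// Close waits for the readers obtained below to be released.
	defer block.Close()

	ir, err := block.Index()
	if err != nil {
		log.Fatalf("index reader: %v", err)
	}
	defer ir.Close()

	cr, err := block.Chunks()
	if err != nil {
		log.Fatalf("chunk reader: %v", err)
	}
	defer cr.Close()

	// ... iterate postings via ir and load chunks via cr ...
}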
prometheus/tsdb
block.go
Delete
func (pb *Block) Delete(mint, maxt int64, ms ...labels.Matcher) error {
	pb.mtx.Lock()
	defer pb.mtx.Unlock()

	if pb.closing {
		return ErrClosing
	}

	p, err := PostingsForMatchers(pb.indexr, ms...)
	if err != nil {
		return errors.Wrap(err, "select series")
	}

	ir := pb.indexr

	// Choose only valid postings which have chunks in the time-range.
	stones := newMemTombstones()

	var lset labels.Labels
	var chks []chunks.Meta

Outer:
	for p.Next() {
		err := ir.Series(p.At(), &lset, &chks)
		if err != nil {
			return err
		}

		for _, chk := range chks {
			if chk.OverlapsClosedInterval(mint, maxt) {
				// Delete only until the current values and not beyond.
				tmin, tmax := clampInterval(mint, maxt, chks[0].MinTime, chks[len(chks)-1].MaxTime)
				stones.addInterval(p.At(), Interval{tmin, tmax})
				continue Outer
			}
		}
	}

	if p.Err() != nil {
		return p.Err()
	}

	err = pb.tombstones.Iter(func(id uint64, ivs Intervals) error {
		for _, iv := range ivs {
			stones.addInterval(id, iv)
		}
		return nil
	})
	if err != nil {
		return err
	}
	pb.tombstones = stones
	pb.meta.Stats.NumTombstones = pb.tombstones.Total()

	if err := writeTombstoneFile(pb.logger, pb.dir, pb.tombstones); err != nil {
		return err
	}
	return writeMetaFile(pb.logger, pb.dir, &pb.meta)
}
go
[ "func", "(", "pb", "*", "Block", ")", "Delete", "(", "mint", ",", "maxt", "int64", ",", "ms", "...", "labels", ".", "Matcher", ")", "error", "{", "pb", ".", "mtx", ".", "Lock", "(", ")", "\n", "defer", "pb", ".", "mtx", ".", "Unlock", "(", ")",...
// Delete matching series between mint and maxt in the block.
[ "Delete", "matching", "series", "between", "mint", "and", "maxt", "in", "the", "block", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/block.go#L510-L568
train
prometheus/tsdb
block.go
Snapshot
func (pb *Block) Snapshot(dir string) error {
	blockDir := filepath.Join(dir, pb.meta.ULID.String())
	if err := os.MkdirAll(blockDir, 0777); err != nil {
		return errors.Wrap(err, "create snapshot block dir")
	}

	chunksDir := chunkDir(blockDir)
	if err := os.MkdirAll(chunksDir, 0777); err != nil {
		return errors.Wrap(err, "create snapshot chunk dir")
	}

	// Hardlink meta, index and tombstones
	for _, fname := range []string{
		metaFilename,
		indexFilename,
		tombstoneFilename,
	} {
		if err := os.Link(filepath.Join(pb.dir, fname), filepath.Join(blockDir, fname)); err != nil {
			return errors.Wrapf(err, "create snapshot %s", fname)
		}
	}

	// Hardlink the chunks
	curChunkDir := chunkDir(pb.dir)
	files, err := ioutil.ReadDir(curChunkDir)
	if err != nil {
		return errors.Wrap(err, "ReadDir the current chunk dir")
	}

	for _, f := range files {
		err := os.Link(filepath.Join(curChunkDir, f.Name()), filepath.Join(chunksDir, f.Name()))
		if err != nil {
			return errors.Wrap(err, "hardlink a chunk")
		}
	}

	return nil
}
go
[ "func", "(", "pb", "*", "Block", ")", "Snapshot", "(", "dir", "string", ")", "error", "{", "blockDir", ":=", "filepath", ".", "Join", "(", "dir", ",", "pb", ".", "meta", ".", "ULID", ".", "String", "(", ")", ")", "\n", "if", "err", ":=", "os", ...
// Snapshot creates a snapshot of the block into dir.
[ "Snapshot", "creates", "snapshot", "of", "the", "block", "into", "dir", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/block.go#L595-L632
train
prometheus/tsdb
wal.go
OpenSegmentWAL
func OpenSegmentWAL(dir string, logger log.Logger, flushInterval time.Duration, r prometheus.Registerer) (*SegmentWAL, error) {
	if err := os.MkdirAll(dir, 0777); err != nil {
		return nil, err
	}
	df, err := fileutil.OpenDir(dir)
	if err != nil {
		return nil, err
	}
	if logger == nil {
		logger = log.NewNopLogger()
	}

	w := &SegmentWAL{
		dirFile:       df,
		logger:        logger,
		flushInterval: flushInterval,
		donec:         make(chan struct{}),
		stopc:         make(chan struct{}),
		actorc:        make(chan func() error, 1),
		segmentSize:   walSegmentSizeBytes,
		crc32:         newCRC32(),
	}
	w.metrics = newWalMetrics(w, r)

	fns, err := sequenceFiles(w.dirFile.Name())
	if err != nil {
		return nil, err
	}

	for i, fn := range fns {
		f, err := w.openSegmentFile(fn)
		if err == nil {
			w.files = append(w.files, newSegmentFile(f))
			continue
		}
		level.Warn(logger).Log("msg", "invalid segment file detected, truncating WAL", "err", err, "file", fn)

		for _, fn := range fns[i:] {
			if err := os.Remove(fn); err != nil {
				return w, errors.Wrap(err, "removing segment failed")
			}
		}
		break
	}

	go w.run(flushInterval)

	return w, nil
}
go
[ "func", "OpenSegmentWAL", "(", "dir", "string", ",", "logger", "log", ".", "Logger", ",", "flushInterval", "time", ".", "Duration", ",", "r", "prometheus", ".", "Registerer", ")", "(", "*", "SegmentWAL", ",", "error", ")", "{", "if", "err", ":=", "os", ...
// OpenSegmentWAL opens or creates a write ahead log in the given directory.
// The WAL must be read completely before new data is written.
[ "OpenSegmentWAL", "opens", "or", "creates", "a", "write", "ahead", "log", "in", "the", "given", "directory", ".", "The", "WAL", "must", "be", "read", "completely", "before", "new", "data", "is", "written", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L184-L232
train
prometheus/tsdb
wal.go
truncate
func (w *SegmentWAL) truncate(err error, file int, lastOffset int64) error {
	level.Error(w.logger).Log("msg", "WAL corruption detected; truncating",
		"err", err, "file", w.files[file].Name(), "pos", lastOffset)

	// Close and delete all files after the current one.
	for _, f := range w.files[file+1:] {
		if err := f.Close(); err != nil {
			return err
		}
		if err := os.Remove(f.Name()); err != nil {
			return err
		}
	}
	w.mtx.Lock()
	defer w.mtx.Unlock()

	w.files = w.files[:file+1]

	// Seek the current file to the last valid offset where we continue writing from.
	_, err = w.files[file].Seek(lastOffset, io.SeekStart)
	return err
}
go
[ "func", "(", "w", "*", "SegmentWAL", ")", "truncate", "(", "err", "error", ",", "file", "int", ",", "lastOffset", "int64", ")", "error", "{", "level", ".", "Error", "(", "w", ".", "logger", ")", ".", "Log", "(", "\"msg\"", ",", "\"WAL corruption detect...
// truncate the WAL after the last valid entry.
[ "truncate", "the", "WAL", "after", "the", "last", "valid", "entry", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L259-L280
train
prometheus/tsdb
wal.go
Reader
func (w *SegmentWAL) Reader() WALReader {
	return &repairingWALReader{
		wal: w,
		r:   newWALReader(w.files, w.logger),
	}
}
go
[ "func", "(", "w", "*", "SegmentWAL", ")", "Reader", "(", ")", "WALReader", "{", "return", "&", "repairingWALReader", "{", "wal", ":", "w", ",", "r", ":", "newWALReader", "(", "w", ".", "files", ",", "w", ".", "logger", ")", ",", "}", "\n", "}" ]
// Reader returns a new reader over the write ahead log data.
// It must be completely consumed before writing to the WAL.
[ "Reader", "returns", "a", "new", "reader", "over", "the", "the", "write", "ahead", "log", "data", ".", "It", "must", "be", "completely", "consumed", "before", "writing", "to", "the", "WAL", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L284-L289
train
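The WAL rows above imply a strict protocol: open the log, fully consume the reader, and only then append new records. A hedged lifecycle sketch follows; the callback signature is taken from the MigrateWAL row further below, while the RefSample field names (Ref, T, V) are assumptions about the struct definition.

package main

import (
	"log"
	"time"

	"github.com/prometheus/tsdb"
)

func main() {
	w, err := tsdb.OpenSegmentWAL("data/wal", nil, 5*time.Second, nil)
	if err != nil {
		log.Fatalf("open WAL: %v", err)
	}
	defer w.Close()

	// The WAL must be read completely before anything new is written.
	err = w.Reader().Read(
		func(series []tsdb.RefSeries) { /* rebuild series from the log */ },
		func(samples []tsdb.RefSample) { /* replay samples */ },
		func(stones []tsdb.Stone) { /* replay deletes */ },
	)
	if err != nil {
		log.Fatalf("replay WAL: %v", err)
	}

	// Now it is safe to append; the Ref/T/V field names are assumed.
	if err := w.LogSamples([]tsdb.RefSample{{Ref: 1, T: time.Now().UnixNano() / 1e6, V: 42}}); err != nil {
		log.Fatalf("log samples: %v", err)
	}
	if err := w.Sync(); err != nil {
		log.Fatalf("sync WAL: %v", err)
	}
}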
prometheus/tsdb
wal.go
LogSeries
func (w *SegmentWAL) LogSeries(series []RefSeries) error {
	buf := w.getBuffer()

	flag := w.encodeSeries(buf, series)

	w.mtx.Lock()
	defer w.mtx.Unlock()

	err := w.write(WALEntrySeries, flag, buf.Get())

	w.putBuffer(buf)

	if err != nil {
		return errors.Wrap(err, "log series")
	}

	tf := w.head()

	for _, s := range series {
		if tf.minSeries > s.Ref {
			tf.minSeries = s.Ref
		}
	}
	return nil
}
go
[ "func", "(", "w", "*", "SegmentWAL", ")", "LogSeries", "(", "series", "[", "]", "RefSeries", ")", "error", "{", "buf", ":=", "w", ".", "getBuffer", "(", ")", "\n", "flag", ":=", "w", ".", "encodeSeries", "(", "buf", ",", "series", ")", "\n", "w", ...
// LogSeries writes a batch of new series labels to the log.
// The series have to be ordered.
[ "LogSeries", "writes", "a", "batch", "of", "new", "series", "labels", "to", "the", "log", ".", "The", "series", "have", "to", "be", "ordered", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L429-L453
train
prometheus/tsdb
wal.go
LogSamples
func (w *SegmentWAL) LogSamples(samples []RefSample) error {
	buf := w.getBuffer()

	flag := w.encodeSamples(buf, samples)

	w.mtx.Lock()
	defer w.mtx.Unlock()

	err := w.write(WALEntrySamples, flag, buf.Get())

	w.putBuffer(buf)

	if err != nil {
		return errors.Wrap(err, "log series")
	}
	tf := w.head()

	for _, s := range samples {
		if tf.maxTime < s.T {
			tf.maxTime = s.T
		}
	}
	return nil
}
go
[ "func", "(", "w", "*", "SegmentWAL", ")", "LogSamples", "(", "samples", "[", "]", "RefSample", ")", "error", "{", "buf", ":=", "w", ".", "getBuffer", "(", ")", "\n", "flag", ":=", "w", ".", "encodeSamples", "(", "buf", ",", "samples", ")", "\n", "w...
// LogSamples writes a batch of new samples to the log.
[ "LogSamples", "writes", "a", "batch", "of", "new", "samples", "to", "the", "log", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L456-L479
train
prometheus/tsdb
wal.go
LogDeletes
func (w *SegmentWAL) LogDeletes(stones []Stone) error {
	buf := w.getBuffer()

	flag := w.encodeDeletes(buf, stones)

	w.mtx.Lock()
	defer w.mtx.Unlock()

	err := w.write(WALEntryDeletes, flag, buf.Get())

	w.putBuffer(buf)

	if err != nil {
		return errors.Wrap(err, "log series")
	}
	tf := w.head()

	for _, s := range stones {
		for _, iv := range s.intervals {
			if tf.maxTime < iv.Maxt {
				tf.maxTime = iv.Maxt
			}
		}
	}
	return nil
}
go
[ "func", "(", "w", "*", "SegmentWAL", ")", "LogDeletes", "(", "stones", "[", "]", "Stone", ")", "error", "{", "buf", ":=", "w", ".", "getBuffer", "(", ")", "\n", "flag", ":=", "w", ".", "encodeDeletes", "(", "buf", ",", "stones", ")", "\n", "w", "...
// LogDeletes writes a batch of new deletes to the log.
[ "LogDeletes", "write", "a", "batch", "of", "new", "deletes", "to", "the", "log", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L482-L507
train
prometheus/tsdb
wal.go
openSegmentFile
func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) {
	// We must open all files in read/write mode as we may have to truncate along
	// the way and any file may become the head.
	f, err := os.OpenFile(name, os.O_RDWR, 0666)
	if err != nil {
		return nil, err
	}
	metab := make([]byte, 8)

	// If there is an error, we need close f for platform windows before gc.
	// Otherwise, file op may fail.
	hasError := true
	defer func() {
		if hasError {
			f.Close()
		}
	}()

	if n, err := f.Read(metab); err != nil {
		return nil, errors.Wrapf(err, "validate meta %q", f.Name())
	} else if n != 8 {
		return nil, errors.Errorf("invalid header size %d in %q", n, f.Name())
	}

	if m := binary.BigEndian.Uint32(metab[:4]); m != WALMagic {
		return nil, errors.Errorf("invalid magic header %x in %q", m, f.Name())
	}
	if metab[4] != WALFormatDefault {
		return nil, errors.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name())
	}
	hasError = false
	return f, nil
}
go
[ "func", "(", "w", "*", "SegmentWAL", ")", "openSegmentFile", "(", "name", "string", ")", "(", "*", "os", ".", "File", ",", "error", ")", "{", "f", ",", "err", ":=", "os", ".", "OpenFile", "(", "name", ",", "os", ".", "O_RDWR", ",", "0666", ")", ...
// openSegmentFile opens the given segment file and consumes and validates header.
[ "openSegmentFile", "opens", "the", "given", "segment", "file", "and", "consumes", "and", "validates", "header", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L510-L542
train
prometheus/tsdb
wal.go
createSegmentFile
func (w *SegmentWAL) createSegmentFile(name string) (*os.File, error) {
	f, err := os.Create(name)
	if err != nil {
		return nil, err
	}
	if err = fileutil.Preallocate(f, w.segmentSize, true); err != nil {
		return nil, err
	}
	// Write header metadata for new file.
	metab := make([]byte, 8)
	binary.BigEndian.PutUint32(metab[:4], WALMagic)
	metab[4] = WALFormatDefault

	if _, err := f.Write(metab); err != nil {
		return nil, err
	}
	return f, err
}
go
[ "func", "(", "w", "*", "SegmentWAL", ")", "createSegmentFile", "(", "name", "string", ")", "(", "*", "os", ".", "File", ",", "error", ")", "{", "f", ",", "err", ":=", "os", ".", "Create", "(", "name", ")", "\n", "if", "err", "!=", "nil", "{", "...
// createSegmentFile creates a new segment file with the given name. It preallocates
// the standard segment size if possible and writes the header.
[ "createSegmentFile", "creates", "a", "new", "segment", "file", "with", "the", "given", "name", ".", "It", "preallocates", "the", "standard", "segment", "size", "if", "possible", "and", "writes", "the", "header", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L546-L563
train
prometheus/tsdb
wal.go
cut
func (w *SegmentWAL) cut() error {
	// Sync current head to disk and close.
	if hf := w.head(); hf != nil {
		if err := w.flush(); err != nil {
			return err
		}
		// Finish last segment asynchronously to not block the WAL moving along
		// in the new segment.
		go func() {
			w.actorc <- func() error {
				off, err := hf.Seek(0, io.SeekCurrent)
				if err != nil {
					return errors.Wrapf(err, "finish old segment %s", hf.Name())
				}
				if err := hf.Truncate(off); err != nil {
					return errors.Wrapf(err, "finish old segment %s", hf.Name())
				}
				if err := hf.Sync(); err != nil {
					return errors.Wrapf(err, "finish old segment %s", hf.Name())
				}
				if err := hf.Close(); err != nil {
					return errors.Wrapf(err, "finish old segment %s", hf.Name())
				}
				return nil
			}
		}()
	}

	p, _, err := nextSequenceFile(w.dirFile.Name())
	if err != nil {
		return err
	}
	f, err := w.createSegmentFile(p)
	if err != nil {
		return err
	}

	go func() {
		w.actorc <- func() error {
			return errors.Wrap(w.dirFile.Sync(), "sync WAL directory")
		}
	}()

	w.files = append(w.files, newSegmentFile(f))

	// TODO(gouthamve): make the buffer size a constant.
	w.cur = bufio.NewWriterSize(f, 8*1024*1024)
	w.curN = 8

	return nil
}
go
[ "func", "(", "w", "*", "SegmentWAL", ")", "cut", "(", ")", "error", "{", "if", "hf", ":=", "w", ".", "head", "(", ")", ";", "hf", "!=", "nil", "{", "if", "err", ":=", "w", ".", "flush", "(", ")", ";", "err", "!=", "nil", "{", "return", "err...
// cut finishes the currently active segments and opens the next one.
// The encoder is reset to point to the new segment.
[ "cut", "finishes", "the", "currently", "active", "segments", "and", "opens", "the", "next", "one", ".", "The", "encoder", "is", "reset", "to", "point", "to", "the", "new", "segment", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L567-L617
train
prometheus/tsdb
wal.go
Sync
func (w *SegmentWAL) Sync() error {
	var head *segmentFile
	var err error

	// Flush the writer and retrieve the reference to the head segment under mutex lock.
	func() {
		w.mtx.Lock()
		defer w.mtx.Unlock()
		if err = w.flush(); err != nil {
			return
		}
		head = w.head()
	}()
	if err != nil {
		return errors.Wrap(err, "flush buffer")
	}
	if head != nil {
		// But only fsync the head segment after releasing the mutex as it will block on disk I/O.
		start := time.Now()
		err := fileutil.Fdatasync(head.File)
		w.metrics.fsyncDuration.Observe(time.Since(start).Seconds())
		return err
	}
	return nil
}
go
[ "func", "(", "w", "*", "SegmentWAL", ")", "Sync", "(", ")", "error", "{", "var", "head", "*", "segmentFile", "\n", "var", "err", "error", "\n", "func", "(", ")", "{", "w", ".", "mtx", ".", "Lock", "(", ")", "\n", "defer", "w", ".", "mtx", ".", ...
// Sync flushes the changes to disk.
[ "Sync", "flushes", "the", "changes", "to", "disk", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L627-L651
train
prometheus/tsdb
wal.go
Close
func (w *SegmentWAL) Close() error {
	// Make sure you can call Close() multiple times.
	select {
	case <-w.stopc:
		return nil // Already closed.
	default:
	}
	close(w.stopc)
	<-w.donec

	w.mtx.Lock()
	defer w.mtx.Unlock()

	if err := w.sync(); err != nil {
		return err
	}
	// On opening, a WAL must be fully consumed once. Afterwards
	// only the current segment will still be open.
	if hf := w.head(); hf != nil {
		if err := hf.Close(); err != nil {
			return errors.Wrapf(err, "closing WAL head %s", hf.Name())
		}
	}

	return errors.Wrapf(w.dirFile.Close(), "closing WAL dir %s", w.dirFile.Name())
}
go
[ "func", "(", "w", "*", "SegmentWAL", ")", "Close", "(", ")", "error", "{", "select", "{", "case", "<-", "w", ".", "stopc", ":", "return", "nil", "\n", "default", ":", "}", "\n", "close", "(", "w", ".", "stopc", ")", "\n", "<-", "w", ".", "donec...
// Close syncs all data and closes the underlying resources.
[ "Close", "syncs", "all", "data", "and", "closes", "the", "underlying", "resources", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L711-L737
train
prometheus/tsdb
wal.go
next
func (r *walReader) next() bool {
	if r.cur >= len(r.files) {
		return false
	}
	cf := r.files[r.cur]

	// Remember the offset after the last correctly read entry. If the next one
	// is corrupted, this is where we can safely truncate.
	r.lastOffset, r.err = cf.Seek(0, io.SeekCurrent)
	if r.err != nil {
		return false
	}

	et, flag, b, err := r.entry(cf)
	// If we reached the end of the reader, advance to the next one
	// and close.
	// Do not close on the last one as it will still be appended to.
	if err == io.EOF {
		if r.cur == len(r.files)-1 {
			return false
		}
		// Current reader completed, close and move to the next one.
		if err := cf.Close(); err != nil {
			r.err = err
			return false
		}
		r.cur++
		return r.next()
	}
	if err != nil {
		r.err = err
		return false
	}

	r.curType = et
	r.curFlag = flag
	r.curBuf = b
	return r.err == nil
}
go
[ "func", "(", "r", "*", "walReader", ")", "next", "(", ")", "bool", "{", "if", "r", ".", "cur", ">=", "len", "(", "r", ".", "files", ")", "{", "return", "false", "\n", "}", "\n", "cf", ":=", "r", ".", "files", "[", "r", ".", "cur", "]", "\n"...
// next decodes the next entry pair and returns true
// if it was successful.
[ "next", "returns", "decodes", "the", "next", "entry", "pair", "and", "returns", "true", "if", "it", "was", "successful", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L1013-L1051
train
prometheus/tsdb
wal.go
MigrateWAL
func MigrateWAL(logger log.Logger, dir string) (err error) {
	if logger == nil {
		logger = log.NewNopLogger()
	}
	if exists, err := deprecatedWALExists(logger, dir); err != nil || !exists {
		return err
	}
	level.Info(logger).Log("msg", "migrating WAL format")

	tmpdir := dir + ".tmp"
	if err := os.RemoveAll(tmpdir); err != nil {
		return errors.Wrap(err, "cleanup replacement dir")
	}
	repl, err := wal.New(logger, nil, tmpdir)
	if err != nil {
		return errors.Wrap(err, "open new WAL")
	}

	// It should've already been closed as part of the previous finalization.
	// Do it once again in case of prior errors.
	defer func() {
		if err != nil {
			repl.Close()
		}
	}()

	w, err := OpenSegmentWAL(dir, logger, time.Minute, nil)
	if err != nil {
		return errors.Wrap(err, "open old WAL")
	}
	defer w.Close()

	rdr := w.Reader()

	var (
		enc RecordEncoder
		b   []byte
	)
	decErr := rdr.Read(
		func(s []RefSeries) {
			if err != nil {
				return
			}
			err = repl.Log(enc.Series(s, b[:0]))
		},
		func(s []RefSample) {
			if err != nil {
				return
			}
			err = repl.Log(enc.Samples(s, b[:0]))
		},
		func(s []Stone) {
			if err != nil {
				return
			}
			err = repl.Log(enc.Tombstones(s, b[:0]))
		},
	)
	if decErr != nil {
		return errors.Wrap(err, "decode old entries")
	}
	if err != nil {
		return errors.Wrap(err, "write new entries")
	}
	// We explicitly close even when there is a defer for Windows to be
	// able to delete it. The defer is in place to close it in-case there
	// are errors above.
	if err := w.Close(); err != nil {
		return errors.Wrap(err, "close old WAL")
	}
	if err := repl.Close(); err != nil {
		return errors.Wrap(err, "close new WAL")
	}
	if err := fileutil.Replace(tmpdir, dir); err != nil {
		return errors.Wrap(err, "replace old WAL")
	}
	return nil
}
go
func MigrateWAL(logger log.Logger, dir string) (err error) { if logger == nil { logger = log.NewNopLogger() } if exists, err := deprecatedWALExists(logger, dir); err != nil || !exists { return err } level.Info(logger).Log("msg", "migrating WAL format") tmpdir := dir + ".tmp" if err := os.RemoveAll(tmpdir); err != nil { return errors.Wrap(err, "cleanup replacement dir") } repl, err := wal.New(logger, nil, tmpdir) if err != nil { return errors.Wrap(err, "open new WAL") } // It should've already been closed as part of the previous finalization. // Do it once again in case of prior errors. defer func() { if err != nil { repl.Close() } }() w, err := OpenSegmentWAL(dir, logger, time.Minute, nil) if err != nil { return errors.Wrap(err, "open old WAL") } defer w.Close() rdr := w.Reader() var ( enc RecordEncoder b []byte ) decErr := rdr.Read( func(s []RefSeries) { if err != nil { return } err = repl.Log(enc.Series(s, b[:0])) }, func(s []RefSample) { if err != nil { return } err = repl.Log(enc.Samples(s, b[:0])) }, func(s []Stone) { if err != nil { return } err = repl.Log(enc.Tombstones(s, b[:0])) }, ) if decErr != nil { return errors.Wrap(decErr, "decode old entries") } if err != nil { return errors.Wrap(err, "write new entries") } // We explicitly close even when there is a defer for Windows to be // able to delete it. The defer is in place to close it in-case there // are errors above. if err := w.Close(); err != nil { return errors.Wrap(err, "close old WAL") } if err := repl.Close(); err != nil { return errors.Wrap(err, "close new WAL") } if err := fileutil.Replace(tmpdir, dir); err != nil { return errors.Wrap(err, "replace old WAL") } return nil }
[ "func", "MigrateWAL", "(", "logger", "log", ".", "Logger", ",", "dir", "string", ")", "(", "err", "error", ")", "{", "if", "logger", "==", "nil", "{", "logger", "=", "log", ".", "NewNopLogger", "(", ")", "\n", "}", "\n", "if", "exists", ",", "err",...
// MigrateWAL rewrites the deprecated write ahead log into the new format.
[ "MigrateWAL", "rewrites", "the", "deprecated", "write", "ahead", "log", "into", "the", "new", "format", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal.go#L1235-L1312
train
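MigrateWAL is safe to call unconditionally at startup, since it returns nil when no deprecated-format WAL is found. A minimal sketch, with a hypothetical data directory; log.NewNopLogger is the go-kit constructor already used by the function itself, and a nil logger is also accepted.

logger := log.NewNopLogger() // go-kit logger; nil is also accepted
if err := tsdb.MigrateWAL(logger, "data/wal"); err != nil {
	return errors.Wrap(err, "migrate WAL")
}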
prometheus/tsdb
db.go
Appender
func (db *DB) Appender() Appender { return dbAppender{db: db, Appender: db.head.Appender()} }
go
func (db *DB) Appender() Appender { return dbAppender{db: db, Appender: db.head.Appender()} }
[ "func", "(", "db", "*", "DB", ")", "Appender", "(", ")", "Appender", "{", "return", "dbAppender", "{", "db", ":", "db", ",", "Appender", ":", "db", ".", "head", ".", "Appender", "(", ")", "}", "\n", "}" ]
// Appender opens a new appender against the database.
[ "Appender", "opens", "a", "new", "appender", "against", "the", "database", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L379-L381
train
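A short usage sketch for the append path. It assumes an already-open *tsdb.DB and the labels helper from github.com/prometheus/tsdb/labels; Add, Commit and Rollback come from the Appender interface in this package.

// appendSample is a sketch; db is assumed to be an open *tsdb.DB.
func appendSample(db *tsdb.DB) error {
	app := db.Appender()
	// Add returns a series reference that can later be passed to
	// AddFast for cheaper appends to the same series.
	ref, err := app.Add(labels.FromStrings("job", "demo"), 1000, 3.14)
	if err != nil {
		app.Rollback()
		return err
	}
	_ = ref
	// Commit makes the appended samples visible to queriers.
	return app.Commit()
}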
prometheus/tsdb
db.go
compact
func (db *DB) compact() (err error) { db.cmtx.Lock() defer db.cmtx.Unlock() // Check whether we have pending head blocks that are ready to be persisted. // They have the highest priority. for { select { case <-db.stopc: return nil default: } if !db.head.compactable() { break } mint := db.head.MinTime() maxt := rangeForTimestamp(mint, db.head.chunkRange) // Wrap head into a range that bounds all reads to it. head := &rangeHead{ head: db.head, mint: mint, // We remove 1 millisecond from maxt because block // intervals are half-open: [b.MinTime, b.MaxTime). But // chunk intervals are closed: [c.MinTime, c.MaxTime]; // so in order to make sure that overlaps are evaluated // consistently, we explicitly remove the last value // from the block interval here. maxt: maxt - 1, } uid, err := db.compactor.Write(db.dir, head, mint, maxt, nil) if err != nil { return errors.Wrap(err, "persist head block") } runtime.GC() if err := db.reload(); err != nil { if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { return errors.Wrapf(err, "delete persisted head block after failed db reload:%s", uid) } return errors.Wrap(err, "reload blocks") } if (uid == ulid.ULID{}) { // Compaction resulted in an empty block. // Head truncating during db.reload() depends on the persisted blocks and // in this case no new block will be persisted so manually truncate the head. if err = db.head.Truncate(maxt); err != nil { return errors.Wrap(err, "head truncate failed (in compact)") } } runtime.GC() } // Check for compactions of multiple blocks. for { plan, err := db.compactor.Plan(db.dir) if err != nil { return errors.Wrap(err, "plan compaction") } if len(plan) == 0 { break } select { case <-db.stopc: return nil default: } uid, err := db.compactor.Compact(db.dir, plan, db.blocks) if err != nil { return errors.Wrapf(err, "compact %s", plan) } runtime.GC() if err := db.reload(); err != nil { if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { return errors.Wrapf(err, "delete compacted block after failed db reload:%s", uid) } return errors.Wrap(err, "reload blocks") } runtime.GC() } return nil }
go
func (db *DB) compact() (err error) { db.cmtx.Lock() defer db.cmtx.Unlock() // Check whether we have pending head blocks that are ready to be persisted. // They have the highest priority. for { select { case <-db.stopc: return nil default: } if !db.head.compactable() { break } mint := db.head.MinTime() maxt := rangeForTimestamp(mint, db.head.chunkRange) // Wrap head into a range that bounds all reads to it. head := &rangeHead{ head: db.head, mint: mint, // We remove 1 millisecond from maxt because block // intervals are half-open: [b.MinTime, b.MaxTime). But // chunk intervals are closed: [c.MinTime, c.MaxTime]; // so in order to make sure that overlaps are evaluated // consistently, we explicitly remove the last value // from the block interval here. maxt: maxt - 1, } uid, err := db.compactor.Write(db.dir, head, mint, maxt, nil) if err != nil { return errors.Wrap(err, "persist head block") } runtime.GC() if err := db.reload(); err != nil { if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { return errors.Wrapf(err, "delete persisted head block after failed db reload:%s", uid) } return errors.Wrap(err, "reload blocks") } if (uid == ulid.ULID{}) { // Compaction resulted in an empty block. // Head truncating during db.reload() depends on the persisted blocks and // in this case no new block will be persisted so manually truncate the head. if err = db.head.Truncate(maxt); err != nil { return errors.Wrap(err, "head truncate failed (in compact)") } } runtime.GC() } // Check for compactions of multiple blocks. for { plan, err := db.compactor.Plan(db.dir) if err != nil { return errors.Wrap(err, "plan compaction") } if len(plan) == 0 { break } select { case <-db.stopc: return nil default: } uid, err := db.compactor.Compact(db.dir, plan, db.blocks) if err != nil { return errors.Wrapf(err, "compact %s", plan) } runtime.GC() if err := db.reload(); err != nil { if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { return errors.Wrapf(err, "delete compacted block after failed db reload:%s", uid) } return errors.Wrap(err, "reload blocks") } runtime.GC() } return nil }
[ "func", "(", "db", "*", "DB", ")", "compact", "(", ")", "(", "err", "error", ")", "{", "db", ".", "cmtx", ".", "Lock", "(", ")", "\n", "defer", "db", ".", "cmtx", ".", "Unlock", "(", ")", "\n", "for", "{", "select", "{", "case", "<-", "db", ...
// Compact data if possible. After successful compaction blocks are reloaded // which will also trigger blocks to be deleted that fall out of the retention // window. // If no blocks are compacted, the retention window state doesn't change. Thus, // this is sufficient to reliably delete old data. // Old blocks are only deleted on reload based on the new block's parent information. // See DB.reload documentation for further information.
[ "Compact", "data", "if", "possible", ".", "After", "successful", "compaction", "blocks", "are", "reloaded", "which", "will", "also", "trigger", "blocks", "to", "be", "deleted", "that", "fall", "out", "of", "the", "retention", "window", ".", "If", "no", "bloc...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L411-L496
train
prometheus/tsdb
db.go
deletableBlocks
func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block { deletable := make(map[ulid.ULID]*Block) // Sort the blocks by time - newest to oldest (largest to smallest timestamp). // This ensures that the retentions will remove the oldest blocks. sort.Slice(blocks, func(i, j int) bool { return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime }) for _, block := range blocks { if block.Meta().Compaction.Deletable { deletable[block.Meta().ULID] = block } } for ulid, block := range db.beyondTimeRetention(blocks) { deletable[ulid] = block } for ulid, block := range db.beyondSizeRetention(blocks) { deletable[ulid] = block } return deletable }
go
func (db *DB) deletableBlocks(blocks []*Block) map[ulid.ULID]*Block { deletable := make(map[ulid.ULID]*Block) // Sort the blocks by time - newest to oldest (largest to smallest timestamp). // This ensures that the retentions will remove the oldest blocks. sort.Slice(blocks, func(i, j int) bool { return blocks[i].Meta().MaxTime > blocks[j].Meta().MaxTime }) for _, block := range blocks { if block.Meta().Compaction.Deletable { deletable[block.Meta().ULID] = block } } for ulid, block := range db.beyondTimeRetention(blocks) { deletable[ulid] = block } for ulid, block := range db.beyondSizeRetention(blocks) { deletable[ulid] = block } return deletable }
[ "func", "(", "db", "*", "DB", ")", "deletableBlocks", "(", "blocks", "[", "]", "*", "Block", ")", "map", "[", "ulid", ".", "ULID", "]", "*", "Block", "{", "deletable", ":=", "make", "(", "map", "[", "ulid", ".", "ULID", "]", "*", "Block", ")", ...
// deletableBlocks returns all blocks past retention policy.
[ "deletableBlocks", "returns", "all", "blocks", "past", "retention", "policy", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L634-L658
train
prometheus/tsdb
db.go
deleteBlocks
func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { for ulid, block := range blocks { if block != nil { if err := block.Close(); err != nil { level.Warn(db.logger).Log("msg", "closing block failed", "err", err) } } if err := os.RemoveAll(filepath.Join(db.dir, ulid.String())); err != nil { return errors.Wrapf(err, "delete obsolete block %s", ulid) } } return nil }
go
func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { for ulid, block := range blocks { if block != nil { if err := block.Close(); err != nil { level.Warn(db.logger).Log("msg", "closing block failed", "err", err) } } if err := os.RemoveAll(filepath.Join(db.dir, ulid.String())); err != nil { return errors.Wrapf(err, "delete obsolete block %s", ulid) } } return nil }
[ "func", "(", "db", "*", "DB", ")", "deleteBlocks", "(", "blocks", "map", "[", "ulid", ".", "ULID", "]", "*", "Block", ")", "error", "{", "for", "ulid", ",", "block", ":=", "range", "blocks", "{", "if", "block", "!=", "nil", "{", "if", "err", ":="...
// deleteBlocks closes and deletes blocks from the disk. // When the map contains a non-nil block object it means it is loaded in memory, so it needs to be closed first as it might need to wait for pending readers to complete.
[ "deleteBlocks", "closes", "and", "deletes", "blocks", "from", "the", "disk", ".", "When", "the", "map", "contains", "a", "non", "-", "nil", "block", "object", "it", "means", "it", "is", "loaded", "in", "memory", "so", "it", "needs", "to", "be", "closed", "first", "...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L706-L718
train
prometheus/tsdb
db.go
validateBlockSequence
func validateBlockSequence(bs []*Block) error { if len(bs) <= 1 { return nil } var metas []BlockMeta for _, b := range bs { metas = append(metas, b.meta) } overlaps := OverlappingBlocks(metas) if len(overlaps) > 0 { return errors.Errorf("block time ranges overlap: %s", overlaps) } return nil }
go
func validateBlockSequence(bs []*Block) error { if len(bs) <= 1 { return nil } var metas []BlockMeta for _, b := range bs { metas = append(metas, b.meta) } overlaps := OverlappingBlocks(metas) if len(overlaps) > 0 { return errors.Errorf("block time ranges overlap: %s", overlaps) } return nil }
[ "func", "validateBlockSequence", "(", "bs", "[", "]", "*", "Block", ")", "error", "{", "if", "len", "(", "bs", ")", "<=", "1", "{", "return", "nil", "\n", "}", "\n", "var", "metas", "[", "]", "BlockMeta", "\n", "for", "_", ",", "b", ":=", "range"...
// validateBlockSequence returns an error if the given block meta files indicate that some blocks overlap within the sequence.
[ "validateBlockSequence", "returns", "an", "error", "if", "the", "given", "block", "meta", "files", "indicate", "that", "some", "blocks", "overlap", "within", "the", "sequence", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L721-L737
train
prometheus/tsdb
db.go
String
func (o Overlaps) String() string { var res []string for r, overlaps := range o { var groups []string for _, m := range overlaps { groups = append(groups, fmt.Sprintf( "<ulid: %s, mint: %d, maxt: %d, range: %s>", m.ULID.String(), m.MinTime, m.MaxTime, (time.Duration((m.MaxTime-m.MinTime)/1000)*time.Second).String(), )) } res = append(res, fmt.Sprintf( "[mint: %d, maxt: %d, range: %s, blocks: %d]: %s", r.Min, r.Max, (time.Duration((r.Max-r.Min)/1000)*time.Second).String(), len(overlaps), strings.Join(groups, ", ")), ) } return strings.Join(res, "\n") }
go
func (o Overlaps) String() string { var res []string for r, overlaps := range o { var groups []string for _, m := range overlaps { groups = append(groups, fmt.Sprintf( "<ulid: %s, mint: %d, maxt: %d, range: %s>", m.ULID.String(), m.MinTime, m.MaxTime, (time.Duration((m.MaxTime-m.MinTime)/1000)*time.Second).String(), )) } res = append(res, fmt.Sprintf( "[mint: %d, maxt: %d, range: %s, blocks: %d]: %s", r.Min, r.Max, (time.Duration((r.Max-r.Min)/1000)*time.Second).String(), len(overlaps), strings.Join(groups, ", ")), ) } return strings.Join(res, "\n") }
[ "func", "(", "o", "Overlaps", ")", "String", "(", ")", "string", "{", "var", "res", "[", "]", "string", "\n", "for", "r", ",", "overlaps", ":=", "range", "o", "{", "var", "groups", "[", "]", "string", "\n", "for", "_", ",", "m", ":=", "range", ...
// String returns a human-readable string form of the overlapped blocks.
[ "String", "returns", "a", "human", "-", "readable", "string", "form", "of", "the", "overlapped", "blocks", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L748-L770
train
prometheus/tsdb
db.go
OverlappingBlocks
func OverlappingBlocks(bm []BlockMeta) Overlaps { if len(bm) <= 1 { return nil } var ( overlaps [][]BlockMeta // pending contains not ended blocks in regards to "current" timestamp. pending = []BlockMeta{bm[0]} // continuousPending helps to aggregate same overlaps to single group. continuousPending = true ) // We have here blocks sorted by minTime. We iterate over each block and treat its minTime as our "current" timestamp. // We check if any of the pending block finished (blocks that we have seen before, but their maxTime was still ahead current // timestamp). If not, it means they overlap with our current block. In the same time current block is assumed pending. for _, b := range bm[1:] { var newPending []BlockMeta for _, p := range pending { // "b.MinTime" is our current time. if b.MinTime >= p.MaxTime { continuousPending = false continue } // "p" overlaps with "b" and "p" is still pending. newPending = append(newPending, p) } // Our block "b" is now pending. pending = append(newPending, b) if len(newPending) == 0 { // No overlaps. continue } if continuousPending && len(overlaps) > 0 { overlaps[len(overlaps)-1] = append(overlaps[len(overlaps)-1], b) continue } overlaps = append(overlaps, append(newPending, b)) // Start new pendings. continuousPending = true } // Fetch the critical overlapped time range foreach overlap groups. overlapGroups := Overlaps{} for _, overlap := range overlaps { minRange := TimeRange{Min: 0, Max: math.MaxInt64} for _, b := range overlap { if minRange.Max > b.MaxTime { minRange.Max = b.MaxTime } if minRange.Min < b.MinTime { minRange.Min = b.MinTime } } overlapGroups[minRange] = overlap } return overlapGroups }
go
func OverlappingBlocks(bm []BlockMeta) Overlaps { if len(bm) <= 1 { return nil } var ( overlaps [][]BlockMeta // pending contains not ended blocks in regards to "current" timestamp. pending = []BlockMeta{bm[0]} // continuousPending helps to aggregate same overlaps to single group. continuousPending = true ) // We have here blocks sorted by minTime. We iterate over each block and treat its minTime as our "current" timestamp. // We check if any of the pending block finished (blocks that we have seen before, but their maxTime was still ahead current // timestamp). If not, it means they overlap with our current block. In the same time current block is assumed pending. for _, b := range bm[1:] { var newPending []BlockMeta for _, p := range pending { // "b.MinTime" is our current time. if b.MinTime >= p.MaxTime { continuousPending = false continue } // "p" overlaps with "b" and "p" is still pending. newPending = append(newPending, p) } // Our block "b" is now pending. pending = append(newPending, b) if len(newPending) == 0 { // No overlaps. continue } if continuousPending && len(overlaps) > 0 { overlaps[len(overlaps)-1] = append(overlaps[len(overlaps)-1], b) continue } overlaps = append(overlaps, append(newPending, b)) // Start new pendings. continuousPending = true } // Fetch the critical overlapped time range foreach overlap groups. overlapGroups := Overlaps{} for _, overlap := range overlaps { minRange := TimeRange{Min: 0, Max: math.MaxInt64} for _, b := range overlap { if minRange.Max > b.MaxTime { minRange.Max = b.MaxTime } if minRange.Min < b.MinTime { minRange.Min = b.MinTime } } overlapGroups[minRange] = overlap } return overlapGroups }
[ "func", "OverlappingBlocks", "(", "bm", "[", "]", "BlockMeta", ")", "Overlaps", "{", "if", "len", "(", "bm", ")", "<=", "1", "{", "return", "nil", "\n", "}", "\n", "var", "(", "overlaps", "[", "]", "[", "]", "BlockMeta", "\n", "pending", "=", "[", ...
// OverlappingBlocks returns all overlapping blocks from given meta files.
[ "OverlappingBlocks", "returns", "all", "overlapping", "blocks", "from", "given", "meta", "files", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L773-L837
train
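A sketch of feeding OverlappingBlocks by hand. The BlockMeta values are zero except for their time ranges (ULIDs are left at their zero value for brevity), and the slice is sorted by MinTime as the function expects; block intervals are half-open, so a block ending at t does not overlap one starting at t.

metas := []tsdb.BlockMeta{
	{MinTime: 0, MaxTime: 100},
	{MinTime: 100, MaxTime: 200}, // touches the previous block: no overlap
	{MinTime: 150, MaxTime: 300}, // starts before 200: overlaps
}
if overlaps := tsdb.OverlappingBlocks(metas); len(overlaps) > 0 {
	fmt.Println(overlaps.String()) // one group covering [150, 200]
}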
prometheus/tsdb
db.go
Blocks
func (db *DB) Blocks() []*Block { db.mtx.RLock() defer db.mtx.RUnlock() return db.blocks }
go
func (db *DB) Blocks() []*Block { db.mtx.RLock() defer db.mtx.RUnlock() return db.blocks }
[ "func", "(", "db", "*", "DB", ")", "Blocks", "(", ")", "[", "]", "*", "Block", "{", "db", ".", "mtx", ".", "RLock", "(", ")", "\n", "defer", "db", ".", "mtx", ".", "RUnlock", "(", ")", "\n", "return", "db", ".", "blocks", "\n", "}" ]
// Blocks returns the persisted blocks of the database.
[ "Blocks", "returns", "the", "persisted", "blocks", "of", "the", "database", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L844-L849
train
prometheus/tsdb
db.go
Close
func (db *DB) Close() error { close(db.stopc) db.compactCancel() <-db.donec db.mtx.Lock() defer db.mtx.Unlock() var g errgroup.Group // blocks also contains all head blocks. for _, pb := range db.blocks { g.Go(pb.Close) } var merr tsdb_errors.MultiError merr.Add(g.Wait()) if db.lockf != nil { merr.Add(db.lockf.Release()) } merr.Add(db.head.Close()) return merr.Err() }
go
func (db *DB) Close() error { close(db.stopc) db.compactCancel() <-db.donec db.mtx.Lock() defer db.mtx.Unlock() var g errgroup.Group // blocks also contains all head blocks. for _, pb := range db.blocks { g.Go(pb.Close) } var merr tsdb_errors.MultiError merr.Add(g.Wait()) if db.lockf != nil { merr.Add(db.lockf.Release()) } merr.Add(db.head.Close()) return merr.Err() }
[ "func", "(", "db", "*", "DB", ")", "Close", "(", ")", "error", "{", "close", "(", "db", ".", "stopc", ")", "\n", "db", ".", "compactCancel", "(", ")", "\n", "<-", "db", ".", "donec", "\n", "db", ".", "mtx", ".", "Lock", "(", ")", "\n", "defer...
// Close the partition.
[ "Close", "the", "partition", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L857-L881
train
prometheus/tsdb
db.go
DisableCompactions
func (db *DB) DisableCompactions() { db.autoCompactMtx.Lock() defer db.autoCompactMtx.Unlock() db.autoCompact = false level.Info(db.logger).Log("msg", "compactions disabled") }
go
func (db *DB) DisableCompactions() { db.autoCompactMtx.Lock() defer db.autoCompactMtx.Unlock() db.autoCompact = false level.Info(db.logger).Log("msg", "compactions disabled") }
[ "func", "(", "db", "*", "DB", ")", "DisableCompactions", "(", ")", "{", "db", ".", "autoCompactMtx", ".", "Lock", "(", ")", "\n", "defer", "db", ".", "autoCompactMtx", ".", "Unlock", "(", ")", "\n", "db", ".", "autoCompact", "=", "false", "\n", "leve...
// DisableCompactions disables auto compactions.
[ "DisableCompactions", "disables", "auto", "compactions", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L884-L890
train
prometheus/tsdb
db.go
EnableCompactions
func (db *DB) EnableCompactions() { db.autoCompactMtx.Lock() defer db.autoCompactMtx.Unlock() db.autoCompact = true level.Info(db.logger).Log("msg", "compactions enabled") }
go
func (db *DB) EnableCompactions() { db.autoCompactMtx.Lock() defer db.autoCompactMtx.Unlock() db.autoCompact = true level.Info(db.logger).Log("msg", "compactions enabled") }
[ "func", "(", "db", "*", "DB", ")", "EnableCompactions", "(", ")", "{", "db", ".", "autoCompactMtx", ".", "Lock", "(", ")", "\n", "defer", "db", ".", "autoCompactMtx", ".", "Unlock", "(", ")", "\n", "db", ".", "autoCompact", "=", "true", "\n", "level"...
// EnableCompactions enables auto compactions.
[ "EnableCompactions", "enables", "auto", "compactions", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L893-L899
train
prometheus/tsdb
db.go
Querier
func (db *DB) Querier(mint, maxt int64) (Querier, error) { var blocks []BlockReader var blockMetas []BlockMeta db.mtx.RLock() defer db.mtx.RUnlock() for _, b := range db.blocks { if b.OverlapsClosedInterval(mint, maxt) { blocks = append(blocks, b) blockMetas = append(blockMetas, b.Meta()) } } if maxt >= db.head.MinTime() { blocks = append(blocks, &rangeHead{ head: db.head, mint: mint, maxt: maxt, }) } blockQueriers := make([]Querier, 0, len(blocks)) for _, b := range blocks { q, err := NewBlockQuerier(b, mint, maxt) if err == nil { blockQueriers = append(blockQueriers, q) continue } // If we fail, all previously opened queriers must be closed. for _, q := range blockQueriers { q.Close() } return nil, errors.Wrapf(err, "open querier for block %s", b) } if len(OverlappingBlocks(blockMetas)) > 0 { return &verticalQuerier{ querier: querier{ blocks: blockQueriers, }, }, nil } return &querier{ blocks: blockQueriers, }, nil }
go
func (db *DB) Querier(mint, maxt int64) (Querier, error) { var blocks []BlockReader var blockMetas []BlockMeta db.mtx.RLock() defer db.mtx.RUnlock() for _, b := range db.blocks { if b.OverlapsClosedInterval(mint, maxt) { blocks = append(blocks, b) blockMetas = append(blockMetas, b.Meta()) } } if maxt >= db.head.MinTime() { blocks = append(blocks, &rangeHead{ head: db.head, mint: mint, maxt: maxt, }) } blockQueriers := make([]Querier, 0, len(blocks)) for _, b := range blocks { q, err := NewBlockQuerier(b, mint, maxt) if err == nil { blockQueriers = append(blockQueriers, q) continue } // If we fail, all previously opened queriers must be closed. for _, q := range blockQueriers { q.Close() } return nil, errors.Wrapf(err, "open querier for block %s", b) } if len(OverlappingBlocks(blockMetas)) > 0 { return &verticalQuerier{ querier: querier{ blocks: blockQueriers, }, }, nil } return &querier{ blocks: blockQueriers, }, nil }
[ "func", "(", "db", "*", "DB", ")", "Querier", "(", "mint", ",", "maxt", "int64", ")", "(", "Querier", ",", "error", ")", "{", "var", "blocks", "[", "]", "BlockReader", "\n", "var", "blockMetas", "[", "]", "BlockMeta", "\n", "db", ".", "mtx", ".", ...
// Querier returns a new querier over the data partition for the given time range. // A goroutine must not handle more than one open Querier.
[ "Querier", "returns", "a", "new", "querier", "over", "the", "data", "partition", "for", "the", "given", "time", "range", ".", "A", "goroutine", "must", "not", "handle", "more", "than", "one", "open", "Querier", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L933-L979
train
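A read-path sketch assuming an open *tsdb.DB and the usual imports (math, fmt, github.com/prometheus/tsdb/labels); the Select signature used here (variadic matchers returning a SeriesSet plus error) and labels.NewEqualMatcher match this version of the package.

q, err := db.Querier(0, math.MaxInt64)
if err != nil {
	return err
}
defer q.Close() // release resources held on the selected blocks
set, err := q.Select(labels.NewEqualMatcher("job", "demo"))
if err != nil {
	return err
}
for set.Next() {
	series := set.At()
	fmt.Println(series.Labels())
}
return set.Err()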
prometheus/tsdb
db.go
Delete
func (db *DB) Delete(mint, maxt int64, ms ...labels.Matcher) error { db.cmtx.Lock() defer db.cmtx.Unlock() var g errgroup.Group db.mtx.RLock() defer db.mtx.RUnlock() for _, b := range db.blocks { if b.OverlapsClosedInterval(mint, maxt) { g.Go(func(b *Block) func() error { return func() error { return b.Delete(mint, maxt, ms...) } }(b)) } } g.Go(func() error { return db.head.Delete(mint, maxt, ms...) }) return g.Wait() }
go
func (db *DB) Delete(mint, maxt int64, ms ...labels.Matcher) error { db.cmtx.Lock() defer db.cmtx.Unlock() var g errgroup.Group db.mtx.RLock() defer db.mtx.RUnlock() for _, b := range db.blocks { if b.OverlapsClosedInterval(mint, maxt) { g.Go(func(b *Block) func() error { return func() error { return b.Delete(mint, maxt, ms...) } }(b)) } } g.Go(func() error { return db.head.Delete(mint, maxt, ms...) }) return g.Wait() }
[ "func", "(", "db", "*", "DB", ")", "Delete", "(", "mint", ",", "maxt", "int64", ",", "ms", "...", "labels", ".", "Matcher", ")", "error", "{", "db", ".", "cmtx", ".", "Lock", "(", ")", "\n", "defer", "db", ".", "cmtx", ".", "Unlock", "(", ")", ...
// Delete implements deletion of metrics. It only has atomicity guarantees on a per-block basis.
[ "Delete", "implements", "deletion", "of", "metrics", ".", "It", "only", "has", "atomicity", "guarantees", "on", "a", "per", "-", "block", "basis", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L986-L1006
train
prometheus/tsdb
db.go
CleanTombstones
func (db *DB) CleanTombstones() (err error) { db.cmtx.Lock() defer db.cmtx.Unlock() start := time.Now() defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds()) newUIDs := []ulid.ULID{} defer func() { // If any error is caused, we need to delete all the new directory created. if err != nil { for _, uid := range newUIDs { dir := filepath.Join(db.Dir(), uid.String()) if err := os.RemoveAll(dir); err != nil { level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) } } } }() db.mtx.RLock() blocks := db.blocks[:] db.mtx.RUnlock() for _, b := range blocks { if uid, er := b.CleanTombstones(db.Dir(), db.compactor); er != nil { err = errors.Wrapf(er, "clean tombstones: %s", b.Dir()) return err } else if uid != nil { // New block was created. newUIDs = append(newUIDs, *uid) } } return errors.Wrap(db.reload(), "reload blocks") }
go
func (db *DB) CleanTombstones() (err error) { db.cmtx.Lock() defer db.cmtx.Unlock() start := time.Now() defer db.metrics.tombCleanTimer.Observe(time.Since(start).Seconds()) newUIDs := []ulid.ULID{} defer func() { // If any error is caused, we need to delete all the new directory created. if err != nil { for _, uid := range newUIDs { dir := filepath.Join(db.Dir(), uid.String()) if err := os.RemoveAll(dir); err != nil { level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) } } } }() db.mtx.RLock() blocks := db.blocks[:] db.mtx.RUnlock() for _, b := range blocks { if uid, er := b.CleanTombstones(db.Dir(), db.compactor); er != nil { err = errors.Wrapf(er, "clean tombstones: %s", b.Dir()) return err } else if uid != nil { // New block was created. newUIDs = append(newUIDs, *uid) } } return errors.Wrap(db.reload(), "reload blocks") }
[ "func", "(", "db", "*", "DB", ")", "CleanTombstones", "(", ")", "(", "err", "error", ")", "{", "db", ".", "cmtx", ".", "Lock", "(", ")", "\n", "defer", "db", ".", "cmtx", ".", "Unlock", "(", ")", "\n", "start", ":=", "time", ".", "Now", "(", ...
// CleanTombstones re-writes any blocks with tombstones.
[ "CleanTombstones", "re", "-", "writes", "any", "blocks", "with", "tombstones", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/db.go#L1009-L1042
train
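Since Delete only records tombstones, disk space is reclaimed by a follow-up CleanTombstones, which re-writes the affected blocks. A combined sketch, assuming an open *tsdb.DB and the matcher constructor from tsdb/labels:

// Tombstone every sample of series with job="demo" in [0, 1000].
if err := db.Delete(0, 1000, labels.NewEqualMatcher("job", "demo")); err != nil {
	return err
}
// Re-write tombstoned blocks so the deleted data is physically dropped.
return db.CleanTombstones()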
prometheus/tsdb
compact.go
ExponentialBlockRanges
func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 { ranges := make([]int64, 0, steps) curRange := minSize for i := 0; i < steps; i++ { ranges = append(ranges, curRange) curRange = curRange * int64(stepSize) } return ranges }
go
func ExponentialBlockRanges(minSize int64, steps, stepSize int) []int64 { ranges := make([]int64, 0, steps) curRange := minSize for i := 0; i < steps; i++ { ranges = append(ranges, curRange) curRange = curRange * int64(stepSize) } return ranges }
[ "func", "ExponentialBlockRanges", "(", "minSize", "int64", ",", "steps", ",", "stepSize", "int", ")", "[", "]", "int64", "{", "ranges", ":=", "make", "(", "[", "]", "int64", ",", "0", ",", "steps", ")", "\n", "curRange", ":=", "minSize", "\n", "for", ...
// ExponentialBlockRanges returns the time ranges based on the stepSize.
[ "ExponentialBlockRanges", "returns", "the", "time", "ranges", "based", "on", "the", "stepSize", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/compact.go#L41-L50
train
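The ranges form a geometric progression: each step multiplies the previous range by stepSize. For example, with a two-hour base range expressed in milliseconds:

ranges := tsdb.ExponentialBlockRanges(2*60*60*1000, 3, 5)
// ranges == []int64{7200000, 36000000, 180000000}, i.e. 2h, 10h and 50h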
prometheus/tsdb
compact.go
NewLeveledCompactor
func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool) (*LeveledCompactor, error) { if len(ranges) == 0 { return nil, errors.Errorf("at least one range must be provided") } if pool == nil { pool = chunkenc.NewPool() } if l == nil { l = log.NewNopLogger() } return &LeveledCompactor{ ranges: ranges, chunkPool: pool, logger: l, metrics: newCompactorMetrics(r), ctx: ctx, }, nil }
go
func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool) (*LeveledCompactor, error) { if len(ranges) == 0 { return nil, errors.Errorf("at least one range must be provided") } if pool == nil { pool = chunkenc.NewPool() } if l == nil { l = log.NewNopLogger() } return &LeveledCompactor{ ranges: ranges, chunkPool: pool, logger: l, metrics: newCompactorMetrics(r), ctx: ctx, }, nil }
[ "func", "NewLeveledCompactor", "(", "ctx", "context", ".", "Context", ",", "r", "prometheus", ".", "Registerer", ",", "l", "log", ".", "Logger", ",", "ranges", "[", "]", "int64", ",", "pool", "chunkenc", ".", "Pool", ")", "(", "*", "LeveledCompactor", ",...
// NewLeveledCompactor returns a LeveledCompactor.
[ "NewLeveledCompactor", "returns", "a", "LeveledCompactor", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/compact.go#L151-L168
train
prometheus/tsdb
compact.go
Plan
func (c *LeveledCompactor) Plan(dir string) ([]string, error) { dirs, err := blockDirs(dir) if err != nil { return nil, err } if len(dirs) < 1 { return nil, nil } var dms []dirMeta for _, dir := range dirs { meta, err := readMetaFile(dir) if err != nil { return nil, err } dms = append(dms, dirMeta{dir, meta}) } return c.plan(dms) }
go
func (c *LeveledCompactor) Plan(dir string) ([]string, error) { dirs, err := blockDirs(dir) if err != nil { return nil, err } if len(dirs) < 1 { return nil, nil } var dms []dirMeta for _, dir := range dirs { meta, err := readMetaFile(dir) if err != nil { return nil, err } dms = append(dms, dirMeta{dir, meta}) } return c.plan(dms) }
[ "func", "(", "c", "*", "LeveledCompactor", ")", "Plan", "(", "dir", "string", ")", "(", "[", "]", "string", ",", "error", ")", "{", "dirs", ",", "err", ":=", "blockDirs", "(", "dir", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",",...
// Plan returns a list of compactable blocks in the provided directory.
[ "Plan", "returns", "a", "list", "of", "compactable", "blocks", "in", "the", "provided", "directory", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/compact.go#L176-L194
train
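Construction and planning combined, as a sketch: the nil registerer, logger and chunk pool are all accepted (logger and pool fall back to no-op/default values), the data directory is hypothetical, and the block ranges reuse the exponential helper above.

c, err := tsdb.NewLeveledCompactor(context.Background(), nil, nil,
	tsdb.ExponentialBlockRanges(2*60*60*1000, 3, 5), nil)
if err != nil {
	return err
}
// Plan returns the directories of blocks that should be compacted
// together; an empty result means nothing is compactable right now.
dirs, err := c.Plan("data")
if err != nil {
	return err
}
fmt.Println(dirs)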
prometheus/tsdb
compact.go
selectDirs
func (c *LeveledCompactor) selectDirs(ds []dirMeta) []dirMeta { if len(c.ranges) < 2 || len(ds) < 1 { return nil } highTime := ds[len(ds)-1].meta.MinTime for _, iv := range c.ranges[1:] { parts := splitByRange(ds, iv) if len(parts) == 0 { continue } Outer: for _, p := range parts { // Do not select the range if it has a block whose compaction failed. for _, dm := range p { if dm.meta.Compaction.Failed { continue Outer } } mint := p[0].meta.MinTime maxt := p[len(p)-1].meta.MaxTime // Pick the range of blocks if it spans the full range (potentially with gaps) // or is before the most recent block. // This ensures we don't compact blocks prematurely when another one of the same // size still fits in the range. if (maxt-mint == iv || maxt <= highTime) && len(p) > 1 { return p } } } return nil }
go
func (c *LeveledCompactor) selectDirs(ds []dirMeta) []dirMeta { if len(c.ranges) < 2 || len(ds) < 1 { return nil } highTime := ds[len(ds)-1].meta.MinTime for _, iv := range c.ranges[1:] { parts := splitByRange(ds, iv) if len(parts) == 0 { continue } Outer: for _, p := range parts { // Do not select the range if it has a block whose compaction failed. for _, dm := range p { if dm.meta.Compaction.Failed { continue Outer } } mint := p[0].meta.MinTime maxt := p[len(p)-1].meta.MaxTime // Pick the range of blocks if it spans the full range (potentially with gaps) // or is before the most recent block. // This ensures we don't compact blocks prematurely when another one of the same // size still fits in the range. if (maxt-mint == iv || maxt <= highTime) && len(p) > 1 { return p } } } return nil }
[ "func", "(", "c", "*", "LeveledCompactor", ")", "selectDirs", "(", "ds", "[", "]", "dirMeta", ")", "[", "]", "dirMeta", "{", "if", "len", "(", "c", ".", "ranges", ")", "<", "2", "||", "len", "(", "ds", ")", "<", "1", "{", "return", "nil", "\n",...
// selectDirs returns the dir metas that should be compacted into a single new block. // If only a single block range is configured, the result is always nil.
[ "selectDirs", "returns", "the", "dir", "metas", "that", "should", "be", "compacted", "into", "a", "single", "new", "block", ".", "If", "only", "a", "single", "block", "range", "is", "configured", "the", "result", "is", "always", "nil", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/compact.go#L233-L268
train
prometheus/tsdb
compact.go
selectOverlappingDirs
func (c *LeveledCompactor) selectOverlappingDirs(ds []dirMeta) []string { if len(ds) < 2 { return nil } var overlappingDirs []string globalMaxt := ds[0].meta.MaxTime for i, d := range ds[1:] { if d.meta.MinTime < globalMaxt { if len(overlappingDirs) == 0 { // When it is the first overlap, need to add the last one as well. overlappingDirs = append(overlappingDirs, ds[i].dir) } overlappingDirs = append(overlappingDirs, d.dir) } else if len(overlappingDirs) > 0 { break } if d.meta.MaxTime > globalMaxt { globalMaxt = d.meta.MaxTime } } return overlappingDirs }
go
func (c *LeveledCompactor) selectOverlappingDirs(ds []dirMeta) []string { if len(ds) < 2 { return nil } var overlappingDirs []string globalMaxt := ds[0].meta.MaxTime for i, d := range ds[1:] { if d.meta.MinTime < globalMaxt { if len(overlappingDirs) == 0 { // When it is the first overlap, need to add the last one as well. overlappingDirs = append(overlappingDirs, ds[i].dir) } overlappingDirs = append(overlappingDirs, d.dir) } else if len(overlappingDirs) > 0 { break } if d.meta.MaxTime > globalMaxt { globalMaxt = d.meta.MaxTime } } return overlappingDirs }
[ "func", "(", "c", "*", "LeveledCompactor", ")", "selectOverlappingDirs", "(", "ds", "[", "]", "dirMeta", ")", "[", "]", "string", "{", "if", "len", "(", "ds", ")", "<", "2", "{", "return", "nil", "\n", "}", "\n", "var", "overlappingDirs", "[", "]", ...
// selectOverlappingDirs returns all dirs with overlapping time ranges. // It expects the input to be sorted by mint and returns the overlapping dirs in the same order as received.
[ "selectOverlappingDirs", "returns", "all", "dirs", "with", "overlapping", "time", "ranges", ".", "It", "expects", "the", "input", "to", "be", "sorted", "by", "mint", "and", "returns", "the", "overlapping", "dirs", "in", "the", "same", "order", "as", "received", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/compact.go#L272-L292
train
prometheus/tsdb
tombstones.go
addInterval
func (t *memTombstones) addInterval(ref uint64, itvs ...Interval) { t.mtx.Lock() defer t.mtx.Unlock() for _, itv := range itvs { t.intvlGroups[ref] = t.intvlGroups[ref].add(itv) } }
go
func (t *memTombstones) addInterval(ref uint64, itvs ...Interval) { t.mtx.Lock() defer t.mtx.Unlock() for _, itv := range itvs { t.intvlGroups[ref] = t.intvlGroups[ref].add(itv) } }
[ "func", "(", "t", "*", "memTombstones", ")", "addInterval", "(", "ref", "uint64", ",", "itvs", "...", "Interval", ")", "{", "t", ".", "mtx", ".", "Lock", "(", ")", "\n", "defer", "t", ".", "mtx", ".", "Unlock", "(", ")", "\n", "for", "_", ",", ...
// addInterval to an existing memTombstones
[ "addInterval", "to", "an", "existing", "memTombstones" ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/tombstones.go#L225-L231
train
prometheus/tsdb
tombstones.go
add
func (itvs Intervals) add(n Interval) Intervals { for i, r := range itvs { // TODO(gouthamve): Make this codepath easier to digest. if r.inBounds(n.Mint-1) || r.inBounds(n.Mint) { if n.Maxt > r.Maxt { itvs[i].Maxt = n.Maxt } j := 0 for _, r2 := range itvs[i+1:] { if n.Maxt < r2.Mint { break } j++ } if j != 0 { if itvs[i+j].Maxt > n.Maxt { itvs[i].Maxt = itvs[i+j].Maxt } itvs = append(itvs[:i+1], itvs[i+j+1:]...) } return itvs } if r.inBounds(n.Maxt+1) || r.inBounds(n.Maxt) { if n.Mint < r.Maxt { itvs[i].Mint = n.Mint } return itvs } if n.Mint < r.Mint { newRange := make(Intervals, i, len(itvs[:i])+1) copy(newRange, itvs[:i]) newRange = append(newRange, n) newRange = append(newRange, itvs[i:]...) return newRange } } itvs = append(itvs, n) return itvs }
go
func (itvs Intervals) add(n Interval) Intervals { for i, r := range itvs { // TODO(gouthamve): Make this codepath easier to digest. if r.inBounds(n.Mint-1) || r.inBounds(n.Mint) { if n.Maxt > r.Maxt { itvs[i].Maxt = n.Maxt } j := 0 for _, r2 := range itvs[i+1:] { if n.Maxt < r2.Mint { break } j++ } if j != 0 { if itvs[i+j].Maxt > n.Maxt { itvs[i].Maxt = itvs[i+j].Maxt } itvs = append(itvs[:i+1], itvs[i+j+1:]...) } return itvs } if r.inBounds(n.Maxt+1) || r.inBounds(n.Maxt) { if n.Mint < r.Maxt { itvs[i].Mint = n.Mint } return itvs } if n.Mint < r.Mint { newRange := make(Intervals, i, len(itvs[:i])+1) copy(newRange, itvs[:i]) newRange = append(newRange, n) newRange = append(newRange, itvs[i:]...) return newRange } } itvs = append(itvs, n) return itvs }
[ "func", "(", "itvs", "Intervals", ")", "add", "(", "n", "Interval", ")", "Intervals", "{", "for", "i", ",", "r", ":=", "range", "itvs", "{", "if", "r", ".", "inBounds", "(", "n", ".", "Mint", "-", "1", ")", "||", "r", ".", "inBounds", "(", "n",...
// add the new time-range to the existing ones. // The existing ones must be sorted.
[ "add", "the", "new", "time", "-", "range", "to", "the", "existing", "ones", ".", "The", "existing", "ones", "must", "be", "sorted", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/tombstones.go#L271-L314
train
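Because add is unexported, this sketch is written as if it lived inside the tsdb package; it shows the merge behaviour on a sorted Intervals slice:

itvs := Intervals{{Mint: 1, Maxt: 5}, {Mint: 10, Maxt: 15}}
// The new range bridges both existing ones, so they collapse into one.
itvs = itvs.add(Interval{Mint: 4, Maxt: 11})
// itvs == Intervals{{Mint: 1, Maxt: 15}}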
prometheus/tsdb
fileutil/fileutil.go
CopyDirs
func CopyDirs(src, dest string) error { if err := os.MkdirAll(dest, 0777); err != nil { return err } files, err := readDirs(src) if err != nil { return err } for _, f := range files { dp := filepath.Join(dest, f) sp := filepath.Join(src, f) stat, err := os.Stat(sp) if err != nil { return err } // Empty directories are also created. if stat.IsDir() { if err := os.MkdirAll(dp, 0777); err != nil { return err } continue } if err := copyFile(sp, dp); err != nil { return err } } return nil }
go
func CopyDirs(src, dest string) error { if err := os.MkdirAll(dest, 0777); err != nil { return err } files, err := readDirs(src) if err != nil { return err } for _, f := range files { dp := filepath.Join(dest, f) sp := filepath.Join(src, f) stat, err := os.Stat(sp) if err != nil { return err } // Empty directories are also created. if stat.IsDir() { if err := os.MkdirAll(dp, 0777); err != nil { return err } continue } if err := copyFile(sp, dp); err != nil { return err } } return nil }
[ "func", "CopyDirs", "(", "src", ",", "dest", "string", ")", "error", "{", "if", "err", ":=", "os", ".", "MkdirAll", "(", "dest", ",", "0777", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "files", ",", "err", ":=", "readDir...
// CopyDirs copies all directories, subdirectories and files recursively including the empty folders. // Source and destination must be full paths.
[ "CopyDirs", "copies", "all", "directories", "subdirectories", "and", "files", "recursively", "including", "the", "empty", "folders", ".", "Source", "and", "destination", "must", "be", "full", "paths", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/fileutil/fileutil.go#L30-L61
train
prometheus/tsdb
fileutil/fileutil.go
readDirs
func readDirs(src string) ([]string, error) { var files []string err := filepath.Walk(src, func(path string, f os.FileInfo, err error) error { relativePath := strings.TrimPrefix(path, src) if len(relativePath) > 0 { files = append(files, relativePath) } return nil }) if err != nil { return nil, err } return files, nil }
go
func readDirs(src string) ([]string, error) { var files []string err := filepath.Walk(src, func(path string, f os.FileInfo, err error) error { relativePath := strings.TrimPrefix(path, src) if len(relativePath) > 0 { files = append(files, relativePath) } return nil }) if err != nil { return nil, err } return files, nil }
[ "func", "readDirs", "(", "src", "string", ")", "(", "[", "]", "string", ",", "error", ")", "{", "var", "files", "[", "]", "string", "\n", "err", ":=", "filepath", ".", "Walk", "(", "src", ",", "func", "(", "path", "string", ",", "f", "os", ".", ...
// readDirs reads the source directory recursively and // returns relative paths to all files and empty directories.
[ "readDirs", "reads", "the", "source", "directory", "recursively", "and", "returns", "relative", "paths", "to", "all", "files", "and", "empty", "directories", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/fileutil/fileutil.go#L78-L92
train
prometheus/tsdb
fileutil/fileutil.go
Replace
func Replace(from, to string) error { if err := os.RemoveAll(to); err != nil { return err } if err := os.Rename(from, to); err != nil { return err } // Directory was renamed; sync parent dir to persist rename. pdir, err := OpenDir(filepath.Dir(to)) if err != nil { return err } if err = pdir.Sync(); err != nil { pdir.Close() return err } return pdir.Close() }
go
func Replace(from, to string) error { if err := os.RemoveAll(to); err != nil { return err } if err := os.Rename(from, to); err != nil { return err } // Directory was renamed; sync parent dir to persist rename. pdir, err := OpenDir(filepath.Dir(to)) if err != nil { return err } if err = pdir.Sync(); err != nil { pdir.Close() return err } return pdir.Close() }
[ "func", "Replace", "(", "from", ",", "to", "string", ")", "error", "{", "if", "err", ":=", "os", ".", "RemoveAll", "(", "to", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}", "\n", "if", "err", ":=", "os", ".", "Rename", "(", "fro...
// Replace moves a file or directory to a new location and deletes any previous data. // It is not atomic.
[ "Replace", "moves", "a", "file", "or", "directory", "to", "a", "new", "location", "and", "deletes", "any", "previous", "data", ".", "It", "is", "not", "atomic", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/fileutil/fileutil.go#L130-L149
train
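A common pattern pairing the two helpers: stage work in a temporary directory, then swap it into place with Replace (remembering, per the doc above, that Replace is not atomic). The block directory paths here are hypothetical.

tmp := "data/01BKGV7JBM69T2G1BGBGM6KB12.tmp"
if err := fileutil.CopyDirs("data/01BKGV7JBM69T2G1BGBGM6KB12", tmp); err != nil {
	return err
}
// ... rewrite files inside tmp ...
if err := fileutil.Replace(tmp, "data/01BKGV7JBM69T2G1BGBGM6KB12"); err != nil {
	return err
}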
prometheus/tsdb
checkpoint.go
LastCheckpoint
func LastCheckpoint(dir string) (string, int, error) { files, err := ioutil.ReadDir(dir) if err != nil { return "", 0, err } // Traverse list backwards since there may be multiple checkpoints left. for i := len(files) - 1; i >= 0; i-- { fi := files[i] if !strings.HasPrefix(fi.Name(), checkpointPrefix) { continue } if !fi.IsDir() { return "", 0, errors.Errorf("checkpoint %s is not a directory", fi.Name()) } idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) if err != nil { continue } return filepath.Join(dir, fi.Name()), idx, nil } return "", 0, ErrNotFound }
go
func LastCheckpoint(dir string) (string, int, error) { files, err := ioutil.ReadDir(dir) if err != nil { return "", 0, err } // Traverse list backwards since there may be multiple checkpoints left. for i := len(files) - 1; i >= 0; i-- { fi := files[i] if !strings.HasPrefix(fi.Name(), checkpointPrefix) { continue } if !fi.IsDir() { return "", 0, errors.Errorf("checkpoint %s is not a directory", fi.Name()) } idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) if err != nil { continue } return filepath.Join(dir, fi.Name()), idx, nil } return "", 0, ErrNotFound }
[ "func", "LastCheckpoint", "(", "dir", "string", ")", "(", "string", ",", "int", ",", "error", ")", "{", "files", ",", "err", ":=", "ioutil", ".", "ReadDir", "(", "dir", ")", "\n", "if", "err", "!=", "nil", "{", "return", "\"\"", ",", "0", ",", "e...
// LastCheckpoint returns the directory name and index of the most recent checkpoint. // If dir does not contain any checkpoints, ErrNotFound is returned.
[ "LastCheckpoint", "returns", "the", "directory", "name", "and", "index", "of", "the", "most", "recent", "checkpoint", ".", "If", "dir", "does", "not", "contain", "any", "checkpoints", "ErrNotFound", "is", "returned", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/checkpoint.go#L45-L67
train
prometheus/tsdb
checkpoint.go
DeleteCheckpoints
func DeleteCheckpoints(dir string, maxIndex int) error { var errs tsdb_errors.MultiError files, err := ioutil.ReadDir(dir) if err != nil { return err } for _, fi := range files { if !strings.HasPrefix(fi.Name(), checkpointPrefix) { continue } index, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) if err != nil || index >= maxIndex { continue } if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil { errs.Add(err) } } return errs.Err() }
go
func DeleteCheckpoints(dir string, maxIndex int) error { var errs tsdb_errors.MultiError files, err := ioutil.ReadDir(dir) if err != nil { return err } for _, fi := range files { if !strings.HasPrefix(fi.Name(), checkpointPrefix) { continue } index, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) if err != nil || index >= maxIndex { continue } if err := os.RemoveAll(filepath.Join(dir, fi.Name())); err != nil { errs.Add(err) } } return errs.Err() }
[ "func", "DeleteCheckpoints", "(", "dir", "string", ",", "maxIndex", "int", ")", "error", "{", "var", "errs", "tsdb_errors", ".", "MultiError", "\n", "files", ",", "err", ":=", "ioutil", ".", "ReadDir", "(", "dir", ")", "\n", "if", "err", "!=", "nil", "...
// DeleteCheckpoints deletes all checkpoints in a directory below a given index.
[ "DeleteCheckpoints", "deletes", "all", "checkpoints", "in", "a", "directory", "below", "a", "given", "index", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/checkpoint.go#L70-L90
train
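A sketch tying the two checkpoint helpers together; the WAL directory is hypothetical, and ErrNotFound (returned by LastCheckpoint) simply means no checkpoint exists yet.

dir, idx, err := tsdb.LastCheckpoint("data/wal")
if err == tsdb.ErrNotFound {
	return nil // nothing checkpointed yet
}
if err != nil {
	return err
}
fmt.Println("latest checkpoint:", dir)
// Drop every checkpoint older than the latest one (index idx).
return tsdb.DeleteCheckpoints("data/wal", idx)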
prometheus/tsdb
wal/live_reader.go
NewLiveReader
func NewLiveReader(logger log.Logger, r io.Reader) *LiveReader { return &LiveReader{ logger: logger, rdr: r, // Until we understand how they come about, make readers permissive // to records spanning pages. permissive: true, } }
go
func NewLiveReader(logger log.Logger, r io.Reader) *LiveReader { return &LiveReader{ logger: logger, rdr: r, // Until we understand how they come about, make readers permissive // to records spanning pages. permissive: true, } }
[ "func", "NewLiveReader", "(", "logger", "log", ".", "Logger", ",", "r", "io", ".", "Reader", ")", "*", "LiveReader", "{", "return", "&", "LiveReader", "{", "logger", ":", "logger", ",", "rdr", ":", "r", ",", "permissive", ":", "true", ",", "}", "\n",...
// NewLiveReader returns a new live reader.
[ "NewLiveReader", "returns", "a", "new", "live", "reader", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/live_reader.go#L38-L47
train
prometheus/tsdb
wal/live_reader.go
Err
func (r *LiveReader) Err() error { if r.eofNonErr && r.err == io.EOF { return nil } return r.err }
go
func (r *LiveReader) Err() error { if r.eofNonErr && r.err == io.EOF { return nil } return r.err }
[ "func", "(", "r", "*", "LiveReader", ")", "Err", "(", ")", "error", "{", "if", "r", ".", "eofNonErr", "&&", "r", ".", "err", "==", "io", ".", "EOF", "{", "return", "nil", "\n", "}", "\n", "return", "r", ".", "err", "\n", "}" ]
// Err returns any errors encountered reading the WAL. io.EOFs are not terminal // and Next can be tried again. Non-EOFs are terminal, and the reader should // not be used again. It is up to the user to decide when to stop trying should // io.EOF be returned.
[ "Err", "returns", "any", "errors", "encountered", "reading", "the", "WAL", ".", "io", ".", "EOFs", "are", "not", "terminal", "and", "Next", "can", "be", "tried", "again", ".", "Non", "-", "EOFs", "are", "terminal", "and", "the", "reader", "should", "not"...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/live_reader.go#L77-L82
train
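Given the Err contract above, a tailing loop can treat io.EOF as "no data yet" and retry; this sketch assumes the reader also exposes Next and Record, as it does in this package, and that logger, segmentReader and the process handler are provided by the caller.

r := wal.NewLiveReader(logger, segmentReader)
for {
	for r.Next() {
		process(r.Record()) // process is a hypothetical handler
	}
	if err := r.Err(); err != nil && err != io.EOF {
		return err // non-EOF errors are terminal
	}
	// io.EOF: the writer has not appended more data yet; poll again.
	time.Sleep(100 * time.Millisecond)
}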
prometheus/tsdb
wal/live_reader.go
buildRecord
func (r *LiveReader) buildRecord() (bool, error) { for { // Check that we have data in the internal buffer to read. if r.writeIndex <= r.readIndex { return false, nil } // Attempt to read a record, partial or otherwise. temp, n, err := r.readRecord() if err != nil { return false, err } r.readIndex += n r.total += int64(n) if temp == nil { return false, nil } rt := recType(r.hdr[0]) if rt == recFirst || rt == recFull { r.rec = r.rec[:0] } r.rec = append(r.rec, temp...) if err := validateRecord(rt, r.index); err != nil { r.index = 0 return false, err } if rt == recLast || rt == recFull { r.index = 0 return true, nil } // Only increment i for non-zero records since we use it // to determine valid content record sequences. r.index++ } }
go
func (r *LiveReader) buildRecord() (bool, error) { for { // Check that we have data in the internal buffer to read. if r.writeIndex <= r.readIndex { return false, nil } // Attempt to read a record, partial or otherwise. temp, n, err := r.readRecord() if err != nil { return false, err } r.readIndex += n r.total += int64(n) if temp == nil { return false, nil } rt := recType(r.hdr[0]) if rt == recFirst || rt == recFull { r.rec = r.rec[:0] } r.rec = append(r.rec, temp...) if err := validateRecord(rt, r.index); err != nil { r.index = 0 return false, err } if rt == recLast || rt == recFull { r.index = 0 return true, nil } // Only increment i for non-zero records since we use it // to determine valid content record sequences. r.index++ } }
[ "func", "(", "r", "*", "LiveReader", ")", "buildRecord", "(", ")", "(", "bool", ",", "error", ")", "{", "for", "{", "if", "r", ".", "writeIndex", "<=", "r", ".", "readIndex", "{", "return", "false", ",", "nil", "\n", "}", "\n", "temp", ",", "n", ...
// Rebuild a full record from potentially partial records. Returns false // if there was an error or if we weren't able to read a record for any reason. // Returns true if we read a full record. Any record data is appended to // LiveReader.rec.
[ "Rebuild", "a", "full", "record", "from", "potentially", "partial", "records", ".", "Returns", "false", "if", "there", "was", "an", "error", "or", "if", "we", "weren", "t", "able", "to", "read", "a", "record", "for", "any", "reason", ".", "Returns", "tru...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/live_reader.go#L150-L187
train
prometheus/tsdb
errors/errors.go
Add
func (es *MultiError) Add(err error) { if err == nil { return } if merr, ok := err.(MultiError); ok { *es = append(*es, merr...) } else { *es = append(*es, err) } }
go
func (es *MultiError) Add(err error) { if err == nil { return } if merr, ok := err.(MultiError); ok { *es = append(*es, merr...) } else { *es = append(*es, err) } }
[ "func", "(", "es", "*", "MultiError", ")", "Add", "(", "err", "error", ")", "{", "if", "err", "==", "nil", "{", "return", "\n", "}", "\n", "if", "merr", ",", "ok", ":=", "err", ".", "(", "MultiError", ")", ";", "ok", "{", "*", "es", "=", "app...
// Add adds the error to the error list if it is not nil.
[ "Add", "adds", "the", "error", "to", "the", "error", "list", "if", "it", "is", "not", "nil", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/errors/errors.go#L45-L54
train
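Usage sketch: nil errors are ignored and nested MultiErrors are flattened, so Add can be called unconditionally during teardown (Err returns nil when the list stayed empty; f is a hypothetical file handle).

var merr tsdb_errors.MultiError
merr.Add(nil)       // dropped
merr.Add(f.Close()) // recorded only if Close actually failed
return merr.Err()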
prometheus/tsdb
wal/wal.go
OpenWriteSegment
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666) if err != nil { return nil, err } stat, err := f.Stat() if err != nil { f.Close() return nil, err } // If the last page is torn, fill it with zeros. // In case it was torn after all records were written successfully, this // will just pad the page and everything will be fine. // If it was torn mid-record, a full read (which the caller should do anyway // to ensure integrity) will detect it as a corruption by the end. if d := stat.Size() % pageSize; d != 0 { level.Warn(logger).Log("msg", "last page of the wal is torn, filling it with zeros", "segment", segName) if _, err := f.Write(make([]byte, pageSize-d)); err != nil { f.Close() return nil, errors.Wrap(err, "zero-pad torn page") } } return &Segment{File: f, i: k, dir: dir}, nil }
go
func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0666) if err != nil { return nil, err } stat, err := f.Stat() if err != nil { f.Close() return nil, err } // If the last page is torn, fill it with zeros. // In case it was torn after all records were written successfully, this // will just pad the page and everything will be fine. // If it was torn mid-record, a full read (which the caller should do anyway // to ensure integrity) will detect it as a corruption by the end. if d := stat.Size() % pageSize; d != 0 { level.Warn(logger).Log("msg", "last page of the wal is torn, filling it with zeros", "segment", segName) if _, err := f.Write(make([]byte, pageSize-d)); err != nil { f.Close() return nil, errors.Wrap(err, "zero-pad torn page") } } return &Segment{File: f, i: k, dir: dir}, nil }
[ "func", "OpenWriteSegment", "(", "logger", "log", ".", "Logger", ",", "dir", "string", ",", "k", "int", ")", "(", "*", "Segment", ",", "error", ")", "{", "segName", ":=", "SegmentName", "(", "dir", ",", "k", ")", "\n", "f", ",", "err", ":=", "os", ...
// OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends.
[ "OpenWriteSegment", "opens", "segment", "k", "in", "dir", ".", "The", "returned", "segment", "is", "ready", "for", "new", "appends", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L99-L123
train
prometheus/tsdb
wal/wal.go
CreateSegment
func CreateSegment(dir string, k int) (*Segment, error) { f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) if err != nil { return nil, err } return &Segment{File: f, i: k, dir: dir}, nil }
go
func CreateSegment(dir string, k int) (*Segment, error) { f, err := os.OpenFile(SegmentName(dir, k), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) if err != nil { return nil, err } return &Segment{File: f, i: k, dir: dir}, nil }
[ "func", "CreateSegment", "(", "dir", "string", ",", "k", "int", ")", "(", "*", "Segment", ",", "error", ")", "{", "f", ",", "err", ":=", "os", ".", "OpenFile", "(", "SegmentName", "(", "dir", ",", "k", ")", ",", "os", ".", "O_WRONLY", "|", "os", ...
// CreateSegment creates a new segment k in dir.
[ "CreateSegment", "creates", "a", "new", "segment", "k", "in", "dir", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L126-L132
train
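A minimal sketch of CreateSegment against a scratch directory; since Segment embeds *os.File, Close and the usual file methods come for free, and Index reports the segment number:

package main

import (
    "fmt"
    "io/ioutil"
    "log"

    "github.com/prometheus/tsdb/wal"
)

func main() {
    dir, err := ioutil.TempDir("", "wal-demo")
    if err != nil {
        log.Fatal(err)
    }
    seg, err := wal.CreateSegment(dir, 0) // creates dir/00000000
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("created segment", seg.Index())
    if err := seg.Close(); err != nil {
        log.Fatal(err)
    }
}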
prometheus/tsdb
wal/wal.go
OpenReadSegment
func OpenReadSegment(fn string) (*Segment, error) { k, err := strconv.Atoi(filepath.Base(fn)) if err != nil { return nil, errors.New("not a valid filename") } f, err := os.Open(fn) if err != nil { return nil, err } return &Segment{File: f, i: k, dir: filepath.Dir(fn)}, nil }
go
func OpenReadSegment(fn string) (*Segment, error) { k, err := strconv.Atoi(filepath.Base(fn)) if err != nil { return nil, errors.New("not a valid filename") } f, err := os.Open(fn) if err != nil { return nil, err } return &Segment{File: f, i: k, dir: filepath.Dir(fn)}, nil }
[ "func", "OpenReadSegment", "(", "fn", "string", ")", "(", "*", "Segment", ",", "error", ")", "{", "k", ",", "err", ":=", "strconv", ".", "Atoi", "(", "filepath", ".", "Base", "(", "fn", ")", ")", "\n", "if", "err", "!=", "nil", "{", "return", "ni...
// OpenReadSegment opens the segment with the given filename.
[ "OpenReadSegment", "opens", "the", "segment", "with", "the", "given", "filename", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L135-L145
train
prometheus/tsdb
wal/wal.go
New
func New(logger log.Logger, reg prometheus.Registerer, dir string) (*WAL, error) { return NewSize(logger, reg, dir, DefaultSegmentSize) }
go
func New(logger log.Logger, reg prometheus.Registerer, dir string) (*WAL, error) { return NewSize(logger, reg, dir, DefaultSegmentSize) }
[ "func", "New", "(", "logger", "log", ".", "Logger", ",", "reg", "prometheus", ".", "Registerer", ",", "dir", "string", ")", "(", "*", "WAL", ",", "error", ")", "{", "return", "NewSize", "(", "logger", ",", "reg", ",", "dir", ",", "DefaultSegmentSize", ...
// New returns a new WAL over the given directory.
[ "New", "returns", "a", "new", "WAL", "over", "the", "given", "directory", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L177-L179
train
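A minimal end-to-end sketch: open a WAL with the default segment size and append one record. Passing nil for the logger and registerer is safe, since NewSize below substitutes a nop logger and skips metric registration:

package main

import (
    "io/ioutil"
    "log"

    "github.com/prometheus/tsdb/wal"
)

func main() {
    dir, err := ioutil.TempDir("", "wal-demo")
    if err != nil {
        log.Fatal(err)
    }
    w, err := wal.New(nil, nil, dir) // nop logger, no metrics registration
    if err != nil {
        log.Fatal(err)
    }
    defer w.Close()

    if err := w.Log([]byte("first record")); err != nil {
        log.Fatal(err)
    }
}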
prometheus/tsdb
wal/wal.go
NewSize
func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int) (*WAL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } if err := os.MkdirAll(dir, 0777); err != nil { return nil, errors.Wrap(err, "create dir") } if logger == nil { logger = log.NewNopLogger() } w := &WAL{ dir: dir, logger: logger, segmentSize: segmentSize, page: &page{}, actorc: make(chan func(), 100), stopc: make(chan chan struct{}), } w.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ Name: "prometheus_tsdb_wal_fsync_duration_seconds", Help: "Duration of WAL fsync.", }) w.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_page_flushes_total", Help: "Total number of page flushes.", }) w.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_completed_pages_total", Help: "Total number of completed pages.", }) w.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_truncations_failed_total", Help: "Total number of WAL truncations that failed.", }) w.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_truncations_total", Help: "Total number of WAL truncations attempted.", }) if reg != nil { reg.MustRegister(w.fsyncDuration, w.pageFlushes, w.pageCompletions, w.truncateFail, w.truncateTotal) } _, j, err := w.Segments() if err != nil { return nil, errors.Wrap(err, "get segment range") } // Fresh dir, no segments yet. if j == -1 { segment, err := CreateSegment(w.dir, 0) if err != nil { return nil, err } if err := w.setSegment(segment); err != nil { return nil, err } } else { segment, err := OpenWriteSegment(logger, w.dir, j) if err != nil { return nil, err } if err := w.setSegment(segment); err != nil { return nil, err } } go w.run() return w, nil }
go
func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int) (*WAL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } if err := os.MkdirAll(dir, 0777); err != nil { return nil, errors.Wrap(err, "create dir") } if logger == nil { logger = log.NewNopLogger() } w := &WAL{ dir: dir, logger: logger, segmentSize: segmentSize, page: &page{}, actorc: make(chan func(), 100), stopc: make(chan chan struct{}), } w.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ Name: "prometheus_tsdb_wal_fsync_duration_seconds", Help: "Duration of WAL fsync.", }) w.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_page_flushes_total", Help: "Total number of page flushes.", }) w.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_completed_pages_total", Help: "Total number of completed pages.", }) w.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_truncations_failed_total", Help: "Total number of WAL truncations that failed.", }) w.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_wal_truncations_total", Help: "Total number of WAL truncations attempted.", }) if reg != nil { reg.MustRegister(w.fsyncDuration, w.pageFlushes, w.pageCompletions, w.truncateFail, w.truncateTotal) } _, j, err := w.Segments() if err != nil { return nil, errors.Wrap(err, "get segment range") } // Fresh dir, no segments yet. if j == -1 { segment, err := CreateSegment(w.dir, 0) if err != nil { return nil, err } if err := w.setSegment(segment); err != nil { return nil, err } } else { segment, err := OpenWriteSegment(logger, w.dir, j) if err != nil { return nil, err } if err := w.setSegment(segment); err != nil { return nil, err } } go w.run() return w, nil }
[ "func", "NewSize", "(", "logger", "log", ".", "Logger", ",", "reg", "prometheus", ".", "Registerer", ",", "dir", "string", ",", "segmentSize", "int", ")", "(", "*", "WAL", ",", "error", ")", "{", "if", "segmentSize", "%", "pageSize", "!=", "0", "{", ...
// NewSize returns a new WAL over the given directory. // New segments are created with the specified size.
[ "NewSize", "returns", "a", "new", "WAL", "over", "the", "given", "directory", ".", "New", "segments", "are", "created", "with", "the", "specified", "size", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L183-L252
train
prometheus/tsdb
wal/wal.go
Repair
func (w *WAL) Repair(origErr error) error { // We could probably have a mode that only discards torn records right around // the corruption to preserve as data much as possible. // But that's not generally applicable if the records have any kind of causality. // Maybe as an extra mode in the future if mid-WAL corruptions become // a frequent concern. err := errors.Cause(origErr) // So that we can pick up errors even if wrapped. cerr, ok := err.(*CorruptionErr) if !ok { return errors.Wrap(origErr, "cannot handle error") } if cerr.Segment < 0 { return errors.New("corruption error does not specify position") } level.Warn(w.logger).Log("msg", "starting corruption repair", "segment", cerr.Segment, "offset", cerr.Offset) // All segments behind the corruption can no longer be used. segs, err := listSegments(w.dir) if err != nil { return errors.Wrap(err, "list segments") } level.Warn(w.logger).Log("msg", "deleting all segments newer than corrupted segment", "segment", cerr.Segment) for _, s := range segs { if w.segment.i == s.index { // The active segment needs to be removed, // close it first (Windows!). Can be closed safely // as we set the current segment to repaired file // below. if err := w.segment.Close(); err != nil { return errors.Wrap(err, "close active segment") } } if s.index <= cerr.Segment { continue } if err := os.Remove(filepath.Join(w.dir, s.name)); err != nil { return errors.Wrapf(err, "delete segment:%v", s.index) } } // Regardless of the corruption offset, no record reaches into the previous segment. // So we can safely repair the WAL by removing the segment and re-inserting all // its records up to the corruption. level.Warn(w.logger).Log("msg", "rewrite corrupted segment", "segment", cerr.Segment) fn := SegmentName(w.dir, cerr.Segment) tmpfn := fn + ".repair" if err := fileutil.Rename(fn, tmpfn); err != nil { return err } // Create a clean segment and make it the active one. s, err := CreateSegment(w.dir, cerr.Segment) if err != nil { return err } if err := w.setSegment(s); err != nil { return err } f, err := os.Open(tmpfn) if err != nil { return errors.Wrap(err, "open segment") } defer f.Close() r := NewReader(bufio.NewReader(f)) for r.Next() { // Add records only up to the where the error was. if r.Offset() >= cerr.Offset { break } if err := w.Log(r.Record()); err != nil { return errors.Wrap(err, "insert record") } } // We expect an error here from r.Err(), so nothing to handle. // We explicitly close even when there is a defer for Windows to be // able to delete it. The defer is in place to close it in-case there // are errors above. if err := f.Close(); err != nil { return errors.Wrap(err, "close corrupted file") } if err := os.Remove(tmpfn); err != nil { return errors.Wrap(err, "delete corrupted segment") } return nil }
go
func (w *WAL) Repair(origErr error) error { // We could probably have a mode that only discards torn records right around // the corruption to preserve as data much as possible. // But that's not generally applicable if the records have any kind of causality. // Maybe as an extra mode in the future if mid-WAL corruptions become // a frequent concern. err := errors.Cause(origErr) // So that we can pick up errors even if wrapped. cerr, ok := err.(*CorruptionErr) if !ok { return errors.Wrap(origErr, "cannot handle error") } if cerr.Segment < 0 { return errors.New("corruption error does not specify position") } level.Warn(w.logger).Log("msg", "starting corruption repair", "segment", cerr.Segment, "offset", cerr.Offset) // All segments behind the corruption can no longer be used. segs, err := listSegments(w.dir) if err != nil { return errors.Wrap(err, "list segments") } level.Warn(w.logger).Log("msg", "deleting all segments newer than corrupted segment", "segment", cerr.Segment) for _, s := range segs { if w.segment.i == s.index { // The active segment needs to be removed, // close it first (Windows!). Can be closed safely // as we set the current segment to repaired file // below. if err := w.segment.Close(); err != nil { return errors.Wrap(err, "close active segment") } } if s.index <= cerr.Segment { continue } if err := os.Remove(filepath.Join(w.dir, s.name)); err != nil { return errors.Wrapf(err, "delete segment:%v", s.index) } } // Regardless of the corruption offset, no record reaches into the previous segment. // So we can safely repair the WAL by removing the segment and re-inserting all // its records up to the corruption. level.Warn(w.logger).Log("msg", "rewrite corrupted segment", "segment", cerr.Segment) fn := SegmentName(w.dir, cerr.Segment) tmpfn := fn + ".repair" if err := fileutil.Rename(fn, tmpfn); err != nil { return err } // Create a clean segment and make it the active one. s, err := CreateSegment(w.dir, cerr.Segment) if err != nil { return err } if err := w.setSegment(s); err != nil { return err } f, err := os.Open(tmpfn) if err != nil { return errors.Wrap(err, "open segment") } defer f.Close() r := NewReader(bufio.NewReader(f)) for r.Next() { // Add records only up to the where the error was. if r.Offset() >= cerr.Offset { break } if err := w.Log(r.Record()); err != nil { return errors.Wrap(err, "insert record") } } // We expect an error here from r.Err(), so nothing to handle. // We explicitly close even when there is a defer for Windows to be // able to delete it. The defer is in place to close it in-case there // are errors above. if err := f.Close(); err != nil { return errors.Wrap(err, "close corrupted file") } if err := os.Remove(tmpfn); err != nil { return errors.Wrap(err, "delete corrupted segment") } return nil }
[ "func", "(", "w", "*", "WAL", ")", "Repair", "(", "origErr", "error", ")", "error", "{", "err", ":=", "errors", ".", "Cause", "(", "origErr", ")", "\n", "cerr", ",", "ok", ":=", "err", ".", "(", "*", "CorruptionErr", ")", "\n", "if", "!", "ok", ...
// Repair attempts to repair the WAL based on the error. // It discards all data after the corruption.
[ "Repair", "attempts", "to", "repair", "the", "WAL", "based", "on", "the", "error", ".", "It", "discards", "all", "data", "after", "the", "corruption", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L279-L371
train
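Callers typically reach Repair only after a read loop surfaces a *CorruptionErr; anything else is not repairable and should be returned as-is. A hedged sketch, where readFn stands in for whatever replays the WAL:

package walsketch

import (
    "github.com/pkg/errors"
    "github.com/prometheus/tsdb/wal"
)

// replayOrRepair runs readFn (hypothetical: whatever replays the WAL) and
// repairs only corruption errors; any other failure is passed through.
func replayOrRepair(w *wal.WAL, readFn func() error) error {
    err := readFn()
    if err == nil {
        return nil
    }
    if _, ok := errors.Cause(err).(*wal.CorruptionErr); !ok {
        return err
    }
    return w.Repair(err)
}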
prometheus/tsdb
wal/wal.go
SegmentName
func SegmentName(dir string, i int) string { return filepath.Join(dir, fmt.Sprintf("%08d", i)) }
go
func SegmentName(dir string, i int) string { return filepath.Join(dir, fmt.Sprintf("%08d", i)) }
[ "func", "SegmentName", "(", "dir", "string", ",", "i", "int", ")", "string", "{", "return", "filepath", ".", "Join", "(", "dir", ",", "fmt", ".", "Sprintf", "(", "\"%08d\"", ",", "i", ")", ")", "\n", "}" ]
// SegmentName builds a segment name for the directory.
[ "SegmentName", "builds", "a", "segment", "name", "for", "the", "directory", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L374-L376
train
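The eight-digit zero padding keeps lexical and numeric ordering of segment files identical, which directory listings rely on. For illustration (Unix-style paths assumed):

package main

import (
    "fmt"

    "github.com/prometheus/tsdb/wal"
)

func main() {
    fmt.Println(wal.SegmentName("/data/wal", 0))   // /data/wal/00000000
    fmt.Println(wal.SegmentName("/data/wal", 123)) // /data/wal/00000123
}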
prometheus/tsdb
wal/wal.go
nextSegment
func (w *WAL) nextSegment() error { // Only flush the current page if it actually holds data. if w.page.alloc > 0 { if err := w.flushPage(true); err != nil { return err } } next, err := CreateSegment(w.dir, w.segment.Index()+1) if err != nil { return errors.Wrap(err, "create new segment file") } prev := w.segment if err := w.setSegment(next); err != nil { return err } // Don't block further writes by fsyncing the last segment. w.actorc <- func() { if err := w.fsync(prev); err != nil { level.Error(w.logger).Log("msg", "sync previous segment", "err", err) } if err := prev.Close(); err != nil { level.Error(w.logger).Log("msg", "close previous segment", "err", err) } } return nil }
go
func (w *WAL) nextSegment() error { // Only flush the current page if it actually holds data. if w.page.alloc > 0 { if err := w.flushPage(true); err != nil { return err } } next, err := CreateSegment(w.dir, w.segment.Index()+1) if err != nil { return errors.Wrap(err, "create new segment file") } prev := w.segment if err := w.setSegment(next); err != nil { return err } // Don't block further writes by fsyncing the last segment. w.actorc <- func() { if err := w.fsync(prev); err != nil { level.Error(w.logger).Log("msg", "sync previous segment", "err", err) } if err := prev.Close(); err != nil { level.Error(w.logger).Log("msg", "close previous segment", "err", err) } } return nil }
[ "func", "(", "w", "*", "WAL", ")", "nextSegment", "(", ")", "error", "{", "if", "w", ".", "page", ".", "alloc", ">", "0", "{", "if", "err", ":=", "w", ".", "flushPage", "(", "true", ")", ";", "err", "!=", "nil", "{", "return", "err", "\n", "}...
// nextSegment creates the next segment and closes the previous one.
[ "nextSegment", "creates", "the", "next", "segment", "and", "closes", "the", "previous", "one", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L379-L405
train
prometheus/tsdb
wal/wal.go
flushPage
func (w *WAL) flushPage(clear bool) error { w.pageFlushes.Inc() p := w.page clear = clear || p.full() // No more data will fit into the page. Enqueue and clear it. if clear { p.alloc = pageSize // Write till end of page. w.pageCompletions.Inc() } n, err := w.segment.Write(p.buf[p.flushed:p.alloc]) if err != nil { return err } p.flushed += n // We flushed an entire page, prepare a new one. if clear { for i := range p.buf { p.buf[i] = 0 } p.alloc = 0 p.flushed = 0 w.donePages++ } return nil }
go
func (w *WAL) flushPage(clear bool) error { w.pageFlushes.Inc() p := w.page clear = clear || p.full() // No more data will fit into the page. Enqueue and clear it. if clear { p.alloc = pageSize // Write till end of page. w.pageCompletions.Inc() } n, err := w.segment.Write(p.buf[p.flushed:p.alloc]) if err != nil { return err } p.flushed += n // We flushed an entire page, prepare a new one. if clear { for i := range p.buf { p.buf[i] = 0 } p.alloc = 0 p.flushed = 0 w.donePages++ } return nil }
[ "func", "(", "w", "*", "WAL", ")", "flushPage", "(", "clear", "bool", ")", "error", "{", "w", ".", "pageFlushes", ".", "Inc", "(", ")", "\n", "p", ":=", "w", ".", "page", "\n", "clear", "=", "clear", "||", "p", ".", "full", "(", ")", "\n", "i...
// flushPage writes the new contents of the page to disk. If no more records will fit into // the page, the remaining bytes will be set to zero and a new page will be started. // If clear is true, this is enforced regardless of how many bytes are left in the page.
[ "flushPage", "writes", "the", "new", "contents", "of", "the", "page", "to", "disk", ".", "If", "no", "more", "records", "will", "fit", "into", "the", "page", "the", "remaining", "bytes", "will", "be", "set", "to", "zero", "and", "a", "new", "page", "wi...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L423-L450
train
prometheus/tsdb
wal/wal.go
Log
func (w *WAL) Log(recs ...[]byte) error { w.mtx.Lock() defer w.mtx.Unlock() // Callers could just implement their own list record format but adding // a bit of extra logic here frees them from that overhead. for i, r := range recs { if err := w.log(r, i == len(recs)-1); err != nil { return err } } return nil }
go
func (w *WAL) Log(recs ...[]byte) error { w.mtx.Lock() defer w.mtx.Unlock() // Callers could just implement their own list record format but adding // a bit of extra logic here frees them from that overhead. for i, r := range recs { if err := w.log(r, i == len(recs)-1); err != nil { return err } } return nil }
[ "func", "(", "w", "*", "WAL", ")", "Log", "(", "recs", "...", "[", "]", "byte", ")", "error", "{", "w", ".", "mtx", ".", "Lock", "(", ")", "\n", "defer", "w", ".", "mtx", ".", "Unlock", "(", ")", "\n", "for", "i", ",", "r", ":=", "range", ...
// Log writes the records into the log. // Multiple records can be passed at once to reduce writes and increase throughput.
[ "Log", "writes", "the", "records", "into", "the", "log", ".", "Multiple", "records", "can", "be", "passed", "at", "once", "to", "reduce", "writes", "and", "increase", "throughput", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L485-L496
train
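Since only the final record of a batch forces a flush of a partially filled page (see log below), batching records into one call can save page flushes. A trivial sketch:

package walsketch

import "github.com/prometheus/tsdb/wal"

// logBatch appends all records through a single Log call so that full
// records can share pages; only the last one flushes a partial page.
func logBatch(w *wal.WAL, recs ...[]byte) error {
    return w.Log(recs...)
}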
prometheus/tsdb
wal/wal.go
log
func (w *WAL) log(rec []byte, final bool) error { // If the record is too big to fit within the active page in the current // segment, terminate the active segment and advance to the next one. // This ensures that records do not cross segment boundaries. left := w.page.remaining() - recordHeaderSize // Free space in the active page. left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment. if len(rec) > left { if err := w.nextSegment(); err != nil { return err } } // Populate as many pages as necessary to fit the record. // Be careful to always do one pass to ensure we write zero-length records. for i := 0; i == 0 || len(rec) > 0; i++ { p := w.page // Find how much of the record we can fit into the page. var ( l = min(len(rec), (pageSize-p.alloc)-recordHeaderSize) part = rec[:l] buf = p.buf[p.alloc:] typ recType ) switch { case i == 0 && len(part) == len(rec): typ = recFull case len(part) == len(rec): typ = recLast case i == 0: typ = recFirst default: typ = recMiddle } buf[0] = byte(typ) crc := crc32.Checksum(part, castagnoliTable) binary.BigEndian.PutUint16(buf[1:], uint16(len(part))) binary.BigEndian.PutUint32(buf[3:], crc) copy(buf[recordHeaderSize:], part) p.alloc += len(part) + recordHeaderSize // By definition when a record is split it means its size is bigger than // the page boundary so the current page would be full and needs to be flushed. // On contrary if we wrote a full record, we can fit more records of the batch // into the page before flushing it. if final || typ != recFull || w.page.full() { if err := w.flushPage(false); err != nil { return err } } rec = rec[l:] } return nil }
go
func (w *WAL) log(rec []byte, final bool) error { // If the record is too big to fit within the active page in the current // segment, terminate the active segment and advance to the next one. // This ensures that records do not cross segment boundaries. left := w.page.remaining() - recordHeaderSize // Free space in the active page. left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment. if len(rec) > left { if err := w.nextSegment(); err != nil { return err } } // Populate as many pages as necessary to fit the record. // Be careful to always do one pass to ensure we write zero-length records. for i := 0; i == 0 || len(rec) > 0; i++ { p := w.page // Find how much of the record we can fit into the page. var ( l = min(len(rec), (pageSize-p.alloc)-recordHeaderSize) part = rec[:l] buf = p.buf[p.alloc:] typ recType ) switch { case i == 0 && len(part) == len(rec): typ = recFull case len(part) == len(rec): typ = recLast case i == 0: typ = recFirst default: typ = recMiddle } buf[0] = byte(typ) crc := crc32.Checksum(part, castagnoliTable) binary.BigEndian.PutUint16(buf[1:], uint16(len(part))) binary.BigEndian.PutUint32(buf[3:], crc) copy(buf[recordHeaderSize:], part) p.alloc += len(part) + recordHeaderSize // By definition when a record is split it means its size is bigger than // the page boundary so the current page would be full and needs to be flushed. // On contrary if we wrote a full record, we can fit more records of the batch // into the page before flushing it. if final || typ != recFull || w.page.full() { if err := w.flushPage(false); err != nil { return err } } rec = rec[l:] } return nil }
[ "func", "(", "w", "*", "WAL", ")", "log", "(", "rec", "[", "]", "byte", ",", "final", "bool", ")", "error", "{", "left", ":=", "w", ".", "page", ".", "remaining", "(", ")", "-", "recordHeaderSize", "\n", "left", "+=", "(", "pageSize", "-", "recor...
// log writes rec to the log and forces a flush of the current page if it's // the final record of a batch, the record is bigger than the page size, or // the current page is full.
[ "log", "writes", "rec", "to", "the", "log", "and", "forces", "a", "flush", "of", "the", "current", "page", "if", "its", "the", "final", "record", "of", "a", "batch", "the", "record", "is", "bigger", "than", "the", "page", "size", "or", "the", "current"...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L501-L558
train
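The 7-byte fragment header assembled above is: one type byte, a big-endian uint16 fragment length, and a big-endian Castagnoli CRC32 of the fragment payload. A standalone sketch of the same layout; the concrete numeric value of recFull is an assumption here:

package main

import (
    "encoding/binary"
    "fmt"
    "hash/crc32"
)

func main() {
    part := []byte("payload")
    table := crc32.MakeTable(crc32.Castagnoli)

    hdr := make([]byte, 7) // recordHeaderSize
    hdr[0] = 1             // assumed value of recFull
    binary.BigEndian.PutUint16(hdr[1:], uint16(len(part)))
    binary.BigEndian.PutUint32(hdr[3:], crc32.Checksum(part, table))

    fmt.Printf("header: % x\n", hdr)
}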
prometheus/tsdb
wal/wal.go
Truncate
func (w *WAL) Truncate(i int) (err error) { w.truncateTotal.Inc() defer func() { if err != nil { w.truncateFail.Inc() } }() refs, err := listSegments(w.dir) if err != nil { return err } for _, r := range refs { if r.index >= i { break } if err = os.Remove(filepath.Join(w.dir, r.name)); err != nil { return err } } return nil }
go
func (w *WAL) Truncate(i int) (err error) { w.truncateTotal.Inc() defer func() { if err != nil { w.truncateFail.Inc() } }() refs, err := listSegments(w.dir) if err != nil { return err } for _, r := range refs { if r.index >= i { break } if err = os.Remove(filepath.Join(w.dir, r.name)); err != nil { return err } } return nil }
[ "func", "(", "w", "*", "WAL", ")", "Truncate", "(", "i", "int", ")", "(", "err", "error", ")", "{", "w", ".", "truncateTotal", ".", "Inc", "(", ")", "\n", "defer", "func", "(", ")", "{", "if", "err", "!=", "nil", "{", "w", ".", "truncateFail", ...
// Truncate drops all segments before i.
[ "Truncate", "drops", "all", "segments", "before", "i", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L574-L594
train
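Note the comparison r.index >= i: segment i itself survives, only strictly older segments are removed. A sketch that drops everything before the newest segment, using Segments as NewSize does above to get the first and last index on disk:

package walsketch

import "github.com/prometheus/tsdb/wal"

// truncateBefore drops all segments strictly older than the newest one.
func truncateBefore(w *wal.WAL) error {
    _, last, err := w.Segments()
    if err != nil {
        return err
    }
    return w.Truncate(last) // segment `last` itself is kept
}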
prometheus/tsdb
wal/wal.go
Close
func (w *WAL) Close() (err error) { w.mtx.Lock() defer w.mtx.Unlock() if w.closed { return errors.New("wal already closed") } // Flush the last page and zero out all its remaining size. // We must not flush an empty page as it would falsely signal // the segment is done if we start writing to it again after opening. if w.page.alloc > 0 { if err := w.flushPage(true); err != nil { return err } } donec := make(chan struct{}) w.stopc <- donec <-donec if err = w.fsync(w.segment); err != nil { level.Error(w.logger).Log("msg", "sync previous segment", "err", err) } if err := w.segment.Close(); err != nil { level.Error(w.logger).Log("msg", "close previous segment", "err", err) } w.closed = true return nil }
go
func (w *WAL) Close() (err error) { w.mtx.Lock() defer w.mtx.Unlock() if w.closed { return errors.New("wal already closed") } // Flush the last page and zero out all its remaining size. // We must not flush an empty page as it would falsely signal // the segment is done if we start writing to it again after opening. if w.page.alloc > 0 { if err := w.flushPage(true); err != nil { return err } } donec := make(chan struct{}) w.stopc <- donec <-donec if err = w.fsync(w.segment); err != nil { level.Error(w.logger).Log("msg", "sync previous segment", "err", err) } if err := w.segment.Close(); err != nil { level.Error(w.logger).Log("msg", "close previous segment", "err", err) } w.closed = true return nil }
[ "func", "(", "w", "*", "WAL", ")", "Close", "(", ")", "(", "err", "error", ")", "{", "w", ".", "mtx", ".", "Lock", "(", ")", "\n", "defer", "w", ".", "mtx", ".", "Unlock", "(", ")", "\n", "if", "w", ".", "closed", "{", "return", "errors", "...
// Close flushes all writes and closes the active segment.
[ "Close", "flushes", "all", "writes", "and", "closes", "the", "active", "segment", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L604-L633
train
prometheus/tsdb
wal/wal.go
NewSegmentsRangeReader
func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) { var segs []*Segment for _, sgmRange := range sr { refs, err := listSegments(sgmRange.Dir) if err != nil { return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir) } for _, r := range refs { if sgmRange.First >= 0 && r.index < sgmRange.First { continue } if sgmRange.Last >= 0 && r.index > sgmRange.Last { break } s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name)) if err != nil { return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir) } segs = append(segs, s) } } return newSegmentBufReader(segs...), nil }
go
func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) { var segs []*Segment for _, sgmRange := range sr { refs, err := listSegments(sgmRange.Dir) if err != nil { return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir) } for _, r := range refs { if sgmRange.First >= 0 && r.index < sgmRange.First { continue } if sgmRange.Last >= 0 && r.index > sgmRange.Last { break } s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name)) if err != nil { return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir) } segs = append(segs, s) } } return newSegmentBufReader(segs...), nil }
[ "func", "NewSegmentsRangeReader", "(", "sr", "...", "SegmentRange", ")", "(", "io", ".", "ReadCloser", ",", "error", ")", "{", "var", "segs", "[", "]", "*", "Segment", "\n", "for", "_", ",", "sgmRange", ":=", "range", "sr", "{", "refs", ",", "err", "...
// NewSegmentsRangeReader returns a new reader over the given WAL segment ranges. // If first or last are -1, the range is open on the respective end.
[ "NewSegmentsRangeReader", "returns", "a", "new", "reader", "over", "the", "given", "WAL", "segment", "ranges", ".", "If", "first", "or", "last", "are", "-", "1", "the", "range", "is", "open", "on", "the", "respective", "end", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/wal/wal.go#L676-L700
train
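Combined with the WAL record reader, this gives a simple way to replay a whole directory; -1 on both ends leaves the range open. A sketch, assuming the package's Reader exposes Next, Record, and Err as used in Repair above:

package walsketch

import (
    "fmt"

    "github.com/prometheus/tsdb/wal"
)

// dumpAll replays every record in dir through one range reader.
func dumpAll(dir string) error {
    sr, err := wal.NewSegmentsRangeReader(wal.SegmentRange{Dir: dir, First: -1, Last: -1})
    if err != nil {
        return err
    }
    defer sr.Close()

    r := wal.NewReader(sr)
    for r.Next() {
        fmt.Printf("record of %d bytes\n", len(r.Record()))
    }
    return r.Err()
}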
prometheus/tsdb
index/index.go
NewTOCFromByteSlice
func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { if bs.Len() < indexTOCLen { return nil, encoding.ErrInvalidSize } b := bs.Range(bs.Len()-indexTOCLen, bs.Len()) expCRC := binary.BigEndian.Uint32(b[len(b)-4:]) d := encoding.Decbuf{B: b[:len(b)-4]} if d.Crc32(castagnoliTable) != expCRC { return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC") } if err := d.Err(); err != nil { return nil, err } return &TOC{ Symbols: d.Be64(), Series: d.Be64(), LabelIndices: d.Be64(), LabelIndicesTable: d.Be64(), Postings: d.Be64(), PostingsTable: d.Be64(), }, nil }
go
func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { if bs.Len() < indexTOCLen { return nil, encoding.ErrInvalidSize } b := bs.Range(bs.Len()-indexTOCLen, bs.Len()) expCRC := binary.BigEndian.Uint32(b[len(b)-4:]) d := encoding.Decbuf{B: b[:len(b)-4]} if d.Crc32(castagnoliTable) != expCRC { return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC") } if err := d.Err(); err != nil { return nil, err } return &TOC{ Symbols: d.Be64(), Series: d.Be64(), LabelIndices: d.Be64(), LabelIndicesTable: d.Be64(), Postings: d.Be64(), PostingsTable: d.Be64(), }, nil }
[ "func", "NewTOCFromByteSlice", "(", "bs", "ByteSlice", ")", "(", "*", "TOC", ",", "error", ")", "{", "if", "bs", ".", "Len", "(", ")", "<", "indexTOCLen", "{", "return", "nil", ",", "encoding", ".", "ErrInvalidSize", "\n", "}", "\n", "b", ":=", "bs",...
// NewTOCFromByteSlice returns the parsed TOC from the given index byte slice.
[ "NewTOCFromByteSlice", "returns", "the", "parsed", "TOC", "from", "the", "given", "index", "byte", "slice", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L150-L175
train
prometheus/tsdb
index/index.go
NewWriter
func NewWriter(fn string) (*Writer, error) { dir := filepath.Dir(fn) df, err := fileutil.OpenDir(dir) if err != nil { return nil, err } defer df.Close() // Close for platform windows. if err := os.RemoveAll(fn); err != nil { return nil, errors.Wrap(err, "remove any existing index at path") } f, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY, 0666) if err != nil { return nil, err } if err := df.Sync(); err != nil { return nil, errors.Wrap(err, "sync dir") } iw := &Writer{ f: f, fbuf: bufio.NewWriterSize(f, 1<<22), pos: 0, stage: idxStageNone, // Reusable memory. buf1: encoding.Encbuf{B: make([]byte, 0, 1<<22)}, buf2: encoding.Encbuf{B: make([]byte, 0, 1<<22)}, uint32s: make([]uint32, 0, 1<<15), // Caches. symbols: make(map[string]uint32, 1<<13), seriesOffsets: make(map[uint64]uint64, 1<<16), crc32: newCRC32(), } if err := iw.writeMeta(); err != nil { return nil, err } return iw, nil }
go
func NewWriter(fn string) (*Writer, error) { dir := filepath.Dir(fn) df, err := fileutil.OpenDir(dir) if err != nil { return nil, err } defer df.Close() // Close for platform windows. if err := os.RemoveAll(fn); err != nil { return nil, errors.Wrap(err, "remove any existing index at path") } f, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY, 0666) if err != nil { return nil, err } if err := df.Sync(); err != nil { return nil, errors.Wrap(err, "sync dir") } iw := &Writer{ f: f, fbuf: bufio.NewWriterSize(f, 1<<22), pos: 0, stage: idxStageNone, // Reusable memory. buf1: encoding.Encbuf{B: make([]byte, 0, 1<<22)}, buf2: encoding.Encbuf{B: make([]byte, 0, 1<<22)}, uint32s: make([]uint32, 0, 1<<15), // Caches. symbols: make(map[string]uint32, 1<<13), seriesOffsets: make(map[uint64]uint64, 1<<16), crc32: newCRC32(), } if err := iw.writeMeta(); err != nil { return nil, err } return iw, nil }
[ "func", "NewWriter", "(", "fn", "string", ")", "(", "*", "Writer", ",", "error", ")", "{", "dir", ":=", "filepath", ".", "Dir", "(", "fn", ")", "\n", "df", ",", "err", ":=", "fileutil", ".", "OpenDir", "(", "dir", ")", "\n", "if", "err", "!=", ...
// NewWriter returns a new Writer to the given filename. It serializes data in format version 2.
[ "NewWriter", "returns", "a", "new", "Writer", "to", "the", "given", "filename", ".", "It", "serializes", "data", "in", "format", "version", "2", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L178-L219
train
prometheus/tsdb
index/index.go
addPadding
func (w *Writer) addPadding(size int) error { p := w.pos % uint64(size) if p == 0 { return nil } p = uint64(size) - p return errors.Wrap(w.write(make([]byte, p)), "add padding") }
go
func (w *Writer) addPadding(size int) error { p := w.pos % uint64(size) if p == 0 { return nil } p = uint64(size) - p return errors.Wrap(w.write(make([]byte, p)), "add padding") }
[ "func", "(", "w", "*", "Writer", ")", "addPadding", "(", "size", "int", ")", "error", "{", "p", ":=", "w", ".", "pos", "%", "uint64", "(", "size", ")", "\n", "if", "p", "==", "0", "{", "return", "nil", "\n", "}", "\n", "p", "=", "uint64", "("...
// addPadding adds zero byte padding until the file size is a multiple of size.
[ "addPadding", "adds", "zero", "byte", "padding", "until", "the", "file", "size", "is", "a", "multiple", "of", "size", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L240-L247
train
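The arithmetic in plain form: p = pos mod size bytes into an alignment unit means size - p zero bytes restore alignment, and nothing is written when already aligned. A standalone sketch:

package main

import "fmt"

// padLen mirrors the addPadding arithmetic.
func padLen(pos, size uint64) uint64 {
    p := pos % size
    if p == 0 {
        return 0
    }
    return size - p
}

func main() {
    fmt.Println(padLen(19, 16)) // 13: next multiple of 16 after 19 is 32
    fmt.Println(padLen(32, 16)) // 0: already aligned
}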
prometheus/tsdb
index/index.go
ensureStage
func (w *Writer) ensureStage(s indexWriterStage) error { if w.stage == s { return nil } if w.stage > s { return errors.Errorf("invalid stage %q, currently at %q", s, w.stage) } // Mark start of sections in table of contents. switch s { case idxStageSymbols: w.toc.Symbols = w.pos case idxStageSeries: w.toc.Series = w.pos case idxStageLabelIndex: w.toc.LabelIndices = w.pos case idxStagePostings: w.toc.Postings = w.pos case idxStageDone: w.toc.LabelIndicesTable = w.pos if err := w.writeOffsetTable(w.labelIndexes); err != nil { return err } w.toc.PostingsTable = w.pos if err := w.writeOffsetTable(w.postings); err != nil { return err } if err := w.writeTOC(); err != nil { return err } } w.stage = s return nil }
go
func (w *Writer) ensureStage(s indexWriterStage) error { if w.stage == s { return nil } if w.stage > s { return errors.Errorf("invalid stage %q, currently at %q", s, w.stage) } // Mark start of sections in table of contents. switch s { case idxStageSymbols: w.toc.Symbols = w.pos case idxStageSeries: w.toc.Series = w.pos case idxStageLabelIndex: w.toc.LabelIndices = w.pos case idxStagePostings: w.toc.Postings = w.pos case idxStageDone: w.toc.LabelIndicesTable = w.pos if err := w.writeOffsetTable(w.labelIndexes); err != nil { return err } w.toc.PostingsTable = w.pos if err := w.writeOffsetTable(w.postings); err != nil { return err } if err := w.writeTOC(); err != nil { return err } } w.stage = s return nil }
[ "func", "(", "w", "*", "Writer", ")", "ensureStage", "(", "s", "indexWriterStage", ")", "error", "{", "if", "w", ".", "stage", "==", "s", "{", "return", "nil", "\n", "}", "\n", "if", "w", ".", "stage", ">", "s", "{", "return", "errors", ".", "Err...
// ensureStage handles transitions between write stages and ensures that IndexWriter // methods are called in an order valid for the implementation.
[ "ensureStage", "handles", "transitions", "between", "write", "stages", "and", "ensures", "that", "IndexWriter", "methods", "are", "called", "in", "an", "order", "valid", "for", "the", "implementation", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L251-L288
train
prometheus/tsdb
index/index.go
writeOffsetTable
func (w *Writer) writeOffsetTable(entries []hashEntry) error { w.buf2.Reset() w.buf2.PutBE32int(len(entries)) for _, e := range entries { w.buf2.PutUvarint(len(e.keys)) for _, k := range e.keys { w.buf2.PutUvarintStr(k) } w.buf2.PutUvarint64(e.offset) } w.buf1.Reset() w.buf1.PutBE32int(w.buf2.Len()) w.buf2.PutHash(w.crc32) return w.write(w.buf1.Get(), w.buf2.Get()) }
go
func (w *Writer) writeOffsetTable(entries []hashEntry) error { w.buf2.Reset() w.buf2.PutBE32int(len(entries)) for _, e := range entries { w.buf2.PutUvarint(len(e.keys)) for _, k := range e.keys { w.buf2.PutUvarintStr(k) } w.buf2.PutUvarint64(e.offset) } w.buf1.Reset() w.buf1.PutBE32int(w.buf2.Len()) w.buf2.PutHash(w.crc32) return w.write(w.buf1.Get(), w.buf2.Get()) }
[ "func", "(", "w", "*", "Writer", ")", "writeOffsetTable", "(", "entries", "[", "]", "hashEntry", ")", "error", "{", "w", ".", "buf2", ".", "Reset", "(", ")", "\n", "w", ".", "buf2", ".", "PutBE32int", "(", "len", "(", "entries", ")", ")", "\n", "...
// writeOffsetTable writes a sequence of readable hash entries.
[ "writeOffsetTable", "writes", "a", "sequence", "of", "readable", "hash", "entries", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L451-L468
train
prometheus/tsdb
index/index.go
NewReader
func NewReader(b ByteSlice) (*Reader, error) { return newReader(b, ioutil.NopCloser(nil)) }
go
func NewReader(b ByteSlice) (*Reader, error) { return newReader(b, ioutil.NopCloser(nil)) }
[ "func", "NewReader", "(", "b", "ByteSlice", ")", "(", "*", "Reader", ",", "error", ")", "{", "return", "newReader", "(", "b", ",", "ioutil", ".", "NopCloser", "(", "nil", ")", ")", "\n", "}" ]
// NewReader returns a new index reader on the given byte slice. It automatically // handles different format versions.
[ "NewReader", "returns", "a", "new", "index", "reader", "on", "the", "given", "byte", "slice", ".", "It", "automatically", "handles", "different", "format", "versions", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L618-L620
train
prometheus/tsdb
index/index.go
NewFileReader
func NewFileReader(path string) (*Reader, error) { f, err := fileutil.OpenMmapFile(path) if err != nil { return nil, err } r, err := newReader(realByteSlice(f.Bytes()), f) if err != nil { var merr tsdb_errors.MultiError merr.Add(err) merr.Add(f.Close()) return nil, merr } return r, nil }
go
func NewFileReader(path string) (*Reader, error) { f, err := fileutil.OpenMmapFile(path) if err != nil { return nil, err } r, err := newReader(realByteSlice(f.Bytes()), f) if err != nil { var merr tsdb_errors.MultiError merr.Add(err) merr.Add(f.Close()) return nil, merr } return r, nil }
[ "func", "NewFileReader", "(", "path", "string", ")", "(", "*", "Reader", ",", "error", ")", "{", "f", ",", "err", ":=", "fileutil", ".", "OpenMmapFile", "(", "path", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", "\n", "}", ...
// NewFileReader returns a new index reader against the given index file.
[ "NewFileReader", "returns", "a", "new", "index", "reader", "against", "the", "given", "index", "file", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L623-L637
train
prometheus/tsdb
index/index.go
PostingsRanges
func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) { m := map[labels.Label]Range{} for k, e := range r.postings { for v, start := range e { d := encoding.NewDecbufAt(r.b, int(start), castagnoliTable) if d.Err() != nil { return nil, d.Err() } m[labels.Label{Name: k, Value: v}] = Range{ Start: int64(start) + 4, End: int64(start) + 4 + int64(d.Len()), } } } return m, nil }
go
func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) { m := map[labels.Label]Range{} for k, e := range r.postings { for v, start := range e { d := encoding.NewDecbufAt(r.b, int(start), castagnoliTable) if d.Err() != nil { return nil, d.Err() } m[labels.Label{Name: k, Value: v}] = Range{ Start: int64(start) + 4, End: int64(start) + 4 + int64(d.Len()), } } } return m, nil }
[ "func", "(", "r", "*", "Reader", ")", "PostingsRanges", "(", ")", "(", "map", "[", "labels", ".", "Label", "]", "Range", ",", "error", ")", "{", "m", ":=", "map", "[", "labels", ".", "Label", "]", "Range", "{", "}", "\n", "for", "k", ",", "e", ...
// PostingsRanges returns a new map of byte ranges in the underlying index file // for all postings lists.
[ "PostingsRanges", "returns", "a", "new", "map", "of", "byte", "ranges", "in", "the", "underlying", "index", "file", "for", "all", "postings", "lists", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L725-L741
train
prometheus/tsdb
index/index.go
ReadSymbols
func ReadSymbols(bs ByteSlice, version int, off int) ([]string, map[uint32]string, error) { if off == 0 { return nil, nil, nil } d := encoding.NewDecbufAt(bs, off, castagnoliTable) var ( origLen = d.Len() cnt = d.Be32int() basePos = uint32(off) + 4 nextPos = basePos + uint32(origLen-d.Len()) symbolSlice []string symbols = map[uint32]string{} ) if version == FormatV2 { symbolSlice = make([]string, 0, cnt) } for d.Err() == nil && d.Len() > 0 && cnt > 0 { s := d.UvarintStr() if version == FormatV2 { symbolSlice = append(symbolSlice, s) } else { symbols[nextPos] = s nextPos = basePos + uint32(origLen-d.Len()) } cnt-- } return symbolSlice, symbols, errors.Wrap(d.Err(), "read symbols") }
go
func ReadSymbols(bs ByteSlice, version int, off int) ([]string, map[uint32]string, error) { if off == 0 { return nil, nil, nil } d := encoding.NewDecbufAt(bs, off, castagnoliTable) var ( origLen = d.Len() cnt = d.Be32int() basePos = uint32(off) + 4 nextPos = basePos + uint32(origLen-d.Len()) symbolSlice []string symbols = map[uint32]string{} ) if version == FormatV2 { symbolSlice = make([]string, 0, cnt) } for d.Err() == nil && d.Len() > 0 && cnt > 0 { s := d.UvarintStr() if version == FormatV2 { symbolSlice = append(symbolSlice, s) } else { symbols[nextPos] = s nextPos = basePos + uint32(origLen-d.Len()) } cnt-- } return symbolSlice, symbols, errors.Wrap(d.Err(), "read symbols") }
[ "func", "ReadSymbols", "(", "bs", "ByteSlice", ",", "version", "int", ",", "off", "int", ")", "(", "[", "]", "string", ",", "map", "[", "uint32", "]", "string", ",", "error", ")", "{", "if", "off", "==", "0", "{", "return", "nil", ",", "nil", ","...
// ReadSymbols reads the symbol table fully into memory and allocates proper strings for them. // Strings backed by the mmap'd memory would cause memory faults if applications keep using them // after the reader is closed.
[ "ReadSymbols", "reads", "the", "symbol", "table", "fully", "into", "memory", "and", "allocates", "proper", "strings", "for", "them", ".", "Strings", "backed", "by", "the", "mmap", "d", "memory", "would", "cause", "memory", "faults", "if", "applications", "keep...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L746-L776
train
prometheus/tsdb
index/index.go
ReadOffsetTable
func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64) error) error { d := encoding.NewDecbufAt(bs, int(off), castagnoliTable) cnt := d.Be32() for d.Err() == nil && d.Len() > 0 && cnt > 0 { keyCount := d.Uvarint() keys := make([]string, 0, keyCount) for i := 0; i < keyCount; i++ { keys = append(keys, d.UvarintStr()) } o := d.Uvarint64() if d.Err() != nil { break } if err := f(keys, o); err != nil { return err } cnt-- } return d.Err() }
go
func ReadOffsetTable(bs ByteSlice, off uint64, f func([]string, uint64) error) error { d := encoding.NewDecbufAt(bs, int(off), castagnoliTable) cnt := d.Be32() for d.Err() == nil && d.Len() > 0 && cnt > 0 { keyCount := d.Uvarint() keys := make([]string, 0, keyCount) for i := 0; i < keyCount; i++ { keys = append(keys, d.UvarintStr()) } o := d.Uvarint64() if d.Err() != nil { break } if err := f(keys, o); err != nil { return err } cnt-- } return d.Err() }
[ "func", "ReadOffsetTable", "(", "bs", "ByteSlice", ",", "off", "uint64", ",", "f", "func", "(", "[", "]", "string", ",", "uint64", ")", "error", ")", "error", "{", "d", ":=", "encoding", ".", "NewDecbufAt", "(", "bs", ",", "int", "(", "off", ")", "...
// ReadOffsetTable reads the offset table at the given position and calls f for each // entry found. If f returns an error it stops decoding and returns the received error.
[ "ReadOffsetTable", "reads", "an", "offset", "table", "and", "at", "the", "given", "position", "calls", "f", "for", "each", "found", "entry", ".", "If", "f", "returns", "an", "error", "it", "stops", "decoding", "and", "returns", "the", "received", "error", ...
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L780-L801
train
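A sketch of the callback style, assuming the caller already holds a ByteSlice over the index and an offset such as the PostingsTable field of a TOC parsed with NewTOCFromByteSlice above:

package indexsketch

import (
    "fmt"

    "github.com/prometheus/tsdb/index"
)

// dumpOffsetTable prints every entry of one offset table; bs and off are
// assumed to be supplied by the caller.
func dumpOffsetTable(bs index.ByteSlice, off uint64) error {
    return index.ReadOffsetTable(bs, off, func(keys []string, o uint64) error {
        fmt.Printf("%v -> offset %d\n", keys, o)
        return nil // a non-nil return aborts the walk
    })
}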
prometheus/tsdb
index/index.go
Symbols
func (r *Reader) Symbols() (map[string]struct{}, error) { res := make(map[string]struct{}, len(r.symbolsV1)+len(r.symbolsV2)) for _, s := range r.symbolsV1 { res[s] = struct{}{} } for _, s := range r.symbolsV2 { res[s] = struct{}{} } return res, nil }
go
func (r *Reader) Symbols() (map[string]struct{}, error) { res := make(map[string]struct{}, len(r.symbolsV1)+len(r.symbolsV2)) for _, s := range r.symbolsV1 { res[s] = struct{}{} } for _, s := range r.symbolsV2 { res[s] = struct{}{} } return res, nil }
[ "func", "(", "r", "*", "Reader", ")", "Symbols", "(", ")", "(", "map", "[", "string", "]", "struct", "{", "}", ",", "error", ")", "{", "res", ":=", "make", "(", "map", "[", "string", "]", "struct", "{", "}", ",", "len", "(", "r", ".", "symbol...
// Symbols returns a set of symbols that exist within the index.
[ "Symbols", "returns", "a", "set", "of", "symbols", "that", "exist", "within", "the", "index", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L820-L830
train
prometheus/tsdb
index/index.go
LabelValues
func (r *Reader) LabelValues(names ...string) (StringTuples, error) { key := strings.Join(names, labelNameSeperator) off, ok := r.labels[key] if !ok { // XXX(fabxc): hot fix. Should return a partial data error and handle cases // where the entire block has no data gracefully. return emptyStringTuples{}, nil //return nil, fmt.Errorf("label index doesn't exist") } d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable) nc := d.Be32int() d.Be32() // consume unused value entry count. if d.Err() != nil { return nil, errors.Wrap(d.Err(), "read label value index") } st := &serializedStringTuples{ idsCount: nc, idsBytes: d.Get(), lookup: r.lookupSymbol, } return st, nil }
go
func (r *Reader) LabelValues(names ...string) (StringTuples, error) { key := strings.Join(names, labelNameSeperator) off, ok := r.labels[key] if !ok { // XXX(fabxc): hot fix. Should return a partial data error and handle cases // where the entire block has no data gracefully. return emptyStringTuples{}, nil //return nil, fmt.Errorf("label index doesn't exist") } d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable) nc := d.Be32int() d.Be32() // consume unused value entry count. if d.Err() != nil { return nil, errors.Wrap(d.Err(), "read label value index") } st := &serializedStringTuples{ idsCount: nc, idsBytes: d.Get(), lookup: r.lookupSymbol, } return st, nil }
[ "func", "(", "r", "*", "Reader", ")", "LabelValues", "(", "names", "...", "string", ")", "(", "StringTuples", ",", "error", ")", "{", "key", ":=", "strings", ".", "Join", "(", "names", ",", "labelNameSeperator", ")", "\n", "off", ",", "ok", ":=", "r"...
// LabelValues returns value tuples that exist for the given label name tuples.
[ "LabelValues", "returns", "value", "tuples", "that", "exist", "for", "the", "given", "label", "name", "tuples", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L838-L863
train
prometheus/tsdb
index/index.go
Series
func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error { offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. if r.version == FormatV2 { offset = id * 16 } d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable) if d.Err() != nil { return d.Err() } return errors.Wrap(r.dec.Series(d.Get(), lbls, chks), "read series") }
go
func (r *Reader) Series(id uint64, lbls *labels.Labels, chks *[]chunks.Meta) error { offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. if r.version == FormatV2 { offset = id * 16 } d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable) if d.Err() != nil { return d.Err() } return errors.Wrap(r.dec.Series(d.Get(), lbls, chks), "read series") }
[ "func", "(", "r", "*", "Reader", ")", "Series", "(", "id", "uint64", ",", "lbls", "*", "labels", ".", "Labels", ",", "chks", "*", "[", "]", "chunks", ".", "Meta", ")", "error", "{", "offset", ":=", "id", "\n", "if", "r", ".", "version", "==", "...
// Series reads the series with the given ID and writes its labels and chunks into lbls and chks.
[ "Series", "reads", "the", "series", "with", "the", "given", "ID", "and", "writes", "its", "labels", "and", "chunks", "into", "lbls", "and", "chks", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L881-L893
train
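The v2 mapping in plain form: a series ID names a 16-byte slot, so the byte offset of the length-prefixed entry is simply id * 16:

package main

import "fmt"

func main() {
    // Format v2: the ID names a 16-byte slot, so the entry starts at id*16.
    id := uint64(3)
    fmt.Println(id * 16) // 48
}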
prometheus/tsdb
index/index.go
Postings
func (r *Reader) Postings(name, value string) (Postings, error) { e, ok := r.postings[name] if !ok { return EmptyPostings(), nil } off, ok := e[value] if !ok { return EmptyPostings(), nil } d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable) if d.Err() != nil { return nil, errors.Wrap(d.Err(), "get postings entry") } _, p, err := r.dec.Postings(d.Get()) if err != nil { return nil, errors.Wrap(err, "decode postings") } return p, nil }
go
func (r *Reader) Postings(name, value string) (Postings, error) { e, ok := r.postings[name] if !ok { return EmptyPostings(), nil } off, ok := e[value] if !ok { return EmptyPostings(), nil } d := encoding.NewDecbufAt(r.b, int(off), castagnoliTable) if d.Err() != nil { return nil, errors.Wrap(d.Err(), "get postings entry") } _, p, err := r.dec.Postings(d.Get()) if err != nil { return nil, errors.Wrap(err, "decode postings") } return p, nil }
[ "func", "(", "r", "*", "Reader", ")", "Postings", "(", "name", ",", "value", "string", ")", "(", "Postings", ",", "error", ")", "{", "e", ",", "ok", ":=", "r", ".", "postings", "[", "name", "]", "\n", "if", "!", "ok", "{", "return", "EmptyPosting...
// Postings returns a postings list for the given label pair.
[ "Postings", "returns", "a", "postings", "list", "for", "the", "given", "label", "pair", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L896-L914
train
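A sketch of resolving one label pair to series IDs through a file-backed reader; the index path is hypothetical, and the Postings iterator is assumed to expose Next, At, and Err:

package indexsketch

import "github.com/prometheus/tsdb/index"

// seriesIDs resolves one label pair to series IDs; unknown pairs yield an
// empty iterator rather than an error.
func seriesIDs(path, name, value string) ([]uint64, error) {
    r, err := index.NewFileReader(path)
    if err != nil {
        return nil, err
    }
    defer r.Close()

    p, err := r.Postings(name, value)
    if err != nil {
        return nil, err
    }
    var ids []uint64
    for p.Next() {
        ids = append(ids, p.At())
    }
    return ids, p.Err()
}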
prometheus/tsdb
index/index.go
LabelNames
func (r *Reader) LabelNames() ([]string, error) { labelNamesMap := make(map[string]struct{}, len(r.labels)) for key := range r.labels { // 'key' contains the label names concatenated with the // delimiter 'labelNameSeperator'. names := strings.Split(key, labelNameSeperator) for _, name := range names { if name == allPostingsKey.Name { // This is not from any metric. // It is basically an empty label name. continue } labelNamesMap[name] = struct{}{} } } labelNames := make([]string, 0, len(labelNamesMap)) for name := range labelNamesMap { labelNames = append(labelNames, name) } sort.Strings(labelNames) return labelNames, nil }
go
func (r *Reader) LabelNames() ([]string, error) { labelNamesMap := make(map[string]struct{}, len(r.labels)) for key := range r.labels { // 'key' contains the label names concatenated with the // delimiter 'labelNameSeperator'. names := strings.Split(key, labelNameSeperator) for _, name := range names { if name == allPostingsKey.Name { // This is not from any metric. // It is basically an empty label name. continue } labelNamesMap[name] = struct{}{} } } labelNames := make([]string, 0, len(labelNamesMap)) for name := range labelNamesMap { labelNames = append(labelNames, name) } sort.Strings(labelNames) return labelNames, nil }
[ "func", "(", "r", "*", "Reader", ")", "LabelNames", "(", ")", "(", "[", "]", "string", ",", "error", ")", "{", "labelNamesMap", ":=", "make", "(", "map", "[", "string", "]", "struct", "{", "}", ",", "len", "(", "r", ".", "labels", ")", ")", "\n...
// LabelNames returns all the unique label names present in the index.
[ "LabelNames", "returns", "all", "the", "unique", "label", "names", "present", "in", "the", "index", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L928-L949
train
prometheus/tsdb
index/index.go
Postings
func (dec *Decoder) Postings(b []byte) (int, Postings, error) { d := encoding.Decbuf{B: b} n := d.Be32int() l := d.Get() return n, newBigEndianPostings(l), d.Err() }
go
func (dec *Decoder) Postings(b []byte) (int, Postings, error) { d := encoding.Decbuf{B: b} n := d.Be32int() l := d.Get() return n, newBigEndianPostings(l), d.Err() }
[ "func", "(", "dec", "*", "Decoder", ")", "Postings", "(", "b", "[", "]", "byte", ")", "(", "int", ",", "Postings", ",", "error", ")", "{", "d", ":=", "encoding", ".", "Decbuf", "{", "B", ":", "b", "}", "\n", "n", ":=", "d", ".", "Be32int", "(...
// Postings returns a postings list for b and its number of elements.
[ "Postings", "returns", "a", "postings", "list", "for", "b", "and", "its", "number", "of", "elements", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L1028-L1033
train
prometheus/tsdb
index/index.go
Series
func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error { *lbls = (*lbls)[:0] *chks = (*chks)[:0] d := encoding.Decbuf{B: b} k := d.Uvarint() for i := 0; i < k; i++ { lno := uint32(d.Uvarint()) lvo := uint32(d.Uvarint()) if d.Err() != nil { return errors.Wrap(d.Err(), "read series label offsets") } ln, err := dec.LookupSymbol(lno) if err != nil { return errors.Wrap(err, "lookup label name") } lv, err := dec.LookupSymbol(lvo) if err != nil { return errors.Wrap(err, "lookup label value") } *lbls = append(*lbls, labels.Label{Name: ln, Value: lv}) } // Read the chunks meta data. k = d.Uvarint() if k == 0 { return nil } t0 := d.Varint64() maxt := int64(d.Uvarint64()) + t0 ref0 := int64(d.Uvarint64()) *chks = append(*chks, chunks.Meta{ Ref: uint64(ref0), MinTime: t0, MaxTime: maxt, }) t0 = maxt for i := 1; i < k; i++ { mint := int64(d.Uvarint64()) + t0 maxt := int64(d.Uvarint64()) + mint ref0 += d.Varint64() t0 = maxt if d.Err() != nil { return errors.Wrapf(d.Err(), "read meta for chunk %d", i) } *chks = append(*chks, chunks.Meta{ Ref: uint64(ref0), MinTime: mint, MaxTime: maxt, }) } return d.Err() }
go
func (dec *Decoder) Series(b []byte, lbls *labels.Labels, chks *[]chunks.Meta) error { *lbls = (*lbls)[:0] *chks = (*chks)[:0] d := encoding.Decbuf{B: b} k := d.Uvarint() for i := 0; i < k; i++ { lno := uint32(d.Uvarint()) lvo := uint32(d.Uvarint()) if d.Err() != nil { return errors.Wrap(d.Err(), "read series label offsets") } ln, err := dec.LookupSymbol(lno) if err != nil { return errors.Wrap(err, "lookup label name") } lv, err := dec.LookupSymbol(lvo) if err != nil { return errors.Wrap(err, "lookup label value") } *lbls = append(*lbls, labels.Label{Name: ln, Value: lv}) } // Read the chunks meta data. k = d.Uvarint() if k == 0 { return nil } t0 := d.Varint64() maxt := int64(d.Uvarint64()) + t0 ref0 := int64(d.Uvarint64()) *chks = append(*chks, chunks.Meta{ Ref: uint64(ref0), MinTime: t0, MaxTime: maxt, }) t0 = maxt for i := 1; i < k; i++ { mint := int64(d.Uvarint64()) + t0 maxt := int64(d.Uvarint64()) + mint ref0 += d.Varint64() t0 = maxt if d.Err() != nil { return errors.Wrapf(d.Err(), "read meta for chunk %d", i) } *chks = append(*chks, chunks.Meta{ Ref: uint64(ref0), MinTime: mint, MaxTime: maxt, }) } return d.Err() }
[ "func", "(", "dec", "*", "Decoder", ")", "Series", "(", "b", "[", "]", "byte", ",", "lbls", "*", "labels", ".", "Labels", ",", "chks", "*", "[", "]", "chunks", ".", "Meta", ")", "error", "{", "*", "lbls", "=", "(", "*", "lbls", ")", "[", ":",...
// Series decodes a series entry from the given byte slice into lset and chks.
[ "Series", "decodes", "a", "series", "entry", "from", "the", "given", "byte", "slice", "into", "lset", "and", "chks", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/index/index.go#L1036-L1100
train
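A sketch of decoding one series entry with the function above; `b` is the raw entry and `dec` is a Decoder whose LookupSymbol can resolve the symbol references inside it — both are assumed call-site context:

var (
	lset labels.Labels
	chks []chunks.Meta
)
// Series resets both slices before filling them, so they can be reused across calls.
if err := dec.Series(b, &lset, &chks); err != nil {
	return err
}
fmt.Println("series:", lset)
for _, c := range chks {
	fmt.Printf("chunk ref=%d mint=%d maxt=%d\n", c.Ref, c.MinTime, c.MaxTime)
}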
prometheus/tsdb
labels/selector.go
Matches
func (s Selector) Matches(labels Labels) bool {
	for _, m := range s {
		if v := labels.Get(m.Name()); !m.Matches(v) {
			return false
		}
	}
	return true
}
go
func (s Selector) Matches(labels Labels) bool {
	for _, m := range s {
		if v := labels.Get(m.Name()); !m.Matches(v) {
			return false
		}
	}
	return true
}
[ "func", "(", "s", "Selector", ")", "Matches", "(", "labels", "Labels", ")", "bool", "{", "for", "_", ",", "m", ":=", "range", "s", "{", "if", "v", ":=", "labels", ".", "Get", "(", "m", ".", "Name", "(", ")", ")", ";", "!", "m", ".", "Matches"...
// Matches returns whether the labels satisfy all matchers.
[ "Matches", "returns", "whether", "the", "labels", "satisfy", "all", "matchers", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/labels/selector.go#L25-L32
train
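Selector.Matches ANDs its matchers together: every matcher must be satisfied by the label set. A small runnable sketch (the label names and values are invented for illustration):

package main

import (
	"fmt"

	"github.com/prometheus/tsdb/labels"
)

func main() {
	// A Selector is a slice of matchers; all of them must hold.
	sel := labels.Selector{
		labels.NewEqualMatcher("job", "api"),
		labels.NewEqualMatcher("env", "prod"),
	}
	lset := labels.Labels{
		{Name: "env", Value: "prod"},
		{Name: "job", Value: "api"},
	}
	fmt.Println(sel.Matches(lset)) // true: both matchers are satisfied
}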
prometheus/tsdb
labels/selector.go
NewEqualMatcher
func NewEqualMatcher(name, value string) Matcher {
	return &EqualMatcher{name: name, value: value}
}
go
func NewEqualMatcher(name, value string) Matcher {
	return &EqualMatcher{name: name, value: value}
}
[ "func", "NewEqualMatcher", "(", "name", ",", "value", "string", ")", "Matcher", "{", "return", "&", "EqualMatcher", "{", "name", ":", "name", ",", "value", ":", "value", "}", "\n", "}" ]
// NewEqualMatcher returns a new matcher matching an exact label value.
[ "NewEqualMatcher", "returns", "a", "new", "matcher", "matching", "an", "exact", "label", "value", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/labels/selector.go#L62-L64
train
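A quick example of the equality matcher on its own (label name and values invented):

m := labels.NewEqualMatcher("env", "prod")
fmt.Println(m.Matches("prod"))    // true: exact value match
fmt.Println(m.Matches("staging")) // false: any other value fails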
prometheus/tsdb
labels/selector.go
NewRegexpMatcher
func NewRegexpMatcher(name, pattern string) (Matcher, error) {
	re, err := regexp.Compile(pattern)
	if err != nil {
		return nil, err
	}
	return &regexpMatcher{name: name, re: re}, nil
}
go
func NewRegexpMatcher(name, pattern string) (Matcher, error) {
	re, err := regexp.Compile(pattern)
	if err != nil {
		return nil, err
	}
	return &regexpMatcher{name: name, re: re}, nil
}
[ "func", "NewRegexpMatcher", "(", "name", ",", "pattern", "string", ")", "(", "Matcher", ",", "error", ")", "{", "re", ",", "err", ":=", "regexp", ".", "Compile", "(", "pattern", ")", "\n", "if", "err", "!=", "nil", "{", "return", "nil", ",", "err", ...
// NewRegexpMatcher returns a new matcher verifying that a value matches // the regular expression pattern.
[ "NewRegexpMatcher", "returns", "a", "new", "matcher", "verifying", "that", "a", "value", "matches", "the", "regular", "expression", "pattern", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/labels/selector.go#L77-L83
train
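A usage sketch for the regexp matcher; the pattern and sample values are invented, and note that an invalid pattern surfaces as an error here rather than a panic:

m, err := labels.NewRegexpMatcher("instance", "node-[0-9]+")
if err != nil {
	return err // the pattern failed to compile
}
fmt.Println(m.Matches("node-42")) // true
fmt.Println(m.Matches("gateway")) // false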
prometheus/tsdb
labels/selector.go
NewMustRegexpMatcher
func NewMustRegexpMatcher(name, pattern string) Matcher {
	re, err := regexp.Compile(pattern)
	if err != nil {
		panic(err)
	}
	return &regexpMatcher{name: name, re: re}
}
go
func NewMustRegexpMatcher(name, pattern string) Matcher {
	re, err := regexp.Compile(pattern)
	if err != nil {
		panic(err)
	}
	return &regexpMatcher{name: name, re: re}
}
[ "func", "NewMustRegexpMatcher", "(", "name", ",", "pattern", "string", ")", "Matcher", "{", "re", ",", "err", ":=", "regexp", ".", "Compile", "(", "pattern", ")", "\n", "if", "err", "!=", "nil", "{", "panic", "(", "err", ")", "\n", "}", "\n", "return...
// NewMustRegexpMatcher returns a new matcher verifying that a value matches // the regular expression pattern. Will panic if the pattern is not a valid // regular expression.
[ "NewMustRegexpMatcher", "returns", "a", "new", "matcher", "verifying", "that", "a", "value", "matches", "the", "regular", "expression", "pattern", ".", "Will", "panic", "if", "the", "pattern", "is", "not", "a", "valid", "regular", "expression", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/labels/selector.go#L88-L95
train
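The Must variant trades the error return for a panic, which suits patterns that are known valid at compile time (typically string constants). A sketch with invented values:

// Safe: the constant pattern is valid, so the panic path cannot trigger.
m := labels.NewMustRegexpMatcher("job", "api|web")
fmt.Println(m.Matches("api")) // true

// labels.NewMustRegexpMatcher("job", "(") // would panic: invalid pattern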
prometheus/tsdb
chunks/chunks.go
writeHash
func (cm *Meta) writeHash(h hash.Hash) error {
	if _, err := h.Write([]byte{byte(cm.Chunk.Encoding())}); err != nil {
		return err
	}
	if _, err := h.Write(cm.Chunk.Bytes()); err != nil {
		return err
	}
	return nil
}
go
func (cm *Meta) writeHash(h hash.Hash) error {
	if _, err := h.Write([]byte{byte(cm.Chunk.Encoding())}); err != nil {
		return err
	}
	if _, err := h.Write(cm.Chunk.Bytes()); err != nil {
		return err
	}
	return nil
}
[ "func", "(", "cm", "*", "Meta", ")", "writeHash", "(", "h", "hash", ".", "Hash", ")", "error", "{", "if", "_", ",", "err", ":=", "h", ".", "Write", "(", "[", "]", "byte", "{", "byte", "(", "cm", ".", "Chunk", ".", "Encoding", "(", ")", ")", ...
// writeHash writes the chunk encoding and raw data into the provided hash.
[ "writeHash", "writes", "the", "chunk", "encoding", "and", "raw", "data", "into", "the", "provided", "hash", "." ]
3ccab17f5dc60de1bea3e5cfc807cb63a287078f
https://github.com/prometheus/tsdb/blob/3ccab17f5dc60de1bea3e5cfc807cb63a287078f/chunks/chunks.go#L58-L66
train
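Since writeHash is unexported, only code inside the chunks package can call it. A hedged sketch of checksumming a chunk there — the Castagnoli CRC32 table mirrors what TSDB uses for on-disk checksums elsewhere, but treat that exact hash choice as an assumption of this example:

// Inside package chunks, with cm a populated *Meta.
h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
if err := cm.writeHash(h); err != nil {
	return err
}
sum := h.Sum32() // covers the encoding byte followed by the raw chunk bytes
_ = sum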