code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package scroll
import (
"fmt"
)
type (
	// Position represents a point in text but is decoupled from its exact
	// source, i.e. it does not hold a file or pointer to the source code.
	Position struct {
		Offset  int // Byte offset from start of text
		Line    int // Current line index
		ColByte int // Byte offset from start of the line
		ColRune int // Rune offset from start of line
	}
	// Snippet represents a range between two Positions within some text. When
	// operating with Snippets the source text must be the same for both,
	// however, they may overlap.
	Snippet struct {
		Start Position // Position of the first byte of Text
		End   Position // Position immediately after the last byte of Text
		Text  string   // The spanned text itself
	}
	// TextMarker provides functionality for progressing through some text.
	// It is simply a Position that knows how to advance itself.
	TextMarker Position
)
// String renders the position as a human readable string in the format
// offset[line:colByte/colRune], e.g. "12[3:4/2]".
func (p Position) String() string {
	return fmt.Sprintf("%d[%d:%d/%d]", p.Offset, p.Line, p.ColByte, p.ColRune)
}
// From returns the position representing the beginning of the snippet
// (a copy of the Start field).
func (s Snippet) From() Position {
	return s.Start
}
// To returns the position representing the end of the snippet
// (a copy of the End field).
func (s Snippet) To() Position {
	return s.End
}
// String returns a human readable string representation of the Snippet:
// the quoted text followed by its start and end positions.
func (s Snippet) String() string {
	// Position implements fmt.Stringer, so %s picks up its String method.
	return fmt.Sprintf("%q %s -> %s", s.Text, s.Start, s.End)
}
// Pos returns the current Position of the TextMarker. Since TextMarker is
// defined as a Position, this is a plain type conversion (a value copy).
func (tm TextMarker) Pos() Position {
	return Position(tm)
}
// EndOf returns the Position at which 's' would end, assuming 's' starts at
// the TextMarker's current position. The marker itself is not advanced.
func (tm TextMarker) EndOf(s string) Position {
	// Work on a scratch copy; tm is already a value copy because of the
	// value receiver, so mutating 'cursor' cannot affect the caller.
	cursor := tm
	cursor.Advance(s)
	return Position(cursor)
}
// Advance moves the marker forward over 's': the byte offset grows by
// len(s), and for each linefeed '\n' the line counter is incremented while
// both column counters reset to zero. A Snippet covering exactly 's' is
// returned.
func (tm *TextMarker) Advance(s string) Snippet {
	from := tm.Pos()
	tm.Offset += len(s)
	for _, r := range s {
		if r == '\n' {
			tm.Line++
			tm.ColByte, tm.ColRune = 0, 0
			continue
		}
		// len(string(r)) is the UTF-8 encoded width of the rune.
		tm.ColByte += len(string(r))
		tm.ColRune++
	}
	return Snippet{Start: from, End: tm.Pos(), Text: s}
}
// SliceSnippet returns the snippet of 's'.
func (tm *TextMarker) SliceSnippet(s string) Snippet {
cp := TextMarker(tm.Pos())
return cp.Advance(s)
} | mmxxi/scarlet/scroll/position.go | 0.804598 | 0.410904 | position.go | starcoder |
package effect
import (
s3mfile "github.com/gotracker/goaudiofile/music/tracked/s3m"
"github.com/gotracker/voice/oscillator"
"gotracker/internal/comparison"
"gotracker/internal/format/s3m/layout/channel"
"gotracker/internal/format/s3m/playback/util"
"gotracker/internal/player/intf"
"gotracker/internal/song/note"
)
// doVolSlide applies a volume slide of 'delta' (scaled by 'multiplier') to
// the channel's active volume, clamping the result to the S3M range [0, 63].
func doVolSlide(cs intf.Channel, delta float32, multiplier float32) error {
	cur := util.VolumeToS3M(cs.GetActiveVolume())
	v := int16((float32(cur) + delta) * multiplier)
	switch {
	case v >= 64:
		v = 63
	case v < 0:
		v = 0
	}
	cs.SetActiveVolume(util.VolumeFromS3M(s3mfile.Volume(uint8(v))))
	return nil
}
// doPortaUp slides the channel's period down (raising the pitch) by
// 'amount' scaled by 'multiplier'. A channel with no period is a no-op.
func doPortaUp(cs intf.Channel, amount float32, multiplier float32) error {
	cur := cs.GetPeriod()
	if cur == nil {
		return nil
	}
	shift := note.PeriodDelta(-int(amount * multiplier))
	cs.SetPeriod(cur.AddDelta(shift).(note.Period))
	return nil
}
// doPortaUpToNote slides the channel's period down (raising the pitch)
// toward 'target', clamping so the target is never overshot.
func doPortaUpToNote(cs intf.Channel, amount float32, multiplier float32, target note.Period) error {
	cur := cs.GetPeriod()
	if cur == nil {
		return nil
	}
	next := cur.AddDelta(note.PeriodDelta(-int(amount * multiplier))).(note.Period)
	// Clamp: do not pass the target period.
	if note.ComparePeriods(next, target) == comparison.SpaceshipLeftGreater {
		next = target
	}
	cs.SetPeriod(next)
	return nil
}
// doPortaDown slides the channel's period up (lowering the pitch) by
// 'amount' scaled by 'multiplier'. A channel with no period is a no-op.
func doPortaDown(cs intf.Channel, amount float32, multiplier float32) error {
	cur := cs.GetPeriod()
	if cur == nil {
		return nil
	}
	shift := note.PeriodDelta(int(amount * multiplier))
	cs.SetPeriod(cur.AddDelta(shift).(note.Period))
	return nil
}
// doPortaDownToNote slides the channel's period up (lowering the pitch)
// toward 'target', clamping so the target is never overshot.
func doPortaDownToNote(cs intf.Channel, amount float32, multiplier float32, target note.Period) error {
	cur := cs.GetPeriod()
	if cur == nil {
		return nil
	}
	next := cur.AddDelta(note.PeriodDelta(int(amount * multiplier))).(note.Period)
	// Clamp: do not pass the target period.
	if note.ComparePeriods(next, target) == comparison.SpaceshipRightGreater {
		next = target
	}
	cs.SetPeriod(next)
	return nil
}
// doVibrato applies a periodic pitch wobble by sampling the channel's
// vibrato oscillator and installing the result as a period delta.
func doVibrato(cs intf.Channel, currentTick int, speed uint8, depth uint8, multiplier float32) error {
	mem := cs.GetMemory().(*channel.Memory)
	wobble := calculateWaveTable(cs, currentTick, speed, depth, multiplier, mem.VibratoOscillator())
	cs.SetPeriodDelta(note.PeriodDelta(wobble))
	return nil
}
// doTremor toggles the channel's volume on and off: it stays audible for
// 'onTicks' ticks, then silent for 'offTicks' ticks, and so on.
func doTremor(cs intf.Channel, currentTick int, onTicks int, offTicks int) error {
	mem := cs.GetMemory().(*channel.Memory)
	tremor := mem.TremorMem()
	// The tick budget for the current phase depends on whether the
	// tremor is currently in its "on" or "off" phase.
	limit := offTicks
	if tremor.IsActive() {
		limit = onTicks
	}
	if tremor.Advance() > limit {
		tremor.ToggleAndReset()
	}
	cs.SetVolumeActive(tremor.IsActive())
	return nil
}
// doArpeggio rapidly cycles the played semitone between the base note and
// two offsets, changing every tick in a three-step pattern.
func doArpeggio(cs intf.Channel, currentTick int, arpSemitoneADelta int8, arpSemitoneBDelta int8) error {
	base := cs.GetNoteSemitone()
	var target note.Semitone
	switch currentTick % 3 {
	case 0:
		target = base
	case 1:
		target = note.Semitone(int8(base) + arpSemitoneADelta)
	case 2:
		target = note.Semitone(int8(base) + arpSemitoneBDelta)
	}
	cs.SetTargetSemitone(target)
	cs.SetTargetPos(cs.GetPos())
	cs.SetNotePlayTick(true, currentTick)
	return nil
}
var (
	// volSlideTwoThirdsTable maps an S3M volume (0..63) to a reduced volume
	// of roughly two thirds of the input. The mapping is table-driven rather
	// than computed so that the exact rounding matches the original
	// tracker's behavior — presumably ScreamTracker 3's; verify against its
	// reference implementation before changing any entry.
	volSlideTwoThirdsTable = [...]s3mfile.Volume{
		0, 0, 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9,
		10, 10, 11, 11, 12, 13, 13, 14, 15, 15, 16, 16, 17, 18, 18, 19,
		20, 20, 21, 21, 22, 23, 23, 24, 25, 25, 26, 26, 27, 28, 28, 29,
		30, 30, 31, 31, 32, 33, 33, 34, 35, 35, 36, 36, 37, 38, 38, 39,
	}
)
// doVolSlideTwoThirds reduces the channel's active volume to roughly two
// thirds of its current value via volSlideTwoThirdsTable. The index is
// clamped first so out-of-range volumes cannot overrun the table.
func doVolSlideTwoThirds(cs intf.Channel) error {
	idx := util.VolumeToS3M(cs.GetActiveVolume())
	if idx > 63 {
		idx = 63
	}
	cs.SetActiveVolume(util.VolumeFromS3M(volSlideTwoThirdsTable[idx]))
	return nil
}
// doTremolo applies a periodic volume wobble by sampling the channel's
// tremolo oscillator and feeding the result through doVolSlide.
func doTremolo(cs intf.Channel, currentTick int, speed uint8, depth uint8, multiplier float32) error {
	mem := cs.GetMemory().(*channel.Memory)
	wobble := calculateWaveTable(cs, currentTick, speed, depth, multiplier, mem.TremoloOscillator())
	return doVolSlide(cs, wobble, 1.0)
}
func calculateWaveTable(cs intf.Channel, currentTick int, speed uint8, depth uint8, multiplier float32, o oscillator.Oscillator) float32 {
delta := o.GetWave(float32(depth)) * multiplier
o.Advance(int(speed))
return delta
} | internal/format/s3m/playback/effect/util.go | 0.631481 | 0.427815 | util.go | starcoder |
package sizes
import (
"fmt"
"github.com/github/git-sizer/counts"
"github.com/github/git-sizer/git"
)
// Size is implemented by any value that summarizes some aspect of an
// object's size and can render itself as a string.
type Size interface {
	fmt.Stringer
}
// BlobSize records the size of a single blob object.
type BlobSize struct {
	Size counts.Count32 // The size of the blob.
}
// TreeSize records the aggregate characteristics of a tree object and of
// everything reachable from it (trees, blobs, links, and submodules).
type TreeSize struct {
	// The maximum depth of trees and blobs starting at this object
	// (including this object).
	MaxPathDepth counts.Count32 `json:"max_path_depth"`
	// The maximum length of any path relative to this object, in
	// characters.
	MaxPathLength counts.Count32 `json:"max_path_length"`
	// The total number of trees, including duplicates.
	ExpandedTreeCount counts.Count32 `json:"expanded_tree_count"`
	// The total number of blobs, including duplicates.
	ExpandedBlobCount counts.Count32 `json:"expanded_blob_count"`
	// The total size of all blobs, including duplicates.
	ExpandedBlobSize counts.Count64 `json:"expanded_blob_size"`
	// The total number of symbolic links, including duplicates.
	ExpandedLinkCount counts.Count32 `json:"expanded_link_count"`
	// The total number of submodules referenced, including duplicates.
	ExpandedSubmoduleCount counts.Count32 `json:"expanded_submodule_count"`
}
// addDescendent folds the statistics of a subtree reachable via an entry
// named 'filename' into the receiver's totals and maxima.
// (The name is kept as-is for compatibility; "addDescendant" was intended.)
func (s *TreeSize) addDescendent(filename string, s2 TreeSize) {
	nameLen := counts.NewCount32(uint64(len(filename)))
	s.MaxPathDepth.AdjustMaxIfNecessary(s2.MaxPathDepth)
	if s2.MaxPathLength > 0 {
		// +1 accounts for the path separator between the entry name and
		// the longest path inside the subtree.
		s.MaxPathLength.AdjustMaxIfNecessary((nameLen + 1).Plus(s2.MaxPathLength))
	} else {
		s.MaxPathLength.AdjustMaxIfNecessary(nameLen)
	}
	s.ExpandedTreeCount.Increment(s2.ExpandedTreeCount)
	s.ExpandedBlobCount.Increment(s2.ExpandedBlobCount)
	s.ExpandedBlobSize.Increment(s2.ExpandedBlobSize)
	s.ExpandedLinkCount.Increment(s2.ExpandedLinkCount)
	s.ExpandedSubmoduleCount.Increment(s2.ExpandedSubmoduleCount)
}
// addBlob records that the object has a blob of the specified 'size' as a
// direct descendant under the entry name 'filename'.
func (s *TreeSize) addBlob(filename string, size BlobSize) {
	s.MaxPathDepth.AdjustMaxIfNecessary(1)
	s.MaxPathLength.AdjustMaxIfNecessary(counts.NewCount32(uint64(len(filename))))
	s.ExpandedBlobCount.Increment(1)
	s.ExpandedBlobSize.Increment(counts.Count64(size.Size))
}
// addLink records that the object has a symbolic link named 'filename' as a
// direct descendant.
func (s *TreeSize) addLink(filename string) {
	s.MaxPathDepth.AdjustMaxIfNecessary(1)
	s.MaxPathLength.AdjustMaxIfNecessary(counts.NewCount32(uint64(len(filename))))
	s.ExpandedLinkCount.Increment(1)
}
// addSubmodule records that the object has a submodule named 'filename' as a
// direct descendant.
func (s *TreeSize) addSubmodule(filename string) {
	s.MaxPathDepth.AdjustMaxIfNecessary(1)
	s.MaxPathLength.AdjustMaxIfNecessary(counts.NewCount32(uint64(len(filename))))
	s.ExpandedSubmoduleCount.Increment(1)
}
// CommitSize records the aggregate characteristics of a commit object.
type CommitSize struct {
	// The height of the ancestor graph, including this commit.
	MaxAncestorDepth counts.Count32 `json:"max_ancestor_depth"`
}
// addParent folds the statistics of one direct parent commit into the
// receiver: the ancestor depth becomes the maximum over all parents.
func (s *CommitSize) addParent(s2 CommitSize) {
	s.MaxAncestorDepth.AdjustMaxIfNecessary(s2.MaxAncestorDepth)
}
// addTree records that the commit references the tree described by s2.
// Intentionally a no-op: no tree statistics are folded into CommitSize here
// — TODO confirm this is deliberate rather than unimplemented.
func (s *CommitSize) addTree(s2 TreeSize) {
}
// TagSize records the aggregate characteristics of an annotated tag object.
type TagSize struct {
	// The number of tags that have to be traversed (including this
	// one) to get to an object.
	TagDepth counts.Count32
}
// HistorySize accumulates the totals and per-category maxima over an entire
// analyzed history: commits, trees, blobs, tags, and references. For every
// maximum there is an optional *Path identifying the object responsible.
type HistorySize struct {
	// The total number of unique commits analyzed.
	UniqueCommitCount counts.Count32 `json:"unique_commit_count"`
	// The total size of all commits analyzed.
	UniqueCommitSize counts.Count64 `json:"unique_commit_size"`
	// The maximum size of any analyzed commit.
	MaxCommitSize counts.Count32 `json:"max_commit_size"`
	// The commit with the maximum size.
	MaxCommitSizeCommit *Path `json:"max_commit,omitempty"`
	// The maximum ancestor depth of any analyzed commit.
	MaxHistoryDepth counts.Count32 `json:"max_history_depth"`
	// The maximum number of direct parents of any analyzed commit.
	MaxParentCount counts.Count32 `json:"max_parent_count"`
	// The commit with the maximum number of direct parents.
	MaxParentCountCommit *Path `json:"max_parent_count_commit,omitempty"`
	// The total number of unique trees analyzed.
	UniqueTreeCount counts.Count32 `json:"unique_tree_count"`
	// The total size of all trees analyzed.
	UniqueTreeSize counts.Count64 `json:"unique_tree_size"`
	// The total number of tree entries in all unique trees analyzed.
	UniqueTreeEntries counts.Count64 `json:"unique_tree_entries"`
	// The maximum number of entries in a tree.
	MaxTreeEntries counts.Count32 `json:"max_tree_entries"`
	// The tree with the maximum number of entries.
	MaxTreeEntriesTree *Path `json:"max_tree_entries_tree,omitempty"`
	// The total number of unique blobs analyzed.
	UniqueBlobCount counts.Count32 `json:"unique_blob_count"`
	// The total size of all of the unique blobs analyzed.
	UniqueBlobSize counts.Count64 `json:"unique_blob_size"`
	// The maximum size of any analyzed blob.
	MaxBlobSize counts.Count32 `json:"max_blob_size"`
	// The biggest blob found.
	MaxBlobSizeBlob *Path `json:"max_blob_size_blob,omitempty"`
	// The total number of unique tag objects analyzed.
	UniqueTagCount counts.Count32 `json:"unique_tag_count"`
	// The maximum number of tags in a chain.
	MaxTagDepth counts.Count32 `json:"max_tag_depth"`
	// The tag with the maximum tag depth.
	MaxTagDepthTag *Path `json:"max_tag_depth_tag,omitempty"`
	// The number of references analyzed. Note that we don't eliminate
	// duplicates if the user passes the same reference more than
	// once.
	ReferenceCount counts.Count32 `json:"reference_count"`
	// The fields below track the maximum TreeSize in the analyzed history,
	// where each attribute is maximized separately (so they may come from
	// different trees).
	// The maximum depth of trees and blobs starting at this object
	// (including this object).
	MaxPathDepth counts.Count32 `json:"max_path_depth"`
	// The tree with the maximum path depth.
	MaxPathDepthTree *Path `json:"max_path_depth_tree,omitempty"`
	// The maximum length of any path relative to this object, in
	// characters.
	MaxPathLength counts.Count32 `json:"max_path_length"`
	// The tree with the maximum path length.
	MaxPathLengthTree *Path `json:"max_path_length_tree,omitempty"`
	// The total number of trees, including duplicates.
	MaxExpandedTreeCount counts.Count32 `json:"max_expanded_tree_count"`
	// The tree with the maximum expanded tree count.
	MaxExpandedTreeCountTree *Path `json:"max_expanded_tree_count_tree,omitempty"`
	// The total number of blobs, including duplicates.
	MaxExpandedBlobCount counts.Count32 `json:"max_expanded_blob_count"`
	// The tree with the maximum expanded blob count.
	MaxExpandedBlobCountTree *Path `json:"max_expanded_blob_count_tree,omitempty"`
	// The total size of all blobs, including duplicates.
	MaxExpandedBlobSize counts.Count64 `json:"max_expanded_blob_size"`
	// The tree with the maximum expanded blob size.
	MaxExpandedBlobSizeTree *Path `json:"max_expanded_blob_size_tree,omitempty"`
	// The total number of symbolic links, including duplicates.
	MaxExpandedLinkCount counts.Count32 `json:"max_expanded_link_count"`
	// The tree with the maximum expanded link count.
	MaxExpandedLinkCountTree *Path `json:"max_expanded_link_count_tree,omitempty"`
	// The total number of submodules referenced, including duplicates.
	MaxExpandedSubmoduleCount counts.Count32 `json:"max_expanded_submodule_count"`
	// The tree with the maximum expanded submodule count.
	MaxExpandedSubmoduleCountTree *Path `json:"max_expanded_submodule_count_tree,omitempty"`
}
// setPath replaces *path with a new *Path for the object (oid, objectType),
// releasing the previously held path (if any) back to the resolver. Use it
// whenever a new largest item has been found.
func setPath(
	pr PathResolver,
	path **Path,
	oid git.OID, objectType string) {
	if old := *path; old != nil {
		pr.ForgetPath(old)
	}
	*path = pr.RequestPath(oid, objectType)
}
// recordBlob accumulates the statistics for a single unique blob, updating
// the maximum-blob record (and remembering its path) when this blob is the
// largest seen so far.
func (s *HistorySize) recordBlob(g *Graph, oid git.OID, blobSize BlobSize) {
	s.UniqueBlobCount.Increment(1)
	s.UniqueBlobSize.Increment(counts.Count64(blobSize.Size))
	if s.MaxBlobSize.AdjustMaxIfNecessary(blobSize.Size) {
		setPath(g.pathResolver, &s.MaxBlobSizeBlob, oid, "blob")
	}
}
// recordTree accumulates the statistics for a single unique tree into the
// history totals, and updates each per-category maximum independently,
// remembering the path of the tree responsible for any new maximum.
func (s *HistorySize) recordTree(
	g *Graph, oid git.OID, treeSize TreeSize, size counts.Count32, treeEntries counts.Count32,
) {
	s.UniqueTreeCount.Increment(1)
	s.UniqueTreeSize.Increment(counts.Count64(size))
	s.UniqueTreeEntries.Increment(counts.Count64(treeEntries))
	// Each AdjustMaxIfNecessary call returns true only when a new maximum
	// was set, in which case the responsible tree's path is recorded.
	if s.MaxTreeEntries.AdjustMaxIfNecessary(treeEntries) {
		setPath(g.pathResolver, &s.MaxTreeEntriesTree, oid, "tree")
	}
	if s.MaxPathDepth.AdjustMaxIfNecessary(treeSize.MaxPathDepth) {
		setPath(g.pathResolver, &s.MaxPathDepthTree, oid, "tree")
	}
	if s.MaxPathLength.AdjustMaxIfNecessary(treeSize.MaxPathLength) {
		setPath(g.pathResolver, &s.MaxPathLengthTree, oid, "tree")
	}
	if s.MaxExpandedTreeCount.AdjustMaxIfNecessary(treeSize.ExpandedTreeCount) {
		setPath(g.pathResolver, &s.MaxExpandedTreeCountTree, oid, "tree")
	}
	if s.MaxExpandedBlobCount.AdjustMaxIfNecessary(treeSize.ExpandedBlobCount) {
		setPath(g.pathResolver, &s.MaxExpandedBlobCountTree, oid, "tree")
	}
	if s.MaxExpandedBlobSize.AdjustMaxIfNecessary(treeSize.ExpandedBlobSize) {
		setPath(g.pathResolver, &s.MaxExpandedBlobSizeTree, oid, "tree")
	}
	if s.MaxExpandedLinkCount.AdjustMaxIfNecessary(treeSize.ExpandedLinkCount) {
		setPath(g.pathResolver, &s.MaxExpandedLinkCountTree, oid, "tree")
	}
	if s.MaxExpandedSubmoduleCount.AdjustMaxIfNecessary(treeSize.ExpandedSubmoduleCount) {
		setPath(g.pathResolver, &s.MaxExpandedSubmoduleCountTree, oid, "tree")
	}
}
// recordCommit accumulates the statistics for a single unique commit,
// tracking the largest commit, the deepest history, and the commit with
// the most direct parents.
// NOTE(review): this method uses AdjustMaxIfPossible while the tree/blob/
// tag paths use AdjustMaxIfNecessary — confirm both exist on the counts
// types and behave equivalently.
func (s *HistorySize) recordCommit(
	g *Graph, oid git.OID, commitSize CommitSize,
	size counts.Count32, parentCount counts.Count32,
) {
	s.UniqueCommitCount.Increment(1)
	s.UniqueCommitSize.Increment(counts.Count64(size))
	if s.MaxCommitSize.AdjustMaxIfPossible(size) {
		setPath(g.pathResolver, &s.MaxCommitSizeCommit, oid, "commit")
	}
	s.MaxHistoryDepth.AdjustMaxIfPossible(commitSize.MaxAncestorDepth)
	if s.MaxParentCount.AdjustMaxIfPossible(parentCount) {
		setPath(g.pathResolver, &s.MaxParentCountCommit, oid, "commit")
	}
}
// recordTag accumulates the statistics for a single unique annotated tag,
// tracking the deepest tag chain. The tag's size parameter is accepted but
// currently unused.
func (s *HistorySize) recordTag(g *Graph, oid git.OID, tagSize TagSize, size counts.Count32) {
	s.UniqueTagCount.Increment(1)
	if s.MaxTagDepth.AdjustMaxIfNecessary(tagSize.TagDepth) {
		setPath(g.pathResolver, &s.MaxTagDepthTag, oid, "tag")
	}
}
func (s *HistorySize) recordReference(g *Graph, ref git.Reference) {
s.ReferenceCount.Increment(1)
} | sizes/sizes.go | 0.786541 | 0.442215 | sizes.go | starcoder |
package compliance
import (
"context"
"fmt"
"testing"
"time"
"github.com/gomods/athens/pkg/errors"
"github.com/gomods/athens/pkg/index"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/technosophos/moniker"
)
// RunTests runs the compliance test suite for the given Indexer
// implementation.
//
// clearIndex must remove every entry from the underlying storage so each
// test case can assume a clean state; it is invoked before the suite starts
// and again after every sub-test via t.Cleanup.
func RunTests(t *testing.T, indexer index.Indexer, clearIndex func() error) {
	// Start from a known-empty index.
	if err := clearIndex(); err != nil {
		t.Fatal(err)
	}
	var tests = []struct {
		name    string
		desc    string
		limit   int
		preTest func(t *testing.T) ([]*index.Line, time.Time)
	}{
		{
			name:    "empty",
			desc:    "an empty index should return an empty slice",
			preTest: func(t *testing.T) ([]*index.Line, time.Time) { return []*index.Line{}, time.Time{} },
			limit:   2000,
		},
		{
			name: "happy path",
			desc: "given 10 modules, return all of them in correct order",
			preTest: func(t *testing.T) ([]*index.Line, time.Time) {
				return seed(t, indexer, 10), time.Time{}
			},
			limit: 2000,
		},
		{
			name: "respect the limit",
			desc: "given 10 modules and a 'limit' of 5, only return the first five lines",
			preTest: func(t *testing.T) ([]*index.Line, time.Time) {
				lines := seed(t, indexer, 10)
				return lines[0:5], time.Time{}
			},
			limit: 5,
		},
		{
			name: "respect the time",
			desc: "given 10 modules, 'since' should filter out the ones that came before it",
			preTest: func(t *testing.T) ([]*index.Line, time.Time) {
				err := indexer.Index(context.Background(), "tobeignored", "v1.2.3")
				if err != nil {
					t.Fatal(err)
				}
				// Leave a gap so 'now' is unambiguously after the first
				// insert even on coarse-resolution clocks.
				time.Sleep(50 * time.Millisecond)
				now := time.Now()
				lines := seed(t, indexer, 5)
				return lines, now
			},
			limit: 2000,
		},
		{
			name: "ignore the past",
			desc: "no line should be returned if 'since' is after all of the indexed modules",
			preTest: func(t *testing.T) ([]*index.Line, time.Time) {
				seed(t, indexer, 5)
				time.Sleep(50 * time.Millisecond)
				return []*index.Line{}, time.Now()
			},
			limit: 2000,
		},
		{
			name: "no limit no line",
			desc: "if limit is set to zero, then nothing should be returned",
			preTest: func(t *testing.T) ([]*index.Line, time.Time) {
				seed(t, indexer, 5)
				return []*index.Line{}, time.Time{}
			},
			limit: 0,
		},
		{
			name: "duplicate module version",
			desc: "if we try to index a module that already exists, a KindAlreadyExists must be returned",
			preTest: func(t *testing.T) ([]*index.Line, time.Time) {
				m := &index.Line{Path: "gomods.io/tobeduplicated", Version: "v0.1.0"}
				err := indexer.Index(context.Background(), m.Path, m.Version)
				if err != nil {
					t.Fatal(err)
				}
				err = indexer.Index(context.Background(), m.Path, m.Version)
				if !errors.Is(err, errors.KindAlreadyExists) {
					t.Fatalf("expected an error of kind AlreadyExists but got %s", errors.KindText(err))
				}
				return []*index.Line{m}, time.Time{}
			},
			limit: 2000,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			t.Log(tc.desc)
			t.Cleanup(func() {
				if err := clearIndex(); err != nil {
					t.Fatal(err)
				}
			})
			expected, since := tc.preTest(t)
			given, err := indexer.Lines(context.Background(), since, tc.limit)
			if err != nil {
				t.Fatal(err)
			}
			// Timestamps are assigned by the indexer at insert time, so they
			// cannot be predicted here and are excluded from the comparison.
			opts := cmpopts.IgnoreFields(index.Line{}, "Timestamp")
			if !cmp.Equal(given, expected, opts) {
				t.Fatal(cmp.Diff(expected, given, opts))
			}
		})
	}
}
func seed(t *testing.T, indexer index.Indexer, num int) []*index.Line {
lines := []*index.Line{}
t.Helper()
for i := 0; i < num; i++ {
mod := moniker.New().NameSep("_")
ver := fmt.Sprintf("%d.0.0", i)
err := indexer.Index(context.Background(), mod, ver)
if err != nil {
t.Fatal(err)
}
lines = append(lines, &index.Line{Path: mod, Version: ver})
}
return lines
} | pkg/index/compliance/compliance.go | 0.505859 | 0.446434 | compliance.go | starcoder |
package qdb
/*
#include <qdb/ts.h>
*/
import "C"
import (
"math"
"time"
"unsafe"
)
// TsTimestampPoint : timestamped timestamp data point, i.e. a time.Time
// value paired with the instant to which it belongs in the series.
type TsTimestampPoint struct {
	timestamp time.Time // position of the point in the series
	content   time.Time // the stored timestamp value
}
// Timestamp : return the data point's timestamp (its position in the
// series).
func (t TsTimestampPoint) Timestamp() time.Time {
	return t.timestamp
}
// Content : return the data point's content (the stored timestamp value).
func (t TsTimestampPoint) Content() time.Time {
	return t.content
}
// NewTsTimestampPoint : Create a new timeseries timestamp point holding
// 'value' at position 'timestamp'.
func NewTsTimestampPoint(timestamp time.Time, value time.Time) TsTimestampPoint {
	return TsTimestampPoint{
		timestamp: timestamp,
		content:   value,
	}
}
// :: internals
// toStructC converts the point to its C API representation.
func (t TsTimestampPoint) toStructC() C.qdb_ts_timestamp_point {
	return C.qdb_ts_timestamp_point{toQdbTimespec(t.timestamp), toQdbTimespec(t.content)}
}
// toStructG converts a C API point back into its Go representation.
func (t C.qdb_ts_timestamp_point) toStructG() TsTimestampPoint {
	return TsTimestampPoint{t.timestamp.toStructG(), t.value.toStructG()}
}
// timestampPointArrayToC converts Go points into a freshly allocated
// C-layout array and returns a pointer to its first element, or nil when
// the input is empty. The backing storage is a Go slice, so per the cgo
// pointer-passing rules C must not retain the pointer beyond the call it
// is handed to.
func timestampPointArrayToC(pts ...TsTimestampPoint) *C.qdb_ts_timestamp_point {
	if len(pts) == 0 {
		return nil
	}
	out := make([]C.qdb_ts_timestamp_point, len(pts))
	for i := range pts {
		out[i] = pts[i].toStructC()
	}
	return &out[0]
}
// timestampPointArrayToSlice reinterprets a C array of points as a Go slice
// of 'length' elements without copying, via the classic huge-array cast.
func timestampPointArrayToSlice(points *C.qdb_ts_timestamp_point, length int) []C.qdb_ts_timestamp_point {
	// See https://github.com/mattn/go-sqlite3/issues/238 for details.
	return (*[(math.MaxInt32 - 1) / unsafe.Sizeof(C.qdb_ts_timestamp_point{})]C.qdb_ts_timestamp_point)(unsafe.Pointer(points))[:length:length]
}
// timestampPointArrayToGo copies a C array of points into a Go slice of
// TsTimestampPoint. The C memory is only read, never retained.
func timestampPointArrayToGo(points *C.qdb_ts_timestamp_point, pointsCount C.qdb_size_t) []TsTimestampPoint {
	n := int(pointsCount)
	result := make([]TsTimestampPoint, n)
	if n > 0 {
		for i, p := range timestampPointArrayToSlice(points, n) {
			result[i] = p.toStructG()
		}
	}
	return result
}
// TsTimestampColumn : a time series timestamp column
type TsTimestampColumn struct {
	tsColumn // embeds the column's info and its parent timeseries entry
}
// TimestampColumn : create a column object for the named timestamp column
// of this entry. This only builds the handle; no server call is made here.
func (entry TimeseriesEntry) TimestampColumn(columnName string) TsTimestampColumn {
	return TsTimestampColumn{tsColumn{NewTsColumnInfo(columnName, TsColumnTimestamp), entry}}
}
// Insert timestamp points into a timeseries column. The points are
// converted to their C representation and handed to the C API in a single
// call; the temporary C strings are released on return.
func (column TsTimestampColumn) Insert(points ...TsTimestampPoint) error {
	alias := convertToCharStar(column.parent.alias)
	defer releaseCharStar(alias)
	columnName := convertToCharStar(column.name)
	defer releaseCharStar(columnName)
	contentCount := C.qdb_size_t(len(points))
	content := timestampPointArrayToC(points...)
	err := C.qdb_ts_timestamp_insert(column.parent.handle, alias, columnName, content, contentCount)
	return makeErrorOrNil(err)
}
// EraseRanges : erase all points in the specified ranges. Returns the
// number of points actually erased, as reported by the C API.
func (column TsTimestampColumn) EraseRanges(rgs ...TsRange) (uint64, error) {
	alias := convertToCharStar(column.parent.alias)
	defer releaseCharStar(alias)
	columnName := convertToCharStar(column.name)
	defer releaseCharStar(columnName)
	ranges := rangeArrayToC(rgs...)
	rangesCount := C.qdb_size_t(len(rgs))
	erasedCount := C.qdb_uint_t(0)
	err := C.qdb_ts_erase_ranges(column.parent.handle, alias, columnName, ranges, rangesCount, &erasedCount)
	return uint64(erasedCount), makeErrorOrNil(err)
}
// GetRanges : Retrieves timestamps in the specified range of the time
// series column. It is an error to call this function on a non existing
// time-series. On success the C-allocated result buffer is copied into Go
// values and released before returning.
func (column TsTimestampColumn) GetRanges(rgs ...TsRange) ([]TsTimestampPoint, error) {
	alias := convertToCharStar(column.parent.alias)
	defer releaseCharStar(alias)
	columnName := convertToCharStar(column.name)
	defer releaseCharStar(columnName)
	ranges := rangeArrayToC(rgs...)
	rangesCount := C.qdb_size_t(len(rgs))
	var points *C.qdb_ts_timestamp_point
	var pointsCount C.qdb_size_t
	err := C.qdb_ts_timestamp_get_ranges(column.parent.handle, alias, columnName, ranges, rangesCount, &points, &pointsCount)
	if err == 0 {
		defer column.parent.Release(unsafe.Pointer(points))
		return timestampPointArrayToGo(points, pointsCount), nil
	}
	return nil, ErrorType(err)
}
// TsTimestampAggregation : Aggregation of timestamp type. It pairs the
// requested aggregation kind and range with the server-computed result.
type TsTimestampAggregation struct {
	kind  TsAggregationType // which aggregation to compute
	rng   TsRange           // time range the aggregation covers
	count int64             // number of points folded into the result
	point TsTimestampPoint  // the aggregated result point
}
// Type : returns the type of the aggregation.
func (t TsTimestampAggregation) Type() TsAggregationType {
	return t.kind
}
// Range : returns the time range the aggregation covers.
func (t TsTimestampAggregation) Range() TsRange {
	return t.rng
}
// Count : returns the number of points aggregated into the result.
func (t TsTimestampAggregation) Count() int64 {
	return t.count
}
// Result : the result point of the aggregation.
func (t TsTimestampAggregation) Result() TsTimestampPoint {
	return t.point
}
// NewTimestampAggregation : Create a new timeseries timestamp aggregation
// of the given kind over the given range. Count and result start zeroed and
// are filled in when the aggregation is executed.
func NewTimestampAggregation(kind TsAggregationType, rng TsRange) *TsTimestampAggregation {
	return &TsTimestampAggregation{kind: kind, rng: rng}
}
// :: internals
// toStructC converts the aggregation (kind, range, count and result point)
// to its C API representation.
func (t TsTimestampAggregation) toStructC() C.qdb_ts_timestamp_aggregation_t {
	var cAgg C.qdb_ts_timestamp_aggregation_t
	cAgg._type = C.qdb_ts_aggregation_type_t(t.kind)
	cAgg._range = t.rng.toStructC()
	cAgg.count = C.qdb_size_t(t.count)
	cAgg.result = t.point.toStructC()
	return cAgg
}
// toStructG converts a C API aggregation back into its Go representation.
func (t C.qdb_ts_timestamp_aggregation_t) toStructG() TsTimestampAggregation {
	var gAgg TsTimestampAggregation
	gAgg.kind = TsAggregationType(t._type)
	gAgg.rng = t._range.toStructG()
	gAgg.count = int64(t.count)
	gAgg.point = t.result.toStructG()
	return gAgg
}
func timestampAggregationArrayToC(ags ...*TsTimestampAggregation) *C.qdb_ts_timestamp_aggregation_t {
if len(ags) == 0 {
return nil
}
var timestampAggregations []C.qdb_ts_timestamp_aggregation_t
for _, ag := range ags {
timestampAggregations = append(timestampAggregations, ag.toStructC())
}
return ×tampAggregations[0]
}
// timestampAggregationArrayToSlice reinterprets a C array of aggregations
// as a Go slice of 'length' elements without copying.
func timestampAggregationArrayToSlice(aggregations *C.qdb_ts_timestamp_aggregation_t, length int) []C.qdb_ts_timestamp_aggregation_t {
	// See https://github.com/mattn/go-sqlite3/issues/238 for details.
	return (*[(math.MaxInt32 - 1) / unsafe.Sizeof(C.qdb_ts_timestamp_aggregation_t{})]C.qdb_ts_timestamp_aggregation_t)(unsafe.Pointer(aggregations))[:length:length]
}
// timestampAggregationArrayToGo copies a C array of aggregations back into
// Go form, both updating the caller-supplied aggregation objects in place
// and returning a parallel slice of results. Each element is now converted
// exactly once (the original converted every element twice).
func timestampAggregationArrayToGo(aggregations *C.qdb_ts_timestamp_aggregation_t, aggregationsCount C.qdb_size_t, aggs []*TsTimestampAggregation) []TsTimestampAggregation {
	length := int(aggregationsCount)
	output := make([]TsTimestampAggregation, length)
	if length > 0 {
		slice := timestampAggregationArrayToSlice(aggregations, length)
		for i := range slice {
			converted := slice[i].toStructG()
			*aggs[i] = converted
			output[i] = converted
		}
	}
	return output
}
// TODO(Vianney): Implement aggregate
// Aggregate : Aggregate a sub-part of a timeseries from the specified
// aggregations. It is an error to call this function on a non existing
// time-series. Currently unimplemented: always returns ErrNotImplemented.
func (column TsTimestampColumn) Aggregate(aggs ...*TsTimestampAggregation) ([]TsTimestampAggregation, error) {
	return nil, ErrNotImplemented
}
// Timestamp : adds a timestamp in row transaction. The column index is
// advanced even when an earlier step already failed, so later calls keep
// addressing their intended columns; the first error is sticky in t.err.
func (t *TsBulk) Timestamp(value time.Time) *TsBulk {
	if t.err == nil {
		cValue := toQdbTimespec(value)
		t.err = makeErrorOrNil(C.qdb_ts_row_set_timestamp(t.table, C.qdb_size_t(t.index), &cValue))
	}
	t.index++
	return t
}
// GetTimestamp : gets a timestamp from the current column of the row and
// advances the column index. The C API error (if any) is returned alongside
// the converted value.
func (t *TsBulk) GetTimestamp() (time.Time, error) {
	var content C.qdb_timespec_t
	err := C.qdb_ts_row_get_timestamp(t.table, C.qdb_size_t(t.index), &content)
	t.index++
	return content.toStructG(), makeErrorOrNil(err)
}
// RowSetTimestamp : Add a timestamp to current row
func (t *TsBatch) RowSetTimestamp(index int64, value time.Time) error {
valueIndex := C.qdb_size_t(index)
cValue := toQdbTimespec(value)
return makeErrorOrNil(C.qdb_ts_batch_row_set_timestamp(t.table, valueIndex, &cValue))
} | entry_timeseries_timestamp.go | 0.76207 | 0.525612 | entry_timeseries_timestamp.go | starcoder |
package shapes
import (
"errors"
"fmt"
"math"
"github.com/xyproto/num"
)
// Addition, subtraction, multiplication and division for
// 2D points that are (int, int) and 2D points that are (float64, float64)
// The functions starting with "Must" will not return an error but
// panic instead. Functions that return errors are also provided.
type Point struct {
x *num.Frac
y *num.Frac
}
var (
Zero = &Point{num.Zero, num.Zero}
ErrDivZero = errors.New("divide by zero")
)
func NewPoint(x, y int) *Point {
return NewPointFromInt(x, y)
}
func NewPointn(x, y *num.Frac) *Point {
return &Point{x, y}
}
func NewPointf(x, y float64) *Point {
return NewPointFromFloat(x, y, num.DefaultMaxIterations)
}
func NewPointFromInt(x, y int) *Point {
return &Point{num.NewFromInt(x), num.NewFromInt(y)}
}
func NewPointFromFloat(x, y float64, maxIterations int) *Point {
xn := num.NewFromFloat64(x, maxIterations)
yn := num.NewFromFloat64(y, maxIterations)
return &Point{xn, yn}
}
func (p *Point) Copy() *Point {
return &Point{p.x.Copy(), p.y.Copy()}
}
func (p *Point) XY() (*num.Frac, *num.Frac) {
return p.x, p.y
}
func (p *Point) XYi() (int, int) {
return p.x.Int(), p.y.Int()
}
func (p *Point) XYf() (float64, float64) {
return p.x.Float64(), p.y.Float64()
}
// Setf is for setting the X and Y coordinates from two floats.
// maxIterations is how many iterations should be performed when
// converting the two floats to fractional numbers, per float.
func (p *Point) Setf(x, y float64, maxIterations int) {
xn := num.NewFromFloat64(x, maxIterations)
yn := num.NewFromFloat64(y, maxIterations)
p.x = xn
p.y = yn
}
func (p *Point) Set(x, y *num.Frac) {
p.x, p.y = x, y
}
// --- Add ---
// Add and return
func Add(a, b *Point) *Point {
return &Point{num.Add(a.x, b.x), num.Add(a.y, b.y)}
}
// Add in place
func (a *Point) Add(b *Point) {
a.x.Add(b.x)
a.y.Add(b.y)
}
// --- Sub ---
// Sub and return
func Sub(a, b *Point) *Point {
return &Point{num.Sub(a.x, b.x), num.Sub(a.y, b.y)}
}
// Sub in place
func (a *Point) Sub(b *Point) {
a.x.Sub(b.x)
a.y.Sub(b.y)
}
// --- Mul ---
// Mul and return
func Mul(a, b *Point) *Point {
axbx, err := num.Mul(a.x, b.x)
if err != nil {
panic(err)
}
ayby, err := num.Mul(a.y, b.y)
if err != nil {
panic(err)
}
return &Point{axbx, ayby}
}
// Mul in place
func (a *Point) Mul(b *Point) {
a.x.Mul(b.x)
a.y.Mul(b.y)
}
// --- Div ---
// Div and return
func Div(a, b *Point) (*Point, error) {
x, err := num.Div(a.x, b.x)
if err != nil {
return nil, err
}
y, err := num.Div(a.y, b.y)
if err != nil {
return nil, err
}
return &Point{x, y}, nil
}
// Div and return
func MustDiv(a, b *Point) *Point {
x, err := num.Div(a.x, b.x)
if err != nil {
panic(err)
}
y, err := num.Div(a.y, b.y)
if err != nil {
panic(err)
}
return &Point{x, y}
}
// Div in place
func (a *Point) Div(b *Point) error {
if b.x.IsZero() || b.y.IsZero() {
return ErrDivZero
}
a.x.Div(b.x)
a.y.Div(b.y)
return nil
}
// Div in place
func (a *Point) MustDiv(b *Point) {
if b.x.IsZero() || b.y.IsZero() {
panic(ErrDivZero)
}
a.x.Div(b.x)
a.y.Div(b.y)
}
// --- Create points where x == y ---
func Newi(x int) *Point {
return NewPointFromInt(x, x)
}
func Newf(x float64, maxIterations int) *Point {
return NewPointFromFloat(x, x, maxIterations)
}
// --- Strings ---
// String outputs the coordinates as fractions
func (p *Point) String() string {
return fmt.Sprintf("(%s, %s)", p.x, p.y)
}
// Stringf outputs the coordinates as floats, with 3 digits after "."
func (p *Point) Stringf() string {
return fmt.Sprintf("(%.3f, %.3f)", p.x.Float64(), p.y.Float64())
}
// Stringi outputs the coordinates as ints
func (p *Point) Stringi() string {
return fmt.Sprintf("(%d, %d)", p.x.Int(), p.y.Int())
}
// --- Zero check ---

// IsZero reports whether both coordinates of p are zero.
func (p *Point) IsZero() bool {
	return p.x.IsZero() && p.y.IsZero()
}
// --- Rotation ---

// RotateAround returns a new Point that is p rotated by rad radians about
// the centre point c. p itself is not modified. The computation is carried
// out in float64, so the result's precision is that of NewPointf.
func (p *Point) RotateAround(c *Point, rad float64) *Point {
	sin, cos := math.Sin(rad), math.Cos(rad)
	cx, cy := c.x.Float64(), c.y.Float64()

	// Shift so the centre sits at the origin, rotate, then shift back.
	dx := p.x.Float64() - cx
	dy := p.y.Float64() - cy
	rx := dx*cos - dy*sin + cx
	ry := dx*sin + dy*cos + cy

	return NewPointf(rx, ry)
}
func (p *Point) CloseTo(x, y int, rounded bool) bool {
if rounded {
return p.x.Round() == x && p.y.Round() == y
}
return p.x.Int() == x && p.y.Int() == y
} | point.go | 0.806891 | 0.539044 | point.go | starcoder |
package util
import (
"fmt"
"math"
"strconv"
"time"
"github.com/prometheus/common/model"
)
// LatencyMetric represents the 50th, 90th and 99th duration quantiles.
type LatencyMetric struct {
	Perc50 time.Duration `json:"Perc50"`
	Perc90 time.Duration `json:"Perc90"`
	Perc99 time.Duration `json:"Perc99"`
}
// SetQuantile sets the latency value for the given quantile.
// Only the 0.5, 0.9 and 0.99 quantiles are supported; any other quantile
// is silently ignored.
func (metric *LatencyMetric) SetQuantile(quantile float64, latency time.Duration) {
	switch quantile {
	case 0.5:
		metric.Perc50 = latency
	case 0.9:
		metric.Perc90 = latency
	case 0.99:
		metric.Perc99 = latency
	}
}
// VerifyThreshold checks every stored percentile against the given
// threshold. It returns an error naming the first percentile (checked in
// 50th, 90th, 99th order) that exceeds it, or nil when all are within bounds.
func (metric *LatencyMetric) VerifyThreshold(threshold time.Duration) error {
	switch {
	case metric.Perc50 > threshold:
		return fmt.Errorf("too high latency 50th percentile: got %v expected: %v", metric.Perc50, threshold)
	case metric.Perc90 > threshold:
		return fmt.Errorf("too high latency 90th percentile: got %v expected: %v", metric.Perc90, threshold)
	case metric.Perc99 > threshold:
		return fmt.Errorf("too high latency 99th percentile: got %v expected: %v", metric.Perc99, threshold)
	default:
		return nil
	}
}
// ToPerfData converts the latency metric to a PerfData item.
// Durations are reported in milliseconds under keys Perc50/Perc90/Perc99,
// and the metric name is attached as the "Metric" label.
func (metric *LatencyMetric) ToPerfData(name string) DataItem {
	return DataItem{
		Data: map[string]float64{
			"Perc50": float64(metric.Perc50) / float64(time.Millisecond),
			"Perc90": float64(metric.Perc90) / float64(time.Millisecond),
			"Perc99": float64(metric.Perc99) / float64(time.Millisecond),
		},
		Unit: "ms",
		Labels: map[string]string{
			"Metric": name,
		},
	}
}
// String returns a human-readable summary of the three percentiles.
func (metric LatencyMetric) String() string {
	return fmt.Sprintf("perc50: %v, perc90: %v, perc99: %v", metric.Perc50, metric.Perc90, metric.Perc99)
}
// LatencyData is an interface for a latency data structure.
type LatencyData interface {
	GetLatency() time.Duration
}

// LatencySlice is a sortable latency array; it implements sort.Interface,
// ordering ascending by latency.
type LatencySlice []LatencyData

func (l LatencySlice) Len() int           { return len(l) }
func (l LatencySlice) Swap(i, j int)      { l[i], l[j] = l[j], l[i] }
func (l LatencySlice) Less(i, j int) bool { return l[i].GetLatency() < l[j].GetLatency() }
// NewLatencyMetric converts a latency data array to a latency metric.
// NOTE(review): the slice is indexed directly at the percentile positions,
// so it must already be sorted ascending (e.g. via sort.Sort(LatencySlice))
// — confirm that all callers sort before calling.
func NewLatencyMetric(latencies []LatencyData) LatencyMetric {
	length := len(latencies)
	if length == 0 {
		// Ideally we can return LatencyMetric with some NaN/incorrect values,
		// but 0 is the best we can get for time.Duration type.
		return LatencyMetric{Perc50: 0, Perc90: 0, Perc99: 0}
	}
	// Ceil(length*p/100)-1 is the zero-based index of the p-th percentile.
	perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].GetLatency()
	perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].GetLatency()
	perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].GetLatency()
	return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99}
}
// NewLatencyMetricPrometheus tries to parse latency data from results of Prometheus query.
func NewLatencyMetricPrometheus(samples []*model.Sample) (*LatencyMetric, error) {
var latencyMetric LatencyMetric
for _, sample := range samples {
val, ok := sample.Metric["quantile"]
if !ok {
return nil, fmt.Errorf("quantile missing in sample %v", sample)
}
quantile, err := strconv.ParseFloat(string(val), 64)
if err != nil {
return nil, err
}
latency := time.Duration(float64(sample.Value) * float64(time.Second))
latencyMetric.SetQuantile(quantile, latency)
}
return &latencyMetric, nil
} | clusterloader2/pkg/measurement/util/latency_metric.go | 0.818845 | 0.584834 | latency_metric.go | starcoder |
package pricing
import (
"fmt"
"github.com/tealeg/xlsx"
"go.uber.org/zap"
"github.com/transcom/mymove/pkg/models"
)
// parseShipmentManagementServicesPrices extracts the shipment management
// services price rows from sheet 16 ("4a) Mgmt., Coun., Trans. Prices").
// It reads consecutive rows starting at mgmtRowIndexStart and stops at the
// first row with a blank contract year.
var parseShipmentManagementServicesPrices processXlsxSheet = func(params ParamConfig, sheetIndex int, logger Logger) (interface{}, error) {
	// XLSX Sheet consts
	const xlsxDataSheetNum int = 16 // 4a) Mgmt., Coun., Trans. Prices
	const mgmtRowIndexStart int = 9
	const contractYearColIndexStart int = 2
	const priceColumnIndexStart int = 3

	if xlsxDataSheetNum != sheetIndex {
		return nil, fmt.Errorf("parseShipmentManagementServices expected to process sheet %d, but received sheetIndex %d", xlsxDataSheetNum, sheetIndex)
	}

	logger.Info("Parsing shipment management services prices")

	var mgmtPrices []models.StageShipmentManagementServicesPrice
	dataRows := params.XlsxFile.Sheets[xlsxDataSheetNum].Rows[mgmtRowIndexStart:]
	for _, row := range dataRows {
		shipMgmtSrvcPrice := models.StageShipmentManagementServicesPrice{
			ContractYear:      getCell(row.Cells, contractYearColIndexStart),
			PricePerTaskOrder: getCell(row.Cells, priceColumnIndexStart),
		}
		// All the rows are consecutive, if we get a blank we're done
		if shipMgmtSrvcPrice.ContractYear == "" {
			break
		}
		if params.ShowOutput {
			logger.Info("", zap.Any("StageShipmentManagementServicesPrice", shipMgmtSrvcPrice))
		}
		mgmtPrices = append(mgmtPrices, shipMgmtSrvcPrice)
	}

	return mgmtPrices, nil
}
// parseCounselingServicesPrices extracts the counseling services price rows
// from sheet 16 ("4a) Mgmt., Coun., Trans. Prices"). It reads consecutive
// rows starting at counRowIndexStart and stops at the first blank
// contract year.
var parseCounselingServicesPrices processXlsxSheet = func(params ParamConfig, sheetIndex int, logger Logger) (interface{}, error) {
	// XLSX Sheet consts
	const xlsxDataSheetNum int = 16 // 4a) Mgmt., Coun., Trans. Prices
	const counRowIndexStart int = 22
	const contractYearColIndexStart int = 2
	const priceColumnIndexStart int = 3

	if xlsxDataSheetNum != sheetIndex {
		return nil, fmt.Errorf("parseCounselingServicesPrices expected to process sheet %d, but received sheetIndex %d", xlsxDataSheetNum, sheetIndex)
	}

	logger.Info("Parsing counseling services prices")

	var counPrices []models.StageCounselingServicesPrice
	dataRows := params.XlsxFile.Sheets[xlsxDataSheetNum].Rows[counRowIndexStart:]
	for _, row := range dataRows {
		cnslSrvcPrice := models.StageCounselingServicesPrice{
			ContractYear:      getCell(row.Cells, contractYearColIndexStart),
			PricePerTaskOrder: getCell(row.Cells, priceColumnIndexStart),
		}
		// All the rows are consecutive, if we get a blank we're done
		if cnslSrvcPrice.ContractYear == "" {
			break
		}
		if params.ShowOutput {
			logger.Info("", zap.Any("StageCounselingServicesPrice", cnslSrvcPrice))
		}
		counPrices = append(counPrices, cnslSrvcPrice)
	}

	return counPrices, nil
}
// parseTransitionPrices extracts the transition price rows from sheet 16
// ("4a) Mgmt., Coun., Trans. Prices"). It reads consecutive rows starting
// at tranRowIndexStart and stops at the first blank contract year.
var parseTransitionPrices processXlsxSheet = func(params ParamConfig, sheetIndex int, logger Logger) (interface{}, error) {
	// XLSX Sheet consts
	const xlsxDataSheetNum int = 16 // 4a) Mgmt., Coun., Trans. Prices
	const tranRowIndexStart int = 34
	const contractYearColIndexStart int = 2
	const priceColumnIndexStart int = 3

	if xlsxDataSheetNum != sheetIndex {
		return nil, fmt.Errorf("parseTransitionPrices expected to process sheet %d, but received sheetIndex %d", xlsxDataSheetNum, sheetIndex)
	}

	logger.Info("Parsing transition prices")

	var tranPrices []models.StageTransitionPrice
	dataRows := params.XlsxFile.Sheets[xlsxDataSheetNum].Rows[tranRowIndexStart:]
	for _, row := range dataRows {
		tranPrice := models.StageTransitionPrice{
			ContractYear:      getCell(row.Cells, contractYearColIndexStart),
			PricePerTaskOrder: getCell(row.Cells, priceColumnIndexStart),
		}
		// All the rows are consecutive, if we get a blank we're done
		if tranPrice.ContractYear == "" {
			break
		}
		if params.ShowOutput {
			logger.Info("", zap.Any("StageTransitionPrice", tranPrice))
		}
		tranPrices = append(tranPrices, tranPrice)
	}

	return tranPrices, nil
}
// verifyManagementCounselTransitionPrices: verification for: 4a) Mgmt., Coun., Trans. Prices
// It validates the header rows immediately above each of the three price
// sections so parsing fails fast when the spreadsheet template changes.
// The *-1 / *-2 offsets below address the EXAMPLE row and the column-title
// row that sit directly above each section's first data row.
var verifyManagementCounselTransitionPrices verifyXlsxSheet = func(params ParamConfig, sheetIndex int) error {
	// XLSX Sheet consts
	const xlsxDataSheetNum int = 16 // 4a) Mgmt., Coun., Trans. Prices
	const mgmtRowIndexStart int = 9
	const counRowIndexStart int = 22
	const tranRowIndexStart int = 34
	const contractYearColIndexStart int = 2
	const priceColumnIndexStart int = 3
	if xlsxDataSheetNum != sheetIndex {
		return fmt.Errorf("verifyManagementCounselTransitionPrices expected to process sheet %d, but received sheetIndex %d", xlsxDataSheetNum, sheetIndex)
	}
	// Shipment Management Services Headers
	// Row directly above the data holds the EXAMPLE values.
	dataRows := params.XlsxFile.Sheets[xlsxDataSheetNum].Rows[mgmtRowIndexStart-1 : mgmtRowIndexStart]
	err := helperCheckHeadersFor4b("EXAMPLE", "$X.XX", contractYearColIndexStart, priceColumnIndexStart, dataRows)
	if err != nil {
		return err
	}
	// Two rows above the data holds the column titles.
	dataRows = params.XlsxFile.Sheets[xlsxDataSheetNum].Rows[mgmtRowIndexStart-2 : mgmtRowIndexStart-1]
	err = helperCheckHeadersFor4b("Contract Year", "ShipmentManagementServicesPrice($pertaskorder)", contractYearColIndexStart, priceColumnIndexStart, dataRows)
	if err != nil {
		return err
	}
	// Counseling Services
	dataRows = params.XlsxFile.Sheets[xlsxDataSheetNum].Rows[counRowIndexStart-1 : counRowIndexStart]
	err = helperCheckHeadersFor4b("EXAMPLE", "$X.XX", contractYearColIndexStart, priceColumnIndexStart, dataRows)
	if err != nil {
		return err
	}
	dataRows = params.XlsxFile.Sheets[xlsxDataSheetNum].Rows[counRowIndexStart-2 : counRowIndexStart-1]
	err = helperCheckHeadersFor4b("Contract Year", "CounselingServicesPrice($pertaskorder)", contractYearColIndexStart, priceColumnIndexStart, dataRows)
	if err != nil {
		return err
	}
	// Transition
	// NOTE(review): unlike the two sections above, only the title row is
	// checked here (no EXAMPLE row) — confirm this matches the template.
	dataRows = params.XlsxFile.Sheets[xlsxDataSheetNum].Rows[tranRowIndexStart-1 : tranRowIndexStart]
	return helperCheckHeadersFor4b("Contract Year", "TransitionPrice($totalcost)", contractYearColIndexStart, priceColumnIndexStart, dataRows)
}
func helperCheckHeadersFor4b(contractYearHeader string, priceColumnHeader string, contractYearColIndexStart int, priceColumnIndexStart int, dataRows []*xlsx.Row) error {
for _, dataRow := range dataRows {
if header := getCell(dataRow.Cells, contractYearColIndexStart); header != contractYearHeader {
return fmt.Errorf("verifyManagementCounselTransitionPrices expected to find header '%s', but received header '%s'", contractYearHeader, header)
}
if header := removeWhiteSpace(getCell(dataRow.Cells, priceColumnIndexStart)); header != priceColumnHeader {
return fmt.Errorf("verifyManagementCounselTransitionPrices expected to find header '%s', but received header '%s'", priceColumnHeader, header)
}
}
return nil
} | pkg/parser/pricing/parse_management_counseling_transition_prices.go | 0.535341 | 0.464112 | parse_management_counseling_transition_prices.go | starcoder |
package processor
// TriggerParameter describes the shared "trigger" processor parameter.
var TriggerParameter = ProcessorParameter{
	Name:        "trigger",
	Description: "A yaml struct to define the condition when the rule, should be applied.",
}

// PathParameter describes the shared "path" processor parameter.
var PathParameter = ProcessorParameter{
	Name:        "path",
	Description: "A string array to define the path of the transformation in the kubernetes yaml.",
}

// TriggerDoc is the markdown help text for the trigger parameter.
var TriggerDoc = `
#### Trigger parameter

Trigger can define a filter to apply the transformations only on a subset of the k8s manifests.

For example:

'''
- type: Add
  trigger:
     metadata:
        name: datanode
  path:
    - metadata
    - labels
  value:
     flokkr.github.io/monitoring: false
'''

This definition will apply only to the k8s resources where the value of 'metadata.name' is 'datanode'.

You can use multiple values in the trigger. All the key nodes will be collected and should be the same in the target resource.
`
// PathDoc is the markdown help text for the path parameter.
// (A stray non-Go fragment after the closing backtick, which broke
// compilation, has been removed.)
var PathDoc = `
#### Path parameter

Path is a string array where each element represents a new level in the kubernetes manifest.

For example the '["spec","spec", "spec", "containers"]' array address a list in the kubernetes manifest files.

Array elements are indexed with a number from zero (eg. ["foo","barr",0]) _except_ if the elements in the array are maps with an existing _name_ key. In this case the index is this name.

For example with this standard kubernetes manifest:

'''
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    felkszible: generated
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
      annotations: {}
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
        env:
        - name: KEY
          value: XXX
'''

The path of the KEY environment variable is ''[spec, template, spec, containers, nginx, env, KEY ]' and not '[ spec, template, spec, containers, 0, env, 0]'

For matching, path segments are used as regular expressions. Therefore the following path matches for both the init and main containers:

'''yaml
path:
  - spec
  - template
  - spec
  - (initC|c)ontainers
  - .*
'''

Matching works only if the Yaml file already has the specified path. But for kubernetes resources a few standard paths are pre-defined'
`
package reducers
import (
"github.com/paulmach/go.geo"
)
// A DouglasPeuckerReducer wraps the DouglasPeucker function
// to fulfill the geo.Reducer and geo.GeoReducer interfaces.
type DouglasPeuckerReducer struct {
	Threshold float64 // simplification tolerance, in the path's coordinate units (meters for GeoReduce)
}

// NewDouglasPeucker creates a new DouglasPeuckerReducer with the given threshold.
func NewDouglasPeucker(threshold float64) *DouglasPeuckerReducer {
	return &DouglasPeuckerReducer{
		Threshold: threshold,
	}
}
// Reduce runs the DouglasPeucker function using the threshold of the DouglasPeuckerReducer.
func (r DouglasPeuckerReducer) Reduce(path *geo.Path) *geo.Path {
	return DouglasPeucker(path, r.Threshold)
}

// GeoReduce runs the DouglasPeucker on a lng/lat path.
// The threshold is expected to be in meters: it is scaled by the Mercator
// factor at the path's centre latitude, the reduction is performed in
// projected space, and the result is projected back to lng/lat.
func (r DouglasPeuckerReducer) GeoReduce(path *geo.Path) *geo.Path {
	factor := geo.MercatorScaleFactor(path.Bound().Center().Lat())
	merc := path.Clone().Transform(geo.Mercator.Project)
	reduced := DouglasPeucker(merc, r.Threshold*factor)
	return reduced.Transform(geo.Mercator.Inverse)
}
// DouglasPeucker simplifies the path using the Douglas-Peucker method.
// Returns a new path and DOES NOT modify the original.
func DouglasPeucker(path *geo.Path, threshold float64) *geo.Path {
	if path.Length() <= 2 {
		return path.Clone()
	}

	// mask[i] == 1 marks points to keep; the endpoints are always kept.
	mask := make([]byte, path.Length())
	mask[0] = 1
	mask[path.Length()-1] = 1

	points := path.Points()
	found := dpWorker(points, threshold, mask)

	// dpWorker counts interior points only; +2 accounts for the two
	// endpoints so the append loop below never reallocates.
	newPoints := make([]geo.Point, 0, found+2)
	for i, v := range mask {
		if v == 1 {
			newPoints = append(newPoints, points[i])
		}
	}

	return (&geo.Path{}).SetPoints(newPoints)
}
// DouglasPeuckerIndexMap is similar to DouglasPeucker but returns an array that maps
// each new path index to its original path index.
// Returns a new path and DOES NOT modify the original.
func DouglasPeuckerIndexMap(path *geo.Path, threshold float64) (reduced *geo.Path, indexMap []int) {
	if path.Length() == 0 {
		return path.Clone(), []int{}
	}

	if path.Length() == 1 {
		return path.Clone(), []int{0}
	}

	if path.Length() == 2 {
		return path.Clone(), []int{0, 1}
	}

	// mask[i] == 1 marks points to keep; the endpoints are always kept.
	mask := make([]byte, path.Length())
	mask[0] = 1
	mask[path.Length()-1] = 1

	originalPoints := path.Points()
	found := dpWorker(originalPoints, threshold, mask)

	// dpWorker counts interior points only; +2 accounts for the two
	// endpoints so the append loops below never reallocate.
	points := make([]geo.Point, 0, found+2)
	indexMap = make([]int, 0, found+2)
	for i, v := range mask {
		if v == 1 {
			points = append(points, originalPoints[i])
			indexMap = append(indexMap, i)
		}
	}

	reduced = &geo.Path{}
	return reduced.SetPoints(points), indexMap
}
// DouglasPeuckerGeoIndexMap is similar to GeoReduce but returns an array that maps
// each new path index to its original path index.
// Returns a new path and DOES NOT modify the original.
func DouglasPeuckerGeoIndexMap(path *geo.Path, meters float64) (reduced *geo.Path, indexMap []int) {
	if path.Length() == 0 {
		return path.Clone(), []int{}
	}

	if path.Length() == 1 {
		return path.Clone(), []int{0}
	}

	if path.Length() == 2 {
		return path.Clone(), []int{0, 1}
	}

	// mask[i] == 1 marks points to keep; the endpoints are always kept.
	mask := make([]byte, path.Length())
	mask[0] = 1
	mask[path.Length()-1] = 1

	// Project to Mercator and scale the meter threshold into projected
	// units at the path's centre latitude.
	factor := geo.MercatorScaleFactor(path.Bound().Center().Lat())
	originalPoints := path.Clone().Transform(geo.Mercator.Project).Points()
	found := dpWorker(originalPoints, meters*factor, mask)

	// dpWorker counts interior points only; +2 accounts for the two
	// endpoints so the append loops below never reallocate.
	points := make([]geo.Point, 0, found+2)
	indexMap = make([]int, 0, found+2)
	for i, v := range mask {
		if v == 1 {
			points = append(points, originalPoints[i])
			indexMap = append(indexMap, i)
		}
	}

	reduced = &geo.Path{}
	reduced.SetPoints(points)
	reduced.Transform(geo.Mercator.Inverse)
	return reduced, indexMap
}
// dpWorker does the recursive threshold checks iteratively and returns the
// number of interior points it marked in mask (the endpoints are expected
// to be pre-marked by the caller).
// Using a stack array with a stackLength variable resulted in 4x speed improvement
// over calling the function recursively.
func dpWorker(points []geo.Point, threshold float64, mask []byte) int {
	found := 0

	// Stack of (start, end) index pairs still to be examined.
	var stack []int
	stack = append(stack, 0, len(points)-1)

	l := &geo.Line{}
	for len(stack) > 0 {
		start := stack[len(stack)-2]
		end := stack[len(stack)-1]

		// modify the line in place (reusing one geo.Line avoids an
		// allocation per segment)
		a := l.A()
		a[0], a[1] = points[start][0], points[start][1]

		b := l.B()
		b[0], b[1] = points[end][0], points[end][1]

		// Find the interior point farthest from the start-end chord.
		maxDist := 0.0
		maxIndex := 0

		for i := start + 1; i < end; i++ {
			dist := l.SquaredDistanceFrom(&points[i])
			if dist > maxDist {
				maxDist = dist
				maxIndex = i
			}
		}

		// Squared distances are compared against threshold², avoiding a
		// sqrt per point.
		if maxDist > threshold*threshold {
			found++
			mask[maxIndex] = 1

			// Keep the farthest point and recurse into both halves:
			// replace (start, end) with (start, maxIndex) and push
			// (maxIndex, end).
			stack[len(stack)-1] = maxIndex
			stack = append(stack, maxIndex, end)
		} else {
			// Everything between start and end is within tolerance.
			stack = stack[:len(stack)-2]
		}
	}

	return found
}
package util
import (
"math"
"time"
)
// ToFixed rounds num to the given number of decimal places, rounding
// halves away from zero. math.Round is used directly so very large values
// no longer overflow the int conversion the previous helper relied on.
func ToFixed(num float64, precision int) float64 {
	scale := math.Pow(10, float64(precision))
	return math.Round(num*scale) / scale
}
// GetToday returns the current date at midnight (00:00:00) in the given
// location.
// NOTE(review): the year/month/day are taken from time.Now() in the
// process-local zone while the result is constructed in location — confirm
// that mixing zones here is intended.
func GetToday(location *time.Location) time.Time {
	now := time.Now()
	return time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, location)
}
// GetYesterday returns yesterday's date at midnight (00:00:00) in the given
// location. AddDate(0, 0, -1) is used instead of subtracting 24 hours so
// the result stays at midnight across daylight-saving transitions, where a
// calendar day is not 24 hours long.
func GetYesterday(location *time.Location) time.Time {
	return GetToday(location).AddDate(0, 0, -1)
}
// Contains reports whether x is present in a.
func Contains(a []string, x string) bool {
	for i := range a {
		if a[i] == x {
			return true
		}
	}
	return false
}
// GetDateRangeFunc returns a date range function over start date to end date inclusive.
// After the end of the range, the range function returns a zero date,
// date.IsZero() is true.
// Both bounds are normalized to midnight UTC before iteration. The
// returned closure mutates its captured cursor, so it is stateful and not
// safe for concurrent use.
func GetDateRangeFunc(start, end time.Time) func() time.Time {
	y, m, d := start.Date()
	start = time.Date(y, m, d, 0, 0, 0, 0, time.UTC)
	y, m, d = end.Date()
	end = time.Date(y, m, d, 0, 0, 0, 0, time.UTC)

	return func() time.Time {
		if start.After(end) {
			return time.Time{}
		}
		// Return the current cursor and advance it by one calendar day.
		date := start
		start = start.AddDate(0, 0, 1)
		return date
	}
}
// GetDateRangeArr returns the slice of dates from startDate to endDate
// inclusive (nil when the range is empty).
func GetDateRangeArr(startDate, endDate time.Time) []time.Time {
	var dates []time.Time
	next := GetDateRangeFunc(startDate, endDate)
	for {
		d := next()
		if d.IsZero() {
			return dates
		}
		dates = append(dates, d)
	}
}
// round converts num to the nearest integer, rounding halves away from zero.
func round(num float64) int {
	if num < 0 {
		return int(num - 0.5)
	}
	return int(num + 0.5)
}
// IsDateEquals reports whether the two times fall on the same calendar
// date (year, month, day). Locations are not normalized before comparing.
func IsDateEquals(date1 time.Time, date2 time.Time) bool {
	y1, m1, d1 := date1.Date()
	y2, m2, d2 := date2.Date()
	return y1 == y2 && m1 == m2 && d1 == d2
}
// UniqueStringSlice returns the members of stringSlice with duplicates
// removed, preserving first-occurrence order. A nil input yields nil.
func UniqueStringSlice(stringSlice []string) []string {
	var unique []string
	seen := make(map[string]struct{}, len(stringSlice))
	for _, s := range stringSlice {
		if _, dup := seen[s]; dup {
			continue
		}
		seen[s] = struct{}{}
		unique = append(unique, s)
	}
	return unique
}
package websocket
// Chain contains data relevant for Chain Reaction Game.
// Satisfies the Game interface.
type Chain struct {
	Len     int        // number of columns (len(Squares))
	Squares []*Squares // board columns; a square is addressed as Squares[y].Cur[x]
	Hub     *Hub       // hub tracking connected clients, colors and turn order
}

// Squares contains data about a row of squares.
// Each index is a square.
// NOTE(review): InitBoard allocates one Squares per column (y) holding
// `rows` entries each — confirm the row/column naming matches the board
// orientation used by the front end.
type Squares struct {
	Len   int      // Length of a row
	Cur   []int    // How many circles are in the square
	Max   []int    // Carrying Capacity of the square
	Color []string // The color that occupies a square. "" if empty
}
// InitBoard creates a board with dimensions rows x cols.
// Both dimensions are clamped to [5, 30]. Each square's carrying capacity
// (Max) is set to its number of in-bounds orthogonal neighbors, so corners
// get 2, edges 3, and interior squares 4.
func (c *Chain) InitBoard(rows, cols int) {
	rows, cols = makeLegal(5, rows, 30), makeLegal(5, cols, 30)
	c.Squares = make([]*Squares, cols)
	c.Len = cols
	for y := 0; y < cols; y++ {
		c.Squares[y] = &Squares{
			Len:   rows,
			Cur:   make([]int, rows),
			Max:   make([]int, rows),
			Color: make([]string, rows),
		}
		for x := 0; x < rows; x++ {
			c.Squares[y].Max[x], _ = findneighbors(x, y, rows, cols)
		}
	}
}
// findneighbors returns the number of in-bounds orthogonal neighbors of
// square (x, y) on a rows x cols board, along with their coordinates.
// (The local counter was previously misspelled "totalNeighbros".)
func findneighbors(x, y, rows, cols int) (int, [][]int) {
	total := 0
	coords := make([][]int, 0, 4)
	for _, d := range [][]int{
		{1, 0}, {-1, 0}, {0, 1}, {0, -1}} {
		nx := x + d[0]
		ny := y + d[1]
		if isBounded(nx, ny, rows, cols) {
			total++
			coords = append(coords, []int{nx, ny})
		}
	}
	return total, coords
}
// MovePiece Moves the piece on the chain board.
// It will call the chained(explode) function to handle explosion.
// x - x coordinate of the user clicked square
// y - y coordinate of the user clicked square
// color - color of the user
// first return value is dynamic animation, second is the static position after an animation
// MovePiece is requirement for Game Interface.
// Animation data is data sent to Front end that will show animation.
// Moved / static data are the circles that remain from the explosion.
func (c *Chain) MovePiece(x, y int, color string) ([][][]int, [][][]int) {
	c.Squares[y].Cur[x]++
	// No explosion: still below carrying capacity, so just claim the
	// square for this color and report the single updated square.
	if c.Squares[y].Cur[x] < c.Squares[y].Max[x] {
		c.UpdateColor(color, c.Squares[y].Color[x])
		c.Squares[y].Color[x] = color
		return make([][][]int, 0), [][][]int{{{x, y, c.Squares[y].Cur[x]}}}
	}
	// Capacity reached: empty this square and run the chain reaction.
	c.Squares[y].Cur[x] = 0
	c.Squares[y].Color[x] = ""
	c.UpdateColor("", color)
	return chained(c.explode, [][]int{{x, y}}, color)
}
// explodeFunc is the signature of one explosion step; used to clean syntax.
type explodeFunc func([][]int, string) ([][]int, [][]int, [][]int)

// chained is a helper function that will continually call c.explode.
// explode - function to execute to receive animation data.
// exp - nested array that contains coords of exploding squares.
// color - Color of the person that is moving.
// First return value is dynamic animation, second is position right after an animation.
// Loops through the explode function until no more squares come out.
// Animation in the front end works by iterating through the animations
// array and then animating each frame's instructions.
func chained(explode explodeFunc, exp [][]int, color string) ([][][]int, [][][]int) {
	x, y := exp[0][0], exp[0][1]
	animations := make([][][]int, 0)
	moved := [][][]int{{{x, y, 0}}} // 0 because square just exploded
	for len(exp) != 0 {
		newExp, newAni, newMoves := explode(exp, color)
		animations = append(animations, newAni)
		moved = append(moved, newMoves)
		exp = newExp
	}
	return animations, moved
}
// explode simulates one step of the Chain Reaction game logic and adds
// animation data to an array.
// exp - Current exploding squares
// color - color of the user that is making the move
// Returns the next level of exploding neighbors, the animation frame that
// just occurred, and the new static positions.
// explode iterates each exploding square and checks whether neighboring
// squares will also explode; otherwise it just adds a circle to the
// neighbor and records the new static position.
// Side effects: squares are recolored, per-client square counts are
// updated via UpdateColor, and eliminated players are removed from
// c.Hub.Colors (adjusting the turn tracker index).
func (c *Chain) explode(exp [][]int, color string) ([][]int, [][]int, [][]int) {
	expN := make([][]int, 0)       // Neighbors that are going to explode next iteration
	moved := make([][]int, 0)      // New static positions
	animations := make([][]int, 0) // animation of circles exploding
	for _, coords := range exp {
		// d is all the possible neighbors of the coords
		for _, d := range [][]int{
			{1, 0},
			{-1, 0},
			{0, 1},
			{0, -1},
		} {
			x, y := coords[0]+d[0], coords[1]+d[1]
			if !isBounded(x, y, c.GetRows(), c.GetCols()) {
				continue
			}
			// (coords of explosion site), direction they are going
			animations = append(animations, []int{coords[0], coords[1], d[0], d[1]})
			sq := c.Squares[y]
			oldColor := sq.Color[x]
			if c.UpdateColor(color, oldColor) {
				// OldColor player lost his / her circles: remove that
				// color from the turn rotation.
				for index := 0; index < len(c.Hub.Colors); index++ {
					if c.Hub.Colors[index] == oldColor {
						c.Hub.Colors = append(c.Hub.Colors[:index], c.Hub.Colors[index+1:]...)
						// reposition turn tracker index
						if index <= c.Hub.i && c.Hub.i != 0 {
							c.Hub.i--
						}
						break
					}
				}
			}
			sq.Color[x] = color
			sq.Cur[x]++
			// Neighbor reached capacity: it explodes in the next iteration.
			if sq.Cur[x] == sq.Max[x] {
				sq.Cur[x] = 0
				sq.Color[x] = ""
				_ = c.UpdateColor("", color)
				expN = append(expN, []int{x, y})
			}
			moved = append(moved, []int{x, y, sq.Cur[x]})
		}
	}
	// Stop animation right when the last circle dies
	if len(c.Hub.Colors) == 1 {
		return make([][]int, 0), animations, moved
	}
	return expN, animations, moved
}
// UpdateColor updates the per-client square counts after a square changes
// from oldColor to newColor: the oldColor client loses one square, the
// newColor client gains one. It returns true when the oldColor client's
// count reaches zero (that player is dead / out of squares).
// A no-op (returning false) when the colors are equal. Pass "" as either
// color to only adjust the other side.
func (c *Chain) UpdateColor(newColor, oldColor string) bool {
	dead := false
	if newColor == oldColor {
		return dead
	}
	for client := range c.Hub.Clients {
		if client.Color == oldColor {
			c.Hub.Clients[client]--
			if c.Hub.Clients[client] == 0 {
				dead = true
			}
		} else if client.Color == newColor {
			c.Hub.Clients[client]++
		}
	}
	return dead
}
// GetRows is a requirement for Game interface.
// GetRows gets the number of rows in the Chain board (the length stored
// on the first column).
func (c *Chain) GetRows() int {
	return c.Squares[0].Len
}

// GetCols is a requirement for Game interface.
// GetCols gets the number of columns in the Chain board.
func (c *Chain) GetCols() int {
	return c.Len
}

// GetBoard returns the underlying per-column square data.
func (c *Chain) GetBoard() []*Squares {
	return c.Squares
}
// makeLegal clamps dimension into the inclusive range [lower, upper].
func makeLegal(lower, dimension, upper int) int {
	switch {
	case dimension < lower:
		return lower
	case dimension > upper:
		return upper
	default:
		return dimension
	}
}
// isBounded reports whether the square (x, y) lies on a rows x cols board.
func isBounded(x, y, rows, cols int) bool {
	if x < 0 || y < 0 {
		return false
	}
	return x < rows && y < cols
}
package cm
// minHeap is a typed min heap for floating point numbers. Unlike the generic
// heap in the container/heap package, pushing data to or popping data off of
// the heap doesn't require conversion between floats and interface{} objects,
// therefore avoiding the memory and GC overhead due to the additional allocations.
type minHeap []float64

// Len returns the number of values in the heap.
func (h minHeap) Len() int { return len(h) }

// Min returns the minimum value from the heap.
// It assumes the heap is non-empty; calling it on an empty heap panics.
func (h minHeap) Min() float64 { return h[0] }
// Push pushes a value onto the heap, growing the backing storage first
// when it is full. The redundant-looking bounds conditions in the sift-up
// loop are there to help the compiler eliminate bounds checks (see the
// comment on SortDesc).
func (h *minHeap) Push(value float64) {
	if len(*h) == cap(*h) {
		h.ensureSize()
	}
	// append
	(*h) = append(*h, value)
	// then, shift up if necessary to fix heap structure. manually inlined.
	heap := *h
	n := len(heap)
	i := n - 1
	for i < n && i >= 0 {
		parent := (i - 1) / 2
		if parent == i || parent >= n || parent < 0 || heap[parent] <= heap[i] {
			break
		}
		heap[parent], heap[i] = heap[i], heap[parent]
		i = parent
	}
}
// Reset empties the heap, returning its backing storage to the shared pool
// when the capacity is at least the initial bucket size, and leaves the
// heap nil.
func (h *minHeap) Reset() {
	if heap := *h; cap(heap) >= _initialHeapBucketSize {
		sharedHeapPool.Put(heap)
	}
	(*h) = nil
}
// Pop pops the minimum value from the heap.
// The sift-down is manually inlined; the redundant-looking bounds
// conditions exist to let the compiler eliminate bounds checks.
// It assumes the heap is non-empty; popping an empty heap panics.
func (h *minHeap) Pop() float64 {
	var (
		old = *h
		n   = len(old) - 1
		val = old[0]
		i   int
	)

	// Move the last element to the root, then restore the heap property.
	old[0], old[n] = old[n], old[0]

	smallest := i
	for smallest >= 0 && smallest <= n {
		left := smallest*2 + 1
		right := left + 1
		if left < n && left >= 0 && old[left] < old[smallest] {
			smallest = left
		}
		if right < n && right >= 0 && old[right] < old[smallest] {
			smallest = right
		}
		if smallest == i {
			break
		}
		old[i], old[smallest] = old[smallest], old[i]
		i = smallest
	}

	// Shrink by one, leaving the popped value past the end.
	*h = old[0:n]
	return val
}
// SortDesc sorts the heap's backing slice in place into descending order
// by repeatedly moving the current minimum to the end (heapsort on the
// min-heap). The heap property is destroyed by this call.
func (h minHeap) SortDesc() {
	heap := h
	// this is equivalent to Pop() in a loop (heapsort)
	// all the redundant-looking conditions are there to eliminate bounds-checks
	for n := len(heap) - 1; n > 0 && n < len(heap); n = len(heap) - 1 {
		var (
			i        int
			smallest int
		)
		// Swap the minimum to the end, then sift the new root down.
		heap[0], heap[n] = heap[n], heap[0]
		for smallest >= 0 && smallest <= n {
			var (
				left  = smallest*2 + 1
				right = left + 1
			)
			if left < n && left >= 0 && heap[left] < heap[smallest] {
				smallest = left
			}
			if right < n && right >= 0 && heap[right] < heap[smallest] {
				smallest = right
			}
			if smallest == i {
				break
			}
			heap[i], heap[smallest] = heap[smallest], heap[i]
			i = smallest
		}
		heap = heap[0:n]
	}
}
func (h *minHeap) ensureSize() {
var (
heap = *h
targetCap = cap(heap) * 2
newHeap = sharedHeapPool.Get(targetCap)
)
(*newHeap) = append(*newHeap, heap...)
if cap(heap) >= _initialHeapBucketSize {
sharedHeapPool.Put(heap)
}
(*h) = *newHeap
} | src/aggregator/aggregation/quantile/cm/heap.go | 0.735167 | 0.513729 | heap.go | starcoder |
package ticker
import (
"fmt"
"github.com/bitfinexcom/bitfinex-api-go/pkg/convert"
)
// Ticker is a point-in-time market summary for a trading or funding symbol.
// NOTE(review): the per-field semantics below are inferred from the FromRaw
// index mapping; confirm against the exchange's ticker documentation.
type Ticker struct {
	Symbol          string
	Frr             float64 // populated only from 16-element (funding) payloads
	Bid             float64
	BidPeriod       int64 // populated only from 16-element (funding) payloads
	BidSize         float64
	Ask             float64
	AskPeriod       int64 // populated only from 16-element (funding) payloads
	AskSize         float64
	DailyChange     float64
	DailyChangePerc float64
	LastPrice       float64
	Volume          float64
	High            float64
	Low             float64
}

// Update is a single ticker update, structurally identical to Ticker.
type Update Ticker

// Snapshot wraps the list of Tickers parsed from a snapshot payload.
type Snapshot struct {
	Snapshot []*Ticker
}
// SnapshotFromRaw builds a ticker Snapshot from a slice of raw ticker rows,
// all attributed to the given symbol. An empty input is an error.
// NOTE(review): rows that fail FromRaw are silently skipped — confirm this
// best-effort behavior is intended rather than surfacing the row error.
func SnapshotFromRaw(symbol string, raw [][]interface{}) (*Snapshot, error) {
	if len(raw) == 0 {
		return nil, fmt.Errorf("data slice too short for ticker snapshot: %#v", raw)
	}
	snap := make([]*Ticker, 0)
	for _, f := range raw {
		c, err := FromRaw(symbol, f)
		if err == nil {
			snap = append(snap, c)
		}
	}
	return &Snapshot{Snapshot: snap}, nil
}
// FromRaw maps a raw ticker payload onto a Ticker for the given symbol.
// The payload layout is selected by length:
//   - 16 elements: funding ticker including FRR and bid/ask periods;
//   - 13 elements: funding-style layout without FRR/periods (indices
//     0, 3 and 6 are skipped) — NOTE(review): confirm this layout against
//     the exchange docs;
//   - otherwise (>= 10 elements): trading-pair ticker read from indices 0-9.
//
// Fewer than 10 elements is an error.
func FromRaw(symbol string, raw []interface{}) (t *Ticker, err error) {
	if len(raw) < 10 {
		return t, fmt.Errorf("data slice too short for ticker, expected %d got %d: %#v", 10, len(raw), raw)
	}

	// funding currency ticker
	// ignore bid/ask period for now
	if len(raw) == 13 {
		t = &Ticker{
			Symbol:          symbol,
			Bid:             convert.F64ValOrZero(raw[1]),
			BidSize:         convert.F64ValOrZero(raw[2]),
			Ask:             convert.F64ValOrZero(raw[4]),
			AskSize:         convert.F64ValOrZero(raw[5]),
			DailyChange:     convert.F64ValOrZero(raw[7]),
			DailyChangePerc: convert.F64ValOrZero(raw[8]),
			LastPrice:       convert.F64ValOrZero(raw[9]),
			Volume:          convert.F64ValOrZero(raw[10]),
			High:            convert.F64ValOrZero(raw[11]),
			Low:             convert.F64ValOrZero(raw[12]),
		}
		return
	}

	// funding ticker with flash return rate and bid/ask periods
	if len(raw) == 16 {
		t = &Ticker{
			Symbol:          symbol,
			Frr:             convert.F64ValOrZero(raw[0]),
			Bid:             convert.F64ValOrZero(raw[1]),
			BidPeriod:       convert.I64ValOrZero(raw[2]),
			BidSize:         convert.F64ValOrZero(raw[3]),
			Ask:             convert.F64ValOrZero(raw[4]),
			AskPeriod:       convert.I64ValOrZero(raw[5]),
			AskSize:         convert.F64ValOrZero(raw[6]),
			DailyChange:     convert.F64ValOrZero(raw[7]),
			DailyChangePerc: convert.F64ValOrZero(raw[8]),
			LastPrice:       convert.F64ValOrZero(raw[9]),
			Volume:          convert.F64ValOrZero(raw[10]),
			High:            convert.F64ValOrZero(raw[11]),
			Low:             convert.F64ValOrZero(raw[12]),
		}
		return
	}

	// all other tickers
	// on trading pairs (ex. tBTCUSD)
	t = &Ticker{
		Symbol:          symbol,
		Bid:             convert.F64ValOrZero(raw[0]),
		BidSize:         convert.F64ValOrZero(raw[1]),
		Ask:             convert.F64ValOrZero(raw[2]),
		AskSize:         convert.F64ValOrZero(raw[3]),
		DailyChange:     convert.F64ValOrZero(raw[4]),
		DailyChangePerc: convert.F64ValOrZero(raw[5]),
		LastPrice:       convert.F64ValOrZero(raw[6]),
		Volume:          convert.F64ValOrZero(raw[7]),
		High:            convert.F64ValOrZero(raw[8]),
		Low:             convert.F64ValOrZero(raw[9]),
	}
	return
}
func FromRestRaw(raw []interface{}) (t *Ticker, err error) {
if len(raw) == 0 {
return t, fmt.Errorf("data slice too short for ticker")
}
return FromRaw(raw[0].(string), raw[1:])
} | pkg/models/ticker/ticker.go | 0.556159 | 0.451085 | ticker.go | starcoder |
package day17
import (
"errors"
"math"
"regexp"
"strconv"
"advent2021.com/util"
)
// TargetArea is the inclusive rectangle [MinX, MaxX] x [MinY, MaxY] that
// the probe must land in.
type TargetArea struct {
	MinX, MaxX, MinY, MaxY int
}

// NewTargetArea builds a TargetArea from its inclusive bounds.
func NewTargetArea(minX, maxX, minY, maxY int) *TargetArea {
	return &TargetArea{MinX: minX, MaxX: maxX, MinY: minY, MaxY: maxY}
}
func ParseTargetArea(line string) (*TargetArea, error) {
re := regexp.MustCompile(`target area: x=(-?\d+)\.\.(-?\d+), y=(-?\d+)\.\.(-?\d+)`)
values := re.FindStringSubmatch(line)
if values == nil {
return nil, errors.New("invalid line")
}
values = values[1:]
nums := make([]int, 4)
for i := 0; i < 4; i++ {
num, err := strconv.Atoi(values[i])
if err != nil {
return nil, err
}
nums[i] = num
}
return NewTargetArea(nums[0], nums[1], nums[2], nums[3]), nil
}
func inRange(val, min, max int) bool {
if min >= 0 && max >= 0 {
return val >= min && val <= max
} else if min >= 0 {
panic("weird")
} else {
if val >= 0 {
return false
}
return val >= min && val <= max
}
}
func (ta *TargetArea) InTargetAreaX(x int) bool {
return inRange(x, ta.MinX, ta.MaxX)
}
func (ta *TargetArea) InTargetAreaY(y int) bool {
return inRange(y, ta.MinY, ta.MaxY)
}
func (ta *TargetArea) InTargetArea(x, y int) bool {
return ta.InTargetAreaX(x) && ta.InTargetAreaY(y)
}
// Given an initial starting position of (x, y) will it hit the target area?
// Simulates the probe step by step: each step adds the current velocity to
// the position, then drag pulls the x velocity toward 0 and gravity
// decrements the y velocity.
func (ta *TargetArea) IsHit(x, y int) bool {
	curX := 0
	curY := 0
	for {
		curX += x
		curY += y
		// fmt.Printf("(%d, %d) -> x = %d y = %d\n", curX, curY, x, y)
		if ta.InTargetArea(curX, curY) {
			return true
		}
		// Horizontal velocity exhausted while outside the x bounds:
		// the probe can never drift into the area.
		if x == 0 && (curX < ta.MinX || curX > ta.MaxX) {
			return false
		}
		// Falling and already below the area: overshot for good.
		if y < 0 && curY < ta.MinY {
			return false
		}
		// Drag: x velocity decays one step toward zero each tick.
		if x > 0 {
			x--
		} else if x < 0 {
			x++
		}
		// Gravity: y velocity decreases by one each tick.
		y--
	}
}
// Get the set of X velocities that will eventually cross into the target space
// (considering x alone, ignoring y). Velocities below MinX must coast into
// the area before drag stops them; velocities already within [MinX, MaxX]
// land inside on the very first step.
func (ta *TargetArea) GetValidX() []int {
	values := make([]int, 0)
	for x := 1; x < ta.MinX; x++ {
		// Walk the decaying step sequence x, x-1, x-2, ... and see
		// whether any cumulative position falls inside the x bounds.
		pos := x
		step := x - 1
		for {
			if ta.InTargetAreaX(pos) {
				values = append(values, x)
				break
			}
			// Stopped short (drag exhausted) or flew past the far edge.
			if step == 0 || pos > ta.MaxX {
				break
			}
			pos += step
			step--
		}
	}
	// Any initial velocity inside the bounds hits in one step.
	for x := ta.MinX; x <= ta.MaxX; x++ {
		values = append(values, x)
	}
	return values
}
// sumRange returns 1 + 2 + ... + max (the max-th triangular number).
// It returns 0 for max <= 0; the previous loop implementation
// (decrementing until exactly zero) never terminated for those inputs.
// For max >= 1 the result is identical to the original.
func sumRange(max int) int {
	if max <= 0 {
		return 0
	}
	return max * (max + 1) / 2
}
// PartBoth computes both puzzle answers in one pass over candidate
// velocities: the best initial y velocity that still hits the target
// (maxY) and the number of distinct (x, y) initial velocities that hit
// it (distinct). It panics if the input line cannot be parsed.
func PartBoth(line string) (maxY int, distinct int) {
	ta, err := ParseTargetArea(line)
	if err != nil {
		panic(err)
	}
	maxY = math.MinInt
	distinct = 0
	for _, x := range ta.GetValidX() {
		// This is a really brute force method. There is a way to constrain the set
		// of possible 'y' values that I am missing
		// NOTE(review): 2000 is a heuristic upper bound on y velocity; it is
		// not derived from the input — confirm it is large enough for all inputs.
		for y := util.Min(0, ta.MinY); y < 2000; y++ {
			if ta.IsHit(x, y) {
				//fmt.Printf("(%d, %d)\n", x, y)
				maxY = util.Max(maxY, y)
				distinct++
			}
		}
	}
	return maxY, distinct
}
// Part1 returns the highest y position reachable on a trajectory that hits
// the target: with best initial y velocity maxY, the apex height is the
// triangular number 1+2+...+maxY computed by sumRange.
func Part1(line string) int {
	maxY, _ := PartBoth(line)
	return sumRange(maxY)
}

// Part2 returns the number of distinct initial velocities that hit the
// target area.
func Part2(line string) int {
	_, distinct := PartBoth(line)
	return distinct
} | day17/day17.go | 0.640973 | 0.456834 | day17.go | starcoder |
package ints
// IsSortedUint64s returns true if the given nums are sorted in
// non-decreasing order, and false otherwise.
func IsSortedUint64s(nums []uint64) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] > nums[i] {
			return false
		}
	}
	return true
}

// IsSortedInt64s returns true if the given nums are sorted in
// non-decreasing order, and false otherwise.
func IsSortedInt64s(nums []int64) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] > nums[i] {
			return false
		}
	}
	return true
}

// IsSortedUint32s returns true if the given nums are sorted in
// non-decreasing order, and false otherwise.
func IsSortedUint32s(nums []uint32) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] > nums[i] {
			return false
		}
	}
	return true
}

// IsSortedInt32s returns true if the given nums are sorted in
// non-decreasing order, and false otherwise.
func IsSortedInt32s(nums []int32) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] > nums[i] {
			return false
		}
	}
	return true
}

// IsSortedUint16s returns true if the given nums are sorted in
// non-decreasing order, and false otherwise.
func IsSortedUint16s(nums []uint16) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] > nums[i] {
			return false
		}
	}
	return true
}

// IsSortedInt16s returns true if the given nums are sorted in
// non-decreasing order, and false otherwise.
func IsSortedInt16s(nums []int16) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] > nums[i] {
			return false
		}
	}
	return true
}

// IsSortedUint8s returns true if the given nums are sorted in
// non-decreasing order, and false otherwise.
func IsSortedUint8s(nums []uint8) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] > nums[i] {
			return false
		}
	}
	return true
}

// IsSortedInt8s returns true if the given nums are sorted in
// non-decreasing order, and false otherwise.
func IsSortedInt8s(nums []int8) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] > nums[i] {
			return false
		}
	}
	return true
}

// IsSortedUints returns true if the given nums are sorted in
// non-decreasing order, and false otherwise.
func IsSortedUints(nums []uint) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] > nums[i] {
			return false
		}
	}
	return true
}

// IsSortedInts returns true if the given nums are sorted in
// non-decreasing order, and false otherwise.
func IsSortedInts(nums []int) bool {
	for i := 1; i < len(nums); i++ {
		if nums[i-1] > nums[i] {
			return false
		}
	}
	return true
}
package aqi
// Info represents the information based on Air Quality Index.
type Info struct {
Level, Implications, Caution string
}
var (
// Good represents the information when AQI is below 50.
Good = Info{"Good", "Air quality is considered satisfactory, and air pollution poses little or no risk", "-"}
// Moderate represents the information when AQI is above 50 and less than or equal to 100.
Moderate = Info{"Moderate", "Air quality is acceptable; however, for some pollutants there may be a moderate health concern for a very small number of people who are unusually sensitive to air pollution.", "Active children and adults, and people with respiratory disease, such as asthma, should limit prolonged outdoor exertion."}
// UnhealthySensitive represents the information when AQI is above 100 and less than or equal to 150.
UnhealthySensitive = Info{"Unhealthy for Sensitive Groups", "Members of sensitive groups may experience health effects. The general public is not likely to be affected.", "Active children and adults, and people with respiratory disease, such as asthma, should limit prolonged outdoor exertion."}
// Unhealthy represents the information when AQI is above 150 and less than or equal to 200.
Unhealthy = Info{"Unhealthy", "Everyone may begin to experience health effects; members of sensitive groups may experience more serious health effects", "Active children and adults, and people with respiratory disease, such as asthma, should avoid prolonged outdoor exertion; everyone else, especially children, should limit prolonged outdoor exertion"}
// VeryUnhealthy represents the information when AQI is above 200 and less than or equal to 300.
VeryUnhealthy = Info{"Very Unhealthy", "Health warnings of emergency conditions. The entire population is more likely to be affected.", "Active children and adults, and people with respiratory disease, such as asthma, should avoid all outdoor exertion; everyone else, especially children, should limit outdoor exertion."}
// Hazardous represents the information when AQI is above 300.
Hazardous = Info{"Hazardous", "Health alert: everyone may experience more serious health effects", "Everyone should avoid all outdoor exertion"}
NoInfo = Info{"-", "-", "-"}
) | pkg/aqi/aqi.go | 0.545286 | 0.568895 | aqi.go | starcoder |
package pgsql
import (
"database/sql"
"database/sql/driver"
)
// HStoreFromStringMap returns a driver.Valuer that produces a PostgreSQL hstore from the given Go map[string]string.
// A nil map is encoded as SQL NULL.
func HStoreFromStringMap(val map[string]string) driver.Valuer {
	return hstoreFromStringMap{val: val}
}

// HStoreToStringMap returns an sql.Scanner that converts a PostgreSQL hstore into a Go map[string]string and sets it to val.
// hstore NULL values are scanned as empty strings.
func HStoreToStringMap(val *map[string]string) sql.Scanner {
	return hstoreToStringMap{val: val}
}

// HStoreFromStringPtrMap returns a driver.Valuer that produces a PostgreSQL hstore from the given Go map[string]*string.
// Nil value pointers are encoded as hstore NULLs; a nil map becomes SQL NULL.
func HStoreFromStringPtrMap(val map[string]*string) driver.Valuer {
	return hstoreFromStringPtrMap{val: val}
}

// HStoreToStringPtrMap returns an sql.Scanner that converts a PostgreSQL hstore into a Go map[string]*string and sets it to val.
// hstore NULL values are scanned as nil pointers.
func HStoreToStringPtrMap(val *map[string]*string) sql.Scanner {
	return hstoreToStringPtrMap{val: val}
}

// HStoreFromNullStringMap returns a driver.Valuer that produces a PostgreSQL hstore from the given Go map[string]sql.NullString.
// Invalid NullStrings are encoded as hstore NULLs; a nil map becomes SQL NULL.
func HStoreFromNullStringMap(val map[string]sql.NullString) driver.Valuer {
	return hstoreFromNullStringMap{val: val}
}

// HStoreToNullStringMap returns an sql.Scanner that converts a PostgreSQL hstore into a Go map[string]sql.NullString and sets it to val.
// hstore NULL values are scanned as invalid NullStrings.
func HStoreToNullStringMap(val *map[string]sql.NullString) sql.Scanner {
	return hstoreToNullStringMap{val: val}
}
// hstoreFromStringMap adapts a map[string]string to driver.Valuer.
type hstoreFromStringMap struct {
	val map[string]string
}

// Value encodes the map in hstore wire format: quoted "key"=>"value"
// pairs joined by commas. A nil map yields SQL NULL.
func (v hstoreFromStringMap) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	}
	out := []byte{}
	for key, val := range v.val {
		// pgAppendQuote1 (defined elsewhere in the package) appends a
		// quoted/escaped representation of its argument.
		out = pgAppendQuote1(out, []byte(key))
		out = append(out, '=', '>')
		out = pgAppendQuote1(out, []byte(val))
		out = append(out, ',')
	}
	if len(out) > 0 {
		out = out[:len(out)-1] // drop the last ','
	}
	return out, nil
}

// hstoreToStringMap adapts a *map[string]string to sql.Scanner.
type hstoreToStringMap struct {
	val *map[string]string
}

// Scan parses hstore bytes into a fresh map and assigns it to v.val.
// A SQL NULL source leaves the destination untouched; hstore NULL values
// become empty strings.
func (v hstoreToStringMap) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	// pgparsehstore (defined elsewhere) yields [key, value] byte-slice
	// pairs; the value element is nil for an hstore NULL.
	elems := pgparsehstore(data)
	hash := make(map[string]string)
	for i := 0; i < len(elems); i++ {
		if value := elems[i][1]; value != nil {
			hash[string(elems[i][0])] = string(value)
		} else {
			hash[string(elems[i][0])] = ""
		}
	}
	*v.val = hash
	return nil
}
// hstoreFromStringPtrMap adapts a map[string]*string to driver.Valuer.
type hstoreFromStringPtrMap struct {
	val map[string]*string
}

// Value encodes the map in hstore wire format; nil value pointers become
// the unquoted hstore NULL keyword. A nil map yields SQL NULL.
func (v hstoreFromStringPtrMap) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	}
	out := []byte{}
	for key, val := range v.val {
		out = pgAppendQuote1(out, []byte(key))
		out = append(out, '=', '>')
		if val != nil {
			out = pgAppendQuote1(out, []byte(*val))
		} else {
			out = append(out, 'N', 'U', 'L', 'L')
		}
		out = append(out, ',')
	}
	if len(out) > 0 {
		out = out[:len(out)-1] // drop the last ','
	}
	return out, nil
}

// hstoreToStringPtrMap adapts a *map[string]*string to sql.Scanner.
type hstoreToStringPtrMap struct {
	val *map[string]*string
}

// Scan parses hstore bytes into a fresh map and assigns it to v.val.
// hstore NULL values map to nil pointers; a SQL NULL source leaves the
// destination untouched.
func (v hstoreToStringPtrMap) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	elems := pgparsehstore(data)
	hash := make(map[string]*string)
	for i := 0; i < len(elems); i++ {
		if value := elems[i][1]; value != nil {
			// Copy into a local so each entry gets its own pointer.
			str := string(value)
			hash[string(elems[i][0])] = &str
		} else {
			hash[string(elems[i][0])] = nil
		}
	}
	*v.val = hash
	return nil
}
// hstoreFromNullStringMap adapts a map[string]sql.NullString to driver.Valuer.
type hstoreFromNullStringMap struct {
	val map[string]sql.NullString
}

// Value encodes the map in hstore wire format; invalid NullStrings become
// the unquoted hstore NULL keyword. A nil map yields SQL NULL.
func (v hstoreFromNullStringMap) Value() (driver.Value, error) {
	if v.val == nil {
		return nil, nil
	}
	out := []byte{}
	for key, val := range v.val {
		out = pgAppendQuote1(out, []byte(key))
		out = append(out, '=', '>')
		if val.Valid {
			out = pgAppendQuote1(out, []byte(val.String))
		} else {
			out = append(out, 'N', 'U', 'L', 'L')
		}
		out = append(out, ',')
	}
	if len(out) > 0 {
		out = out[:len(out)-1] // drop the last ','
	}
	return out, nil
}

// hstoreToNullStringMap adapts a *map[string]sql.NullString to sql.Scanner.
type hstoreToNullStringMap struct {
	val *map[string]sql.NullString
}

// Scan parses hstore bytes into a fresh map and assigns it to v.val.
// hstore NULL values map to invalid (zero) NullStrings; a SQL NULL source
// leaves the destination untouched.
func (v hstoreToNullStringMap) Scan(src interface{}) error {
	data, err := srcbytes(src)
	if err != nil {
		return err
	} else if data == nil {
		return nil
	}
	elems := pgparsehstore(data)
	hash := make(map[string]sql.NullString)
	for i := 0; i < len(elems); i++ {
		if value := elems[i][1]; value != nil {
			str := sql.NullString{String: string(value), Valid: true}
			hash[string(elems[i][0])] = str
		} else {
			hash[string(elems[i][0])] = sql.NullString{}
		}
	}
	*v.val = hash
	return nil
} | pgsql/hstore.go | 0.680454 | 0.406509 | hstore.go | starcoder |
package ihex
// RecordType defines what the type of a single record in a HEX file.
// Values correspond to the Intel HEX record-type field (the two hex digits
// following the byte count and address fields of a record).
type RecordType byte

const (
	// RecordData indicates the record contains data and a 16-bit starting address for the data.
	// The byte count specifies number of data bytes in the record.
	RecordData RecordType = 0x00
	// RecordEOF indicates that this record is the end of the HEX file. Must occur exactly once.
	// The data field is empty (thus byte count is 00) and the address field is typically 0000.
	RecordEOF RecordType = 0x01
	// RecordExtSegment is the extended segment address record type. Usable by I16HEX files only.
	// The data field contains a 16-bit segment base address (thus byte count is always 02) compatible with 80x86 real mode addressing.
	// The address field (typically 0000) is ignored.
	// The segment address from the most recent 02 record is multiplied by 16 and added to each subsequent data record address to form the physical starting address for the data.
	// This allows addressing up to one megabyte of address space.
	RecordExtSegment RecordType = 0x02
	// RecordStartSegment is the start segment address record type. Usable by I16HEX files only.
	// For 80x86 processors, specifies the initial content of the CS:IP registers (i.e., the starting execution address).
	// The address field is 0000, the byte count is always 04, the first two data bytes are the CS value, the latter two are the IP value.
	RecordStartSegment RecordType = 0x03
	// RecordExtLinear is the extended linear address record type. Usable by I32HEX files only.
	// Allows for 32 bit addressing (up to 4GiB). The record's address field is ignored (typically 0000) and its byte count is always 02.
	// The two data bytes (big endian) specify the upper 16 bits of the 32 bit absolute address for all subsequent type 00 records; these upper address bits apply until the next 04 record.
	// The absolute address for a type 00 record is formed by combining the upper 16 address bits of the most recent 04 record with the low 16 address bits of the 00 record.
	// If a type 00 record is not preceded by any type 04 records then its upper 16 address bits default to 0000.
	RecordExtLinear RecordType = 0x04
	// RecordStartLinear is the start linear address record type. Usable by I32HEX files only.
	// The address field is 0000 (not used) and the byte count is always 04.
	// The four data bytes represent a 32-bit address value (big-endian).
	// In the case of 80386 and higher CPUs, this address is loaded into the EIP register.
	RecordStartLinear RecordType = 0x05
) | recordType.go | 0.540924 | 0.802942 | recordType.go | starcoder |
package tests
/*
Find valid ZQ examples in markdown, run them against
https://github.com/brimsec/zq-sample-data/zeek-default, and compare results in
docs with results produced.
In separate patches:
- Find files as opposed to hard-coding them
Use markers in markdown fenced code blocks to denote either a zq command or
output from zq. Use is like:
```zq-command
zq "* | count()
```
```zq-output
1234
```
This is in compliance with https://spec.commonmark.org/0.29/#example-113
Doc authors MUST pair off each command and output in their own fenced code blocks.
A zq-command code block MUST be one line after the leading line, which includes
the info string. A zq-command MUST start with "zq". A zq-command MUST quote the
full zql with single quotes. The zql MAY contain double quotes, but it MUST NOT
contain single quotes.
Examples:
zql '* | count()' *.log.gz # ok
zql * | count() *.log.gz # not ok
zql "* | count()" *.log.gz # not ok
zql 'field="value" | count()' *.log.gz # ok
zql 'field=\'value\' | count()' *.log.gz # not ok
zql 'field='value' | count()' *.log.gz # not ok
zql "field=\"value\" | count()" *.log.gz # not ok
A zq-command MUST reference one or more files or globs, expanded at
zq-sample-data/zeek-default.
Examples:
zql '* | count()' *.log.gz # ok
zql '* | count()' conn.log.gz # ok
zql '* | count()' conn.log.gz http.log.gz # ok
zql '* | count()' c*.log.gz d*.log.gz # ok
zql '* | count()' # not ok
A zq-command MAY contain a sh-compliant comment string (denoted by '#') on the
line. Everything including and after the first # is stripped away.
A zq-output fenced code block MAY be multiple lines. zq-output MUST be verbatim
from the actual zq output.
zq-output MAY contain an optional marker to support record truncation. The
marker is denoted by "head:N" where N MUST be a non-negative integer
representing the number of lines to show. The marker MAY contain an ellipsis
via three dots "..." at the end to imply to readers the continuation of records
not shown.
Example:
```zq-output head:4
_PATH COUNT
conn 3
dhcp 2
dns 1
...
```
If head is malformed or N is invalid, fall back to verification against all
records.
*/
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"os/exec"
"path/filepath"
"strconv"
"strings"
"github.com/yuin/goldmark"
"github.com/yuin/goldmark/ast"
"github.com/yuin/goldmark/text"
)
// ZQExampleBlockType identifies the info string of a markdown fenced code
// block recognized by this package.
type ZQExampleBlockType string

const (
	// ZQCommand marks a fenced block holding a runnable zq command line.
	ZQCommand ZQExampleBlockType = "zq-command"
	// ZQOutput marks a fenced block holding the expected zq output.
	ZQOutput ZQExampleBlockType = "zq-output"
	// ZQOutputHead is the info-string marker ("head:N") that limits
	// verification to the first N output lines.
	ZQOutputHead string = "head:"
)

// ZQExampleInfo holds a ZQ example as found in markdown.
type ZQExampleInfo struct {
	command *ast.FencedCodeBlock
	output *ast.FencedCodeBlock
	outputLineCount int
}

// ZQExampleTest holds a ZQ example as a testcase found from markdown, derived
// from a ZQExampleInfo.
type ZQExampleTest struct {
	Name string
	Command []string
	Expected string
	OutputLineCount int
}
// Run runs the zq command for this test case and returns up to
// OutputLineCount lines of its combined stdout/stderr. On command failure
// the raw combined output is returned alongside the error.
func (t *ZQExampleTest) Run() (string, error) {
	c := exec.Command(t.Command[0], t.Command[1:]...)
	var b bytes.Buffer
	c.Stdout = &b
	c.Stderr = &b
	if err := c.Run(); err != nil {
		// b.String() instead of the redundant string(b.Bytes()).
		return b.String(), err
	}
	// Collect the first OutputLineCount lines. A strings.Builder avoids
	// the quadratic cost of repeated string concatenation.
	var out strings.Builder
	scanner := bufio.NewScanner(&b)
	for i := 0; i < t.OutputLineCount && scanner.Scan(); i++ {
		out.WriteString(scanner.Text())
		out.WriteByte('\n')
	}
	if err := scanner.Err(); err != nil {
		return out.String(), err
	}
	return out.String(), nil
}
// ZQOutputLineCount returns the number of lines against which zq-output should
// be verified: either the block's full line count, or the N from a
// "head:N" marker in the block's info string.
func ZQOutputLineCount(fcb *ast.FencedCodeBlock, source []byte) int {
	count := fcb.Lines().Len()
	if fcb.Info == nil {
		return count
	}
	// Split the info string (e.g. "zq-output head:4") on the marker.
	info := strings.Split(string(fcb.Info.Segment.Value(source)), ZQOutputHead)
	if len(info) != 2 {
		return count
	}
	// Fall back to the full line count when N is malformed or negative.
	customCount, err := strconv.Atoi(info[1])
	if err != nil || customCount < 0 {
		return count
	}
	return customCount
}
// CollectExamples returns zq-command / zq-output pairs from a single
// markdown source after parsing it as a goldmark AST. Pairs must appear
// in strict command-then-output order; any violation stops the walk with
// an error.
func CollectExamples(node ast.Node, source []byte) ([]ZQExampleInfo, error) {
	var examples []ZQExampleInfo
	var command *ast.FencedCodeBlock
	err := ast.Walk(node, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
		// Walk() calls its walker func twice. Once when entering and
		// once before exiting, after walking any children. We need
		// only do this processing once.
		if !entering || n == nil || n.Kind() != ast.KindFencedCodeBlock {
			return ast.WalkContinue, nil
		}
		fcb, ok := n.(*ast.FencedCodeBlock)
		if !ok {
			return ast.WalkStop,
				fmt.Errorf("likely goldmark bug: Kind() reports a " +
					"FencedCodeBlock, but the type assertion failed")
		}
		bt := ZQExampleBlockType(fcb.Language(source))
		switch bt {
		case ZQCommand:
			if command != nil {
				return ast.WalkStop,
					fmt.Errorf("subsequent %s after another %s", bt, ZQCommand)
			}
			command = fcb
		case ZQOutput:
			if command == nil {
				// Fixed typo in error message: "preceeding" -> "preceding".
				return ast.WalkStop,
					fmt.Errorf("%s without a preceding %s", bt, ZQCommand)
			}
			outputLineCount := ZQOutputLineCount(fcb, source)
			examples = append(examples, ZQExampleInfo{command, fcb, outputLineCount})
			command = nil
			// A fenced code block need not specify an info string, or it
			// could be arbitrary. The default case is to ignore everything
			// else.
		}
		return ast.WalkContinue, nil
	})
	if command != nil && err == nil {
		err = fmt.Errorf("%s without a following %s", ZQCommand, ZQOutput)
	}
	return examples, err
}
// BlockString returns the text of a ast.FencedCodeBlock as a string,
// concatenating the block's line segments from source.
// NOTE(review): segments appear to carry their own trailing newlines —
// confirm against goldmark's text.Segment semantics.
func BlockString(fcb *ast.FencedCodeBlock, source []byte) string {
	var b strings.Builder
	for i := 0; i < fcb.Lines().Len(); i++ {
		line := fcb.Lines().At(i)
		b.Write(line.Value(source))
	}
	return b.String()
}
// QualifyCommand translates a zq-command example to a runnable command,
// including abspath to zq binary and globs turned into absolute file paths.
// The input must have the shape: zq [flags] '<zql>' <glob>... [# comment]
func QualifyCommand(command string) ([]string, error) {
	command = strings.TrimSpace(command)
	// Strip a trailing shell-style comment.
	command = strings.Split(command, "#")[0]
	// The single-quoted zql splits the command into exactly three pieces:
	// "zq [flags]", the zql itself, and the file arguments.
	pieces := strings.Split(command, "'")
	if len(pieces) != 3 {
		return nil, fmt.Errorf("could not split zq command into 3 tokens: %s", command)
	}
	// Renamed snake_case locals (command_and_flags etc.) to Go-style
	// mixedCaps; behavior is unchanged.
	cmdAndFlags := strings.Split(strings.TrimSpace(pieces[0]), " ")
	if cmdAndFlags[0] != "zq" {
		return nil, fmt.Errorf("command does not start with zq: %s", command)
	}
	// Nice, but this makes unit testing more complicated.
	zq, err := ZQAbsPath()
	if err != nil {
		return nil, err
	}
	cmdAndFlags[0] = zq
	zql := strings.TrimSpace(pieces[1])
	var fileArgs []string
	sampledata, err := ZQSampleDataAbsPath()
	if err != nil {
		return nil, err
	}
	// Expand each glob relative to zq-sample-data/zeek-default.
	for _, relGlob := range strings.Split(strings.TrimSpace(pieces[2]), " ") {
		files, err := filepath.Glob(filepath.Join(sampledata, "zeek-default", relGlob))
		if err != nil {
			return nil, err
		}
		fileArgs = append(fileArgs, files...)
	}
	finalized := cmdAndFlags
	finalized = append(finalized, zql)
	finalized = append(finalized, fileArgs...)
	return finalized, nil
}
// TestcasesFromFile returns ZQ example test cases from ZQ example pairs found
// in a file. Test names are the file path relative to the repo root with a
// 1-based example index appended.
func TestcasesFromFile(filename string) ([]ZQExampleTest, error) {
	var tests []ZQExampleTest
	var examples []ZQExampleInfo
	absfilename, err := filepath.Abs(filename)
	if err != nil {
		return nil, err
	}
	repopath, err := RepoAbsPath()
	if err != nil {
		return nil, err
	}
	source, err := ioutil.ReadFile(absfilename)
	if err != nil {
		return nil, err
	}
	reader := text.NewReader(source)
	parser := goldmark.DefaultParser()
	doc := parser.Parse(reader)
	examples, err = CollectExamples(doc, source)
	if err != nil {
		return nil, err
	}
	for i, example := range examples {
		// Convert strings like
		// /home/user/zql/docs/processors/README.md to
		// /zql/docs/processors/README.md . RepoAbsPath() does not
		// include a trailing filepath.Separator in its return.
		testname := strings.TrimPrefix(absfilename, repopath)
		// Now convert strings like /zql/docs/processors/README.md to
		// zql/docs/processors/README.md1 go test will call such a test
		// something like
		// TestMarkdownExamples/zql/docs/processors/README.md1
		testname = strings.TrimPrefix(testname, string(filepath.Separator)) + strconv.Itoa(i+1)
		command, err := QualifyCommand(BlockString(example.command, source))
		if err != nil {
			return tests, err
		}
		// Drop a trailing "...\n" continuation marker from truncated output.
		output := strings.TrimSuffix(BlockString(example.output, source), "...\n")
		tests = append(tests, ZQExampleTest{testname, command, output, example.outputLineCount})
	}
	return tests, nil
}
// DocMarkdownFiles returns markdown files to inspect, as absolute paths
// under the repo root.
func DocMarkdownFiles() ([]string, error) {
	// This needs to find markdown files in the repo. Right now we just
	// declare them directly.
	files := []string{
		"zql/docs/processors/README.md",
		"zql/docs/search-syntax/README.md",
		"zql/docs/aggregate-functions/README.md",
		"zql/docs/expressions/README.md",
		"zql/docs/data-types/README.md",
	}
	repopath, err := RepoAbsPath()
	if err != nil {
		return nil, err
	}
	// Rewrite the relative paths in place to absolute ones.
	for i, file := range files {
		files[i] = filepath.Join(repopath, file)
	}
	return files, nil
}
// ZQExampleTestCases returns all test cases derived from doc examples,
// aggregated across every file reported by DocMarkdownFiles.
func ZQExampleTestCases() ([]ZQExampleTest, error) {
	var alltests []ZQExampleTest
	files, err := DocMarkdownFiles()
	if err != nil {
		return nil, err
	}
	for _, filename := range files {
		tests, err := TestcasesFromFile(filename)
		if err != nil {
			return nil, err
		}
		alltests = append(alltests, tests...)
	}
	return alltests, nil
} | tests/zq_example.go | 0.727104 | 0.752456 | zq_example.go | starcoder |
package types
import (
"bytes"
"io"
"reflect"
"github.com/lyraproj/pcore/px"
)
// taggedType bundles a reflected Go type with its `puppet` struct tags,
// any remaining (non-puppet) struct tags, and user-supplied annotations.
type taggedType struct {
	typ reflect.Type // the annotated Go type
	puppetTags map[string]string // raw puppet tag text per field name (plus explicit extras)
	annotations px.OrderedMap
	tags map[string]map[string]string // non-puppet struct tags per field name
	parsedPuppetTags map[string]px.OrderedMap // puppet tags parsed into hashes by initTags
}
// TagsAnnotationType is the puppet Object type backing px.TagsAnnotation
// values; it is registered during package init.
var TagsAnnotationType px.ObjectType

// init wires this package's constructors into the px package and
// registers the TagsAnnotation object type with its two constructors
// (positional and from-init-hash).
func init() {
	px.NewTaggedType = newTaggedType
	px.NewAnnotatedType = newAnnotatedType
	TagsAnnotationType = newGoObjectType(`TagsAnnotation`, reflect.TypeOf((*px.TagsAnnotation)(nil)).Elem(), `Annotation{
		attributes => {
			# Arbitrary data used by custom implementations
			tags => Hash[String,String]
		}
	}`,
		func(ctx px.Context, args []px.Value) px.Value {
			// Positional constructor: args[0] is the tags hash.
			return NewTagsAnnotation(args[0].(px.OrderedMap))
		},
		func(ctx px.Context, args []px.Value) px.Value {
			// From-hash constructor: read `tags` out of the init hash,
			// defaulting to an empty map.
			h := args[0].(*Hash)
			return NewTagsAnnotation(h.Get5(`tags`, px.EmptyMap).(px.OrderedMap))
		},
	)
}
// newAnnotatedType wraps a reflected type together with its puppet tags
// and annotations, eagerly parsing the struct tags.
func newAnnotatedType(typ reflect.Type, puppetTags map[string]string, annotations px.OrderedMap) px.AnnotatedType {
	tt := &taggedType{typ: typ, puppetTags: puppetTags, annotations: annotations}
	tt.initTags()
	return tt
}

// newTaggedType is newAnnotatedType with an empty annotation map.
func newTaggedType(typ reflect.Type, puppetTags map[string]string) px.AnnotatedType {
	tt := &taggedType{typ: typ, puppetTags: puppetTags, annotations: emptyMap}
	tt.initTags()
	return tt
}

// tagsAnnotation is the runtime value behind TagsAnnotationType.
type tagsAnnotation struct {
	tags px.OrderedMap
}

// NewTagsAnnotation returns a TagsAnnotation holding the given tag map.
func NewTagsAnnotation(tags px.OrderedMap) px.TagsAnnotation {
	return &tagsAnnotation{tags}
}
// Equals reports whether value is a tagsAnnotation with an equal tag map.
func (c *tagsAnnotation) Equals(value interface{}, guard px.Guard) bool {
	if oc, ok := value.(*tagsAnnotation); ok {
		return c.tags.Equals(oc.tags, guard)
	}
	return false
}

// PType returns the puppet type of this value.
func (c *tagsAnnotation) PType() px.Type {
	return TagsAnnotationType
}

// String returns the default string representation.
func (c *tagsAnnotation) String() string {
	return px.ToString(c)
}

// ToString writes the formatted representation of this value to bld.
func (c *tagsAnnotation) ToString(bld io.Writer, format px.FormatContext, g px.RDetect) {
	ObjectToString(c, format, bld, g)
}

// Get returns the attribute named key; `tags` is the only attribute.
func (c *tagsAnnotation) Get(key string) (value px.Value, ok bool) {
	if key == `tags` {
		return c.tags, true
	}
	return nil, false
}

// InitHash returns the hash from which this annotation can be recreated.
func (c *tagsAnnotation) InitHash() px.OrderedMap {
	return px.SingletonMap(`tags`, c.tags)
}

// Tag returns the value for key, or the empty string when absent.
func (c *tagsAnnotation) Tag(key string) string {
	if t, ok := c.tags.Get4(key); ok {
		return t.String()
	}
	return ``
}

// Tags returns the full tag map.
func (c *tagsAnnotation) Tags() px.OrderedMap {
	return c.tags
}
// Validate is a no-op: a tags annotation imposes no constraints on the
// annotated type. The receiver is left unnamed since it is unused (the
// original named it r, inconsistently with the c used by every other
// method on this type).
func (*tagsAnnotation) Validate(c px.Context, annotatedType px.Annotatable) {
}
// Annotations returns the user-supplied annotations.
func (tg *taggedType) Annotations() px.OrderedMap {
	return tg.annotations
}

// Type returns the reflected Go type being annotated.
func (tg *taggedType) Type() reflect.Type {
	return tg.typ
}

// Tags returns the parsed puppet tags, keyed by field name.
func (tg *taggedType) Tags() map[string]px.OrderedMap {
	return tg.parsedPuppetTags
}

// OtherTags returns the non-puppet struct tags, keyed by field name.
func (tg *taggedType) OtherTags() map[string]map[string]string {
	return tg.tags
}

// initTags scans the struct fields of tg.typ, separating each field's
// `puppet` tag from its other tags, merges in any explicitly supplied
// puppet tags, and parses the puppet tag values into hashes.
func (tg *taggedType) initTags() {
	fs := Fields(tg.typ)
	nf := len(fs)
	tags := make(map[string]map[string]string, 7)
	puppet := make(map[string]string)
	if nf > 0 {
		for i, f := range fs {
			if i == 0 && f.Anonymous {
				// Parent
				continue
			}
			if f.PkgPath != `` {
				// Unexported
				continue
			}
			ft := ParseTags(string(f.Tag))
			// Pull the puppet tag out of the general tag map.
			if p, ok := ft[`puppet`]; ok {
				puppet[f.Name] = p
				delete(ft, `puppet`)
			}
			if len(ft) > 0 {
				tags[f.Name] = ft
			}
		}
	}
	// Explicitly supplied puppet tags override those found on the fields.
	if tg.puppetTags != nil && len(tg.puppetTags) > 0 {
		for k, v := range tg.puppetTags {
			puppet[k] = v
		}
	}
	// Parse each raw puppet tag value into a hash; unparseable values
	// are silently dropped.
	pt := make(map[string]px.OrderedMap, len(puppet))
	for k, v := range puppet {
		if h, ok := ParseTagHash(v); ok {
			pt[k] = h
		}
	}
	tg.parsedPuppetTags = pt
	tg.tags = tags
}
// ParseTags splits a raw struct-tag string (e.g. `json:"a" puppet:"b"`)
// into a name -> value map. Unlike reflect.StructTag lookup, backslash
// escapes inside the quoted value are processed here. Parsing stops
// silently at the first malformed entry, returning what was collected.
func ParseTags(tag string) map[string]string {
	result := make(map[string]string)
	for tag != "" {
		// Skip leading space.
		i := 0
		for i < len(tag) && tag[i] == ' ' {
			i++
		}
		tag = tag[i:]
		tagLen := len(tag)
		if tagLen == 0 {
			break
		}
		// Scan the tag name: runs up to a control char, ':', '"' or DEL.
		var c rune
		for i, c = range tag {
			if c < ' ' || c == ':' || c == '"' || c == 0x7f {
				break
			}
		}
		// A valid entry needs a non-empty name followed by `:"`.
		if i == 0 || i+1 >= tagLen || c != ':' || tag[i+1] != '"' {
			break
		}
		name := string(tag[:i])
		tag = tag[i+2:] // Skip ':' and leading '"'
		// Accumulate the value, honoring backslash escapes, until the
		// closing unescaped quote.
		esc := false
		tb := bytes.NewBufferString(``)
		for i, c = range tag {
			if esc {
				tb.WriteRune(c)
				esc = false
			} else if c == '\\' {
				esc = true
			} else if c == '"' {
				break
			} else {
				tb.WriteRune(c)
			}
		}
		// Unterminated value (dangling escape or missing quote): stop.
		if esc || c != '"' {
			break
		}
		result[name] = tb.String()
		tag = tag[i+1:]
	}
	return result
} | types/taggedtype.go | 0.547706 | 0.41739 | taggedtype.go | starcoder |
package datefmt
import (
"bytes"
"io"
"text/template"
"time"
"github.com/tigorlazuardi/datefmt/parser"
)
// Formatter caches the broken-down date/time components produced by its
// embedded Parser and exposes them as fields to text/template format
// strings (see Render for the available placeholders).
type Formatter struct {
	parser.Parser
	Day uint8 // day of month
	DayLong string // full day name
	DayShort string // three letter day name
	Hour uint8
	Minute uint8
	Month uint8
	MonthLong string // full month name
	MonthShort string // three letter month name
	PaddedDay string // day of month with leading zero
	PaddedHour string
	PaddedMinute string
	PaddedMonth string
	PaddedSecond string
	Second uint8
	SecondUnix int64 // seconds since the Unix epoch
	Year uint16
	YearShort uint8 // last two digits of the year
	// template is the reusable base template; each render parses the
	// caller's format string into it.
	template *template.Template
}
/*
Turns the given string to replaced format.
Use `MustRender` if the given string format is hard coded so you don't have to handle errors.
Example:
str, err := formatter.Render("Jakarta, {{.Day}} {{.MonthLong}} {{.Year}} Pukul {{.PaddedHour}}:{{.PaddedMinute}} WIB")
// str will have output
// Output: Jakarta, 1 Oktober 2021 Pukul 13:05 WIB
Available APIs:
{{.Day}} // day of month
{{.DayLong}} // full day name
{{.DayShort}} // three letter day name
{{.Hour}} // hour of day
{{.Minute}} // minute of hour
{{.Month}} // month of year
{{.MonthLong}} // full month name
{{.MonthShort}} // three letter month name
{{.PaddedDay}} // day of month with leading 0
{{.PaddedHour}} // hour with leading 0
{{.PaddedMinute}} // minute with leading 0
{{.PaddedMonth}} // month with leading 0
{{.PaddedSecond}} // second with leading 0
{{.Second}} // second of minute
{{.SecondUnix}} // second from epoch
{{.Year}} // 4 digit year
{{.YearShort}} // last 2 digit of year
*/
// Render parses s as a text/template and executes it against the cached
// time components, returning the rendered string. Both parse and
// execution errors are returned; the original implementation discarded
// the Execute error and always returned nil after a successful parse.
func (f Formatter) Render(s string) (string, error) {
	t, err := f.template.Parse(s)
	if err != nil {
		return "", err
	}
	b := &bytes.Buffer{}
	err = t.Execute(b, f)
	return b.String(), err
}
/*
Turns the given string to replaced format and write the result to the given writer
Example:
buf := &bytes.Buffer{}
str, err := formatter.RenderWriter(buf, "Jakarta, {{.Day}} {{.MonthLong}} {{.Year}} Pukul {{.PaddedHour}}:{{.PaddedMinute}} WIB")
// buf.String() will have output
// Output: Jakarta, 1 Oktober 2021 Pukul 13:05 WIB
Available APIs:
{{.Day}} // day of month
{{.DayLong}} // full day name
{{.DayShort}} // three letter day name
{{.Hour}} // hour of day
{{.Minute}} // minute of hour
{{.Month}} // month of year
{{.MonthLong}} // full month name
{{.MonthShort}} // three letter month name
{{.PaddedDay}} // day of month with leading 0
{{.PaddedHour}} // hour with leading 0
{{.PaddedMinute}} // minute with leading 0
{{.PaddedMonth}} // month with leading 0
{{.PaddedSecond}} // second with leading 0
{{.Second}} // second of minute
{{.SecondUnix}} // second from epoch
{{.Year}} // 4 digit year
{{.YearShort}} // last 2 digit of year
*/
func (f Formatter) RenderWriter(writer io.Writer, s string) error {
	t, err := f.template.Parse(s)
	if err != nil {
		return err
	}
	// Template execution errors (e.g. unknown fields) are propagated.
	err = t.Execute(writer, f)
	return err
}
/*
Turns the given string to replaced format, function panics when failed to parse.
Example:
str := formatter.MustRender("Jakarta, {{.Day}} {{.MonthLong}} {{.Year}} Pukul {{.PaddedHour}}:{{.PaddedMinute}} WIB")
// str will have output
// Output: Jakarta, 1 Oktober 2021 Pukul 13:05 WIB
Available APIs:
{{.Day}} // day of month
{{.DayLong}} // full day name
{{.DayShort}} // three letter day name
{{.Hour}} // hour of day
{{.Minute}} // minute of hour
{{.Month}} // month of year
{{.MonthLong}} // full month name
{{.MonthShort}} // three letter month name
{{.PaddedDay}} // day of month with leading 0
{{.PaddedHour}} // hour with leading 0
{{.PaddedMinute}} // minute with leading 0
{{.PaddedMonth}} // month with leading 0
{{.PaddedSecond}} // second with leading 0
{{.Second}} // second of minute
{{.SecondUnix}} // second from epoch
{{.Year}} // 4 digit year
{{.YearShort}} // last 2 digit of year
*/
func (f Formatter) MustRender(s string) string {
	// template.Must panics on parse errors, matching the documented contract.
	t := template.Must(f.template.Parse(s))
	b := &bytes.Buffer{}
	// NOTE(review): execution errors are silently discarded here and may
	// yield partial output — confirm this is intended for a Must variant.
	_ = t.Execute(b, f)
	return b.String()
}
// Updates current formatter time with new time, re-reading every cached
// component from the underlying Parser so subsequent renders see values
// derived from t.
func (f *Formatter) ParseTime(t time.Time) {
	f.Parser.ParseTime(t)
	f.Day = f.Parser.Day()
	f.DayLong = f.Parser.DayLong()
	f.DayShort = f.Parser.DayShort()
	f.Hour = f.Parser.Hour()
	f.Minute = f.Parser.Minute()
	f.Month = f.Parser.Month()
	f.MonthLong = f.Parser.MonthLong()
	f.MonthShort = f.Parser.MonthShort()
	f.PaddedDay = f.Parser.PaddedDay()
	f.PaddedHour = f.Parser.PaddedHour()
	f.PaddedMinute = f.Parser.PaddedMinute()
	f.PaddedSecond = f.Parser.PaddedSecond()
	f.Second = f.Parser.Second()
	f.SecondUnix = f.Parser.SecondUnix()
	f.Year = f.Parser.Year()
	f.YearShort = f.Parser.YearShort()
}
/*
Creates new formatter for given time with specific parser.
Example:
formatter := datefmt.New(time.Now(), parser.NewIndonesian())
*/
func New(t time.Time, parser parser.Parser) Formatter {
f := Formatter{Parser: parser}
f.ParseTime(t)
f.template = template.New("datefmt/formatter")
return f
} | datefmt.go | 0.697506 | 0.40869 | datefmt.go | starcoder |
package metrics
import (
"time"
e2eperftype "github.com/divinerapier/learn-kubernetes/test/e2e/perftype"
)
// LatencyMetric holds the 50th/90th/99th/100th latency percentiles of a
// measured operation, for consumption by the perf dashboard.
type LatencyMetric struct {
	Perc50 time.Duration `json:"Perc50"`
	Perc90 time.Duration `json:"Perc90"`
	Perc99 time.Duration `json:"Perc99"`
	Perc100 time.Duration `json:"Perc100"`
}
// PodStartupLatency aggregates latency percentiles for each phase of pod
// startup (create→schedule, schedule→run, run→watch, schedule→watch) plus
// the end-to-end latency.
type PodStartupLatency struct {
	CreateToScheduleLatency LatencyMetric `json:"createToScheduleLatency"`
	ScheduleToRunLatency LatencyMetric `json:"scheduleToRunLatency"`
	RunToWatchLatency LatencyMetric `json:"runToWatchLatency"`
	ScheduleToWatchLatency LatencyMetric `json:"scheduleToWatchLatency"`
	E2ELatency LatencyMetric `json:"e2eLatency"`
}
// SummaryKind returns the kind identifier of this summary,
// "PodStartupLatency".
func (l *PodStartupLatency) SummaryKind() string {
	return "PodStartupLatency"
}
// PrintHumanReadable returns the pod startup latency as pretty-printed JSON.
func (l *PodStartupLatency) PrintHumanReadable() string {
	return PrettyPrintJSON(l)
}
// PrintJSON returns the pod startup latency converted to perf-dashboard
// PerfData, as pretty-printed JSON.
func (l *PodStartupLatency) PrintJSON() string {
	return PrettyPrintJSON(PodStartupLatencyToPerfData(l))
}
// latencyToPerfData converts a LatencyMetric into a dashboard DataItem,
// expressing each percentile in milliseconds.
func latencyToPerfData(l LatencyMetric, name string) e2eperftype.DataItem {
	return e2eperftype.DataItem{
		Data: map[string]float64{
			"Perc50": float64(l.Perc50) / 1000000, // time.Duration is in ns; ns -> ms
			"Perc90": float64(l.Perc90) / 1000000,
			"Perc99": float64(l.Perc99) / 1000000,
			"Perc100": float64(l.Perc100) / 1000000,
		},
		Unit: "ms",
		Labels: map[string]string{
			"Metric": name,
		},
	}
}
// PodStartupLatencyToPerfData transforms PodStartupLatency to PerfData.
func PodStartupLatencyToPerfData(latency *PodStartupLatency) *e2eperftype.PerfData {
perfData := &e2eperftype.PerfData{Version: currentAPICallMetricsVersion}
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.CreateToScheduleLatency, "create_to_schedule"))
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.ScheduleToRunLatency, "schedule_to_run"))
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.RunToWatchLatency, "run_to_watch"))
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.ScheduleToWatchLatency, "schedule_to_watch"))
perfData.DataItems = append(perfData.DataItems, latencyToPerfData(latency.E2ELatency, "pod_startup"))
return perfData
} | test/e2e/framework/metrics/pod.go | 0.756897 | 0.487917 | pod.go | starcoder |
package stats
import (
"context"
"time"
"github.com/dustin/go-humanize"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
)
// ctxKeyType is a private key type so these context values cannot collide
// with keys defined in other packages.
type ctxKeyType string

// Context keys under which per-query statistics accumulators are stored.
const (
	trailersKey ctxKeyType = "trailers"
	chunksKey ctxKeyType = "chunks"
	ingesterKey ctxKeyType = "ingester"
	storeKey ctxKeyType = "store"
)
// Result contains LogQL query statistics, combining per-ingester and
// per-store counters with a derived overall summary.
type Result struct {
	Ingester Ingester
	Store Store
	Summary Summary
}
// Log emits every counter of a query statistics Result at debug level,
// humanizing byte counts for readability.
func Log(log log.Logger, r Result) {
	level.Debug(log).Log(
		"Ingester.TotalReached", r.Ingester.TotalReached,
		"Ingester.TotalChunksMatched", r.Ingester.TotalChunksMatched,
		"Ingester.TotalBatches", r.Ingester.TotalBatches,
		"Ingester.TotalLinesSent", r.Ingester.TotalLinesSent,
		"Ingester.HeadChunkBytes", humanize.Bytes(uint64(r.Ingester.HeadChunkBytes)),
		"Ingester.HeadChunkLines", r.Ingester.HeadChunkLines,
		"Ingester.DecompressedBytes", humanize.Bytes(uint64(r.Ingester.DecompressedBytes)),
		"Ingester.DecompressedLines", r.Ingester.DecompressedLines,
		"Ingester.CompressedBytes", humanize.Bytes(uint64(r.Ingester.CompressedBytes)),
		"Ingester.TotalDuplicates", r.Ingester.TotalDuplicates,
		"Store.TotalChunksRef", r.Store.TotalChunksRef,
		"Store.TotalDownloadedChunks", r.Store.TotalDownloadedChunks,
		"Store.TimeDownloadingChunks", r.Store.TimeDownloadingChunks,
		"Store.HeadChunkBytes", humanize.Bytes(uint64(r.Store.HeadChunkBytes)),
		"Store.HeadChunkLines", r.Store.HeadChunkLines,
		"Store.DecompressedBytes", humanize.Bytes(uint64(r.Store.DecompressedBytes)),
		"Store.DecompressedLines", r.Store.DecompressedLines,
		"Store.CompressedBytes", humanize.Bytes(uint64(r.Store.CompressedBytes)),
		"Store.TotalDuplicates", r.Store.TotalDuplicates,
		"Summary.BytesProcessedPerSeconds", humanize.Bytes(uint64(r.Summary.BytesProcessedPerSeconds)),
		"Summary.LinesProcessedPerSeconds", r.Summary.LinesProcessedPerSeconds,
		"Summary.TotalBytesProcessed", humanize.Bytes(uint64(r.Summary.TotalBytesProcessed)),
		"Summary.TotalLinesProcessed", r.Summary.TotalLinesProcessed,
		"Summary.ExecTime", r.Summary.ExecTime,
	)
}
// Summary is the summary of a query statistics, derived from the ingester
// and store counters plus the total execution time (see Snapshot).
type Summary struct {
	BytesProcessedPerSeconds int64 // Total bytes processed per seconds.
	LinesProcessedPerSeconds int64 // Total lines processed per seconds.
	TotalBytesProcessed int64 // Total bytes processed.
	TotalLinesProcessed int64 // Total lines processed.
	ExecTime time.Duration // Execution time.
}
// Ingester is the statistics result for ingesters queries. It embeds the
// ingester- and chunk-level counters and records how many ingesters
// contributed.
type Ingester struct {
	IngesterData
	ChunkData
	TotalReached int
}
// Store is the statistics result of the store, embedding the store- and
// chunk-level counters.
type Store struct {
	StoreData
	ChunkData
}
// NewContext creates a new statistics context: it installs the trailer
// collector and a fresh, mutable accumulator for store, chunk and ingester
// statistics.
func NewContext(ctx context.Context) context.Context {
	ctx = context.WithValue(injectTrailerCollector(ctx), storeKey, &StoreData{})
	ctx = context.WithValue(ctx, chunksKey, &ChunkData{})
	return context.WithValue(ctx, ingesterKey, &IngesterData{})
}
// ChunkData contains chunks specific statistics, shared by both the store
// and ingester paths.
type ChunkData struct {
	HeadChunkBytes int64 // Total bytes processed but was already in memory. (found in the headchunk)
	HeadChunkLines int64 // Total lines processed but was already in memory. (found in the headchunk)
	DecompressedBytes int64 // Total bytes decompressed and processed from chunks.
	DecompressedLines int64 // Total lines decompressed and processed from chunks.
	CompressedBytes int64 // Total bytes of compressed chunks (blocks) processed.
	TotalDuplicates int64 // Total duplicates found while processing.
}
// GetChunkData returns the chunk statistics accumulator stored in ctx, or a
// fresh (disconnected) one when the context was not built with NewContext.
func GetChunkData(ctx context.Context) *ChunkData {
	if data, ok := ctx.Value(chunksKey).(*ChunkData); ok {
		return data
	}
	return &ChunkData{}
}
// IngesterData contains ingester specific statistics.
type IngesterData struct {
	TotalChunksMatched int64 // Total of chunks matched by the query from ingesters
	TotalBatches int64 // Total of batches sent from ingesters.
	TotalLinesSent int64 // Total lines sent by ingesters.
}
// GetIngesterData returns the ingester statistics accumulator stored in ctx,
// or a fresh (disconnected) one when the context was not built with
// NewContext.
func GetIngesterData(ctx context.Context) *IngesterData {
	if data, ok := ctx.Value(ingesterKey).(*IngesterData); ok {
		return data
	}
	return &IngesterData{}
}
// StoreData contains store specific statistics.
type StoreData struct {
	TotalChunksRef int64 // The total of chunk reference fetched from index.
	TotalDownloadedChunks int64 // Total number of chunks fetched.
	TimeDownloadingChunks time.Duration // Time spent fetching chunks.
}
// GetStoreData returns the store statistics accumulator stored in ctx, or a
// fresh (disconnected) one when the context was not built with NewContext.
func GetStoreData(ctx context.Context) *StoreData {
	if data, ok := ctx.Value(storeKey).(*StoreData); ok {
		return data
	}
	return &StoreData{}
}
// Snapshot compute query statistics from a context using the total exec time.
func Snapshot(ctx context.Context, execTime time.Duration) Result {
var res Result
// ingester data is decoded from grpc trailers.
res.Ingester = decodeTrailers(ctx)
// collect data from store.
s, ok := ctx.Value(storeKey).(*StoreData)
if ok {
res.Store.StoreData = *s
}
// collect data from chunks iteration.
c, ok := ctx.Value(chunksKey).(*ChunkData)
if ok {
res.Store.ChunkData = *c
}
// calculate the summary
res.Summary.TotalBytesProcessed = res.Store.DecompressedBytes + res.Store.HeadChunkBytes +
res.Ingester.DecompressedBytes + res.Ingester.HeadChunkBytes
res.Summary.BytesProcessedPerSeconds =
int64(float64(res.Summary.TotalBytesProcessed) /
execTime.Seconds())
res.Summary.TotalLinesProcessed = res.Store.DecompressedLines + res.Store.HeadChunkLines +
res.Ingester.DecompressedLines + res.Ingester.HeadChunkLines
res.Summary.LinesProcessedPerSeconds =
int64(float64(res.Summary.TotalLinesProcessed) /
execTime.Seconds())
res.Summary.ExecTime = execTime
return res
} | pkg/logql/stats/context.go | 0.661595 | 0.465873 | context.go | starcoder |
package roman
import (
"fmt"
"strings"
)
var (
	// decimals maps a single roman symbol (and "") to its arabic value.
	decimals = map[string]int{"": 0, "I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
	// values lists the symbol values in ascending order.
	values = []int{1, 5, 10, 50, 100, 500, 1000}
	// romans maps selected arabic values to their roman representation.
	romans = map[int]string{0: "", 1: "I", 2: "II", 3: "III", 4: "IV", 5: "V", 6: "VI", 7: "VII", 8: "VIII", 9: "IX", 10: "X", 50: "L", 100: "C", 500: "D", 1000: "M"}
)
// Roman represents a roman numeral, caching both its roman string form and
// its arabic integer value.
type Roman struct {
	roman string
	arabic int
}
// New creates a Roman from r (a roman numeral string, case-insensitive).
// It returns an error when r is not a valid roman numeral.
func New(r string) (*Roman, error) {
	normalized := strings.ToUpper(r)
	value, err := toArabic(normalized)
	if err != nil {
		return nil, err
	}
	return &Roman{roman: normalized, arabic: value}, nil
}
// FromArabic creates a Roman from a (an arabic number).
func FromArabic(a int) *Roman {
	return &Roman{
		roman:  toRoman(a),
		arabic: a,
	}
}
// Roman returns the roman representation of the number.
func (r Roman) Roman() string {
	return r.roman
}

// Arabic returns the arabic representation of the number.
func (r Roman) Arabic() int {
	return r.arabic
}
// toRoman converts an arabic number to its roman representation using the
// standard greedy algorithm over value/symbol pairs (including the
// subtractive forms CM, CD, XC, XL, IX, IV). Non-positive input yields "".
//
// This replaces the previous digit-by-digit implementation, which produced
// wrong output for digits 2, 6, 7 and 8 outside the units place (e.g.
// 20 -> "XL", 70 -> "C") and silently dropped the thousands digit for 2000.
func toRoman(a int) string {
	pairs := []struct {
		value  int
		symbol string
	}{
		{1000, "M"}, {900, "CM"}, {500, "D"}, {400, "CD"},
		{100, "C"}, {90, "XC"}, {50, "L"}, {40, "XL"},
		{10, "X"}, {9, "IX"}, {5, "V"}, {4, "IV"}, {1, "I"},
	}
	var b strings.Builder
	for _, p := range pairs {
		// Emit each symbol as many times as it fits; the table order
		// guarantees the canonical (shortest) spelling.
		for a >= p.value {
			b.WriteString(p.symbol)
			a -= p.value
		}
	}
	return b.String()
}
// toArabic parses a roman numeral string into its arabic value, validating
// symbol order and repetition as it scans. It returns -1 and an error for
// empty or malformed input; positions in error messages are 1-based.
func toArabic(v string) (int, error) {
	if len(v) == 0 {
		return -1, fmt.Errorf("number can't be empty")
	}
	// counts tracks the length of the current run of each symbol; a
	// symbol's run is reset when a symbol of another family appears
	// (see the per-symbol cases below).
	counts := map[string]int{
		"I": 0,
		"V": 0,
		"X": 0,
		"L": 0,
		"C": 0,
		"D": 0,
		"M": 0,
	}
	prev := ""
	nxt := ""
	// compoundFirst holds the value of the smaller symbol of a subtractive
	// pair (e.g. the I of "IV") while that pair is being consumed; 0 otherwise.
	compoundFirst := 0
	length := len(v)
	sum := 0
	for i := 0; i < length; i++ {
		cur := string(v[i])
		if i != length-1 {
			nxt = string(v[i+1])
		} else {
			nxt = ""
		}
		counts[cur]++
		// After a subtractive pair, the following symbol must be strictly
		// smaller than the subtracted value (e.g. "IVI" is invalid).
		if compoundFirst > 0 && decimals[nxt] >= compoundFirst {
			return -1, fmt.Errorf("invalid symbol %s at position %d", nxt, i+2)
		}
		switch cur {
		case `D`, `L`, `V`:
			// D, L and V may appear at most once and never start a
			// subtractive pair.
			if counts[cur] > 1 {
				return -1, fmt.Errorf("invalid symbol %s at position %d", cur, i+1)
			}
			if nxt != "" && decimals[cur] < decimals[nxt] {
				return -1, fmt.Errorf("invalid symbol %c at position %d", v[i+1], i+2)
			}
			compoundFirst = 0
			sum += decimals[cur]
		case `I`:
			// A new I run resets the run counters of the other repeatables.
			counts["X"] = 0
			counts["C"] = 0
			counts["M"] = 0
			if counts[cur] > 3 {
				return -1, fmt.Errorf("invalid symbol %s at position %d", cur, i+1)
			}
			if decimals[cur] < decimals[nxt] {
				// I may only be subtracted from V or X.
				switch nxt {
				case "V", "X":
					compoundFirst = decimals[cur]
					sum -= compoundFirst
				default:
					return -1, fmt.Errorf("invalid symbol %s at position %d", cur, i+1)
				}
			} else {
				compoundFirst = 0
				sum += decimals[cur]
			}
		case `X`:
			counts["I"] = 0
			counts["C"] = 0
			counts["M"] = 0
			if counts[cur] > 3 {
				return -1, fmt.Errorf("invalid symbol %s at position %d", cur, i+1)
			}
			if decimals[cur] < decimals[nxt] {
				// X may only be subtracted from L or C.
				switch nxt {
				case "L", "C":
					compoundFirst = decimals[cur]
					sum -= compoundFirst
				default:
					return -1, fmt.Errorf("invalid symbol %s at position %d", cur, i+1)
				}
			} else {
				compoundFirst = 0
				sum += decimals[cur]
			}
		case `C`:
			counts["I"] = 0
			counts["X"] = 0
			counts["M"] = 0
			if counts[cur] > 3 {
				return -1, fmt.Errorf("invalid symbol %s at position %d", cur, i+1)
			}
			if decimals[cur] < decimals[nxt] {
				// C is subtracted from any larger symbol (D or M).
				compoundFirst = decimals[cur]
				sum -= compoundFirst
			} else {
				compoundFirst = 0
				sum += decimals[cur]
			}
		case `M`:
			counts["I"] = 0
			counts["X"] = 0
			counts["C"] = 0
			if counts[cur] > 3 {
				return -1, fmt.Errorf("invalid symbol %s at position %d", cur, i+1)
			}
			// M is the largest symbol, so it can never be subtractive.
			compoundFirst = 0
			sum += decimals[cur]
		default:
			return -1, fmt.Errorf("invalid symbol %s at position %d", cur, i+1)
		}
		// A larger symbol may not follow a run of two or more smaller ones
		// (e.g. "IIX" is invalid).
		if i > 0 && counts[prev] >= 2 && decimals[prev] < decimals[cur] {
			return -1, fmt.Errorf("invalid symbol %s at position %d", cur, i+1)
		}
		prev = cur
	}
	return sum, nil
}
package mlpack
/*
#cgo CFLAGS: -I./capi -Wall
#cgo LDFLAGS: -L. -lmlpack_go_kernel_pca
#include <capi/kernel_pca.h>
#include <stdlib.h>
*/
import "C"
import "gonum.org/v1/gonum/mat"
// KernelPcaOptionalParam holds the optional parameters for KernelPca; see
// the KernelPca documentation for the meaning and defaults of each field.
type KernelPcaOptionalParam struct {
    Bandwidth float64 // bandwidth for 'gaussian'/'laplacian' kernels (default 1)
    Center bool // center the transformed data about the origin
    Degree float64 // degree for the 'polynomial' kernel (default 1)
    KernelScale float64 // scale for the 'hyptan' kernel (default 1)
    NewDimensionality int // if non-zero, reduce output dimensionality
    NystroemMethod bool // use the Nystroem method for the kernel matrix
    Offset float64 // offset for 'hyptan'/'polynomial' kernels
    Sampling string // Nystroem sampling scheme: kmeans/random/ordered
    Verbose bool // print informational messages and timers
}
// KernelPcaOptions returns a parameter struct populated with the default
// values expected by KernelPca.
func KernelPcaOptions() *KernelPcaOptionalParam {
  // Start from the zero value (false/0/"" fields) and set only the
  // non-zero defaults explicitly.
  params := new(KernelPcaOptionalParam)
  params.Bandwidth = 1
  params.Degree = 1
  params.KernelScale = 1
  params.Sampling = "kmeans"
  return params
}
/*
This program performs Kernel Principal Components Analysis (KPCA) on the
specified dataset with the specified kernel. This will transform the data
onto the kernel principal components, and optionally reduce the dimensionality
by ignoring the kernel principal components with the smallest eigenvalues.
For the case where a linear kernel is used, this reduces to regular PCA.
The kernels that are supported are listed below:
* 'linear': the standard linear dot product (same as normal PCA):
K(x, y) = x^T y
* 'gaussian': a Gaussian kernel; requires bandwidth:
K(x, y) = exp(-(|| x - y || ^ 2) / (2 * (bandwidth ^ 2)))
* 'polynomial': polynomial kernel; requires offset and degree:
K(x, y) = (x^T y + offset) ^ degree
* 'hyptan': hyperbolic tangent kernel; requires scale and offset:
K(x, y) = tanh(scale * (x^T y) + offset)
* 'laplacian': Laplacian kernel; requires bandwidth:
K(x, y) = exp(-(|| x - y ||) / bandwidth)
* 'epanechnikov': Epanechnikov kernel; requires bandwidth:
K(x, y) = max(0, 1 - || x - y ||^2 / bandwidth^2)
* 'cosine': cosine distance:
K(x, y) = 1 - (x^T y) / (|| x || * || y ||)
The parameters for each of the kernels should be specified with the options
"Bandwidth", "KernelScale", "Offset", or "Degree" (or a combination of those
parameters).
Optionally, the Nystroem method ("Using the Nystroem method to speed up kernel
machines", 2001) can be used to calculate the kernel matrix by specifying the
"NystroemMethod" parameter. This approach works by using a subset of the data
as basis to reconstruct the kernel matrix; to specify the sampling scheme, the
"Sampling" parameter is used. The sampling scheme for the Nystroem method can
be chosen from the following list: 'kmeans', 'random', 'ordered'.
For example, the following command will perform KPCA on the dataset input
using the Gaussian kernel, and saving the transformed data to transformed:
// Initialize optional parameters for KernelPca().
param := mlpack.KernelPcaOptions()
transformed := mlpack.KernelPca(input, "gaussian", param)
Input parameters:
- input (mat.Dense): Input dataset to perform KPCA on.
- kernel (string): The kernel to use; see the above documentation for
the list of usable kernels.
- Bandwidth (float64): Bandwidth, for 'gaussian' and 'laplacian'
kernels. Default value 1.
- Center (bool): If set, the transformed data will be centered about
the origin.
- Degree (float64): Degree of polynomial, for 'polynomial' kernel.
Default value 1.
- KernelScale (float64): Scale, for 'hyptan' kernel. Default value 1.
- NewDimensionality (int): If not 0, reduce the dimensionality of the
output dataset by ignoring the dimensions with the smallest eigenvalues.
Default value 0.
- NystroemMethod (bool): If set, the Nystroem method will be used.
- Offset (float64): Offset, for 'hyptan' and 'polynomial' kernels.
Default value 0.
- Sampling (string): Sampling scheme to use for the Nystroem method:
'kmeans', 'random', 'ordered' Default value 'kmeans'.
- Verbose (bool): Display informational messages and the full list of
parameters and timers at the end of execution.
Output parameters:
- output (mat.Dense): Matrix to save modified dataset to.
*/
// KernelPca runs mlpack's kernel PCA binding (see the block comment above
// for the full description of kernels and parameters). It marshals the
// inputs into mlpack's global IO layer, invokes the C entry point, and
// converts the resulting matrix back to a gonum mat.Dense.
// NOTE(review): this relies on mlpack's process-global settings, so it is
// presumably not safe to call concurrently — verify against the binding's
// other wrappers.
func KernelPca(input *mat.Dense, kernel string, param *KernelPcaOptionalParam) (*mat.Dense) {
  resetTimers()
  enableTimers()
  disableBacktrace()
  disableVerbose()
  restoreSettings("Kernel Principal Components Analysis")

  // Detect if the parameter was passed; set if so.
  gonumToArmaMat("input", input)
  setPassed("input")

  // Detect if the parameter was passed; set if so.
  setParamString("kernel", kernel)
  setPassed("kernel")

  // Optional parameters are only forwarded when they differ from their
  // defaults (as produced by KernelPcaOptions).
  if param.Bandwidth != 1 {
    setParamDouble("bandwidth", param.Bandwidth)
    setPassed("bandwidth")
  }

  // Detect if the parameter was passed; set if so.
  if param.Center != false {
    setParamBool("center", param.Center)
    setPassed("center")
  }

  // Detect if the parameter was passed; set if so.
  if param.Degree != 1 {
    setParamDouble("degree", param.Degree)
    setPassed("degree")
  }

  // Detect if the parameter was passed; set if so.
  if param.KernelScale != 1 {
    setParamDouble("kernel_scale", param.KernelScale)
    setPassed("kernel_scale")
  }

  // Detect if the parameter was passed; set if so.
  if param.NewDimensionality != 0 {
    setParamInt("new_dimensionality", param.NewDimensionality)
    setPassed("new_dimensionality")
  }

  // Detect if the parameter was passed; set if so.
  if param.NystroemMethod != false {
    setParamBool("nystroem_method", param.NystroemMethod)
    setPassed("nystroem_method")
  }

  // Detect if the parameter was passed; set if so.
  if param.Offset != 0 {
    setParamDouble("offset", param.Offset)
    setPassed("offset")
  }

  // Detect if the parameter was passed; set if so.
  if param.Sampling != "kmeans" {
    setParamString("sampling", param.Sampling)
    setPassed("sampling")
  }

  // Detect if the parameter was passed; set if so.
  if param.Verbose != false {
    setParamBool("verbose", param.Verbose)
    setPassed("verbose")
    enableVerbose()
  }

  // Mark all output options as passed.
  setPassed("output")

  // Call the mlpack program.
  C.mlpackKernelPca()

  // Initialize result variable and get output.
  var outputPtr mlpackArma
  output := outputPtr.armaToGonumMat("output")

  // Clear settings.
  clearSettings()

  // Return output(s).
  return output
}
// Package tdt contains Tag Data Translation module from binary to Pure Identity
package tdt
import (
"fmt"
"math"
"strconv"
"github.com/iomz/go-llrp/binutil"
)
// PartitionTableKey is used for PartitionTables.
type PartitionTableKey int

// PartitionTable maps a company-prefix digit count to the related values
// (partition value, bit widths, digit counts) for each coding scheme.
type PartitionTable map[int]map[PartitionTableKey]int

// Key values for PartitionTables.
const (
	PValue PartitionTableKey = iota // partition value
	CPBits // company prefix bits
	IRBits // item reference bits
	IRDigits // item reference digits
	EBits // extension bits
	EDigits // extension digits
	ATBits // asset type bits
	ATDigits // asset type digits
	IARBits // individual asset reference bits
	IARDigits // individual asset reference digits
)
// GIAI96PartitionTable is the partition table for GIAI-96, keyed by the
// number of digits in the company prefix.
var GIAI96PartitionTable = PartitionTable{
	12: {PValue: 0, CPBits: 40, IARBits: 42, IARDigits: 13},
	11: {PValue: 1, CPBits: 37, IARBits: 45, IARDigits: 14},
	10: {PValue: 2, CPBits: 34, IARBits: 48, IARDigits: 15},
	9: {PValue: 3, CPBits: 30, IARBits: 52, IARDigits: 16},
	8: {PValue: 4, CPBits: 27, IARBits: 55, IARDigits: 17},
	7: {PValue: 5, CPBits: 24, IARBits: 58, IARDigits: 18},
	6: {PValue: 6, CPBits: 20, IARBits: 62, IARDigits: 19},
}

// GRAI96PartitionTable is the partition table for GRAI-96, keyed by the
// number of digits in the company prefix.
var GRAI96PartitionTable = PartitionTable{
	12: {PValue: 0, CPBits: 40, ATBits: 4, ATDigits: 0},
	11: {PValue: 1, CPBits: 37, ATBits: 7, ATDigits: 1},
	10: {PValue: 2, CPBits: 34, ATBits: 10, ATDigits: 2},
	9: {PValue: 3, CPBits: 30, ATBits: 14, ATDigits: 3},
	8: {PValue: 4, CPBits: 27, ATBits: 17, ATDigits: 4},
	7: {PValue: 5, CPBits: 24, ATBits: 20, ATDigits: 5},
	6: {PValue: 6, CPBits: 20, ATBits: 24, ATDigits: 6},
}

// SGTIN96PartitionTable is the partition table for SGTIN-96, keyed by the
// number of digits in the company prefix.
var SGTIN96PartitionTable = PartitionTable{
	12: {PValue: 0, CPBits: 40, IRBits: 4, IRDigits: 1},
	11: {PValue: 1, CPBits: 37, IRBits: 7, IRDigits: 2},
	10: {PValue: 2, CPBits: 34, IRBits: 10, IRDigits: 3},
	9: {PValue: 3, CPBits: 30, IRBits: 14, IRDigits: 4},
	8: {PValue: 4, CPBits: 27, IRBits: 17, IRDigits: 5},
	7: {PValue: 5, CPBits: 24, IRBits: 20, IRDigits: 6},
	6: {PValue: 6, CPBits: 20, IRBits: 24, IRDigits: 7},
}

// SSCC96PartitionTable is the partition table for SSCC-96, keyed by the
// number of digits in the company prefix.
var SSCC96PartitionTable = PartitionTable{
	12: {PValue: 0, CPBits: 40, EBits: 18, EDigits: 5},
	11: {PValue: 1, CPBits: 37, EBits: 21, EDigits: 6},
	10: {PValue: 2, CPBits: 34, EBits: 24, EDigits: 7},
	9: {PValue: 3, CPBits: 30, EBits: 28, EDigits: 8},
	8: {PValue: 4, CPBits: 27, EBits: 31, EDigits: 9},
	7: {PValue: 5, CPBits: 24, EBits: 34, EDigits: 10},
	6: {PValue: 6, CPBits: 20, EBits: 38, EDigits: 11},
}
// getAssetType returns the Asset Type as a binary rune slice, left-padded
// with zeroes to the scheme's AT bit width; when at is empty a random value
// of the appropriate digit range is generated instead.
func getAssetType(at string, pr map[PartitionTableKey]int) []rune {
	if at == "" {
		assetType, _ := binutil.GenerateNLengthRandomBinRuneSlice(pr[ATBits], uint(math.Pow(10, float64(pr[ATDigits]))))
		return assetType
	}
	assetType := binutil.ParseDecimalStringToBinRuneSlice(at)
	if pad := pr[ATBits] - len(assetType); pad > 0 {
		assetType = append(binutil.GenerateNLengthZeroPaddingRuneSlice(pad), assetType...)
	}
	return assetType
}
// getCompanyPrefix returns the Company Prefix as a binary rune slice,
// left-padded with zeroes to the CP bit width for cp's digit count; an
// empty cp yields nil.
func getCompanyPrefix(cp string, pt PartitionTable) []rune {
	if cp == "" {
		return nil
	}
	companyPrefix := binutil.ParseDecimalStringToBinRuneSlice(cp)
	if pad := pt[len(cp)][CPBits] - len(companyPrefix); pad > 0 {
		companyPrefix = append(binutil.GenerateNLengthZeroPaddingRuneSlice(pad), companyPrefix...)
	}
	return companyPrefix
}
// getExtension returns the Extension digit and Serial Reference as a binary
// rune slice, left-padded with zeroes to the E bit width; when e is empty a
// random value of the appropriate digit range is generated instead.
func getExtension(e string, pr map[PartitionTableKey]int) []rune {
	if e == "" {
		extension, _ := binutil.GenerateNLengthRandomBinRuneSlice(pr[EBits], uint(math.Pow(10, float64(pr[EDigits]))))
		return extension
	}
	extension := binutil.ParseDecimalStringToBinRuneSlice(e)
	if pad := pr[EBits] - len(extension); pad > 0 {
		extension = append(binutil.GenerateNLengthZeroPaddingRuneSlice(pad), extension...)
	}
	return extension
}
// getFilter returns the 3-bit filter value as a rune slice; when fv is
// empty a random filter value (0-6) is generated instead.
func getFilter(fv string) []rune {
	if fv == "" {
		filter, _ := binutil.GenerateNLengthRandomBinRuneSlice(3, 7)
		return filter
	}
	n, _ := strconv.ParseInt(fv, 10, 32)
	return []rune(fmt.Sprintf("%.3b", n))
}
// getIndivisualAssetReference returns the Individual Asset Reference as a
// binary rune slice, left-padded with zeroes to the IAR bit width; when iar
// is empty a random value of the appropriate digit range is generated.
// NOTE: the name misspells "Individual" but is kept for compatibility.
func getIndivisualAssetReference(iar string, pr map[PartitionTableKey]int) []rune {
	if iar == "" {
		ref, _ := binutil.GenerateNLengthRandomBinRuneSlice(pr[IARBits], uint(math.Pow(10, float64(pr[IARDigits]))))
		return ref
	}
	ref := binutil.ParseDecimalStringToBinRuneSlice(iar)
	if pad := pr[IARBits] - len(ref); pad > 0 {
		ref = append(binutil.GenerateNLengthZeroPaddingRuneSlice(pad), ref...)
	}
	return ref
}
// getItemReference converts the ItemReference value to a binary rune slice,
// left-padded with zeroes to the IR bit width; when ir is empty a random
// value of the appropriate digit range is generated instead.
func getItemReference(ir string, pr map[PartitionTableKey]int) []rune {
	if ir == "" {
		itemReference, _ := binutil.GenerateNLengthRandomBinRuneSlice(pr[IRBits], uint(math.Pow(10, float64(pr[IRDigits]))))
		return itemReference
	}
	itemReference := binutil.ParseDecimalStringToBinRuneSlice(ir)
	// If the itemReference is short, pad zeroes to the left.
	if pad := pr[IRBits] - len(itemReference); pad > 0 {
		itemReference = append(binutil.GenerateNLengthZeroPaddingRuneSlice(pad), itemReference...)
	}
	return itemReference
}
// getSerial converts the serial to a binary rune slice of serialLength
// bits, left-padded with zeroes; when s is empty a random serial of that
// bit width is generated instead.
func getSerial(s string, serialLength int) []rune {
	if s == "" {
		serial, _ := binutil.GenerateNLengthRandomBinRuneSlice(serialLength, uint(math.Pow(2, float64(serialLength))))
		return serial
	}
	serial := binutil.ParseDecimalStringToBinRuneSlice(s)
	if pad := serialLength - len(serial); pad > 0 {
		serial = append(binutil.GenerateNLengthZeroPaddingRuneSlice(pad), serial...)
	}
	return serial
}
// NewPrefixFilterGIAI96 takes field values (filter, companyPrefix,
// indivisualAssetReference) in a slice and returns a GIAI-96 prefix filter
// string; fewer fields yield a shorter prefix.
func NewPrefixFilterGIAI96(fields []string) (string, error) {
	const header = "00110100" // GIAI-96 EPC header
	if len(fields) == 0 {
		return "", fmt.Errorf("wrong fields: %q", fields)
	}
	filter := string(getFilter(fields[0]))
	if len(fields) == 1 {
		return header + filter, nil
	}
	partition := fmt.Sprintf("%.3b", GIAI96PartitionTable[len(fields[1])][PValue])
	companyPrefix := string(getCompanyPrefix(fields[1], GIAI96PartitionTable))
	if len(fields) == 2 {
		return header + filter + partition + companyPrefix, nil
	}
	indivisualAssetReference := string(getIndivisualAssetReference(fields[2], GIAI96PartitionTable[len(fields[1])]))
	if len(fields) == 3 {
		return header + filter + partition + companyPrefix + indivisualAssetReference, nil
	}
	return "", fmt.Errorf("unknown fields provided %q", fields)
}
// NewPrefixFilterGRAI96 takes field values (filter, companyPrefix,
// assetType, serial) in a slice and returns a GRAI-96 prefix filter string;
// fewer fields yield a shorter prefix.
func NewPrefixFilterGRAI96(fields []string) (string, error) {
	const header = "00110011" // GRAI-96 EPC header
	if len(fields) == 0 {
		return "", fmt.Errorf("wrong fields: %q", fields)
	}
	filter := string(getFilter(fields[0]))
	if len(fields) == 1 {
		return header + filter, nil
	}
	partition := fmt.Sprintf("%.3b", GRAI96PartitionTable[len(fields[1])][PValue])
	companyPrefix := string(getCompanyPrefix(fields[1], GRAI96PartitionTable))
	if len(fields) == 2 {
		return header + filter + partition + companyPrefix, nil
	}
	assetType := string(getAssetType(fields[2], GRAI96PartitionTable[len(fields[1])]))
	if len(fields) == 3 {
		return header + filter + partition + companyPrefix + assetType, nil
	}
	serial := string(getSerial(fields[3], 38))
	if len(fields) == 4 {
		return header + filter + partition + companyPrefix + assetType + serial, nil
	}
	return "", fmt.Errorf("unknown fields provided %q", fields)
}
// NewPrefixFilterSGTIN96 takes field values (filter, companyPrefix,
// itemReference, serial) in a slice and returns an SGTIN-96 prefix filter
// string; fewer fields yield a shorter prefix.
func NewPrefixFilterSGTIN96(fields []string) (string, error) {
	const header = "00110000" // SGTIN-96 EPC header
	if len(fields) == 0 {
		return "", fmt.Errorf("wrong fields: %q", fields)
	}
	filter := string(getFilter(fields[0]))
	if len(fields) == 1 {
		return header + filter, nil
	}
	partition := fmt.Sprintf("%.3b", SGTIN96PartitionTable[len(fields[1])][PValue])
	companyPrefix := string(getCompanyPrefix(fields[1], SGTIN96PartitionTable))
	if len(fields) == 2 {
		return header + filter + partition + companyPrefix, nil
	}
	itemReference := string(getItemReference(fields[2], SGTIN96PartitionTable[len(fields[1])]))
	if len(fields) == 3 {
		return header + filter + partition + companyPrefix + itemReference, nil
	}
	serial := string(getSerial(fields[3], 38))
	if len(fields) == 4 {
		return header + filter + partition + companyPrefix + itemReference + serial, nil
	}
	return "", fmt.Errorf("unknown fields provided %q", fields)
}
// NewPrefixFilterSSCC96 takes field values in a slice and return a prefix filter string
func NewPrefixFilterSSCC96(fields []string) (string, error) {
nFields := len(fields) // filter, companyPrefix, extension
if nFields == 0 {
return "", fmt.Errorf("wrong fields: %q", fields)
}
// filter
filter := getFilter(fields[0])
if nFields == 1 {
return "00110001" + string(filter), nil
}
// companyPrefix
companyPrefix := getCompanyPrefix(fields[1], SSCC96PartitionTable)
partition := []rune(fmt.Sprintf("%.3b", SSCC96PartitionTable[len(fields[1])][PValue]))
if nFields == 2 {
return "00110001" + string(filter) + string(partition) + string(companyPrefix), nil
}
// extension
extension := getExtension(fields[2], SSCC96PartitionTable[len(fields[1])])
if nFields == 3 {
return "00110001" + string(filter) + string(partition) + string(companyPrefix) + string(extension), nil
}
return "", fmt.Errorf("unknown fields provided %q", fields)
} | tdt/epc.go | 0.680454 | 0.489748 | epc.go | starcoder |
// Package errors implements functions to manipulate compression errors.
//
// In idiomatic Go, it is an anti-pattern to use panics as a form of error
// reporting in the API. Instead, the expected way to transmit errors is by
// returning an error value. Unfortunately, the checking of "err != nil" in
// tight loops commonly found in compression causes non-negligible performance
// degradation. While this may not be idiomatic, the internal packages of this
// repository rely on panics as a normal means to convey errors. In order to
// ensure that these panics do not leak across the public API, the public
// packages must recover from these panics and present an error value.
//
// The Panic and Recover functions in this package provide a safe way to
// recover from errors only generated from within this repository.
//
// Example usage:
// func Foo() (err error) {
// defer errors.Recover(&err)
//
// if rand.Intn(2) == 0 {
// // Unexpected panics will not be caught by Recover.
// io.Closer(nil).Close()
// } else {
// // Errors thrown by Panic will be caught by Recover.
// errors.Panic(errors.New("whoopsie"))
// }
// }
//
package errors
import "strings"
const (
	// Unknown indicates that there is no classification for this error.
	Unknown = iota
	// Internal indicates that this error is due to an internal bug.
	// Users should file a issue report if this type of error is encountered.
	Internal
	// Invalid indicates that this error is due to the user misusing the API
	// and is indicative of a bug on the user's part.
	Invalid
	// Deprecated indicates the use of a deprecated and unsupported feature.
	Deprecated
	// Corrupted indicates that the input stream is corrupted.
	Corrupted
	// Closed indicates that the handlers are closed.
	Closed
)

// codeMap maps each error code to the human-readable description used when
// formatting an Error.
var codeMap = map[int]string{
	Unknown: "unknown error",
	Internal: "internal error",
	Invalid: "invalid argument",
	Deprecated: "deprecated format",
	Corrupted: "corrupted input",
	Closed: "closed handler",
}
// Error is the error type raised by the packages in this repository,
// carrying a classification code, the originating package name, and an
// optional descriptive message.
type Error struct {
	Code int // The error type
	Pkg string // Name of the package where the error originated
	Msg string // Descriptive message about the error (optional)
}
// Error formats the error as "pkg: code description: msg", omitting any
// empty component.
func (e Error) Error() string {
	parts := make([]string, 0, 3)
	for _, part := range []string{e.Pkg, codeMap[e.Code], e.Msg} {
		if part != "" {
			parts = append(parts, part)
		}
	}
	return strings.Join(parts, ": ")
}
// CompressError is a marker method with no behavior; presumably it lets
// Error satisfy an interface identifying errors from this repository —
// verify against callers.
func (e Error) CompressError() {}

// The IsXxx methods report whether the receiver carries the given code.
func (e Error) IsInternal() bool { return e.Code == Internal }
func (e Error) IsInvalid() bool { return e.Code == Invalid }
func (e Error) IsDeprecated() bool { return e.Code == Deprecated }
func (e Error) IsCorrupted() bool { return e.Code == Corrupted }
func (e Error) IsClosed() bool { return e.Code == Closed }

// The package-level IsXxx helpers report whether err is an Error with the
// given code (non-Error values always yield false).
func IsInternal(err error) bool { return isCode(err, Internal) }
func IsInvalid(err error) bool { return isCode(err, Invalid) }
func IsDeprecated(err error) bool { return isCode(err, Deprecated) }
func IsCorrupted(err error) bool { return isCode(err, Corrupted) }
func IsClosed(err error) bool { return isCode(err, Closed) }
// isCode reports whether err is an Error carrying the given classification
// code. A direct boolean return replaces the redundant if/return-bool form.
func isCode(err error, code int) bool {
	cerr, ok := err.(Error)
	return ok && cerr.Code == code
}
// errWrap is used by Panic and Recover to ensure that only errors raised by
// Panic are recovered by Recover.
type errWrap struct{ e *error }

// Recover is intended to be deferred. It recovers only panics raised via
// Panic in this package, storing the wrapped error into *err; any other
// panic value is re-raised unchanged.
func Recover(err *error) {
	switch ex := recover().(type) {
	case nil:
		// Do nothing.
	case errWrap:
		*err = *ex.e
	default:
		// Not one of ours — propagate the panic.
		panic(ex)
	}
}

// Panic raises err as a panic wrapped so that Recover can identify it.
func Panic(err error) {
	panic(errWrap{&err})
}
package luminance
import (
"image"
"image/color"
"math"
"sync"
colorful "github.com/lucasb-eyer/go-colorful"
"gonum.org/v1/gonum/mat"
)
const (
maxrange = 65535
dimension = 3
// padding space
paddingS = 2
// padding range (luminance)
paddingR = 2
)
// A FastBilateral filter is a non-linear, edge-preserving and noise-reducing
// smoothing filter for images. The intensity value at each pixel in an image is
// replaced by a weighted average of intensity values from nearby pixels.
type FastBilateral struct {
	Image      image.Image // Source image to be filtered
	SigmaRange float64     // Filter sigma in the luminance (range) domain
	SigmaSpace float64     // Filter sigma in the spatial domain
	minmaxOnce sync.Once   // Guards the one-time min/max luminance scan
	min        float64     // Minimum luminance observed in the image
	max        float64     // Maximum luminance observed in the image
	// size:
	// 0 -> smallWidth
	// 1 -> smallHeight
	// 2 -> smallLuminance
	size []int
	// grid is the downsampled bilateral grid: one row per (x, y, luminance)
	// cell, holding an accumulated luminance and an edge weight.
	grid *mat.Dense
	auto bool // When true, SigmaRange is derived from the luminance span
}
// Auto instantiates a new FastBilateral with automatic sigma values: the
// range sigma is derived from the image's luminance span during minmax.
func Auto(m image.Image) *FastBilateral {
	f := New(m, 16, 0.1)
	f.auto = true
	return f
}

// New instantiates a new FastBilateral for image m with the given spatial
// and range sigmas. The min/max fields start at +Inf/-Inf so the first
// observed luminance always updates them.
func New(m image.Image, sigmaSpace, sigmaRange float64) *FastBilateral {
	return &FastBilateral{
		Image:      m,
		SigmaRange: sigmaRange,
		SigmaSpace: sigmaSpace,
		min:        math.Inf(1),
		max:        math.Inf(-1),
		size:       make([]int, dimension),
	}
}
// Execute runs the bilateral filter pipeline: a one-time luminance scan,
// then downsampling into the grid, grid convolution, and normalization.
func (f *FastBilateral) Execute() {
	f.minmaxOnce.Do(f.minmax)
	f.downsampling()
	f.convolution()
	f.normalize()
}

// ColorModel returns the Image's color model (always RGBA here).
func (f *FastBilateral) ColorModel() color.Model {
	return color.RGBAModel
}

// Bounds implements image.Image interface.
func (f *FastBilateral) Bounds() image.Rectangle {
	return f.Image.Bounds()
}
// At computes the interpolation and returns the filtered color at the given
// coordinates. The pixel is converted to CIE XYZ, its luminance (Y) is
// replaced by the trilinearly interpolated grid value, and the result is
// converted back to RGB.
func (f *FastBilateral) At(x, y int) color.Color {
	r, g, b, a := f.Image.At(x, y).RGBA()
	X, Y, Z := colorful.LinearRgbToXyz(f.color(r), f.color(g), f.color(b))
	// Grid coords
	gw := float64(x)/f.SigmaSpace + paddingS // Grid width
	gh := float64(y)/f.SigmaSpace + paddingS // Grid height
	gc := (Y-f.min)/f.SigmaRange + paddingR  // Grid luminance
	Y2 := f.trilinearInterpolation(gw, gh, gc)
	delta := Y - Y2
	// NOTE(review): the luminance delta is also subtracted from the X and Z
	// channels, presumably to keep chromaticity roughly stable — confirm
	// this is the intended color adjustment.
	R, G, B := colorful.XyzToLinearRgb(X-delta, Y2, Z-delta)
	return color.RGBA{
		R: uint8(f.clamp(0, 255, int(R*255))),
		G: uint8(f.clamp(0, 255, int(G*255))),
		B: uint8(f.clamp(0, 255, int(B*255))),
		A: uint8(a),
	}
}
// ResultImage computes the interpolation and returns the filtered image.
func (f *FastBilateral) ResultImage() image.Image {
	bounds := f.Image.Bounds()
	out := image.NewRGBA(bounds)
	// Each pixel is interpolated independently from the filtered grid.
	for x := 0; x < bounds.Dx(); x++ {
		for y := 0; y < bounds.Dy(); y++ {
			out.Set(x, y, f.At(x, y))
		}
	}
	return out
}
// minmax scans the whole image once to record the minimum and maximum
// luminance (CIE Y), derives SigmaRange in auto mode, and sizes the
// three-dimensional bilateral grid (spatial x, spatial y, luminance) with
// padding on each side.
func (f *FastBilateral) minmax() {
	d := f.Image.Bounds()
	for y := 0; y < d.Dy(); y++ {
		for x := 0; x < d.Dx(); x++ {
			r, g, b, _ := f.Image.At(x, y).RGBA()
			_, Y, _ := colorful.LinearRgbToXyz(f.color(r), f.color(g), f.color(b))
			f.min = math.Min(f.min, Y)
			f.max = math.Max(f.max, Y)
		}
	}
	if f.auto {
		// Auto mode: range sigma is 10% of the observed luminance span.
		f.SigmaRange = (f.max - f.min) * 0.1
	}
	f.size[0] = int(float64(d.Dx()-1)/f.SigmaSpace) + 1 + 2*paddingS
	f.size[1] = int(float64(d.Dy()-1)/f.SigmaSpace) + 1 + 2*paddingS
	f.size[2] = int((f.max-f.min)/f.SigmaRange) + 1 + 2*paddingR
	// fmt.Println("ssp:", f.SigmaSpace, " - sra:", f.SigmaRange)
	// fmt.Println("min:", f.min, "- max:", f.max)
	// fmt.Println("size:", f.mul(f.size...), f.size)
}
// downsampling accumulates every pixel of the source image into its nearest
// bilateral-grid cell. Each grid row stores the summed luminance (col 0) and
// a count used later as the normalization weight (col 1).
func (f *FastBilateral) downsampling() {
	d := f.Image.Bounds()
	offset := make([]int, dimension)
	size := f.mul(f.size...)
	dim := dimension - 1 // # 1 luminance and 1 threshold (edge weight)
	f.grid = mat.NewDense(size, dim, make([]float64, dim*size))
	for x := 0; x < d.Dx(); x++ {
		// +0.5 rounds to the nearest grid cell.
		offset[0] = int(float64(x)/f.SigmaSpace+0.5) + paddingS
		for y := 0; y < d.Dy(); y++ {
			offset[1] = int(float64(y)/f.SigmaSpace+0.5) + paddingS
			r, g, b, _ := f.Image.At(x, y).RGBA()
			_, Y, _ := colorful.LinearRgbToXyz(f.color(r), f.color(g), f.color(b))
			offset[2] = int((Y-f.min)/f.SigmaRange+0.5) + paddingR
			i := f.offset(offset...)
			v := f.grid.RawRowView(i)
			v[0] += Y // luminance
			v[1]++    // threshold
			f.grid.SetRow(i, v)
		}
	}
}
// convolution smooths the bilateral grid by running two passes of a
// separable [1 2 1]/4 kernel along each of the three grid dimensions
// (x, y, luminance), approximating a Gaussian blur. The grid and a scratch
// buffer are swapped before each pass so reads and writes never alias.
func (f *FastBilateral) convolution() {
	size := f.mul(f.size...)
	cols := dimension - 1 // 1 luminance channel and 1 threshold (edge weight)
	buffer := mat.NewDense(size, cols, make([]float64, cols*size))
	// "axis" avoids shadowing the package constant-sized loop bound name
	// ("dim") that the original reused for two different things.
	for axis := 0; axis < dimension; axis++ { // x, y, and luminance
		off := make([]int, dimension)
		off[axis] = 1 // Neighbor offset along the dimension being smoothed
		for n := 0; n < 2; n++ { // two smoothing passes per axis
			f.grid, buffer = buffer, f.grid
			for x := 1; x < f.size[0]-1; x++ {
				for y := 1; y < f.size[1]-1; y++ {
					for z := 1; z < f.size[2]-1; z++ {
						vg := f.grid.RowView(f.offset(x, y, z)).(*mat.VecDense)
						prev := buffer.RowView(f.offset(x-off[0], y-off[1], z-off[2])).(*mat.VecDense)
						curr := buffer.RowView(f.offset(x, y, z)).(*mat.VecDense)
						next := buffer.RowView(f.offset(x+off[0], y+off[1], z+off[2])).(*mat.VecDense)
						// (prev + 2.0 * curr + next) / 4.0
						vg.AddVec(prev, next)
						vg.AddScaledVec(vg, 2, curr)
						vg.ScaleVec(0.25, vg)
					}
				}
			}
		}
	}
}
// normalize divides each grid cell's accumulated luminance (col 0) by its
// accumulated weight (col 1), turning sums into averages. Cells with zero
// weight are left untouched to avoid division by zero.
func (f *FastBilateral) normalize() {
	r, _ := f.grid.Dims()
	for i := 0; i < r; i++ {
		if threshold := f.grid.At(i, 1); threshold != 0 {
			f.grid.Set(i, 0, f.grid.At(i, 0)/threshold)
		}
	}
}
// trilinearInterpolation returns the filtered luminance at fractional grid
// coordinates (gx, gy, gz) by blending the eight surrounding grid cells,
// weighted by the fractional distance to each.
func (f *FastBilateral) trilinearInterpolation(gx, gy, gz float64) float64 {
	width := f.size[0]
	height := f.size[1]
	depth := f.size[2]
	// Index of the lower cell corner and its +1 neighbor, clamped to the grid.
	x := f.clamp(0, width-1, int(gx))
	xx := f.clamp(0, width-1, x+1)
	y := f.clamp(0, height-1, int(gy))
	yy := f.clamp(0, height-1, y+1)
	z := f.clamp(0, depth-1, int(gz))
	zz := f.clamp(0, depth-1, z+1)
	// Alpha: fractional position inside the cell along each axis.
	xa := gx - float64(x)
	ya := gy - float64(y)
	za := gz - float64(z)
	// Interpolation over the eight corners.
	return (1.0-ya)*(1.0-xa)*(1.0-za)*f.grid.At(f.offset(x, y, z), 0) +
		(1.0-ya)*xa*(1.0-za)*f.grid.At(f.offset(xx, y, z), 0) +
		ya*(1.0-xa)*(1.0-za)*f.grid.At(f.offset(x, yy, z), 0) +
		ya*xa*(1.0-za)*f.grid.At(f.offset(xx, yy, z), 0) +
		(1.0-ya)*(1.0-xa)*za*f.grid.At(f.offset(x, y, zz), 0) +
		(1.0-ya)*xa*za*f.grid.At(f.offset(xx, y, zz), 0) +
		ya*(1.0-xa)*za*f.grid.At(f.offset(x, yy, zz), 0) +
		ya*xa*za*f.grid.At(f.offset(xx, yy, zz), 0)
}
// clamp restricts v to the inclusive range [min, max].
func (f *FastBilateral) clamp(min, max, v int) int {
	if v < min {
		// Fix: the low side previously snapped to 0 instead of min, which is
		// wrong for any caller passing a non-zero lower bound.
		v = min
	}
	if v > max {
		v = max
	}
	return v
}
// mul returns the product of the given sizes (total cell count).
func (f *FastBilateral) mul(size ...int) (n int) {
	n = 1
	for _, v := range size {
		n *= v
	}
	return
}

// offset flattens grid coordinates into a row index:
// slice[x + WIDTH*y + WIDTH*HEIGHT*z)]
func (f *FastBilateral) offset(size ...int) (n int) {
	n = size[0] // x
	for i, v := range size[1:] {
		n += v * f.mul(f.size[0:i+1]...) // y, z
	}
	return
}
// color normalizes a 16-bit color channel value (as returned by RGBA())
// into the [0, 1] range.
func (f *FastBilateral) color(v uint32) float64 {
	return float64(v) / maxrange
}
package sdl2
import (
"fmt"
"github.com/veandco/go-sdl2/img"
"github.com/veandco/go-sdl2/sdl"
"github.com/evelritual/goose/graphics"
)
// TextureAtlas wraps an SDL Texture. It splits the texture into tiles to
// allow for easy drawing of sprites in a spritesheet.
type TextureAtlas struct {
	renderer *sdl.Renderer // reference to renderer to use
	image    *sdl.Surface  // source surface, kept so it can be freed on Close
	texture  *sdl.Texture  // GPU texture created from image
	tiles    []*sdl.Rect   // source rectangle of each tile, row-major order
	tileH    int32         // tile height in pixels
	tileW    int32         // tile width in pixels
}
// NewTextureAtlas loads an image as an SDL texture and splices it into
// separate splitX-by-splitY rectangles for use in drawing. Any remainder
// that does not fill a whole tile is ignored.
func (s *SDL2) NewTextureAtlas(imgPath string, splitX, splitY int32) (graphics.TextureAtlas, error) {
	// Named "surface" so the variable does not shadow the img package.
	surface, err := img.Load(imgPath)
	if err != nil {
		return nil, fmt.Errorf("error loading image: %v", err)
	}
	// Split the image into a grid of tiles (remainder pixels are dropped).
	b := surface.Bounds().Size()
	cols := int32(b.X) / splitX
	rows := int32(b.Y) / splitY
	tiles := make([]*sdl.Rect, 0, cols*rows)
	for y := int32(0); y < rows; y++ {
		for x := int32(0); x < cols; x++ {
			tiles = append(tiles, &sdl.Rect{X: x * splitX, Y: y * splitY, W: splitX, H: splitY})
		}
	}
	tex, err := s.renderer.CreateTextureFromSurface(surface)
	if err != nil {
		// Free the surface on the failure path so it is not leaked.
		surface.Free()
		return nil, fmt.Errorf("error creating texture: %v", err)
	}
	return &TextureAtlas{
		renderer: s.renderer,
		image:    surface,
		texture:  tex,
		tiles:    tiles,
		tileH:    splitY,
		tileW:    splitX,
	}, nil
}
// Draw renders the texture of the given tile to the SDL renderer at (x, y),
// scaled by scaleX/scaleY and rotated by rotation degrees. A negative scale
// on either axis flips the sprite on that axis; negative scales on both
// axes flip it on both.
func (t *TextureAtlas) Draw(tile int, x, y int32, scaleX, scaleY float32, rotation float64) error {
	if tile < 0 || tile >= len(t.tiles) {
		return fmt.Errorf("tile out of range")
	}
	// Translate negative scales into SDL flips; the flags combine so both
	// axes can be flipped at once.
	flip := sdl.FLIP_NONE
	if scaleX < 0 {
		scaleX = -scaleX
		flip |= sdl.FLIP_HORIZONTAL
	}
	if scaleY < 0 {
		// Fix: this previously overwrote scaleX ("scaleX = -scaleY"),
		// corrupting the horizontal scale instead of negating scaleY.
		scaleY = -scaleY
		flip |= sdl.FLIP_VERTICAL
	}
	err := t.renderer.CopyEx(
		t.texture,
		t.tiles[tile],
		&sdl.Rect{
			X: int32(float32(x+offsetX) * scaleFactorX),
			Y: int32(float32(y+offsetY) * scaleFactorY),
			W: int32(float32(t.tileW) * scaleX * scaleFactorX),
			H: int32(float32(t.tileH) * scaleY * scaleFactorY),
		},
		rotation,
		nil,
		flip,
	)
	if err != nil {
		return fmt.Errorf("error drawing: %v", err)
	}
	return nil
}
// Len returns the number of spliced tiles in the texture atlas.
func (t *TextureAtlas) Len() int {
	return len(t.tiles)
}
// Close releases the texture and the image from memory.
func (t *TextureAtlas) Close() error {
err := t.texture.Destroy()
if err != nil {
return fmt.Errorf("error destroying sdl texture: %v", err)
}
t.image.Free()
return nil
} | internal/drivers/sdl2/textureatlas.go | 0.561696 | 0.46557 | textureatlas.go | starcoder |
package grid
import "math"
// Coordinate is an x, y location.
type Coordinate struct {
	X int
	Y int
}

// Distance returns the Euclidean distance between two points.
func Distance(a Coordinate, b Coordinate) float64 {
	dx := float64(b.X - a.X)
	dy := float64(b.Y - a.Y)
	return math.Sqrt(math.Pow(dx, 2) + math.Pow(dy, 2))
}
// Equals reports whether two coordinates refer to the same point.
func Equals(a Coordinate, b Coordinate) bool {
	// Direct boolean expression replaces the redundant if/return-bool form.
	return a.X == b.X && a.Y == b.Y
}
// GetUniqueCoordinates returns only unique coordinates from a slice of
// coordinates, preserving first-seen order. A set keyed by the comparable
// Coordinate struct replaces the original O(n²) InSlice scan.
func GetUniqueCoordinates(coords []Coordinate) []Coordinate {
	seen := make(map[Coordinate]struct{}, len(coords))
	unique := make([]Coordinate, 0, len(coords))
	for _, c := range coords {
		if _, ok := seen[c]; ok {
			continue
		}
		seen[c] = struct{}{}
		unique = append(unique, c)
	}
	return unique
}
// InSlice reports whether coordinate c appears in target.
func InSlice(c Coordinate, target []Coordinate) bool {
	for _, candidate := range target {
		if Equals(c, candidate) {
			return true
		}
	}
	return false
}
// RemoveCoordinatesFromSlice returns the coordinates of target that do not
// appear in coordinatesToRemove. The input slices are not modified.
func RemoveCoordinatesFromSlice(coordinatesToRemove []Coordinate, target []Coordinate) []Coordinate {
	kept := []Coordinate{}
	for _, candidate := range target {
		if InSlice(candidate, coordinatesToRemove) {
			continue
		}
		kept = append(kept, candidate)
	}
	return kept
}
// SortCoordinatesByDistance sorts coordinates by distance.
//
// NOTE(review): NearestCoordinateIndex searches the entire slice, so for
// each element c it finds c itself (distance 0) and the swap below is a
// no-op — the slice comes back effectively unchanged. If a greedy
// nearest-neighbour ordering was intended, the search would need to be
// restricted to the unsorted tail (coords[i:]). Confirm intent before
// relying on this function.
func SortCoordinatesByDistance(coords []Coordinate) []Coordinate {
	nearest := 0
	for i, c := range coords {
		nearest = NearestCoordinateIndex(c, coords)
		coords[i], coords[nearest] = coords[nearest], coords[i]
	}
	return coords
}
// NearestCoordinateIndex finds the index in a slice of coordinates of the
// nearest coordinate to the given one. If p itself is present in coords it
// is its own nearest neighbour (distance 0). Panics on an empty slice
// (coords[0] is read unconditionally).
func NearestCoordinateIndex(p Coordinate, coords []Coordinate) int {
	var distance float64
	smallestDistance := Distance(p, coords[0])
	smallest := 0
	for i, c := range coords {
		distance = Distance(p, c)
		if distance < smallestDistance {
			smallest = i
			smallestDistance = distance
		}
	}
	return smallest
}
package creator
import (
"github.com/pzduniak/unipdf/contentstream/draw"
"github.com/pzduniak/unipdf/model"
)
// Rectangle defines a rectangle with upper left corner at (x,y) and a specified width and height. The rectangle
// can have a colored fill and/or border with a specified width.
// Implements the Drawable interface and can be drawn on PDF using the Creator.
type Rectangle struct {
	x           float64 // Upper left corner
	y           float64
	width       float64
	height      float64
	fillColor   *model.PdfColorDeviceRGB // nil means no fill
	borderColor *model.PdfColorDeviceRGB // nil means no border
	borderWidth float64                  // border stroke width in points
}
// newRectangle creates a new Rectangle with default parameters (black
// 1.0-wide border, no fill) with upper left corner at (x,y) and the given
// width and height.
func newRectangle(x, y, width, height float64) *Rectangle {
	return &Rectangle{
		x:           x,
		y:           y,
		width:       width,
		height:      height,
		borderColor: model.NewPdfColorDeviceRGB(0, 0, 0),
		borderWidth: 1.0,
	}
}
// GetCoords returns coordinates of the Rectangle's upper left corner (x,y).
func (rect *Rectangle) GetCoords() (float64, float64) {
	return rect.x, rect.y
}

// SetBorderWidth sets the border width in points.
func (rect *Rectangle) SetBorderWidth(bw float64) {
	rect.borderWidth = bw
}

// SetBorderColor sets the border color.
func (rect *Rectangle) SetBorderColor(col Color) {
	rect.borderColor = model.NewPdfColorDeviceRGB(col.ToRGB())
}

// SetFillColor sets the fill color.
func (rect *Rectangle) SetFillColor(col Color) {
	rect.fillColor = model.NewPdfColorDeviceRGB(col.ToRGB())
}
// GeneratePageBlocks draws the rectangle on a new block representing the page. Implements the Drawable interface.
// The y coordinate is flipped (PDF coordinates grow upward, the creator's
// grow downward), and fill/border are only enabled when configured.
func (rect *Rectangle) GeneratePageBlocks(ctx DrawContext) ([]*Block, DrawContext, error) {
	block := NewBlock(ctx.PageWidth, ctx.PageHeight)
	drawrect := draw.Rectangle{
		Opacity: 1.0,
		X:       rect.x,
		Y:       ctx.PageHeight - rect.y - rect.height, // convert to PDF coordinates
		Height:  rect.height,
		Width:   rect.width,
	}
	if rect.fillColor != nil {
		drawrect.FillEnabled = true
		drawrect.FillColor = rect.fillColor
	}
	if rect.borderColor != nil && rect.borderWidth > 0 {
		drawrect.BorderEnabled = true
		drawrect.BorderColor = rect.borderColor
		drawrect.BorderWidth = rect.borderWidth
	}
	contents, _, err := drawrect.Draw("")
	if err != nil {
		return nil, ctx, err
	}
	err = block.addContentsByString(string(contents))
	if err != nil {
		return nil, ctx, err
	}
	return []*Block{block}, ctx, nil
}
package dct
import (
"fmt"
"math"
"gonum.org/v1/gonum/mat"
)
// F computes the forward discrete cosine transform of src and places it in dst,
// also returning dst. This is a direct O(r²·c²) reference implementation of
// the 2-D DCT-II with orthonormal scaling.
// If dst is nil, a new matrix is allocated and returned.
func F(src, dst *mat.Dense) *mat.Dense {
	r, c := src.Dims()
	if r%2 != 0 || c%2 != 0 {
		panic(fmt.Errorf("dct: matrix dimensions must be even"))
	}
	if dst == nil {
		dst = mat.NewDense(r, c, nil)
	}
	N1 := float64(r)
	N2 := float64(c)
	for ik1 := 0; ik1 < r; ik1++ {
		k1 := float64(ik1)
		for ik2 := 0; ik2 < c; ik2++ {
			k2 := float64(ik2)
			var sum float64
			for in1 := 0; in1 < r; in1++ {
				n1 := float64(in1)
				for n2 := 0; n2 < c; n2++ {
					sum += src.At(in1, n2) * math.Cos(math.Pi/N1*(n1+0.5)*k1) * math.Cos(math.Pi/N2*(float64(n2)+0.5)*k2)
				}
			}
			// Orthonormal scale factors: 1/sqrt(N) for the DC coefficient,
			// sqrt(2/N) for every other coefficient.
			var ck1, ck2 float64
			if ik1 == 0 {
				ck1 = 1 / math.Sqrt(N1)
			} else {
				ck1 = math.Sqrt(2 / N1)
			}
			if ik2 == 0 {
				ck2 = 1 / math.Sqrt(N2)
			} else {
				ck2 = math.Sqrt(2 / N2)
			}
			dst.Set(ik1, ik2, sum*ck1*ck2)
		}
	}
	return dst
}
// I computes the inverse discrete cosine transform of src and places it in dst,
// also returning dst. This is the O(r²·c²) inverse of F; the orthonormal
// scale factors are applied to the summed (input) indices here rather than
// the output indices.
// If dst is nil, a new matrix is allocated and returned.
func I(src, dst *mat.Dense) *mat.Dense {
	r, c := src.Dims()
	if r%2 != 0 || c%2 != 0 {
		panic(fmt.Errorf("dct: matrix dimensions must be even"))
	}
	if dst == nil {
		dst = mat.NewDense(r, c, nil)
	}
	N1 := float64(r)
	N2 := float64(c)
	for ik1 := 0; ik1 < r; ik1++ {
		k1 := float64(ik1)
		for ik2 := 0; ik2 < c; ik2++ {
			k2 := float64(ik2)
			var sum, cn1, cn2 float64
			for in1 := 0; in1 < r; in1++ {
				if in1 == 0 {
					cn1 = 1 / math.Sqrt(N1)
				} else {
					cn1 = math.Sqrt(2 / N1)
				}
				n1 := float64(in1)
				for n2 := 0; n2 < c; n2++ {
					if n2 == 0 {
						cn2 = 1 / math.Sqrt(N2)
					} else {
						cn2 = math.Sqrt(2 / N2)
					}
					sum += src.At(in1, n2) * cn1 * cn2 * math.Cos(math.Pi/N1*(k1+0.5)*n1) * math.Cos(math.Pi/N2*(k2+0.5)*float64(n2))
				}
			}
			dst.Set(ik1, ik2, sum)
		}
	}
	return dst
}
package op
import (
"github.com/coschain/contentos-go/common/constants"
. "github.com/coschain/contentos-go/dandelion"
"github.com/coschain/contentos-go/prototype"
"github.com/coschain/contentos-go/tests/economist"
"github.com/stretchr/testify/assert"
"strconv"
"testing"
)
// VoteTester bundles the three test accounts shared by the vote test cases.
type VoteTester struct {
	acc0, acc1, acc2 *DandelionAccount
}
// ISqrt returns the integer square root of n (the largest x with x*x <= n)
// using Newton's method on unsigned integers.
func ISqrt(n uint64) uint64 {
	if n == 0 {
		return 0
	}
	cur, prev := n, n+1
	for cur < prev {
		prev, cur = cur, (cur+n/cur)>>1
	}
	return prev
}
// Vest2VotePower converts a vest amount to vote power according to the
// active hard-fork rule: linear before HardFork1, integer square root after.
func Vest2VotePower(d *Dandelion, vest uint64) uint64 {
	switch d.TrxPool().HardFork() {
	case constants.HardFork1:
		return vest
	default:
		return ISqrt(vest)
	}
}
// TestNormal exercises the plain voting flow plus the self-vote rejection.
func (tester *VoteTester) TestNormal(t *testing.T, d *Dandelion) {
	tester.acc0 = d.Account("actor0")
	tester.acc1 = d.Account("actor1")
	tester.acc2 = d.Account("actor2")
	economist.RegisterBlockProducer( tester.acc2, t)
	t.Run("normal", d.Test(tester.normal))
	t.Run("normal", d.Test(tester.voteSelf))
}

// TestRevote verifies that double voting and voting on nonexistent posts fail.
func (tester *VoteTester) TestRevote(t *testing.T, d *Dandelion) {
	tester.acc0 = d.Account("actor0")
	tester.acc1 = d.Account("actor1")
	tester.acc2 = d.Account("actor2")
	economist.RegisterBlockProducer( tester.acc2, t)
	t.Run("revote", d.Test(tester.revote))
	t.Run("vote to ghost post", d.Test(tester.voteToGhostPost))
}

// TestZeroPower drains an account's vote power across repeated votes.
func (tester *VoteTester) TestZeroPower(t *testing.T, d *Dandelion) {
	tester.acc0 = d.Account("actor0")
	tester.acc1 = d.Account("actor1")
	tester.acc2 = d.Account("actor2")
	stakeSelf(tester.acc0, t)
	stakeSelf(tester.acc1, t)
	stakeSelf(tester.acc2, t)
	t.Run("fullpower", d.Test(tester.zeroPower))
}

// TestVoteAfterCashout checks that voting on a cashed-out post is rejected.
func (tester *VoteTester) TestVoteAfterCashout(t *testing.T, d *Dandelion) {
	tester.acc0 = d.Account("actor0")
	tester.acc1 = d.Account("actor1")
	tester.acc2 = d.Account("actor2")
	economist.RegisterBlockProducer( tester.acc2, t)
	t.Run("voteaftercashout", d.Test(tester.voteAfterPostCashout))
}
// normal checks the weighted vote power recorded on posts, before and after
// HardFork1 (which changes the vest-to-power conversion, see Vest2VotePower).
func (tester *VoteTester) normal(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	const POST1 = 1
	const POST2 = 2
	const POST3 = 3
	a.Equal(constants.Original, d.TrxPool().HardFork())
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST1, tester.acc0.Name, "title", "content", []string{"1"}, nil)))
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST2, tester.acc0.Name, "title", "content", []string{"1"}, nil)))
	a.NoError(tester.acc1.SendTrxAndProduceBlock(Vote(tester.acc1.Name, POST1)))
	// Each vote consumes FullVP/VPMarks of the voter's vote power.
	usedVp := uint32(constants.FullVP / constants.VPMarks)
	expectedPost1Vp := strconv.FormatUint(uint64(usedVp) * Vest2VotePower(d, tester.acc1.GetVest().Value), 10)
	a.Equal(expectedPost1Vp, d.Post(POST1).GetWeightedVp())
	a.NoError(tester.acc1.SendTrxAndProduceBlock(Vote(tester.acc1.Name, POST2)))
	expectedPost2Vp := strconv.FormatUint(uint64(usedVp) * Vest2VotePower(d, tester.acc1.GetVest().Value), 10)
	a.Equal(expectedPost2Vp, d.Post(POST2).GetWeightedVp())
	// entered hard fork 1 and test again
	a.NoError(d.ProduceBlocks(int(constants.HardFork1)))
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST3, tester.acc0.Name, "title", "content", []string{"1"}, nil)))
	a.NoError(tester.acc1.SendTrxAndProduceBlock(Vote(tester.acc1.Name, POST3)))
	a.Equal(constants.HardFork1, d.TrxPool().HardFork())
	expectedPost3Vp := strconv.FormatUint(uint64(usedVp) * Vest2VotePower(d, tester.acc1.GetVest().Value), 10)
	a.Equal(expectedPost3Vp, d.Post(POST3).GetWeightedVp())
}

// voteSelf verifies that voting on one's own post or reply is rejected
// (surfaces as a stamina-deduction failure).
func (tester *VoteTester) voteSelf(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	const POST1 = 11
	const REPLY1 = 12
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(POST1, tester.acc0.Name, "title", "content", []string{"1"}, nil)))
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Reply(REPLY1, POST1, tester.acc0.Name, "content", nil)))
	a.Equal( d.TrxReceiptByAccount( tester.acc0.Name, Vote(tester.acc0.Name, POST1) ).Status , prototype.StatusFailDeductStamina)
	a.Equal( d.TrxReceiptByAccount( tester.acc0.Name, Vote(tester.acc0.Name, REPLY1) ).Status , prototype.StatusFailDeductStamina)
}

// revote verifies a second vote on the same post does not succeed.
func (tester *VoteTester) revote(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	const POST = 1
	a.NoError(tester.acc1.SendTrxAndProduceBlock(Post(1, tester.acc1.Name, "title", "content", []string{"1"}, nil)))
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Vote(tester.acc0.Name, 1)))
	usedVp := uint32(constants.FullVP / constants.VPMarks)
	expectedPostVp := strconv.FormatUint(uint64(usedVp) * Vest2VotePower(d, tester.acc0.GetVest().Value), 10)
	a.Equal(expectedPostVp, d.Post(POST).GetWeightedVp())
	receipt, err := tester.acc0.SendTrxEx(Vote(tester.acc0.Name, 1))
	a.NoError(err)
	a.NotEqual(receipt.Status, prototype.StatusSuccess)
}

// voteToGhostPost verifies a vote on a nonexistent post id fails.
func (tester *VoteTester) voteToGhostPost(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	receipt, err := tester.acc0.SendTrxEx(Vote(tester.acc0.Name, 2))
	a.NoError(err)
	a.NotEqual(receipt.Status, prototype.StatusSuccess)
}

// zeroPower votes VPMarks times and checks the remaining vote power is the
// leftover of FullVP after all per-vote deductions.
func (tester *VoteTester) zeroPower(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	// waiting vote power recover
	i := 1
	for i < constants.VPMarks + 1 {
		a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(uint64(i), tester.acc0.Name, "title", "content", []string{"1"}, nil)))
		a.NoError(tester.acc1.SendTrxAndProduceBlock(Vote(tester.acc1.Name, uint64(i))))
		i ++
	}
	a.Equal(uint32(constants.FullVP - (constants.FullVP / constants.VPMarks) * constants.VPMarks), d.Account(tester.acc1.Name).GetVotePower())
}

// voteAfterPostCashout verifies a vote made after the post's cash-out delay
// is rejected.
func (tester *VoteTester) voteAfterPostCashout(t *testing.T, d *Dandelion) {
	a := assert.New(t)
	a.NoError(tester.acc0.SendTrxAndProduceBlock(Post(uint64(1), tester.acc0.Name, "title", "content", []string{"1"}, nil)))
	a.NoError(tester.acc1.SendTrxAndProduceBlock(Vote(tester.acc1.Name, uint64(1))))
	a.NoError(d.ProduceBlocks(constants.PostCashOutDelayBlock))
	// waiting vote power recover
	//oldVp := d.Post(1).GetWeightedVp()
	//accountVP := d.Account(tester.acc2.Name).GetVotePower()
	//oldVoterCnt := d.Post(1).GetVoteCnt()
	a.Error(tester.acc2.SendTrxAndProduceBlock(Vote(tester.acc2.Name, 1)))
	//a.Equal(oldVp, d.Post(1).GetWeightedVp())
	//a.Equal(accountVP, d.Account(tester.acc2.Name).GetVotePower())
	//a.Equal( d.GlobalProps().Time.UtcSeconds - 1, d.Account(tester.acc2.Name).GetLastVoteTime().UtcSeconds )
	//a.Equal( oldVoterCnt + 1, d.Post(1).GetVoteCnt() )
}
package fields
import (
bls12377 "github.com/consensys/gnark-crypto/ecc/bls12-377"
"github.com/consensys/gnark/frontend"
)
// E6 is an element of the degree-3 (cubic) extension of E2, represented by
// three E2 coefficients: B0 + B1·v + B2·v².
type E6 struct {
	B0, B1, B2 E2
}
// Add sets e to e1 + e2 (component-wise) and returns e.
func (e *E6) Add(cs *frontend.ConstraintSystem, e1, e2 *E6) *E6 {
	e.B0.Add(cs, &e1.B0, &e2.B0)
	e.B1.Add(cs, &e1.B1, &e2.B1)
	e.B2.Add(cs, &e1.B2, &e2.B2)
	return e
}

// NewFp6Zero creates a new E6 with all coefficients constrained to zero.
func NewFp6Zero(cs *frontend.ConstraintSystem) E6 {
	return E6{
		B0: E2{cs.Constant(0), cs.Constant(0)},
		B1: E2{cs.Constant(0), cs.Constant(0)},
		B2: E2{cs.Constant(0), cs.Constant(0)},
	}
}

// Sub sets e to e1 - e2 (component-wise) and returns e.
func (e *E6) Sub(cs *frontend.ConstraintSystem, e1, e2 *E6) *E6 {
	e.B0.Sub(cs, &e1.B0, &e2.B0)
	e.B1.Sub(cs, &e1.B1, &e2.B1)
	e.B2.Sub(cs, &e1.B2, &e2.B2)
	return e
}

// Neg negates an Fp6 elmt
func (e *E6) Neg(cs *frontend.ConstraintSystem, e1 *E6) *E6 {
	e.B0.Neg(cs, &e1.B0)
	e.B1.Neg(cs, &e1.B1)
	e.B2.Neg(cs, &e1.B2)
	return e
}
// Mul sets e to e1 * e2 using schoolbook multiplication over E2, reducing
// v³ terms through MulByIm; the per-line comments count E2 constraints.
// icube is the imaginary elmt to the cube
func (e *E6) Mul(cs *frontend.ConstraintSystem, e1, e2 *E6, ext Extension) *E6 {
	// notations: (a+bv+cv2)*(d+ev+fe2)
	var ad, bf, ce E2
	ad.Mul(cs, &e1.B0, &e2.B0, ext)                       // 5C
	bf.Mul(cs, &e1.B1, &e2.B2, ext).MulByIm(cs, &bf, ext) // 6C
	ce.Mul(cs, &e1.B2, &e2.B1, ext).MulByIm(cs, &ce, ext) // 6C
	var cf, ae, bd E2
	cf.Mul(cs, &e1.B2, &e2.B2, ext).MulByIm(cs, &cf, ext) // 6C
	ae.Mul(cs, &e1.B0, &e2.B1, ext)                       // 5C
	bd.Mul(cs, &e1.B1, &e2.B0, ext)                       // 5C
	var af, be, cd E2
	af.Mul(cs, &e1.B0, &e2.B2, ext) // 5C
	be.Mul(cs, &e1.B1, &e2.B1, ext) // 5C
	cd.Mul(cs, &e1.B2, &e2.B0, ext) // 5C
	e.B0.Add(cs, &ad, &bf).Add(cs, &e.B0, &ce) // 4C
	e.B1.Add(cs, &cf, &ae).Add(cs, &e.B1, &bd) // 4C
	e.B2.Add(cs, &af, &be).Add(cs, &e.B2, &cd) // 4C
	return e
}
// MulByFp2 sets e to e1 scaled coefficient-wise by the E2 element e2 and
// returns e. A temporary keeps the operation safe when e aliases e1.
func (e *E6) MulByFp2(cs *frontend.ConstraintSystem, e1 *E6, e2 *E2, ext Extension) *E6 {
	var res E6
	res.B0.Mul(cs, &e1.B0, e2, ext)
	res.B1.Mul(cs, &e1.B1, e2, ext)
	res.B2.Mul(cs, &e1.B2, e2, ext)
	*e = res
	return e
}
// MulByNonResidue multiplies e by the imaginary elmt of Fp6 (noted a+bV+cV where V**3 in F^2).
// The B2*vCube product is computed into a temporary first so the coefficient
// rotation (B0->B1, B1->B2) stays correct even when e aliases e1.
func (e *E6) MulByNonResidue(cs *frontend.ConstraintSystem, e1 *E6, ext Extension) *E6 {
	res := E6{}
	res.B0.Mul(cs, &e1.B2, ext.vCube, ext)
	e.B1 = e1.B0
	e.B2 = e1.B1
	e.B0 = res.B0
	return e
}
// Inverse sets e to the inverse of the E6 element e1 and returns e,
// using the standard cofactor/norm method: c[i] are the cofactors and t[6]
// accumulates the norm, whose E2 inverse scales each cofactor.
func (e *E6) Inverse(cs *frontend.ConstraintSystem, e1 *E6, ext Extension) *E6 {
	var t [7]E2
	var c [3]E2
	var buf E2
	t[0].Mul(cs, &e1.B0, &e1.B0, ext)
	t[1].Mul(cs, &e1.B1, &e1.B1, ext)
	t[2].Mul(cs, &e1.B2, &e1.B2, ext)
	t[3].Mul(cs, &e1.B0, &e1.B1, ext)
	t[4].Mul(cs, &e1.B0, &e1.B2, ext)
	t[5].Mul(cs, &e1.B1, &e1.B2, ext)
	c[0].MulByIm(cs, &t[5], ext)
	c[0].Neg(cs, &c[0]).Add(cs, &c[0], &t[0])
	c[1].MulByIm(cs, &t[2], ext)
	c[1].Sub(cs, &c[1], &t[3])
	c[2].Sub(cs, &t[1], &t[4])
	t[6].Mul(cs, &e1.B2, &c[1], ext)
	buf.Mul(cs, &e1.B1, &c[2], ext)
	t[6].Add(cs, &t[6], &buf)
	t[6].MulByIm(cs, &t[6], ext)
	buf.Mul(cs, &e1.B0, &c[0], ext)
	t[6].Add(cs, &t[6], &buf)
	t[6].Inverse(cs, &t[6], ext)
	e.B0.Mul(cs, &c[0], &t[6], ext)
	e.B1.Mul(cs, &c[1], &t[6], ext)
	e.B2.Mul(cs, &c[2], &t[6], ext)
	return e
}
// Assign a value to self (witness assignment), copying each E2 coefficient
// from the native bls12-377 element.
func (e *E6) Assign(a *bls12377.E6) {
	e.B0.Assign(&a.B0)
	e.B1.Assign(&a.B1)
	e.B2.Assign(&a.B2)
}

// MustBeEqual constraint self to be equal to other into the given constraint system
func (e *E6) MustBeEqual(cs *frontend.ConstraintSystem, other E6) {
	e.B0.MustBeEqual(cs, other.B0)
	e.B1.MustBeEqual(cs, other.B1)
	e.B2.MustBeEqual(cs, other.B2)
}
package geolite2v2
import (
"encoding/csv"
"errors"
"io"
"log"
"strconv"
"strings"
"github.com/m-lab/annotation-service/iputils"
)
var (
ipNumColumnsGlite2 = 10
)
// GeoIPNode represents a single IP range entry for the GeoLite2 IPv4/IPv6
// databases, embedding the common range bounds and adding location data.
type GeoIPNode struct {
	iputils.BaseIPNode
	LocationIndex int // Index to slice of locations
	PostalCode    string
	Latitude      float64
	Longitude     float64
}
// Clone returns a deep copy of the GeoIPNode to satisfy the IPNode interface.
func (n *GeoIPNode) Clone() iputils.IPNode {
	return &GeoIPNode{
		BaseIPNode:    iputils.BaseIPNode{IPAddressLow: n.IPAddressLow, IPAddressHigh: n.IPAddressHigh},
		LocationIndex: n.LocationIndex,
		PostalCode:    n.PostalCode,
		Latitude:      n.Latitude,
		Longitude:     n.Longitude,
	}
}

// DataEquals checks if the data source specific data of the IPNode specified in the parameter is equal to this node. This function
// supports the merge of equivalent overlapping nodes.
func (n *GeoIPNode) DataEquals(other iputils.IPNode) bool {
	otherNode := other.(*GeoIPNode)
	return n.LocationIndex == otherNode.LocationIndex && n.PostalCode == otherNode.PostalCode && n.Latitude == otherNode.Latitude && n.Longitude == otherNode.Longitude
}
// geoNodeParser implements iputils.IPNodeParser for GeoLite2 CSV data,
// accumulating parsed GeoIPNodes and resolving geoname IDs via idMap.
type geoNodeParser struct {
	idMap map[int]int // geonameID -> index into the locations slice
	list  []GeoIPNode // parsed nodes, in input order
}

// newGeoNodeParser returns a parser backed by the given geonameID index map.
func newGeoNodeParser(locationIDMap map[int]int) *geoNodeParser {
	return &geoNodeParser{
		idMap: locationIDMap,
		list:  []GeoIPNode{},
	}
}
// PreconfigureReader skips the CSV header row. For details see the
// iputils.IPNodeParser interface!
func (p *geoNodeParser) PreconfigureReader(reader *csv.Reader) error {
	// Skip first line
	_, err := reader.Read()
	if err == io.EOF {
		log.Println("Empty input data")
		return errors.New("Empty input data")
	}
	return nil
}

// ValidateRecord checks the record has the expected GeoLite2 column count.
// For details see the iputils.IPNodeParser interface!
func (p *geoNodeParser) ValidateRecord(record []string) error {
	return checkNumColumns(record, ipNumColumnsGlite2)
}

// ExtractIP returns the CIDR column of the record. For details see the
// iputils.IPNodeParser interface!
func (p *geoNodeParser) ExtractIP(record []string) string {
	return record[0]
}
// PopulateRecordData fills a GeoIPNode from one CSV record: location index
// (from the geoname ID, falling back to the registered-country ID), postal
// code, latitude, and longitude. For details see the iputils.IPNodeParser
// interface!
func (p *geoNodeParser) PopulateRecordData(record []string, node iputils.IPNode) error {
	newNode, ok := node.(*GeoIPNode)
	if !ok {
		return errors.New("Illegal node type, expected GeoIPNode")
	}
	// Look for GeoId within idMap and return index
	index, err := lookupGeoID(record[1], p.idMap)
	if err != nil {
		// Fall back to the registered-country geoname ID column.
		if backupIndex, err := lookupGeoID(record[2], p.idMap); err == nil {
			index = backupIndex
		} else {
			// TODO There are an enormous number of these in the log. Why? What does it mean?
			log.Println("Couldn't get a valid Geoname id!", record)
			//TODO: Add a prometheus metric here
		}
	}
	newNode.LocationIndex = index
	newNode.PostalCode = record[6]
	newNode.Latitude, err = stringToFloat(record[7], "Latitude")
	if err != nil {
		return err
	}
	newNode.Longitude, err = stringToFloat(record[8], "Longitude")
	if err != nil {
		return err
	}
	return nil
}
// CreateNode returns a fresh empty GeoIPNode. For details see the
// iputils.IPNodeParser interface!
func (p *geoNodeParser) CreateNode() iputils.IPNode {
	return &GeoIPNode{}
}

// AppendNode appends a copy of the node to the accumulated list. For details
// see the iputils.IPNodeParser interface!
func (p *geoNodeParser) AppendNode(node iputils.IPNode) {
	n := node.(*GeoIPNode)
	p.list = append(p.list, *n)
}

// LastNode returns a pointer to the most recently appended node, or nil if
// the list is empty. For details see the iputils.IPNodeParser interface!
func (p *geoNodeParser) LastNode() iputils.IPNode {
	if len(p.list) < 1 {
		return nil
	}
	return &p.list[len(p.list)-1]
}
// checkNumColumns returns nil when record has exactly size columns, and
// logs and returns a corrupted-data error otherwise.
func checkNumColumns(record []string, size int) error {
	if len(record) == size {
		return nil
	}
	log.Println("Incorrect number of columns in IP list", size, " got: ", len(record), record)
	return errors.New("Corrupted Data: wrong number of columns")
}
// lookupGeoID parses gnid as a geoname ID and returns its index in idMap.
// For example, given
//	locationIdMap := map[int]int{
//		609013: 0,
//		104084: 4,
//		17:     4,
//	}
// lookupGeoID("17", locationIdMap) returns (4, nil).
// TODO: Add error metrics
func lookupGeoID(gnid string, idMap map[int]int) (int, error) {
	geonameID, err := strconv.Atoi(gnid)
	if err != nil {
		return 0, errors.New("Corrupted Data: geonameID should be a number")
	}
	index, found := idMap[geonameID]
	if !found {
		log.Println("geonameID not found ", geonameID)
		return 0, errors.New("Corrupted Data: geonameId not found")
	}
	return index, nil
}
}
func stringToFloat(str, field string) (float64, error) {
flt, err := strconv.ParseFloat(str, 64)
if err != nil {
if len(str) > 0 {
log.Println(field, " was not a number")
output := strings.Join([]string{"Corrupted Data: ", field, " should be an int"}, "")
return 0, errors.New(output)
}
}
return flt, nil
}
// LoadIPListG2 creates a List of IPNodes from a GeoLite2 reader, resolving
// geoname IDs through idMap.
// TODO(gfr) Update to use recursion instead of stack.
// TODO(yachang) If a database fails to load, the cache should mark it as unloadable,
// the error message should indicate that we need a different dataset for that date range.
func LoadIPListG2(reader io.Reader, idMap map[int]int) ([]GeoIPNode, error) {
	parser := newGeoNodeParser(idMap)
	err := iputils.BuildIPNodeList(reader, parser)
	return parser.list, err
}
package convert
import (
"strconv"
"strings"
)
// StringToBool converts a string to a boolean, ignoring surrounding
// whitespace. Accepted values are those of strconv.ParseBool.
func StringToBool(value string) (bool, error) {
	trimmed := strings.TrimSpace(value)
	return strconv.ParseBool(trimmed)
}
// StringToFloat32 converts a string to a float32, ignoring surrounding
// whitespace. The value is parsed with 32-bit precision then narrowed.
func StringToFloat32(value string) (float32, error) {
	f64, err := strconv.ParseFloat(strings.TrimSpace(value), 32)
	return float32(f64), err
}
// StringToFloat64 -- Converts a value from string to float64, ignoring
// surrounding whitespace.
func StringToFloat64(value string) (float64, error) {
	return strconv.ParseFloat(strings.TrimSpace(value), 64)
}
// StringToInt -- Converts a value from string (base 10) to int, ignoring
// surrounding whitespace.
func StringToInt(value string) (int, error) {
	result64, err := strconv.ParseInt(strings.TrimSpace(value), 10, 0)
	return int(result64), err
}
// StringToInt8 -- Converts a value from string (base 10) to int8, ignoring
// surrounding whitespace; out-of-range values return a range error.
func StringToInt8(value string) (int8, error) {
	result64, err := strconv.ParseInt(strings.TrimSpace(value), 10, 8)
	return int8(result64), err
}
// StringToInt16 -- Converts a value from string (base 10) to int16, ignoring
// surrounding whitespace; out-of-range values return a range error.
func StringToInt16(value string) (int16, error) {
	result64, err := strconv.ParseInt(strings.TrimSpace(value), 10, 16)
	return int16(result64), err
}
// StringToInt32 -- Converts a value from string (base 10) to int32, ignoring
// surrounding whitespace; out-of-range values return a range error.
func StringToInt32(value string) (int32, error) {
	result64, err := strconv.ParseInt(strings.TrimSpace(value), 10, 32)
	return int32(result64), err
}
// StringToInt64 -- Converts a value from string (base 10) to int64, ignoring
// surrounding whitespace.
func StringToInt64(value string) (int64, error) {
	return strconv.ParseInt(strings.TrimSpace(value), 10, 64)
}
// StringToUint -- Converts a value from string to uint
func StringToUint(value string) (uint, error) {
	trimmed := strings.TrimSpace(value)
	parsed, err := strconv.ParseUint(trimmed, 10, 0)
	return uint(parsed), err
}

// StringToUint8 -- Converts a value from string to uint8
func StringToUint8(value string) (uint8, error) {
	trimmed := strings.TrimSpace(value)
	parsed, err := strconv.ParseUint(trimmed, 10, 8)
	return uint8(parsed), err
}

// StringToUint16 -- Converts a value from string to uint16
func StringToUint16(value string) (uint16, error) {
	trimmed := strings.TrimSpace(value)
	parsed, err := strconv.ParseUint(trimmed, 10, 16)
	return uint16(parsed), err
}

// StringToUint32 -- Converts a value from string to uint32
func StringToUint32(value string) (uint32, error) {
	trimmed := strings.TrimSpace(value)
	parsed, err := strconv.ParseUint(trimmed, 10, 32)
	return uint32(parsed), err
}

// StringToUint64 -- Converts a value from string to uint64
func StringToUint64(value string) (uint64, error) {
	trimmed := strings.TrimSpace(value)
	return strconv.ParseUint(trimmed, 10, 64)
}
package lowest_common_ancestor_of_a_binary_search_tree
/*
235. 二叉搜索树的最近公共祖先 https://leetcode-cn.com/problems/lowest-common-ancestor-of-a-binary-search-tree
给定一个二叉搜索树, 找到该树中两个指定结点的最近公共祖先。
百度百科中最近公共祖先的定义为:“对于有根树 T 的两个结点 p、q,最近公共祖先表示为一个结点 x,
满足 x 是 p、q 的祖先且 x 的深度尽可能大(一个结点也可以是它自己的祖先)。”
例如,给定如下二叉搜索树: root = [6,2,8,0,4,7,9,null,null,3,5]
示例 1:
输入: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 8
输出: 6
解释: 结点 2 和结点 8 的最近公共祖先是 6。
示例 2:
输入: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 4
输出: 2
解释: 结点 2 和结点 4 的最近公共祖先是 2, 因为根据定义最近公共祖先结点可以为结点本身。
说明:
所有结点的值都是唯一的。
p、q 为不同结点且均存在于给定的二叉搜索树中。
*/
// TreeNode is a binary tree node: an int value plus left/right child links
// (nil when a child is absent).
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// lowestCommonAncestor0 finds the lowest common ancestor of p and q in a
// binary search tree using plain recursion on the BST ordering; time and
// space are both O(N) in the worst case (degenerate tree).
func lowestCommonAncestor0(root, p, q *TreeNode) *TreeNode {
	if root == nil || root == p || root == q {
		return root
	}
	// Fix: recurse into this function itself rather than the sibling
	// implementation, keeping this variant self-contained. The result for
	// a BST is identical.
	if p.Val > root.Val && q.Val > root.Val {
		return lowestCommonAncestor0(root.Right, p, q)
	}
	if p.Val < root.Val && q.Val < root.Val {
		return lowestCommonAncestor0(root.Left, p, q)
	}
	return root
}
// lowestCommonAncestor finds the lowest common ancestor of p and q in a
// binary search tree. The recursion is a tail call, so the space overhead
// can be reduced to O(1).
func lowestCommonAncestor(root, p, q *TreeNode) *TreeNode {
	if root == nil || root == p || root == q {
		return root
	}
	// Normalize so lo <= hi regardless of argument order.
	lo, hi := p.Val, q.Val
	if hi < lo {
		lo, hi = hi, lo
	}
	switch {
	case root.Val > lo && root.Val < hi:
		return root
	case root.Val < lo:
		return lowestCommonAncestor(root.Right, p, q)
	default:
		return lowestCommonAncestor(root.Left, p, q)
	}
}
// lowestCommonAncestor1 finds the lowest common ancestor of p and q in a
// binary search tree iteratively; O(N) time, O(1) space.
func lowestCommonAncestor1(root, p, q *TreeNode) *TreeNode {
	cur := root
	for cur != nil {
		if cur.Val > p.Val && cur.Val > q.Val {
			cur = cur.Left
			continue
		}
		if cur.Val < p.Val && cur.Val < q.Val {
			cur = cur.Right
			continue
		}
		// p and q split here (or one of them is cur): this is the LCA.
		return cur
	}
	return nil
}
// 不借助bst性质,对一个普遍二叉树递归;时空复杂度都是O(N)
func lowestCommonAncestor2(root, p, q *TreeNode) *TreeNode {
if root == nil || root == p || root == q {
return root
}
left := lowestCommonAncestor(root.Left, p, q)
right := lowestCommonAncestor(root.Right, p, q)
if left != nil && right != nil {
return root
}
if left != nil {
return left
}
return right
} | solutions/lowest-common-ancestor-of-a-binary-search-tree/d.go | 0.641759 | 0.515071 | d.go | starcoder |
package pcircle
import (
"strconv"
"strings"
)
// Background specifies parameters of beatmap background.
// Example of an Background:
// 0,0,"bg.jpg",0,0
type Background struct {
	FileName         string // image file name; stored quoted in the .osu line
	XOffset, YOffset int    // offsets serialized as the last two fields — units presumably osu! pixels, TODO confirm
}
// String returns string of Background as it would be in .osu file
func (b Background) String() string {
	fields := []string{
		"0", "0",
		`"` + b.FileName + `"`,
		strconv.Itoa(b.XOffset),
		strconv.Itoa(b.YOffset),
	}
	return strings.Join(fields, ",")
}
// FromString fills Background fields with data parsed from string.
// The expected form is `0,0,"name",x,y` (see Background).
func (b *Background) FromString(str string) (err error) {
	// Fix: TrimLeft treats its second argument as a character cutset, not a
	// prefix, so it would strip any run of '0' and ',' characters. Use
	// TrimPrefix, consistent with Break.FromString.
	attrs := strings.Split(strings.TrimPrefix(str, "0,0,"), ",")
	b.FileName = strings.Trim(attrs[0], `"`)
	b.XOffset, err = strconv.Atoi(attrs[1])
	if err != nil {
		return err
	}
	b.YOffset, err = strconv.Atoi(attrs[2])
	return err
}
// Break defines a single break period.
// Example of a break period:
// 2,4627,5743
type Break struct {
	// StartTime and EndTime are milliseconds from the beginning of the
	// song, defining the start and end points of the break period.
	StartTime int
	EndTime   int
}
// String returns string of Break as it would be in .osu file
func (b Break) String() string {
	return "2," + strconv.Itoa(b.StartTime) + "," + strconv.Itoa(b.EndTime)
}
// FromString fills Break fields with data parsed from string.
// Expected form: `2,start,end` (see Break).
func (b *Break) FromString(str string) (err error) {
	parts := strings.SplitN(strings.TrimPrefix(str, "2,"), ",", 2)
	b.StartTime, err = strconv.Atoi(parts[0])
	if err != nil {
		return err
	}
	b.EndTime, err = strconv.Atoi(parts[1])
	return err
}
// RGB provides color manipulation.
// Example:
// 128,128,0
type RGB struct {
	R, G, B int // channel values, serialized comma-separated as "R,G,B"
}
// String returns string of RGB as it would be in .osu file
func (c RGB) String() string {
	return strconv.Itoa(c.R) + "," + strconv.Itoa(c.G) + "," + strconv.Itoa(c.B)
}
// FromString fills RGB triplet with data parsed from string.
// Channels are filled in order; on a parse failure earlier channels keep
// the values already assigned.
func (c *RGB) FromString(str string) (err error) {
	attrs := strings.Split(str, ",")
	for i, channel := range []*int{&c.R, &c.G, &c.B} {
		*channel, err = strconv.Atoi(attrs[i])
		if err != nil {
			return err
		}
	}
	return nil
}
// Extras define additional parameters related to the hit sound samples,
// serialized colon-separated as "set:addSet:index:volume:file".
// The most common example:
// 0:0:0:0:
type Extras struct {
	SampleSet     SampleSet // The sample set of the normal hit sound. When sampleSet is 0, its value should be inherited from the timing point.
	AdditionalSet SampleSet // The sample set for the other hit sounds
	CustomIndex   int       // Custom sample set index
	SampleVolume  int       // Volume of the sample, and ranges from 0 to 100 (percent)
	Filename      string    // Names an audio file in the folder to play instead of sounds from sample sets
}
// String returns string of Extras as it would be in .osu file
func (e Extras) String() string {
	parts := []string{
		strconv.Itoa(int(e.SampleSet)),
		strconv.Itoa(int(e.AdditionalSet)),
		strconv.Itoa(e.CustomIndex),
		strconv.Itoa(e.SampleVolume),
		e.Filename,
	}
	return strings.Join(parts, ":")
}
// FromString fills Extras fields with data parsed from string.
func (e *Extras) FromString(str string) (err error) {
attrs := strings.Split(str, ":")
ss, err := strconv.Atoi(attrs[0])
if err != nil {
return err
}
e.SampleSet = SampleSet(ss)
ss, err = strconv.Atoi(attrs[1])
if err != nil {
return err
}
e.AdditionalSet = SampleSet(ss)
e.CustomIndex, err = strconv.Atoi(attrs[2])
if err != nil {
return err
}
e.SampleVolume, err = strconv.Atoi(attrs[3])
e.Filename = attrs[4]
return err
} | special.go | 0.864896 | 0.486454 | special.go | starcoder |
package input
import (
"encoding/json"
"errors"
"fmt"
"sync/atomic"
"time"
"github.com/Jeffail/benthos/v3/lib/condition"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/Jeffail/benthos/v3/lib/x/docs"
)
//------------------------------------------------------------------------------
func init() {
	// Registers the read_until input plugin: its constructor, user-facing
	// documentation, and the sanitisation hook used when printing effective
	// configuration.
	Constructors[TypeReadUntil] = TypeSpec{
		constructor: NewReadUntil,
		Summary: `
Reads messages from a child input until a consumed message passes a condition,
at which point the input closes.`,
		Description: `
Messages are read continuously while the condition returns false, when the
condition returns true the message that triggered the condition is sent out and
the input is closed. Use this type to define inputs where the stream should end
once a certain message appears.

Sometimes inputs close themselves. For example, when the ` + "`file`" + ` input
type reaches the end of a file it will shut down. By default this type will also
shut down. If you wish for the input type to be restarted every time it shuts
down until the condition is met then set ` + "`restart_input` to `true`." + `

### Metadata

A metadata key ` + "`benthos_read_until` containing the value `final`" + ` is
added to the first part of the message that triggers the input to stop.`,
		Footnotes: `
## Examples

This input is useful when paired with the
` + "[`count`](/docs/components/conditions/count)" + ` condition, as it can be
used to cut the input stream off once a certain number of messages have been
read:

` + "```yaml" + `
# Only read 100 messages, and then exit.
input:
  read_until:
    input:
      kafka_balanced:
        addresses: [ TODO ]
        topics: [ foo, bar ]
        consumer_group: foogroup
    condition:
      not:
        count:
          arg: 100
` + "```" + ``,
		sanitiseConfigFunc: func(conf Config) (interface{}, error) {
			// Sanitise the condition and the (optional) child input config;
			// a nil child input renders as an empty object.
			condSanit, err := condition.SanitiseConfig(conf.ReadUntil.Condition)
			if err != nil {
				return nil, err
			}
			var inputSanit interface{} = struct{}{}
			if conf.ReadUntil.Input != nil {
				if inputSanit, err = SanitiseConfig(*conf.ReadUntil.Input); err != nil {
					return nil, err
				}
			}
			return map[string]interface{}{
				"input":         inputSanit,
				"restart_input": conf.ReadUntil.Restart,
				"condition":     condSanit,
			}, nil
		},
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon("input", "The child input to consume from."),
			docs.FieldCommon("condition", "The [condition](/docs/components/conditions/about) to test messages against."),
			docs.FieldCommon("restart_input", "Whether the input should be reopened if it closes itself before the condition has resolved to true."),
		},
	}
}
//------------------------------------------------------------------------------
// ReadUntilConfig contains configuration values for the ReadUntil input type.
type ReadUntilConfig struct {
	Input     *Config          `json:"input" yaml:"input"`                 // child input config; nil is invalid at construction time
	Restart   bool             `json:"restart_input" yaml:"restart_input"` // reopen the child if it closes before the condition fires
	Condition condition.Config `json:"condition" yaml:"condition"`         // condition that ends the stream when it returns true
}
// NewReadUntilConfig creates a new ReadUntilConfig with default values.
func NewReadUntilConfig() ReadUntilConfig {
	conf := ReadUntilConfig{}
	conf.Input = nil
	conf.Restart = false
	conf.Condition = condition.NewConfig()
	return conf
}
//------------------------------------------------------------------------------
// dummyReadUntilConfig mirrors ReadUntilConfig but holds the child input as
// an interface{} so a nil input can be marshalled as an empty object.
type dummyReadUntilConfig struct {
	Input     interface{}      `json:"input" yaml:"input"`
	Restart   bool             `json:"restart_input" yaml:"restart_input"`
	Condition condition.Config `json:"condition" yaml:"condition"`
}
// MarshalJSON prints an empty object instead of nil.
func (r ReadUntilConfig) MarshalJSON() ([]byte, error) {
	var input interface{} = r.Input
	if r.Input == nil {
		input = struct{}{}
	}
	return json.Marshal(dummyReadUntilConfig{
		Input:     input,
		Restart:   r.Restart,
		Condition: r.Condition,
	})
}
// MarshalYAML prints an empty object instead of nil.
func (r ReadUntilConfig) MarshalYAML() (interface{}, error) {
	var input interface{} = r.Input
	if r.Input == nil {
		input = struct{}{}
	}
	return dummyReadUntilConfig{
		Input:     input,
		Restart:   r.Restart,
		Condition: r.Condition,
	}, nil
}
//------------------------------------------------------------------------------
// ReadUntil is an input type that continuously reads another input type until a
// condition returns true on a message consumed.
type ReadUntil struct {
	running int32 // atomic flag: 1 while running, set to 0 by CloseAsync
	conf    ReadUntilConfig

	wrapped Type           // child input currently consumed from; nil between restarts
	cond    condition.Type // condition that terminates the stream

	// Retained so the child input can be re-created when restart_input is set.
	wrapperMgr   types.Manager
	wrapperLog   log.Modular
	wrapperStats metrics.Type

	stats metrics.Type
	log   log.Modular

	transactions chan types.Transaction

	closeChan  chan struct{} // closed by CloseAsync to stop the read loop
	closedChan chan struct{} // closed by loop() once fully shut down
}
// NewReadUntil creates a new ReadUntil input type. It requires a child input
// config and a condition config; the read loop is started immediately.
func NewReadUntil(
	conf Config,
	mgr types.Manager,
	log log.Modular,
	stats metrics.Type,
) (Type, error) {
	if conf.ReadUntil.Input == nil {
		return nil, errors.New("cannot create read_until input without a child")
	}

	// Construct the wrapped child input that messages are consumed from.
	wrapped, err := New(
		*conf.ReadUntil.Input, mgr, log, stats,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create input '%v': %v", conf.ReadUntil.Input.Type, err)
	}

	// Construct the stop condition with its own namespaced metrics/logs.
	var cond condition.Type
	if cond, err = condition.New(
		conf.ReadUntil.Condition, mgr,
		log.NewModule(".read_until.condition"),
		metrics.Namespaced(stats, "read_until.condition"),
	); err != nil {
		return nil, fmt.Errorf("failed to create condition '%v': %v", conf.ReadUntil.Condition.Type, err)
	}

	rdr := &ReadUntil{
		running: 1,

		conf: conf.ReadUntil,

		wrapperLog:   log,
		wrapperStats: stats,
		wrapperMgr:   mgr,

		log:          log.NewModule(".read_until"),
		stats:        metrics.Namespaced(stats, "read_until"),
		wrapped:      wrapped,
		cond:         cond,
		transactions: make(chan types.Transaction),
		closeChan:    make(chan struct{}),
		closedChan:   make(chan struct{}),
	}

	// loop() owns the wrapped input from here on and closes rdr.transactions
	// and rdr.closedChan on exit.
	go rdr.loop()
	return rdr, nil
}
//------------------------------------------------------------------------------
// loop consumes transactions from the wrapped input, forwarding them until
// the condition passes; the triggering message is forwarded with a
// `benthos_read_until: final` metadata key, and once it is successfully
// delivered the loop shuts the input down.
func (r *ReadUntil) loop() {
	// Resolve metric handles once up front rather than per message.
	var (
		mRunning         = r.stats.GetGauge("running")
		mRestartErr      = r.stats.GetCounter("restart.error")
		mRestartSucc     = r.stats.GetCounter("restart.success")
		mInputClosed     = r.stats.GetCounter("input.closed")
		mCount           = r.stats.GetCounter("count")
		mPropagated      = r.stats.GetCounter("propagated")
		mFinalPropagated = r.stats.GetCounter("final.propagated")
		mFinalResSent    = r.stats.GetCounter("final.response.sent")
		mFinalResSucc    = r.stats.GetCounter("final.response.success")
		mFinalResErr     = r.stats.GetCounter("final.response.error")
	)

	defer func() {
		// Tear down the child input (if still open), then signal shutdown
		// to TransactionChan consumers and WaitForClose callers.
		if r.wrapped != nil {
			r.wrapped.CloseAsync()
			err := r.wrapped.WaitForClose(time.Second)
			for ; err != nil; err = r.wrapped.WaitForClose(time.Second) {
			}
		}
		mRunning.Decr(1)

		close(r.transactions)
		close(r.closedChan)
	}()
	mRunning.Incr(1)

	var open bool

runLoop:
	for atomic.LoadInt32(&r.running) == 1 {
		// The child input may have closed itself; re-create it when
		// restart_input is enabled, otherwise we are done.
		if r.wrapped == nil {
			if r.conf.Restart {
				var err error
				if r.wrapped, err = New(
					*r.conf.Input, r.wrapperMgr, r.wrapperLog, r.wrapperStats,
				); err != nil {
					mRestartErr.Incr(1)
					r.log.Errorf("Failed to create input '%v': %v\n", r.conf.Input.Type, err)
					return
				}
				mRestartSucc.Incr(1)
			} else {
				return
			}
		}

		var tran types.Transaction
		select {
		case tran, open = <-r.wrapped.TransactionChan():
			if !open {
				mInputClosed.Incr(1)
				r.wrapped = nil
				continue runLoop
			}
		case <-r.closeChan:
			return
		}
		mCount.Incr(1)

		// While the condition is false, messages pass straight through.
		if !r.cond.Check(tran.Payload) {
			select {
			case r.transactions <- tran:
				mPropagated.Incr(1)
			case <-r.closeChan:
				return
			}
			continue
		}

		// The condition passed: mark the message as final for downstream.
		tran.Payload.Get(0).Metadata().Set("benthos_read_until", "final")

		// If this transaction succeeds we shut down.
		tmpRes := make(chan types.Response)
		select {
		case r.transactions <- types.NewTransaction(tran.Payload, tmpRes):
			mFinalPropagated.Incr(1)
		case <-r.closeChan:
			return
		}

		var res types.Response
		select {
		case res, open = <-tmpRes:
			if !open {
				return
			}
			// Only a successful delivery ends the stream; an errored
			// response keeps the input alive so the message is retried.
			streamEnds := res.Error() == nil
			select {
			case tran.ResponseChan <- res:
				mFinalResSent.Incr(1)
			case <-r.closeChan:
				return
			}
			if streamEnds {
				mFinalResSucc.Incr(1)
				return
			}
			mFinalResErr.Incr(1)
		case <-r.closeChan:
			return
		}
	}
}
// TransactionChan returns a transactions channel for consuming messages from
// this input type. The channel is closed when the read loop terminates.
func (r *ReadUntil) TransactionChan() <-chan types.Transaction {
	return r.transactions
}

// Connected returns a boolean indicating whether this input is currently
// connected to its target.
// NOTE(review): loop() sets r.wrapped to nil after the child input closes;
// a call racing that window would dereference nil — confirm callers only
// invoke this while the input is alive.
func (r *ReadUntil) Connected() bool {
	return r.wrapped.Connected()
}

// CloseAsync shuts down the ReadUntil input and stops processing requests.
// Safe to call multiple times; only the first call closes the channel.
func (r *ReadUntil) CloseAsync() {
	if atomic.CompareAndSwapInt32(&r.running, 1, 0) {
		close(r.closeChan)
	}
}

// WaitForClose blocks until the ReadUntil input has closed down, returning
// types.ErrTimeout if the timeout expires first.
func (r *ReadUntil) WaitForClose(timeout time.Duration) error {
	select {
	case <-r.closedChan:
	case <-time.After(timeout):
		return types.ErrTimeout
	}
	return nil
}
//------------------------------------------------------------------------------
package smd
import (
"fmt"
"strings"
"time"
"github.com/soniakeys/meeus/planetposition"
)
const (
	// AU is one astronomical unit in kilometers
	// (matches the IAU 2012 definition of 149,597,870,700 m).
	AU = 1.49597870700e8
)

// CelestialObject defines a celestial object.
// Note: globe and elements may be nil; does not support satellites yet.
type CelestialObject struct {
	Name    string  // human readable identifier, e.g. "Earth"
	Radius  float64 // body radius — km, judging by the values of the definitions below
	a       float64 // heliocentric semi-major axis (km, presumably — values are consistent with AU above)
	μ       float64 // standard gravitational parameter GM (km³/s², per the Earth/Mars values below)
	tilt    float64 // Axial tilt
	incl    float64 // Ecliptic inclination
	SOI     float64 // With respect to the Sun
	J2      float64 // second zonal harmonic (oblateness perturbation)
	J3      float64
	J4      float64
	RotRate float64 // rotation rate (rad/s, presumably — confirm against Earth's value)
	PP      *planetposition.V87Planet // VSOP87 ephemeris data; may be nil
}
// GM returns μ (which is unexported because it's a lowercase letter),
// the standard gravitational parameter stored on the struct.
func (c CelestialObject) GM() float64 {
	return c.μ
}
// J returns the perturbing J_n factor for the provided n.
// Currently only J2, J3 and J4 are supported; any other n yields 0.
func (c CelestialObject) J(n uint8) float64 {
	perturbations := map[uint8]float64{
		2: c.J2,
		3: c.J3,
		4: c.J4,
	}
	// The map's zero value covers every unsupported harmonic.
	return perturbations[n]
}
// String implements the Stringer interface.
func (c CelestialObject) String() string {
	return c.Name + " body"
}

// Equals returns whether the provided celestial object is the same.
// NOTE(review): only Name, Radius, a, μ, SOI and J2 are compared; objects
// differing solely in tilt/incl/J3/J4/RotRate/PP compare as equal.
func (c *CelestialObject) Equals(b CelestialObject) bool {
	return c.Name == b.Name && c.Radius == b.Radius && c.a == b.a && c.μ == b.μ && c.SOI == b.SOI && c.J2 == b.J2
}
// HelioOrbit returns the heliocentric position and velocity of this planet at a given time in equatorial coordinates.
// Note that the whole file is loaded. In fact, if we don't, then whoever is the first to call this function will
// set the Epoch at which the ephemeris are available, and that sucks.
func (c *CelestialObject) HelioOrbit(dt time.Time) Orbit {
	// The Sun is trivially at the origin of the heliocentric frame.
	if c.Name == "Sun" {
		return *NewOrbitFromRV([]float64{0, 0, 0}, []float64{0, 0, 0}, *c)
	}
	// All other bodies are resolved through the package-level ephemeris
	// configuration (config), expressed as an orbit about the Sun.
	pstate := config.HelioState(c.Name, dt)
	R := pstate.R
	V := pstate.V
	return *NewOrbitFromRV(R, V, Sun)
}
// CelestialObjectFromString returns the object from its name
// (case-insensitive); an error is returned for unknown names.
func CelestialObjectFromString(name string) (CelestialObject, error) {
	bodies := map[string]CelestialObject{
		"sun":     Sun,
		"earth":   Earth,
		"venus":   Venus,
		"mars":    Mars,
		"jupiter": Jupiter,
		"saturn":  Saturn,
		"uranus":  Uranus,
		"neptune": Neptune,
		"pluto":   Pluto,
	}
	if body, ok := bodies[strings.ToLower(name)]; ok {
		return body, nil
	}
	return CelestialObject{}, fmt.Errorf("undefined planet '%s'", name)
}
/* Definitions */
// NOTE(review): radii are in km and μ in km³/s² (consistent with AU above);
// several SOI/J fields are zero-filled pending data — see inline TODOs.

// Sun is our closest star.
var Sun = CelestialObject{"Sun", 695700, -1, 1.32712440017987e11, 0.0, 0.0, -1, 0, 0, 0, 0, nil}

// Venus is poisonous.
var Venus = CelestialObject{"Venus", 6051.8, 108208601, 3.24858599e5, 117.36, 3.39458, 0.616e6, 0.000027, 0, 0, 0, nil}

// Earth is home.
var Earth = CelestialObject{"Earth", 6378.1363, 149598023, 3.98600433e5, 23.4393, 0.00005, 924645.0, 1082.6269e-6, -2.5324e-6, -1.6204e-6, 7.292115900231276e-5, nil}

// Mars is the vacation place.
var Mars = CelestialObject{"Mars", 3396.19, 227939282.5616, 4.28283100e4, 25.19, 1.85, 576000, 1964e-6, 36e-6, -18e-6, 3.878785053314509e-05, nil}

// Jupiter is big.
var Jupiter = CelestialObject{"Jupiter", 71492.0, 778298361, 1.266865361e8, 3.13, 1.30326966, 48.2e6, 0.01475, 0, -0.00058, 0, nil}

// Saturn floats and that's really cool.
// TODO: SOI
var Saturn = CelestialObject{"Saturn", 60268.0, 1429394133, 3.7931208e7, 0.93, 2.485, 0, 0.01645, 0, -0.001, 0, nil}

// Uranus is no joke.
// TODO: SOI
var Uranus = CelestialObject{"Uranus", 25559.0, 2875038615, 5.7939513e6, 1.02, 0.773, 0, 0.012, 0, 0, 0, nil}

// Neptune is giant.
// TODO: SOI
var Neptune = CelestialObject{"Neptune", 24622.0, 30.110387 * AU, 6.8365299e6, 1.767, 0.72, 0, 0, 0, 0, 0, nil}

// Pluto is not a planet and had that down ranking coming. It should have stayed in its lane.
// WARNING: Pluto SOI is not defined.
var Pluto = CelestialObject{"Pluto", 1151.0, 5915799000, 9. * 1e2, 118.0, 17.14216667, 1, 0, 0, 0, 0, nil}
package grader
// Compile-time check that V5BlockGrader satisfies the BlockGrader interface.
var _ BlockGrader = (*V5BlockGrader)(nil)

// V5BlockGrader implements the V5 grading algorithm.
// Entries are encoded in Protobuf with 25 winners each block.
// Valid assets can be found in ´opr.V5Assets´
type V5BlockGrader struct {
	baseGrader // shared state (height, oprs, prevWinners) for all grader versions
}

// Version returns the grading algorithm version, 5.
func (v5 *V5BlockGrader) Version() uint8 {
	return 5
}

// WinnerAmount is the number of OPRs that receive a payout
func (v5 *V5BlockGrader) WinnerAmount() int {
	return 25
}
// AddOPR verifies and adds a V5 OPR. Invalid entries are rejected with the
// validation error and the block is left unchanged.
func (v5 *V5BlockGrader) AddOPR(entryhash []byte, extids [][]byte, content []byte) error {
	parsed, err := ValidateV5(entryhash, extids, v5.height, v5.prevWinners, content)
	if err != nil {
		return err
	}
	v5.oprs = append(v5.oprs, parsed)
	return nil
}
// Grade the OPRs. The V5 algorithm works the following way:
// 	1. Take the top 50 entries with the best proof of work
// 	2. Remove top and low's 1% band from each of the 32 assets
// 	3. Calculate the average of each of the 32 assets
// 	4. Calculate the distance of each OPR to the average, where distance is the sum of quadratic differences
// 	to the average of each asset. If an asset is within `band`% of the average, that asset's
//	distance is 0.
// 	5. Throw out the OPR with the highest distance
// 	6. Repeat 3-4 until there are only 25 OPRs left
//	7. Repeat 3 but this time don't apply the band and don't throw out OPRs, just reorder them
//	until you are left with one
//
// Grading is delegated to GradeCustom with the standard cutoff of 50.
func (v5 *V5BlockGrader) Grade() GradedBlock {
	return v5.GradeCustom(50)
}
// GradeCustom grades the block using a custom cutoff for the top X
func (v5 *V5BlockGrader) GradeCustom(cutoff int) GradedBlock {
	block := &V5GradedBlock{
		cutoff: cutoff,
		height: v5.height,
	}
	block.cloneOPRS(v5.oprs)
	block.filterDuplicates()
	block.sortByDifficulty(cutoff)
	block.grade()

	// With fewer than 25 graded OPRs there is no new winner set, so the
	// previous block's winners carry over.
	if len(block.oprs) >= 25 {
		block.createShortHashes(25)
	} else {
		block.shorthashes = v5.prevWinners
	}

	return block
}
// Payout returns the amount of Pegtoshi awarded to the OPR at the specified index,
// delegating to the package-level V5Payout schedule.
func (v5 *V5BlockGrader) Payout(index int) int64 {
	return V5Payout(index)
}
package heap
import "github.com/badgerodon/goreify/generics"
//go:generate goreify github.com/badgerodon/container/heap.PairingHeap,pairingHeapNode numeric,string

// A PairingHeap implements the Heap interface using the Pairing Heap data structure
// see: wikipedia.org/wiki/Pairing_heap
type PairingHeap struct {
	less func(generics.T1, generics.T1) bool // ordering: less(a, b) reports whether a sorts before b
	root *pairingHeapNode                    // the minimum element; nil when the heap is empty
}

// pairingHeapNode is a single heap node. A node's children form a singly
// linked list: child points at the first child and sibling at the next
// child of the same parent.
type pairingHeapNode struct {
	elem                   generics.T1
	parent, child, sibling *pairingHeapNode
}
// NewPairingHeap creates a new PairingHeap using the less function to define
// the ordering of the elements
func NewPairingHeap(less func(generics.T1, generics.T1) bool) *PairingHeap {
	heap := &PairingHeap{less: less}
	return heap
}
// Peek returns the minimum element in the heap without removing it;
// ok is false when the heap is empty.
func (h *PairingHeap) Peek() (el generics.T1, ok bool) {
	if h.root != nil {
		el, ok = h.root.elem, true
	}
	return el, ok
}

// Pop returns the minimum element in the heap and removes it;
// ok is false when the heap is empty.
func (h *PairingHeap) Pop() (el generics.T1, ok bool) {
	if h.root == nil {
		return el, false
	}
	top := h.root
	h.root = h.mergePairs(top.child)
	return top.elem, true
}

// Push adds the element to the heap.
func (h *PairingHeap) Push(el generics.T1) {
	node := &pairingHeapNode{elem: el}
	h.root = h.merge(h.root, node)
}
// Merge merges two heaps into a new heap ordered by h's less function.
func (h *PairingHeap) Merge(h2 *PairingHeap) *PairingHeap {
	merged := &PairingHeap{less: h.less}
	merged.root = h.merge(h.root, h2.root)
	return merged
}
// merge links two subtrees, making the larger root the first child of the
// smaller root, and returns the combined tree's root. Either argument may
// be nil.
func (h *PairingHeap) merge(n1, n2 *pairingHeapNode) *pairingHeapNode {
	switch {
	case n1 == nil:
		return n2
	case n2 == nil:
		return n1
	case h.less(n1.elem, n2.elem):
		// n2 becomes the first child of n1; the previous first child is
		// pushed down the sibling list.
		c1 := n1.child
		n1.child = n2
		n2.parent = n1
		n2.sibling = c1
		return n1
	default:
		c2 := n2.child
		n2.child = n1
		n1.parent = n2
		n1.sibling = c2
		return n2
	}
}

// mergePairs performs the classic two-pass pairing over a sibling list
// (used after Pop): merge siblings pairwise left-to-right, then merge the
// resulting pair with the recursively merged remainder.
func (h *PairingHeap) mergePairs(n *pairingHeapNode) *pairingHeapNode {
	switch {
	case n == nil:
		return nil
	case n.sibling == nil:
		return n
	default:
		cousin := n.sibling.sibling
		return h.merge(h.merge(n, n.sibling), h.mergePairs(cousin))
	}
}
package kamakiri
import "math"
// Body is a physics body.
type Body struct {
	World *World // owning world; Destroy removes the body from World.Bodies

	ID           uint // Reference unique identifier
	Enabled      bool // Enabled dynamics state (collisions are calculated anyway)
	UseGravity   bool // Apply gravity force to dynamics
	IsGrounded   bool // Physics grounded on other body state
	FreezeOrient bool // Physics rotation constraint

	Position        XY      // Physics body shape pivot
	Velocity        XY      // Current linear velocity applied to position
	Force           XY      // Current linear force (reset to 0 every step)
	AngularVelocity float64 // Current angular velocity applied to orient
	Torque          float64 // Current angular force (reset to 0 every step)
	Orient          float64 // Rotation in radians
	Inertia         float64 // Moment of inertia
	Mass            float64 // Physics body mass
	StaticFriction  float64 // Friction when the body has not movement (0 to 1)
	DynamicFriction float64 // Friction when the body has movement (0 to 1)
	Restitution     float64 // Restitution coefficient of the body (0 to 1)

	Shape *Shape // Physics body shape information (type, radius, vertices, normals)
}
// findAvailableBodyIndex finds a valid index for a new physics body
// initialization: the smallest uint not currently used as a body ID.
func (w *World) findAvailableBodyIndex() uint {
	// Collect the IDs in use once, making the search O(n) instead of the
	// previous O(n²) rescan of w.Bodies for every candidate index. An
	// empty world naturally yields 0.
	used := make(map[uint]struct{}, len(w.Bodies))
	for _, body := range w.Bodies {
		used[body.ID] = struct{}{}
	}
	for i := uint(0); ; i++ {
		if _, taken := used[i]; !taken {
			return i
		}
	}
}
// NewBodyCircle creates a new circle physics body with generic parameters:
// pos is the shape pivot, radius the circle radius, density the mass density
// (mass = π·r²·density) and vertices the number of synthesized ring vertices
// reported by GetShapeVertex.
func (w *World) NewBodyCircle(
	pos XY, radius, density float64, vertices int,
) *Body {
	// Initialize new body with generic values
	body := &Body{
		World:           w,
		ID:              w.findAvailableBodyIndex(),
		Enabled:         true,
		Position:        pos,
		Velocity:        XY{0, 0},
		Force:           XY{0, 0},
		AngularVelocity: 0.0,
		Torque:          0.0,
		Orient:          0.0,
		Mass:            math.Pi * radius * radius * density,
		StaticFriction:  0.4,
		DynamicFriction: 0.2,
		Restitution:     0.0,
		UseGravity:      true,
		IsGrounded:      false,
		FreezeOrient:    false,
	}
	// NOTE(review): a solid disk's moment of inertia is ½·m·r²; m·r² is
	// used here — confirm this is intentional before changing it.
	body.Inertia = body.Mass * radius * radius
	body.Shape = &Shape{
		Type:      ShapeTypeCircle,
		Body:      body,
		Radius:    radius,
		Transform: Mat2{},
		Vertices:  make([]Vertex, vertices),
	}

	// Add new body to bodies pointers array and update bodies count
	w.Bodies = append(w.Bodies, body)

	return body
}
// thing integrates over the triangle fan spanned by consecutive shape
// vertices and the implicit origin, returning the polygon's signed area,
// its area-weighted centroid (callers divide by area afterwards) and its
// unscaled moment of inertia.
// NOTE(review): relies on the package-level constant k (presumably 1/3,
// the triangle-centroid factor — confirm at its declaration).
func (w *World) thing(body *Body) (float64, XY, float64) {
	// Calculate centroid and moment of inertia
	center := XY{0, 0}
	area := 0.0
	inertia := 0.0

	for i := 0; i < len(body.Shape.Vertices); i++ {
		// Triangle vertices, third vertex implied as (0, 0)
		p1 := body.Shape.Vertices[i].Position
		next := (i + 1) % len(body.Shape.Vertices)
		p2 := body.Shape.Vertices[next].Position

		// Signed doubled area of the triangle (cross product).
		D := p1.CrossXY(p2)
		triangleArea := D / 2

		area += triangleArea

		// Accumulate the area-weighted centroid contribution.
		center.X += triangleArea * k * (p1.X + p2.X)
		center.Y += triangleArea * k * (p1.Y + p2.Y)

		intX2 := p1.X*p1.X + p2.X*p1.X + p2.X*p2.X
		intY2 := p1.Y*p1.Y + p2.Y*p1.Y + p2.Y*p2.Y
		inertia += (0.25 * k * D) * (intX2 + intY2)
	}
	return area, center, inertia
}
// NewBodyRectangle creates a new rectangle physics body with generic
// parameters: pos is the shape pivot, width/height the rectangle dimensions
// and density the mass density (mass = area·density).
func (w *World) NewBodyRectangle(
	pos XY, width, height, density float64,
) *Body {
	body := &Body{}

	// Initialize new body with generic values
	body.World = w
	body.ID = w.findAvailableBodyIndex()
	body.Enabled = true
	body.Position = pos
	body.Velocity = XY{0, 0}
	body.Force = XY{0, 0}
	body.AngularVelocity = 0.0
	body.Torque = 0.0
	body.Orient = 0.0
	body.Shape = &Shape{
		Type:      ShapeTypePolygon,
		Body:      body,
		Radius:    0.0,
		Transform: Mat2{},
		Vertices:  newRectangleVertices(pos, XY{width, height}),
	}

	// Signed area, area-weighted centroid and inertia over the vertex fan.
	area, center, inertia := w.thing(body)
	center.X *= 1.0 / area
	center.Y *= 1.0 / area

	// Translate vertices to centroid (make the centroid (0, 0) for the polygon in model space)
	// Note: this is not really necessary
	for i := 0; i < len(body.Shape.Vertices); i++ {
		body.Shape.Vertices[i].Position.X -= center.X
		body.Shape.Vertices[i].Position.Y -= center.Y
	}

	body.Mass = density * area
	body.Inertia = density * inertia
	body.StaticFriction = 0.4
	body.DynamicFriction = 0.2
	body.Restitution = 0.0
	body.UseGravity = true
	body.IsGrounded = false
	body.FreezeOrient = false

	// Add new body to bodies pointers array and update bodies count
	w.Bodies = append(w.Bodies, body)

	return body
}
// NewBodyPolygon creates a new polygon physics body with generic parameters:
// pos is the shape pivot, radius/sides control the randomly generated vertex
// ring and density the mass density (mass = area·density).
func (w *World) NewBodyPolygon(pos XY, radius float64, sides int, density float64) *Body {
	body := &Body{}

	// Initialize new body with generic values
	body.World = w
	body.ID = w.findAvailableBodyIndex()
	body.Enabled = true
	body.Position = pos
	body.Velocity = XY{0, 0}
	body.Force = XY{0, 0}
	body.AngularVelocity = 0.0
	body.Torque = 0.0
	body.Orient = 0.0
	body.Shape = &Shape{
		Type:      ShapeTypePolygon,
		Body:      body,
		Radius:    0.0,
		Transform: Mat2{},
		Vertices:  newRandomVertices(radius, sides),
	}

	// Reuse the shared centroid/inertia integration helper instead of
	// duplicating the triangle-fan loop inline; this keeps the math in one
	// place and consistent with NewBodyRectangle.
	area, center, inertia := w.thing(body)
	center.X *= 1.0 / area
	center.Y *= 1.0 / area

	// Translate vertices to centroid (make the centroid (0, 0) for the polygon in model space)
	// Note: this is not really necessary
	for i := 0; i < len(body.Shape.Vertices); i++ {
		body.Shape.Vertices[i].Position.X -= center.X
		body.Shape.Vertices[i].Position.Y -= center.Y
	}

	body.Mass = density * area
	body.Inertia = density * inertia
	body.StaticFriction = 0.4
	body.DynamicFriction = 0.2
	body.Restitution = 0.0
	body.UseGravity = true
	body.IsGrounded = false
	body.FreezeOrient = false

	// Add new body to bodies pointers array and update bodies count
	w.Bodies = append(w.Bodies, body)

	return body
}
// AddForce adds a force to a physics body. The force accumulates in b.Force
// (reset to 0 every step, per the field's contract). No-op on a nil body.
func (b *Body) AddForce(force XY) {
	if b != nil {
		b.Force = b.Force.Add(force)
	}
}

// AddTorque adds an angular force to a physics body. The torque accumulates
// in b.Torque (reset to 0 every step). No-op on a nil body.
func (b *Body) AddTorque(amount float64) {
	if b != nil {
		b.Torque += amount
	}
}
// Destroy unitializes and destroy a physics body.
func (b *Body) Destroy() {
id := b.ID
index := -1
for i := 0; i < len(b.World.Bodies); i++ {
if b.World.Bodies[i].ID == id {
index = i
break
}
}
if index == -1 {
return
}
// Free body allocated memory
b.World.Bodies[index] = b.World.Bodies[len(b.World.Bodies)-1]
b.World.Bodies[len(b.World.Bodies)-1] = nil
b.World.Bodies = b.World.Bodies[:len(b.World.Bodies)-1]
}
// GetShapeVertex returns transformed position of a body shape (body position + vertex transformed position).
// For circle shapes the vertex positions are synthesized on a ring around
// the body, evenly spaced by 360/len(vertices) degrees.
func (b *Body) GetShapeVertex(vertex int) XY {
	position := XY{}
	if b == nil {
		return position
	}

	switch b.Shape.Type {
	case ShapeTypeCircle:
		// Fix: the angle was previously computed as 360/(count*vertex),
		// which divides by zero for vertex 0 (Cos(+Inf) → NaN) and places
		// the remaining vertices incorrectly. The intended spacing is
		// (360/count)·vertex, converted to radians.
		angle := 360.0 / float64(len(b.Shape.Vertices)) * float64(vertex) * deg2Rad
		position = XY{
			b.Position.X + math.Cos(angle)*b.Shape.Radius,
			b.Position.Y + math.Sin(angle)*b.Shape.Radius,
		}
	case ShapeTypePolygon:
		vertexData := b.Shape.Vertices
		position = b.Position.Add(b.Shape.Transform.MultiplyXY(
			vertexData[vertex].Position))
	default:
	}
	return position
}
// InverseInertia returns the inverse value of b.Inertia, or 0 when the
// inertia is zero (avoiding a division by zero).
func (b *Body) InverseInertia() float64 {
	if b.Inertia == 0.0 {
		return 0.0
	}
	return 1 / b.Inertia
}

// InverseMass returns the inverse value of b.Mass, or 0 when the mass is
// zero (avoiding a division by zero).
func (b *Body) InverseMass() float64 {
	if b.Mass == 0.0 {
		return 0.0
	}
	return 1.0 / b.Mass
}
// Shatter shatters a polygon shape physics body to little physics bodies
// with explosion force: if pos lies inside the polygon, the body is
// destroyed and replaced by one small triangular body per original vertex,
// each pushed away with the given force. Nil receivers and non-polygon
// shapes are ignored.
func (b *Body) Shatter(pos XY, force float64) {
	if b == nil {
		return
	}
	if b.Shape.Type != ShapeTypePolygon {
		return
	}
	vertices := b.Shape.Vertices
	// Check whether pos falls inside any triangle of the polygon's fan
	// (body center, vertex i, vertex i+1).
	collision := false
	for i := 0; i < len(vertices); i++ {
		posA := b.Position
		posB := b.Shape.Transform.MultiplyXY(b.Position.Add(vertices[i].Position))
		next := i + 1
		// Wrap to the first vertex after the last one. The previous
		// condition (next <= len(vertices)) was always true, so posC was
		// always vertex 0 and every triangle but the last was wrong.
		if next >= len(vertices) {
			next = 0
		}
		posC := b.Shape.Transform.MultiplyXY(b.Position.Add(vertices[next].Position))
		// Barycentric coordinates of pos with respect to triangle A-B-C;
		// all three strictly positive means pos is inside the triangle.
		alpha := ((posB.Y-posC.Y)*(pos.X-posC.X) + (posC.X-posB.X)*(pos.Y-posC.Y)) /
			((posB.Y-posC.Y)*(posA.X-posC.X) + (posC.X-posB.X)*(posA.Y-posC.Y))
		beta := ((posC.Y-posA.Y)*(pos.X-posC.X) + (posA.X-posC.X)*(pos.Y-posC.Y)) /
			((posB.Y-posC.Y)*(posA.X-posC.X) + (posC.X-posB.X)*(posA.Y-posC.Y))
		gamma := 1.0 - alpha - beta
		if (alpha > 0.0) && (beta > 0.0) && (gamma > 0.0) {
			collision = true
			break
		}
	}
	if !collision {
		return
	}
	count := len(vertices)
	bPos := b.Position
	trans := b.Shape.Transform
	// Destroy shattered physics body. vertices still aliases the old body's
	// vertex slice, which stays valid after removal from the world, so the
	// previous (unused) snapshot copy into a positions slice was dead code
	// and has been removed.
	b.Destroy()
	for i := 0; i < count; i++ {
		next := (i + 1) % count
		center := TriangleBarycenter(vertices[i].Position,
			vertices[next].Position, XY{0, 0})
		center = bPos.Add(center)
		offset := center.Subtract(bPos)
		newBody := b.World.NewBodyPolygon(center, 10, 3, 10)
		newPoly := []Vertex{
			{vertices[i].Position.Subtract(offset), XY{0, 0}},
			{vertices[next].Position.Subtract(offset), XY{0, 0}},
			{pos.Subtract(center), XY{0, 0}},
		}
		// Separate vertices to avoid unnecessary physics collisions
		newPoly[0].Position.X *= 0.95
		newPoly[0].Position.Y *= 0.95
		newPoly[1].Position.X *= 0.95
		newPoly[1].Position.Y *= 0.95
		newPoly[2].Position.X *= 0.95
		newPoly[2].Position.Y *= 0.95
		// Calculate polygon faces normals
		for j := 0; j < len(newPoly); j++ {
			nextVertex := (j + 1) % len(newPoly)
			face := newPoly[nextVertex].Position.Subtract(newPoly[j].Position)
			newPoly[j].Normal = XY{face.Y, -face.X}
			newPoly[j].Normal = newPoly[j].Normal.Normalize()
		}
		// Apply computed vertex data to new physics body shape
		newBody.Shape.Vertices = newPoly
		newBody.Shape.Transform = trans
		// Calculate centroid and moment of inertia.
		// NOTE(review): mass properties are computed from the destroyed
		// body b rather than newBody, and the scaled center is never used
		// afterwards — confirm against the reference physac implementation.
		area, center, inertia := b.World.thing(b)
		center.X *= 1.0 / area
		center.Y *= 1.0 / area
		newBody.Mass = area
		newBody.Inertia = inertia
		// Calculate explosion force direction: from the new body's position
		// through the midpoint of its first edge, scaled by force.
		pointA := newBody.Position
		pointB := newPoly[1].Position.Subtract(newPoly[0].Position)
		pointB.X /= 2.0
		pointB.Y /= 2.0
		forceDirection := pointA.Add(newPoly[0].Position.Add(pointB)).
			Subtract(newBody.Position)
		forceDirection = forceDirection.Normalize()
		forceDirection.X *= force
		forceDirection.Y *= force
		// Apply force to new physics body
		newBody.AddForce(forceDirection)
	}
}
// SetRotation sets physics body shape transform based on radians parameter.
// Only polygon shapes carry a rotation transform; other shapes just record
// the orientation.
func (b *Body) SetRotation(radians float64) {
	if b == nil {
		return
	}
	b.Orient = radians
	if b.Shape.Type != ShapeTypePolygon {
		return
	}
	b.Shape.Transform = Mat2Radians(radians)
}
// integrateForces integrates physics forces into velocity (half-step
// updates). Nil, disabled, and static (zero inverse mass) bodies are left
// untouched.
func (b *Body) integrateForces() {
	// Check the receiver before touching any fields: the original called
	// b.InverseMass() first, which dereferences b and panics on nil, making
	// its own nil check unreachable.
	if b == nil || !b.Enabled {
		return
	}
	imass := b.InverseMass()
	if imass == 0.0 {
		return
	}
	b.Velocity.X += b.Force.X * imass * b.World.Delta() / 2
	b.Velocity.Y += b.Force.Y * imass * b.World.Delta() / 2
	if b.UseGravity {
		b.Velocity.X += b.World.GravityForce.X * (b.World.Delta() / 1000 / 2)
		b.Velocity.Y += b.World.GravityForce.Y * (b.World.Delta() / 1000 / 2)
	}
	if !b.FreezeOrient {
		b.AngularVelocity += b.Torque * b.InverseInertia() * (b.World.Delta() / 2)
	}
}
// Integrates physics velocity into position and forces.
func (b *Body) integrateVelocity() {
if b == nil || !b.Enabled {
return
}
b.Position.X += b.Velocity.X * b.World.Delta()
b.Position.Y += b.Velocity.Y * b.World.Delta()
if !b.FreezeOrient {
b.Orient += b.AngularVelocity * b.World.Delta()
}
b.Shape.Transform = Mat2Radians(b.Orient)
b.integrateForces()
} | body.go | 0.790854 | 0.533337 | body.go | starcoder |
package apiclient
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Anomaly_AnomalyLevel is the generated enum used to grade an anomaly's
// severity and confidence, from VERY_LOW through VERY_HIGH.
type Anomaly_AnomalyLevel int32
const (
	Anomaly_UNKNOWN_ANOMALY_LEVEL Anomaly_AnomalyLevel = 0
	Anomaly_VERY_LOW Anomaly_AnomalyLevel = 1
	Anomaly_LOW Anomaly_AnomalyLevel = 2
	Anomaly_MEDIUM Anomaly_AnomalyLevel = 3
	Anomaly_HIGH Anomaly_AnomalyLevel = 4
	Anomaly_VERY_HIGH Anomaly_AnomalyLevel = 5
)
// Anomaly_AnomalyLevel_name maps enum values to their proto names.
var Anomaly_AnomalyLevel_name = map[int32]string{
	0: "UNKNOWN_ANOMALY_LEVEL",
	1: "VERY_LOW",
	2: "LOW",
	3: "MEDIUM",
	4: "HIGH",
	5: "VERY_HIGH",
}
// Anomaly_AnomalyLevel_value maps proto names back to enum values.
var Anomaly_AnomalyLevel_value = map[string]int32{
	"UNKNOWN_ANOMALY_LEVEL": 0,
	"VERY_LOW": 1,
	"LOW": 2,
	"MEDIUM": 3,
	"HIGH": 4,
	"VERY_HIGH": 5,
}
// Enum returns a pointer to a copy of x (proto2 optional-field helper).
func (x Anomaly_AnomalyLevel) Enum() *Anomaly_AnomalyLevel {
	p := new(Anomaly_AnomalyLevel)
	*p = x
	return p
}
// String returns the proto name of the enum value.
func (x Anomaly_AnomalyLevel) String() string {
	return proto.EnumName(Anomaly_AnomalyLevel_name, int32(x))
}
// UnmarshalJSON decodes a JSON-encoded enum (by name or number) into x.
func (x *Anomaly_AnomalyLevel) UnmarshalJSON(data []byte) error {
	value, err := proto.UnmarshalJSONEnum(Anomaly_AnomalyLevel_value, data, "Anomaly_AnomalyLevel")
	if err != nil {
		return err
	}
	*x = Anomaly_AnomalyLevel(value)
	return nil
}
// EnumDescriptor returns the raw file descriptor bytes and this enum's
// path within it.
func (Anomaly_AnomalyLevel) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 0} }
// Anomaly_AnomalyType is the generated enum recording how an anomaly was
// produced (by the parser, by analysis, or manually).
type Anomaly_AnomalyType int32
const (
	Anomaly_UNKNOWN_ANOMALY_TYPE Anomaly_AnomalyType = 0
	Anomaly_PARSER_ANOMALY Anomaly_AnomalyType = 1
	Anomaly_ANALYSIS_ANOMALY Anomaly_AnomalyType = 2
	Anomaly_MANUAL_ANOMALY Anomaly_AnomalyType = 3
)
// Anomaly_AnomalyType_name maps enum values to their proto names.
var Anomaly_AnomalyType_name = map[int32]string{
	0: "UNKNOWN_ANOMALY_TYPE",
	1: "PARSER_ANOMALY",
	2: "ANALYSIS_ANOMALY",
	3: "MANUAL_ANOMALY",
}
// Anomaly_AnomalyType_value maps proto names back to enum values.
var Anomaly_AnomalyType_value = map[string]int32{
	"UNKNOWN_ANOMALY_TYPE": 0,
	"PARSER_ANOMALY": 1,
	"ANALYSIS_ANOMALY": 2,
	"MANUAL_ANOMALY": 3,
}
// Enum returns a pointer to a copy of x (proto2 optional-field helper).
func (x Anomaly_AnomalyType) Enum() *Anomaly_AnomalyType {
	p := new(Anomaly_AnomalyType)
	*p = x
	return p
}
// String returns the proto name of the enum value.
func (x Anomaly_AnomalyType) String() string {
	return proto.EnumName(Anomaly_AnomalyType_name, int32(x))
}
// UnmarshalJSON decodes a JSON-encoded enum (by name or number) into x.
func (x *Anomaly_AnomalyType) UnmarshalJSON(data []byte) error {
	value, err := proto.UnmarshalJSONEnum(Anomaly_AnomalyType_value, data, "Anomaly_AnomalyType")
	if err != nil {
		return err
	}
	*x = Anomaly_AnomalyType(value)
	return nil
}
// EnumDescriptor returns the raw file descriptor bytes and this enum's
// path within it.
func (Anomaly_AnomalyType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0, 1} }
// Anomaly is the generated message describing a detected anomaly: its kind,
// severity/confidence grading, human-readable symptom and explanation,
// provenance, and cross-references. (Original .proto leading comment:
// "type?, labels".)
type Anomaly struct {
	Type *Anomaly_AnomalyType `protobuf:"varint,1,opt,name=type,enum=Anomaly_AnomalyType" json:"type,omitempty"`
	Severity *Anomaly_AnomalyLevel `protobuf:"varint,2,opt,name=severity,enum=Anomaly_AnomalyLevel" json:"severity,omitempty"`
	Confidence *Anomaly_AnomalyLevel `protobuf:"varint,3,opt,name=confidence,enum=Anomaly_AnomalyLevel" json:"confidence,omitempty"`
	Symptom *string `protobuf:"bytes,4,opt,name=symptom" json:"symptom,omitempty"`
	Explanation *string `protobuf:"bytes,5,opt,name=explanation" json:"explanation,omitempty"`
	GeneratedBy *string `protobuf:"bytes,6,opt,name=generated_by" json:"generated_by,omitempty"`
	ReferencePathspec *PathSpec `protobuf:"bytes,7,opt,name=reference_pathspec" json:"reference_pathspec,omitempty"`
	AnomalyReferenceId []string `protobuf:"bytes,8,rep,name=anomaly_reference_id" json:"anomaly_reference_id,omitempty"`
	Finding []string `protobuf:"bytes,9,rep,name=finding" json:"finding,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}
// Reset clears the message to its zero value.
func (m *Anomaly) Reset() { *m = Anomaly{} }
// String returns a compact text representation of the message.
func (m *Anomaly) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *Anomaly as a proto message.
func (*Anomaly) ProtoMessage() {}
// Descriptor returns the raw file descriptor bytes and the message's path
// within it.
func (*Anomaly) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} }
// GetType returns the anomaly type, or UNKNOWN_ANOMALY_TYPE when unset.
func (m *Anomaly) GetType() Anomaly_AnomalyType {
	if m != nil && m.Type != nil {
		return *m.Type
	}
	return Anomaly_UNKNOWN_ANOMALY_TYPE
}
// GetSeverity returns the severity, or UNKNOWN_ANOMALY_LEVEL when unset.
func (m *Anomaly) GetSeverity() Anomaly_AnomalyLevel {
	if m != nil && m.Severity != nil {
		return *m.Severity
	}
	return Anomaly_UNKNOWN_ANOMALY_LEVEL
}
// GetConfidence returns the confidence, or UNKNOWN_ANOMALY_LEVEL when unset.
func (m *Anomaly) GetConfidence() Anomaly_AnomalyLevel {
	if m != nil && m.Confidence != nil {
		return *m.Confidence
	}
	return Anomaly_UNKNOWN_ANOMALY_LEVEL
}
// GetSymptom returns the symptom, or "" when unset.
func (m *Anomaly) GetSymptom() string {
	if m != nil && m.Symptom != nil {
		return *m.Symptom
	}
	return ""
}
// GetExplanation returns the explanation, or "" when unset.
func (m *Anomaly) GetExplanation() string {
	if m != nil && m.Explanation != nil {
		return *m.Explanation
	}
	return ""
}
// GetGeneratedBy returns the generator identifier, or "" when unset.
func (m *Anomaly) GetGeneratedBy() string {
	if m != nil && m.GeneratedBy != nil {
		return *m.GeneratedBy
	}
	return ""
}
// GetReferencePathspec returns the reference path spec, or nil when unset.
func (m *Anomaly) GetReferencePathspec() *PathSpec {
	if m != nil {
		return m.ReferencePathspec
	}
	return nil
}
// GetAnomalyReferenceId returns the anomaly reference ids, or nil when unset.
func (m *Anomaly) GetAnomalyReferenceId() []string {
	if m != nil {
		return m.AnomalyReferenceId
	}
	return nil
}
// GetFinding returns the findings, or nil when unset.
func (m *Anomaly) GetFinding() []string {
	if m != nil {
		return m.Finding
	}
	return nil
}
// init registers the message and enum types with the proto runtime.
func init() {
	proto.RegisterType((*Anomaly)(nil), "Anomaly")
	proto.RegisterEnum("Anomaly_AnomalyLevel", Anomaly_AnomalyLevel_name, Anomaly_AnomalyLevel_value)
	proto.RegisterEnum("Anomaly_AnomalyType", Anomaly_AnomalyType_name, Anomaly_AnomalyType_value)
}
// init registers the raw file descriptor for anomaly.proto.
func init() { proto.RegisterFile("anomaly.proto", fileDescriptor2) }
var fileDescriptor2 = []byte{
// 675 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x93, 0xdd, 0x6e, 0x1a, 0x39,
0x14, 0xc7, 0x43, 0x20, 0x01, 0x9c, 0x0f, 0x21, 0x8b, 0x48, 0xb3, 0x7b, 0x65, 0xb1, 0x5a, 0x09,
0x69, 0x57, 0xb3, 0x4a, 0xf6, 0x22, 0x8a, 0xb4, 0xda, 0xec, 0xb0, 0xcb, 0x26, 0x51, 0x07, 0x82,
0x42, 0x3e, 0xc4, 0x4d, 0x89, 0x67, 0xe6, 0x0c, 0x38, 0x1a, 0x6c, 0xcb, 0x36, 0xa4, 0xa8, 0x52,
0x9f, 0xae, 0x0f, 0xd0, 0x67, 0x68, 0x5f, 0xa3, 0x17, 0x95, 0xcd, 0x40, 0x21, 0xad, 0x7a, 0x85,
0xcf, 0x99, 0xff, 0xf9, 0xff, 0x8e, 0x8f, 0x0f, 0xe8, 0x80, 0x72, 0x31, 0xa1, 0xd9, 0xdc, 0x97,
0x4a, 0x18, 0xf1, 0x33, 0x7a, 0x12, 0x91, 0xce, 0xcf, 0x87, 0x1a, 0x26, 0x94, 0x1b, 0x16, 0x2f,
0xe2, 0xc6, 0x87, 0x0a, 0x2a, 0x07, 0x0b, 0x35, 0x0e, 0x51, 0xc9, 0xcc, 0x25, 0x78, 0x05, 0x52,
0x68, 0x1e, 0x9e, 0xd4, 0xfd, 0x3c, 0xbf, 0xfc, 0xbd, 0x9d, 0x4b, 0x68, 0x35, 0x3f, 0x7e, 0xfe,
0xf4, 0xbe, 0xd0, 0xc0, 0xc4, 0x46, 0x44, 0xa4, 0x24, 0x47, 0x11, 0x33, 0x66, 0x9a, 0x28, 0x90,
0x0a, 0x34, 0x70, 0xa3, 0x7d, 0x1c, 0xa1, 0x8a, 0x86, 0x19, 0x28, 0x66, 0xe6, 0xde, 0xb6, 0x73,
0x3c, 0x7a, 0xe9, 0x18, 0xc2, 0x0c, 0xb2, 0xd6, 0x99, 0xb3, 0xfc, 0x13, 0x1f, 0xf7, 0x73, 0xb9,
0xb5, 0x35, 0x63, 0x58, 0x59, 0xb3, 0x94, 0x30, 0x43, 0x98, 0x26, 0x94, 0x18, 0x35, 0x05, 0x22,
0x85, 0x66, 0x86, 0xcd, 0xc0, 0x32, 0x50, 0x2c, 0x78, 0xca, 0x12, 0xe0, 0x31, 0x78, 0xc5, 0x1f,
0x51, 0x4e, 0x1d, 0xe5, 0x18, 0xff, 0xf1, 0xef, 0xaa, 0x80, 0x98, 0x31, 0x35, 0x9b, 0xa4, 0xef,
0x30, 0xfe, 0x42, 0x65, 0x3d, 0x9f, 0x48, 0x23, 0x26, 0x5e, 0x89, 0x14, 0x9a, 0xd5, 0xd6, 0x6f,
0xce, 0xe9, 0x57, 0xfc, 0x4b, 0x40, 0x12, 0xd0, 0xb1, 0x62, 0xd2, 0x30, 0xc1, 0x6d, 0xd3, 0xcf,
0xd6, 0xcf, 0x7a, 0x38, 0x3b, 0x31, 0xd5, 0x3e, 0x7e, 0x8b, 0xf6, 0xe0, 0x8d, 0xcc, 0x28, 0xa7,
0x56, 0xe4, 0xed, 0x38, 0x87, 0xd4, 0x39, 0x3c, 0xe2, 0xd7, 0xdf, 0x38, 0x48, 0xa1, 0x35, 0x8b,
0x32, 0x20, 0x6b, 0x45, 0x9a, 0xa4, 0x42, 0xbd, 0x1c, 0x08, 0x4d, 0x12, 0x66, 0x3f, 0xd2, 0x8c,
0x30, 0x9e, 0x0a, 0x35, 0x71, 0x52, 0x07, 0x9f, 0x51, 0x96, 0xd1, 0x28, 0x03, 0x1f, 0xbf, 0x43,
0xfb, 0x23, 0xe0, 0xa0, 0xa8, 0x81, 0x64, 0x18, 0xcd, 0xbd, 0x5d, 0x47, 0x1f, 0x3b, 0x7a, 0x84,
0x1f, 0xfb, 0x46, 0x31, 0x3e, 0xca, 0x5b, 0x88, 0xec, 0xd1, 0x5d, 0x60, 0x55, 0xb2, 0xce, 0xfc,
0x7d, 0xf1, 0xc0, 0x4c, 0x13, 0x6e, 0x61, 0x99, 0x7b, 0x71, 0x20, 0x9c, 0x4e, 0x16, 0x5b, 0x40,
0x24, 0x55, 0x1a, 0x14, 0x11, 0x8a, 0x50, 0x65, 0x58, 0x4a, 0x63, 0xe3, 0xe3, 0x09, 0xc2, 0x0a,
0x52, 0x50, 0x76, 0xd8, 0x43, 0x49, 0xcd, 0x58, 0x4b, 0x88, 0xbd, 0x32, 0x29, 0x34, 0xf7, 0x4e,
0xaa, 0x7e, 0x8f, 0x9a, 0x71, 0x5f, 0x42, 0xdc, 0xfa, 0xdf, 0x35, 0xf4, 0x0f, 0xfe, 0x3b, 0x20,
0x4b, 0x11, 0x91, 0x82, 0x71, 0x63, 0x5b, 0x32, 0xc2, 0x81, 0x44, 0xf4, 0x04, 0xb1, 0x21, 0xcf,
0x63, 0x50, 0xb0, 0x31, 0x8b, 0x67, 0x6a, 0xe7, 0x33, 0xe5, 0x89, 0x8f, 0x01, 0xd5, 0xf3, 0xec,
0xf0, 0x2b, 0x96, 0x25, 0x5e, 0x85, 0x14, 0x9b, 0xd5, 0xd6, 0x85, 0xa3, 0x04, 0xf8, 0x3c, 0x20,
0x7a, 0x71, 0xf1, 0xa9, 0xb6, 0x77, 0x14, 0x64, 0x25, 0xde, 0x1c, 0x32, 0x77, 0x61, 0xbe, 0x44,
0x24, 0xa1, 0x86, 0x46, 0x54, 0x83, 0x8f, 0x7b, 0xa8, 0x9c, 0x32, 0x9e, 0x30, 0x3e, 0xf2, 0xaa,
0xce, 0xf9, 0xdc, 0x39, 0x9f, 0xe1, 0xd3, 0x8d, 0x81, 0xca, 0xc5, 0xdb, 0x89, 0xd4, 0x95, 0xe6,
0xbb, 0xa6, 0xd8, 0x68, 0x04, 0x2a, 0x1f, 0xed, 0x72, 0x4f, 0x1b, 0x80, 0xf6, 0xd7, 0x77, 0x15,
0xff, 0x84, 0x8e, 0xee, 0xba, 0xaf, 0xba, 0xd7, 0x0f, 0xdd, 0x61, 0xd0, 0xbd, 0xee, 0x04, 0xe1,
0x60, 0x18, 0xb6, 0xef, 0xdb, 0x61, 0x6d, 0x0b, 0xef, 0xa3, 0xca, 0x7d, 0xfb, 0x66, 0x30, 0x0c,
0xaf, 0x1f, 0x6a, 0x05, 0x5c, 0x46, 0x45, 0x7b, 0xd8, 0xc6, 0x08, 0xed, 0x76, 0xda, 0xff, 0x5d,
0xdd, 0x75, 0x6a, 0x45, 0x5c, 0x41, 0xa5, 0xcb, 0xab, 0x8b, 0xcb, 0x5a, 0x09, 0x1f, 0xa0, 0xaa,
0x13, 0xbb, 0x70, 0xa7, 0x01, 0x68, 0x6f, 0xed, 0xaf, 0x8c, 0x3d, 0x54, 0x7f, 0x49, 0xb9, 0x1d,
0xf4, 0xda, 0xb5, 0x2d, 0x8c, 0xd1, 0x61, 0x2f, 0xb8, 0xe9, 0xb7, 0x6f, 0x96, 0x1f, 0x6a, 0x05,
0x5c, 0x47, 0xb5, 0xa0, 0x1b, 0x84, 0x83, 0xfe, 0x55, 0x7f, 0x95, 0xdd, 0xb6, 0xca, 0x4e, 0xd0,
0xbd, 0x0b, 0xc2, 0x55, 0xae, 0xf8, 0x25, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xa5, 0x14, 0xc3, 0x7e,
0x04, 0x00, 0x00,
} | anomaly.pb.go | 0.593845 | 0.426859 | anomaly.pb.go | starcoder |
package prometheus
import (
"encoding/json"
"fmt"
"math"
"sort"
"strconv"
"strings"
)
// Response represents Prometheus's query response.
type Response struct {
	// Status is the response status.
	Status string `json:"status"`
	// Data is the response data.
	Data data `json:"data"`
}
// data is the payload of a query response. It carries no json tags because
// the custom UnmarshalJSON below selects the concrete Result type from the
// "resultType" discriminator.
type data struct {
	// ResultType is the type of Result (matrix, vector, etc.).
	ResultType string
	// Result contains the query result (concrete type depends on ResultType).
	Result result
}
// result is implemented by each concrete result payload; each knows how to
// compare itself against another result of the same kind.
type result interface {
	matches(other result) (MatchInformation, error)
}
// MatrixResult contains a list matrixRow.
type MatrixResult struct {
	Result []matrixRow `json:"result"`
}
// VectorResult contains a list of vectorItem.
type VectorResult struct {
	Result []vectorItem `json:"result"`
}
// ScalarResult is the scalar Value for the response.
type ScalarResult struct {
	Result Value `json:"result"`
}
// StringResult is the string Value for the response.
type StringResult struct {
	Result Value `json:"result"`
}
// UnmarshalJSON unmarshals the data struct of query response. The
// "resultType" discriminator is decoded first and decides which concrete
// result type the payload is unmarshaled into.
func (d *data) UnmarshalJSON(bytes []byte) error {
	var discriminator struct {
		ResultType string `json:"resultType"`
	}
	if err := json.Unmarshal(bytes, &discriminator); err != nil {
		return err
	}

	*d = data{ResultType: discriminator.ResultType}
	switch d.ResultType {
	case "matrix":
		d.Result = &MatrixResult{}
	case "vector":
		d.Result = &VectorResult{}
	case "scalar":
		d.Result = &ScalarResult{}
	case "string":
		d.Result = &StringResult{}
	default:
		return fmt.Errorf("unknown resultType: %s", d.ResultType)
	}

	return json.Unmarshal(bytes, d.Result)
}
// Len is the number of elements in the collection.
func (r MatrixResult) Len() int { return len(r.Result) }
// Less reports whether the element with
// index i should sort before the element with index j.
// It compares the cached id keys, which are populated by Sort.
func (r MatrixResult) Less(i, j int) bool {
	return r.Result[i].id < r.Result[j].id
}
// Swap swaps the elements with indexes i and j.
func (r MatrixResult) Swap(i, j int) { r.Result[i], r.Result[j] = r.Result[j], r.Result[i] }
// Sort sorts the MatrixResult. Each row's id is first derived from its tag
// set so that Less has stable keys to compare.
func (r MatrixResult) Sort() {
	for i, result := range r.Result {
		r.Result[i].id = result.Metric.genID()
	}
	sort.Sort(r)
}
// Len is the number of elements in the vector.
func (r VectorResult) Len() int { return len(r.Result) }
// Less reports whether the element with
// index i should sort before the element with index j.
// It compares the cached id keys, which are populated by Sort.
func (r VectorResult) Less(i, j int) bool {
	return r.Result[i].id < r.Result[j].id
}
// Swap swaps the elements with indexes i and j.
func (r VectorResult) Swap(i, j int) { r.Result[i], r.Result[j] = r.Result[j], r.Result[i] }
// Sort sorts the VectorResult. Each item's id is first derived from its tag
// set so that Less has stable keys to compare.
func (r VectorResult) Sort() {
	for i, result := range r.Result {
		r.Result[i].id = result.Metric.genID()
	}
	sort.Sort(r)
}
// matrixRow is a single row of "matrix" Result.
type matrixRow struct {
	// Metric is the tags for the matrixRow.
	Metric Tags `json:"metric"`
	// Values is the set of values for the matrixRow.
	Values Values `json:"values"`
	// id caches the genID of Metric; populated by MatrixResult.Sort.
	id string
}
// vectorItem is a single item of "vector" Result.
type vectorItem struct {
	// Metric is the tags for the vectorItem.
	Metric Tags `json:"metric"`
	// Value is the value for the vectorItem.
	Value Value `json:"value"`
	// id caches the genID of Metric; populated by VectorResult.Sort.
	id string
}
// Tags is a simple representation of Prometheus tags.
type Tags map[string]string
// Values is a list of values for the Prometheus Result.
type Values []Value
// Value is a single value for Prometheus Result.
// As decoded from JSON it holds a [timestamp, value] pair.
type Value []interface{}
// genID returns a deterministic identifier for the tag set by joining
// sorted "key:value," fragments, independent of map iteration order.
func (t *Tags) genID() string {
	// Pre-size by capacity only: the previous version allocated len(*t)
	// empty strings up front and then appended, so every ID carried a run
	// of leading empty entries (and double the storage).
	tags := make([]string, 0, len(*t))
	for k, v := range *t {
		tags = append(tags, fmt.Sprintf("%s:%s,", k, v))
	}
	sort.Strings(tags)
	var sb strings.Builder
	// NB: this may clash but exact tag values are also checked, and this is a
	// validation endpoint so there's less concern over correctness.
	for _, t := range tags {
		sb.WriteString(t)
	}
	return sb.String()
}
// MatchInformation describes how well two responses match. The comparison
// helpers in this file set exactly one of the two fields.
type MatchInformation struct {
	// FullMatch indicates a full match.
	FullMatch bool
	// NoMatch indicates that the responses do not match sufficiently.
	NoMatch bool
}
// Matches compares two responses and determines how closely they match.
// Statuses must agree; two error responses are treated as a full match
// regardless of payload, otherwise the data payloads are compared.
func (r Response) Matches(other Response) (MatchInformation, error) {
	if r.Status != other.Status {
		return MatchInformation{NoMatch: true}, fmt.Errorf(
			"status %s does not match other status %s", r.Status, other.Status)
	}
	if r.Status == "error" {
		return MatchInformation{FullMatch: true}, nil
	}
	return r.Data.matches(other.Data)
}
// matches verifies both payloads have the same result type and then
// delegates to the concrete result comparison.
func (d data) matches(other data) (MatchInformation, error) {
	if d.ResultType != other.ResultType {
		return MatchInformation{NoMatch: true}, fmt.Errorf(
			"result type %s does not match other result type %s",
			d.ResultType, other.ResultType)
	}
	return d.Result.matches(other.Result)
}
// matches checks that other is also a *MatrixResult of the same length and
// that every row matches after both results are sorted into canonical order.
func (r MatrixResult) matches(other result) (MatchInformation, error) {
	otherMatrix, ok := other.(*MatrixResult)
	if !ok {
		err := fmt.Errorf("incorrect type for matching, expected MatrixResult, %v", other)
		return MatchInformation{
			NoMatch: true,
		}, err
	}
	if len(r.Result) != len(otherMatrix.Result) {
		err := fmt.Errorf("result length %d does not match other result length %d",
			len(r.Result), len(otherMatrix.Result))
		return MatchInformation{
			NoMatch: true,
		}, err
	}
	// Sort both sides by tag-derived ids so rows are compared pairwise.
	r.Sort()
	otherMatrix.Sort()
	for i, result := range r.Result {
		if err := result.matches(otherMatrix.Result[i]); err != nil {
			return MatchInformation{
				NoMatch: true,
			}, err
		}
	}
	return MatchInformation{FullMatch: true}, nil
}
// matches checks that other is also a *VectorResult of the same length and
// that every item matches after both results are sorted into canonical order.
func (r VectorResult) matches(other result) (MatchInformation, error) {
	otherVector, ok := other.(*VectorResult)
	if !ok {
		err := fmt.Errorf("incorrect type for matching, expected VectorResult")
		return MatchInformation{
			NoMatch: true,
		}, err
	}
	if len(r.Result) != len(otherVector.Result) {
		err := fmt.Errorf("result length %d does not match other result length %d",
			len(r.Result), len(otherVector.Result))
		return MatchInformation{
			NoMatch: true,
		}, err
	}
	// Sort both sides by tag-derived ids so items are compared pairwise.
	r.Sort()
	otherVector.Sort()
	for i, result := range r.Result {
		if err := result.matches(otherVector.Result[i]); err != nil {
			return MatchInformation{
				NoMatch: true,
			}, err
		}
	}
	return MatchInformation{FullMatch: true}, nil
}
// matches checks that other is also a *ScalarResult with a matching value.
func (r ScalarResult) matches(other result) (MatchInformation, error) {
	mismatch := MatchInformation{NoMatch: true}
	otherScalar, ok := other.(*ScalarResult)
	if !ok {
		return mismatch, fmt.Errorf("incorrect type for matching, expected ScalarResult")
	}
	if err := r.Result.matches(otherScalar.Result); err != nil {
		return mismatch, err
	}
	return MatchInformation{FullMatch: true}, nil
}
// matches checks that other is also a *StringResult with a matching value.
func (r StringResult) matches(other result) (MatchInformation, error) {
	mismatch := MatchInformation{NoMatch: true}
	otherString, ok := other.(*StringResult)
	if !ok {
		return mismatch, fmt.Errorf("incorrect type for matching, expected StringResult")
	}
	if err := r.Result.matches(otherString.Result); err != nil {
		return mismatch, err
	}
	return MatchInformation{FullMatch: true}, nil
}
// matches reports an error if the other row's tag set or values differ.
func (r matrixRow) matches(other matrixRow) error {
	// NB: tags should match by here so this is more of a sanity check.
	if err := r.Metric.matches(other.Metric); err != nil {
		return err
	}
	return r.Values.matches(other.Values)
}
// matches reports an error if the other item's tag set or value differ.
func (r vectorItem) matches(other vectorItem) error {
	// NB: tags should match by here so this is more of a sanity check.
	if err := r.Metric.matches(other.Metric); err != nil {
		return err
	}
	return r.Value.matches(other.Value)
}
// matches reports whether two tag sets are identical, returning a
// descriptive error on the first difference found.
func (t Tags) matches(other Tags) error {
	if len(t) != len(other) {
		return fmt.Errorf("tag length %d does not match other tag length %d",
			len(t), len(other))
	}
	for k, v := range t {
		vv, ok := other[k]
		if !ok {
			// Report the missing key; the previous message printed the
			// value instead of the key.
			return fmt.Errorf("tag %s not found in other tagset", k)
		}
		if v != vv {
			return fmt.Errorf("tag %s value %s does not match other tag value %s", k, v, vv)
		}
	}
	return nil
}
// matches reports whether both value lists have equal length and every
// element pair matches, returning the first mismatch encountered.
func (v Values) matches(other Values) error {
	if len(v) != len(other) {
		return fmt.Errorf("values length %d does not match other values length %d",
			len(v), len(other))
	}
	for i := range v {
		if err := v[i].matches(other[i]); err != nil {
			return err
		}
	}
	return nil
}
// matches compares two [timestamp, value] pairs. Timestamps must agree
// exactly (compared via their printed form); the values are parsed as
// floats and must agree within the package-level tolerance constant
// (declared elsewhere in this package).
func (v Value) matches(other Value) error {
	if len(v) != 2 {
		return fmt.Errorf("value length %d must be 2", len(v))
	}
	if len(other) != 2 {
		return fmt.Errorf("other value length %d must be 2", len(other))
	}
	// Compare timestamps through their string form, avoiding float/number
	// representation differences from JSON decoding.
	tsV := fmt.Sprint(v[0])
	tsOther := fmt.Sprint(other[0])
	if tsV != tsOther {
		return fmt.Errorf("ts %s does not match other ts %s", tsV, tsOther)
	}
	valV, err := strconv.ParseFloat(fmt.Sprint(v[1]), 64)
	if err != nil {
		return err
	}
	valOther, err := strconv.ParseFloat(fmt.Sprint(other[1]), 64)
	if err != nil {
		return err
	}
	if math.Abs(valV-valOther) > tolerance {
		return fmt.Errorf("point %f does not match other point %f", valV, valOther)
	}
	return nil
}
package enum
import (
"errors"
"strconv"
"strings"
)
//RangeEnum is a special type of Enum that also allows indexing via numbers.
type RangeEnum interface {
	Enum
	//NewImmutableRangeVal returns an immutable value of this enum set to the
	//given indexes, erroring if those indexes are not legal.
	NewImmutableRangeVal(indexes ...int) (ImmutableRangeVal, error)
	//NewRangeVal returns a mutable value of this enum at its default value.
	NewRangeVal() RangeVal
	//MustNewImmutableRangeVal is like NewImmutableRangeVal but panics
	//instead of returning an error.
	MustNewImmutableRangeVal(indexes ...int) ImmutableRangeVal
	//MustNewRangeVal returns a mutable value set to the given indexes,
	//panicking if they are not legal.
	MustNewRangeVal(indexes ...int) RangeVal
	//RangeDimensions will return the number of dimensions used to create this
	//enum with AddRange.
	RangeDimensions() []int
	//ValueToRange will return the multi-dimensional value associated with the
	//given value. A simple convenience wrapper around
	//enum.MutableNewVal(val).RangeValues(), except it won't panic if that
	//value isn't legal.
	ValueToRange(val int) []int
	//RangeToValue takes multi-dimensional indexes and returns the int value
	//associated with those indexes. Will return IllegalValue if it wasn't
	//legal.
	RangeToValue(indexes ...int) int
}
//ImmutableRangeVal is a Val that comes from a RangeEnum.
type ImmutableRangeVal interface {
	ImmutableVal
	//RangeValue will return an array of indexes that this value represents.
	RangeValue() []int
}
//RangeVal is a MutableVal that comes from a RangeEnum.
type RangeVal interface {
	Val
	//RangeValue will return an array of indexes that this value represents.
	RangeValue() []int
	//SetRangeValue can be used to set Range values via the indexes directly.
	SetRangeValue(indexes ...int) error
}
//MustAddRange is like AddRange, but instead of an error it will panic if the
//enum cannot be added. This is useful for defining your enums at the package
//level outside of an init().
func (e *Set) MustAddRange(enumName string, dimensionSize ...int) RangeEnum {
	enum, err := e.AddRange(enumName, dimensionSize...)
	if err != nil {
		panic("Couldn't add to enumset: " + err.Error())
	}
	return enum
}
//keyForIndexes encodes multi-dimensional indexes as the canonical string
//key: the decimal form of each index joined by rangedValueSeparator.
func keyForIndexes(values ...int) string {
	parts := make([]string, 0, len(values))
	for _, value := range values {
		parts = append(parts, strconv.Itoa(value))
	}
	return strings.Join(parts, rangedValueSeparator)
}
//indexesForKey decodes a canonical string key back into multi-dimensional
//indexes, returning nil if any component is not a valid integer.
func indexesForKey(key string) []int {
	parts := strings.Split(key, rangedValueSeparator)
	result := make([]int, len(parts))
	for i, part := range parts {
		value, err := strconv.Atoi(part)
		if err != nil {
			return nil
		}
		result[i] = value
	}
	return result
}
/*
AddRange creates a new Enum that automatically enumerates all indexes in
the multi-dimensional space provided. Each dimensionSize must be greater than
0 or AddRange will error. At its core a RangeEnum is just an enum with a known
mapping of multiple dimensions into string values in a known, stable way, and
with additional convenience methods to automatically convert between that
mapping.
	//Returns an enum like:
	//0 -> '0'
	//1 -> '1'
	AddRange("single", 2)
	// Returns an enum like:
	// 0 -> '0_0'
	// 1 -> '0_1'
	// 2 -> '0_2'
	// 3 -> '1_0'
	// 4 -> '1_1'
	// 5 -> '1_2'
	AddRange("double", 2,3)
*/
func (e *Set) AddRange(enumName string, dimensionSize ...int) (RangeEnum, error) {
	if len(dimensionSize) == 0 {
		return nil, errors.New("No dimensions passed")
	}
	//The total number of values is the product of all dimension sizes; each
	//dimension must be strictly positive.
	numValues := 1
	for i, dimension := range dimensionSize {
		if dimension <= 0 {
			return nil, errors.New("Dimension " + strconv.Itoa(i) + " is less than or equal to 0, which is illegal")
		}
		numValues *= dimension
	}
	values := make(map[int]string, numValues)
	//indexes acts like an odometer over the multi-dimensional space, with
	//the last dimension varying fastest.
	indexes := make([]int, len(dimensionSize))
	for i := 0; i < numValues; i++ {
		values[i] = keyForIndexes(indexes...)
		//Now, increment indexes.
		//Start at the back, and try to increment one
		for j := len(indexes) - 1; j >= 0; j-- {
			indexes[j]++
			if indexes[j] < dimensionSize[j] {
				break
			}
			//Uh oh, wrapped around.
			indexes[j] = 0
			//The for loop will go back and increment the next thing
		}
	}
	enum, err := e.addEnumImpl(enumName, values)
	if err != nil {
		return nil, err
	}
	enum.dimensions = dimensionSize
	return enum, nil
}
//RangeDimensions returns the dimension sizes this enum was created with.
func (e *enum) RangeDimensions() []int {
	return e.dimensions
}
//ValueToRange returns the multi-dimensional indexes encoded in val's string
//form, or nil if that string does not decode to indexes.
func (e *enum) ValueToRange(val int) []int {
	return indexesForKey(e.String(val))
}
//RangeToValue returns the int value whose string form encodes the given
//indexes; indexes with no matching value fall through to whatever
//ValueFromString returns for unknown strings.
func (e *enum) RangeToValue(indexes ...int) int {
	return e.ValueFromString(keyForIndexes(indexes...))
}
//ImmutableRangeVal returns this variable as an ImmutableRangeVal, or nil if
//its enum is not a RangeEnum.
func (e *variable) ImmutableRangeVal() ImmutableRangeVal {
	if e.enum.RangeEnum() == nil {
		return nil
	}
	return e
}
//RangeVal returns this variable as a RangeVal, or nil if its enum is not a
//RangeEnum.
func (e *variable) RangeVal() RangeVal {
	if e.enum.RangeEnum() == nil {
		return nil
	}
	return e
}
//NewImmutableRangeVal returns an immutable value set to the given indexes,
//erroring if they are not legal for this enum.
func (e *enum) NewImmutableRangeVal(indexes ...int) (ImmutableRangeVal, error) {
	val := e.NewRangeVal()
	if err := val.SetRangeValue(indexes...); err != nil {
		return nil, err
	}
	return val, nil
}
//NewRangeVal returns a mutable value initialized to the enum's default.
func (e *enum) NewRangeVal() RangeVal {
	return &variable{
		e,
		e.DefaultValue(),
	}
}
//MustNewImmutableRangeVal is like NewImmutableRangeVal but panics on error.
func (e *enum) MustNewImmutableRangeVal(indexes ...int) ImmutableRangeVal {
	val, err := e.NewImmutableRangeVal(indexes...)
	if err != nil {
		panic("Couldn't create Range val: " + err.Error())
	}
	return val
}
//MustNewRangeVal is like NewRangeVal but sets the given indexes, panicking
//if they are not legal.
func (e *enum) MustNewRangeVal(indexes ...int) RangeVal {
	val := e.NewRangeVal()
	if err := val.SetRangeValue(indexes...); err != nil {
		panic("Couldn't create Range val: " + err.Error())
	}
	return val
}
//RangeValue returns the multi-dimensional indexes this value represents,
//decoded from its string form.
func (e *variable) RangeValue() []int {
	return indexesForKey(e.String())
}
//SetRangeValue sets the value via multi-dimensional indexes, erroring if
//their string form is not a legal value of the enum.
func (e *variable) SetRangeValue(indexes ...int) error {
	return e.SetStringValue(keyForIndexes(indexes...))
}
package assert
import (
"fmt"
"reflect"
"github.com/ppapapetrou76/go-testing/types"
)
func shouldBeEqual(actual types.Assertable, expected interface{}) string {
return fmt.Sprintf("assertion failed: expected value of = %+v, to be equal to %+v", actual.Value(), expected)
}
func shouldNotBeEqual(actual types.Assertable, expected interface{}) string {
return fmt.Sprintf("assertion failed: expected value of = %+v, to be other than %+v", actual.Value(), expected)
}
func shouldBeGreater(actual types.Assertable, expected interface{}) string {
return fmt.Sprintf("assertion failed: expected value of = %+v, to be greater than %+v", actual.Value(), expected)
}
func shouldBeGreaterOrEqual(actual types.Assertable, expected interface{}) string {
return fmt.Sprintf("assertion failed: expected value of = %+v, to be greater than or equal to %+v", actual.Value(), expected)
}
func shouldBeLessThan(actual types.Assertable, expected interface{}) string {
return fmt.Sprintf("assertion failed: expected value of = %+v, to be less than %+v", actual.Value(), expected)
}
func shouldBeLessOrEqual(actual types.Assertable, expected interface{}) string {
return fmt.Sprintf("assertion failed: expected value of = %+v, to be less than or equal to %+v", actual.Value(), expected)
}
func shouldBeEmpty(actual types.Assertable) string {
return fmt.Sprintf("assertion failed: expected %+v to be empty, but it's not", actual.Value())
}
func shouldNotBeEmpty(actual types.Assertable) string {
return fmt.Sprintf("assertion failed: expected %+v not to be empty, but it is", actual.Value())
}
func shouldBeNil(actual types.Assertable) string {
return fmt.Sprintf("assertion failed: expected value of = %+v, to be nil but it wasn't", actual.Value())
}
func shouldNotBeNil(actual types.Assertable) string {
return fmt.Sprintf("assertion failed: expected value of = %+v, to be non-nil but it was", actual.Value())
}
func shouldHaveSize(actual types.Sizeable, expected int) string {
return fmt.Sprintf("assertion failed: expected size of = [%d], to be but it has size of [%d] ", actual.Size(), expected)
}
func shouldContain(actual types.Assertable, elements interface{}) string {
return fmt.Sprintf("assertion failed: containable [%v] should contain [%+v], but it doesn't", actual.Value(), elements)
}
func shouldContainOnly(actual types.Assertable, elements interface{}) string {
return fmt.Sprintf("assertion failed: containable [%v] should contain only [%+v], but it doesn't", actual.Value(), elements)
}
func shouldNotContain(actual types.Assertable, elements interface{}) string {
return fmt.Sprintf("assertion failed: containable [%v] should not contain [%+v], but it does", actual.Value(), elements)
}
func shouldBeMap(actual types.Assertable) string {
return fmt.Sprintf("assertion failed: assertable should be a map but it is %T", reflect.ValueOf(actual.Value()).Kind())
}
func shouldHaveKey(actual types.Assertable, elements interface{}) string {
return fmt.Sprintf("assertion failed: map [%v] should have the key [%+v], but it doesn't", actual.Value(), elements)
}
func shouldHaveValue(actual types.Assertable, elements interface{}) string {
return fmt.Sprintf("assertion failed: map [%v] should have the value [%+v], but it doesn't", actual.Value(), elements)
}
func shouldHaveEntry(actual types.Assertable, entry types.MapEntry) string {
return fmt.Sprintf("assertion failed: map [%v] should have the entry [%+v], but it doesn't", actual.Value(), entry)
}
func shouldNotHaveKey(actual types.Assertable, elements interface{}) string {
return fmt.Sprintf("assertion failed: map [%v] should not have the key [%+v], but it does", actual.Value(), elements)
}
func shouldNotHaveValue(actual types.Assertable, elements interface{}) string {
return fmt.Sprintf("assertion failed: map [%v] should not have the value [%+v], but it does", actual.Value(), elements)
}
func shouldNotHaveEntry(actual types.Assertable, entry types.MapEntry) string {
return fmt.Sprintf("assertion failed: map [%v] should not have the entry [%+v], but it does", actual.Value(), entry)
} | assert/error.go | 0.79854 | 0.820937 | error.go | starcoder |
package potato
// DataList object is a list of something data
type DataList []interface{}
// Len implements length function for using sort
func (l DataList) Len() int {
return len(l)
}
// Swap implements swap function for using sort
func (l DataList) Swap(i, j int) {
l[i], l[j] = l[j], l[i]
}
// ByName is a decorator of DataList for sorting by name.
type ByName struct{ DataList }

// Less compares the Name fields of elements i and j (sort.Interface).
// The concrete type of element i selects the branch; element j is then
// asserted to the same type.
// NOTE(review): a list mixing concrete types panics on the assertion of
// element j — confirm callers only sort homogeneous lists.
func (b ByName) Less(i, j int) bool {
	switch v := b.DataList[i].(type) {
	case Background:
		return v.Name < b.DataList[j].(Background).Name
	case Effect:
		return v.Name < b.DataList[j].(Effect).Name
	case Engine:
		return v.Name < b.DataList[j].(Engine).Name
	case Particle:
		return v.Name < b.DataList[j].(Particle).Name
	case Skin:
		return v.Name < b.DataList[j].(Skin).Name
	case Level:
		return v.Name < b.DataList[j].(Level).Name
	default:
		// Unknown element types fall back to index order (stable no-op).
		return i < j
	}
}

// ByCreatedTime is a decorator of DataList for sorting by creation time.
type ByCreatedTime struct{ DataList }

// Less compares the CreatedTime fields of elements i and j
// (sort.Interface). Same homogeneity assumption as ByName.Less.
func (b ByCreatedTime) Less(i, j int) bool {
	switch v := b.DataList[i].(type) {
	case Background:
		return v.CreatedTime < b.DataList[j].(Background).CreatedTime
	case Effect:
		return v.CreatedTime < b.DataList[j].(Effect).CreatedTime
	case Engine:
		return v.CreatedTime < b.DataList[j].(Engine).CreatedTime
	case Particle:
		return v.CreatedTime < b.DataList[j].(Particle).CreatedTime
	case Skin:
		return v.CreatedTime < b.DataList[j].(Skin).CreatedTime
	case Level:
		return v.CreatedTime < b.DataList[j].(Level).CreatedTime
	default:
		return i < j
	}
}

// ByUpdatedTime is a decorator of DataList for sorting by last-update time.
type ByUpdatedTime struct{ DataList }

// Less compares the UpdatedTime fields of elements i and j
// (sort.Interface). Same homogeneity assumption as ByName.Less.
func (b ByUpdatedTime) Less(i, j int) bool {
	switch v := b.DataList[i].(type) {
	case Background:
		return v.UpdatedTime < b.DataList[j].(Background).UpdatedTime
	case Effect:
		return v.UpdatedTime < b.DataList[j].(Effect).UpdatedTime
	case Engine:
		return v.UpdatedTime < b.DataList[j].(Engine).UpdatedTime
	case Particle:
		return v.UpdatedTime < b.DataList[j].(Particle).UpdatedTime
	case Skin:
		return v.UpdatedTime < b.DataList[j].(Skin).UpdatedTime
	case Level:
		return v.UpdatedTime < b.DataList[j].(Level).UpdatedTime
	default:
		return i < j
	}
}
// ByDifficulty is a decorator of DataList for sorting levels by rating.
type ByDifficulty struct{ DataList }

// Less compares the Rating fields of two Level elements (sort.Interface);
// any other element type falls back to index order.
func (b ByDifficulty) Less(i, j int) bool {
	if v, ok := b.DataList[i].(Level); ok {
		return v.Rating < b.DataList[j].(Level).Rating
	}
	return i < j
}
// ByNotes is compare method for sorting by notes
type ByNotes struct{ DataList }
// Less is compare method for sorting by notes
func (b ByNotes) Less(i, j int) bool {
switch v := b.DataList[i].(type) {
case Level:
return v.Notes < b.DataList[j].(Level).Notes
default:
return i < j
}
} | potato/model_cache_datalist.go | 0.649579 | 0.523786 | model_cache_datalist.go | starcoder |
package linear
import (
"math"
)
/**
* Calculates the rank-revealing QR-decomposition of a matrix, with column pivoting.
* The rank-revealing QR-decomposition of a matrix A consists of three matrices Q,
* R and P such that AP=QR. Q is orthogonal (Q<sup>T</sup>Q = I), and R is upper triangular.
* If A is m×n, Q is m×m and R is m×n and P is n×n.
* QR decomposition with column pivoting produces a rank-revealing QR
* decomposition and the Rank(float64) method may be used to return the rank of the
* input matrix A.
* This class compute the decomposition using Householder reflectors.
* For efficiency purposes, the decomposition in packed form is transposed.
* This allows inner loop to iterate inside rows.
*/
type RRQRDecomposition struct {
QRDecomposition
p []int
cachedP RealMatrix
}
func NewRRQRDecomposition(matrix RealMatrix) (*RRQRDecomposition, error) {
return NewRRQRDecompositionWithThreshold(matrix, 0.)
}
func NewRRQRDecompositionWithThreshold(matrix RealMatrix, threshold float64) (*RRQRDecomposition, error) {
ans := new(RRQRDecomposition)
ans.threshold = threshold
m := matrix.RowDimension()
n := matrix.ColumnDimension()
ans.qrt = matrix.Transpose().Data()
ans.rDiag = make([]float64, int(math.Min(float64(m), float64(n))))
ans.decompose(ans.qrt)
return ans, nil
}
/** Decompose matrix.
 * Initializes the column permutation to the identity, then applies one
 * pivoted Householder reflection per minor.
 */
func (rrqrd *RRQRDecomposition) decompose(qrt [][]float64) {
	rrqrd.p = make([]int, len(qrt))
	for i := 0; i < len(rrqrd.p); i++ {
		rrqrd.p[i] = i
	}
	for minor := 0; minor < int(math.Min(float64(len(qrt)), float64(len(qrt[0])))); minor++ {
		rrqrd.performHouseholderReflection(minor, qrt)
	}
}

/**
 * Perform Householder reflection for a minor A(minor, minor) of A.
 * Because qrt is the transpose of A, "columns" of A are rows of qrt here.
 */
func (rrqrd *RRQRDecomposition) performHouseholderReflection(minor int, qrt [][]float64) {
	var l2NormSquaredMax float64
	// Find the unreduced column with the greatest L2-Norm
	l2NormSquaredMaxIndex := minor
	for i := minor; i < len(qrt); i++ {
		var l2NormSquared float64
		for j := minor; j < len(qrt[i]); j++ {
			l2NormSquared += qrt[i][j] * qrt[i][j]
		}
		if l2NormSquared > l2NormSquaredMax {
			l2NormSquaredMax = l2NormSquared
			l2NormSquaredMaxIndex = i
		}
	}
	// swap the current column with that with the greated L2-Norm and record in p
	if l2NormSquaredMaxIndex != minor {
		qrt[minor], qrt[l2NormSquaredMaxIndex] = qrt[l2NormSquaredMaxIndex], qrt[minor]
		rrqrd.p[minor], rrqrd.p[l2NormSquaredMaxIndex] = rrqrd.p[l2NormSquaredMaxIndex], rrqrd.p[minor]
	}
	// Delegate the actual reflection to the embedded plain QR decomposition.
	rrqrd.QRDecomposition.performHouseholderReflection(minor, qrt)
}
/**
 * Returns the pivot matrix, P, used in the QR Decomposition of matrix A such that AP = QR.
 *
 * If no pivoting is used in this decomposition then P is equal to the identity matrix.
 */
func (rrqrd *RRQRDecomposition) P() RealMatrix {
	// Built lazily on first call and cached thereafter.
	// NOTE(review): the lazy cache is not synchronized — confirm the
	// decomposition is only used from a single goroutine.
	if rrqrd.cachedP == nil {
		n := len(rrqrd.p)
		var err error
		rrqrd.cachedP, err = NewRealMatrixWithDimension(n, n)
		if err != nil {
			panic(err)
		}
		// Row p[i], column i gets a 1: column i of P maps back to the
		// original column index p[i].
		for i := 0; i < n; i++ {
			rrqrd.cachedP.SetEntry(rrqrd.p[i], i, 1)
		}
	}
	return rrqrd.cachedP
}
/**
 * Return the effective numerical matrix rank.
 * The effective numerical rank is the number of non-negligible
 * singular values.
 * This implementation looks at Frobenius norms of the sequence of
 * bottom right submatrices. When a large fall in norm is seen,
 * the rank is returned. The drop is computed as:
 *
 *   (thisNorm/lastNorm) * rNorm < dropThreshold
 *
 * where thisNorm is the Frobenius norm of the current submatrix,
 * lastNorm is the Frobenius norm of the previous submatrix,
 * rNorm is is the Frobenius norm of the complete matrix
 */
func (rrqrd *RRQRDecomposition) Rank(dropThreshold float64) int {
	r := rrqrd.R()
	rows := r.RowDimension()
	columns := r.ColumnDimension()
	rank := 1
	lastNorm := MatFrobeniusNorm(r)
	rNorm := lastNorm
	for rank < int(math.Min(float64(rows), float64(columns))) {
		// Norm of the trailing submatrix R[rank:, rank:].
		thisNorm := MatFrobeniusNorm(r.SubMatrix(rank, rows-1, rank, columns-1))
		// A zero or sharply dropping norm marks the end of the
		// numerically significant part of R.
		if thisNorm == 0 || (thisNorm/lastNorm)*rNorm < dropThreshold {
			break
		}
		lastNorm = thisNorm
		rank++
	}
	return rank
}
/**
 * Get a solver for finding the A × X = B solution in least square sense.
 *
 * Least Square sense means a solver can be computed for an overdetermined system,
 * (i.e. a system with more equations than unknowns, which corresponds to a tall A
 * matrix with more rows than columns). In any case, if the matrix is singular
 * within the tolerance set at RRQRDecomposition(RealMatrix, float64) construction, an error will be triggered when
 * the DecompositionSolver.Solve() method will be called.
 */
func (rrqrd *RRQRDecomposition) Solver() DecompositionSolver {
	return newRRQRDecompositionSolver(rrqrd)
}

// rrqrDecompositionSolver wraps the plain QR solver and applies the column
// permutation P to every solution it produces.
type rrqrDecompositionSolver struct {
	rrqrd *RRQRDecomposition // pivoted decomposition providing P
	qrds  *qrDecompositionSolver // underlying unpivoted QR solver
}

// newRRQRDecompositionSolver builds a solver over the pivoted decomposition.
func newRRQRDecompositionSolver(rrqrd *RRQRDecomposition) *rrqrDecompositionSolver {
	return &rrqrDecompositionSolver{rrqrd: rrqrd, qrds: rrqrd.QRDecomposition.Solver().(*qrDecompositionSolver)}
}

// IsNonSingular delegates to the underlying QR solver.
func (rrqrds *rrqrDecompositionSolver) IsNonSingular() bool {
	return rrqrds.qrds.IsNonSingular()
}

// SolveVector solves A × x = b and un-permutes the result with P.
func (rrqrds *rrqrDecompositionSolver) SolveVector(b RealVector) RealVector {
	return rrqrds.rrqrd.P().OperateVector(rrqrds.qrds.SolveVector(b))
}

// SolveMatrix solves A × X = B and un-permutes the result with P.
func (rrqrds *rrqrDecompositionSolver) SolveMatrix(b RealMatrix) RealMatrix {
	return rrqrds.rrqrd.P().Multiply(rrqrds.qrds.SolveMatrix(b))
}
func (rrqrds *rrqrDecompositionSolver) Inverse() RealMatrix {
m, err := NewRealIdentityMatrix(rrqrds.rrqrd.P().RowDimension())
if err != nil {
panic(err)
}
return rrqrds.SolveMatrix(m)
} | rrqr_decomposition.go | 0.866782 | 0.647534 | rrqr_decomposition.go | starcoder |
package constant
// CreatePolicyDocument is the GraphQL mutation that creates a policy and
// returns its full detail.
const CreatePolicyDocument = `
	mutation createPolicy($namespace: String, $code: String!, $description: String, $statements: [PolicyStatementInput!]!) {
	  createPolicy(namespace: $namespace, code: $code, description: $description, statements: $statements) {
	    namespace
	    code
	    isDefault
	    description
	    statements {
	      resource
	      actions
	      effect
	      condition {
	        param
	        operator
	        value
	      }
	    }
	    createdAt
	    updatedAt
	    assignmentsCount
	  }
	}
`

// ListPolicyDocument is the GraphQL query that pages through policies.
const ListPolicyDocument = `
	query policies($page: Int, $limit: Int, $namespace: String) {
	  policies(page: $page, limit: $limit, namespace: $namespace) {
	    totalCount
	    list {
	      namespace
	      code
	      description
	      createdAt
	      updatedAt
	      statements {
	        resource
	        actions
	        effect
	        condition {
	          param
	          operator
	          value
	        }
	      }
	    }
	  }
	}
`

// DetailPolicyDocument is the GraphQL query that fetches one policy by code.
const DetailPolicyDocument = `
	query policy($namespace: String, $code: String!) {
	  policy(code: $code, namespace: $namespace) {
	    namespace
	    code
	    isDefault
	    description
	    statements {
	      resource
	      actions
	      effect
	      condition {
	        param
	        operator
	        value
	      }
	    }
	    createdAt
	    updatedAt
	  }
	}
`

// UpdatePolicyDocument is the GraphQL mutation that updates a policy,
// optionally renaming it via $newCode.
const UpdatePolicyDocument = `
	mutation updatePolicy($namespace: String, $code: String!, $description: String, $statements: [PolicyStatementInput!], $newCode: String) {
	  updatePolicy(namespace: $namespace, code: $code, description: $description, statements: $statements, newCode: $newCode) {
	    namespace
	    code
	    description
	    statements {
	      resource
	      actions
	      effect
	      condition {
	        param
	        operator
	        value
	      }
	    }
	    createdAt
	    updatedAt
	  }
	}
`

// DeletePolicyDocument is the GraphQL mutation that deletes one policy.
const DeletePolicyDocument = `
	mutation deletePolicy($code: String!, $namespace: String) {
	  deletePolicy(code: $code, namespace: $namespace) {
	    message
	    code
	  }
	}
`

// BatchDeletePolicyDocument is the GraphQL mutation that deletes several
// policies at once.
const BatchDeletePolicyDocument = `
	mutation deletePolicies($codeList: [String!]!, $namespace: String) {
	  deletePolicies(codeList: $codeList, namespace: $namespace) {
	    message
	    code
	  }
	}
`

// PolicyAssignmentsDocument is the GraphQL query that pages through policy
// assignments, optionally filtered by policy code and target.
const PolicyAssignmentsDocument = `
	query policyAssignments($namespace: String, $code: String, $targetType: PolicyAssignmentTargetType, $targetIdentifier: String, $page: Int, $limit: Int) {
	  policyAssignments(namespace: $namespace, code: $code, targetType: $targetType, targetIdentifier: $targetIdentifier, page: $page, limit: $limit) {
	    totalCount
	    list {
	      code
	      targetType
	      targetIdentifier
	    }
	  }
	}
`

// AddAssignmentsDocument is the GraphQL mutation that assigns policies to
// a set of targets.
const AddAssignmentsDocument = `
	mutation addPolicyAssignments($policies: [String!]!, $targetType: PolicyAssignmentTargetType!, $targetIdentifiers: [String!], $inheritByChildren: Boolean, $namespace: String) {
	  addPolicyAssignments(policies: $policies, targetType: $targetType, targetIdentifiers: $targetIdentifiers, inheritByChildren: $inheritByChildren, namespace: $namespace) {
	    message
	    code
	  }
	}
`

// RemoveAssignmentsDocument is the GraphQL mutation that removes policy
// assignments from a set of targets.
const RemoveAssignmentsDocument = `
	mutation removePolicyAssignments($policies: [String!]!, $targetType: PolicyAssignmentTargetType!, $targetIdentifiers: [String!], $namespace: String) {
	  removePolicyAssignments(policies: $policies, targetType: $targetType, targetIdentifiers: $targetIdentifiers, namespace: $namespace) {
	    message
	    code
	  }
	}
`

// EnablePolicyAssignmentDocument is the GraphQL mutation that re-enables a
// single policy assignment.
const EnablePolicyAssignmentDocument = `
	mutation enablePolicyAssignment($policy: String!, $targetType: PolicyAssignmentTargetType!, $targetIdentifier: String!, $namespace: String) {
	  enablePolicyAssignment(policy: $policy, targetType: $targetType, targetIdentifier: $targetIdentifier, namespace: $namespace) {
	    message
	    code
	  }
	}
`
// DisablePolicyAssignmentDocument is the GraphQL mutation that disables a
// single policy assignment.
//
// Fix: removed stray concatenation residue after the closing backtick.
// NOTE(review): the operation and field are spelled "disbalePolicyAssignment"
// (sic); the field name must match the server schema, so it is left
// unchanged here — confirm against the schema before correcting.
const DisablePolicyAssignmentDocument = `
	mutation disbalePolicyAssignment($policy: String!, $targetType: PolicyAssignmentTargetType!, $targetIdentifier: String!, $namespace: String) {
	  disbalePolicyAssignment(policy: $policy, targetType: $targetType, targetIdentifier: $targetIdentifier, namespace: $namespace) {
	    message
	    code
	  }
	}
`
package myplot
import "image/color"
// Colormapper maps a scalar value to a color.
type Colormapper interface {
	Colormap(float64) color.Color
}

// Uniform is a Colormapper that returns one fixed color regardless of the
// input value.
type Uniform struct {
	Value color.Color
}

// Colormap returns the fixed color stored in the struct; v is ignored.
func (s *Uniform) Colormap(v float64) color.Color {
	return s.Value
}
// ScaledColormapper is a Colormapper whose input range can be set; values
// are normalized to [0, 1] over [min, max] before coloring.
type ScaledColormapper interface {
	Colormapper
	SetScale(min float64, max float64)
}

// Jet is an implementation of the jet colormap.
type Jet struct {
	// reference: http://www.metastine.com/?p=7
	min float64 // value mapped to the low end of the colormap
	max float64 // value mapped to the high end of the colormap
}

// SetScale sets the input range [min, max] used for normalization.
func (c *Jet) SetScale(min, max float64) {
	c.min = min
	c.max = max
}

// Scale returns the current input range (min, max).
func (c *Jet) Scale() (float64, float64) {
	return c.min, c.max
}

// SetMax sets only the upper end of the input range.
func (c *Jet) SetMax(val float64) {
	c.max = val
}

// SetMin sets only the lower end of the input range.
func (c *Jet) SetMin(val float64) {
	c.min = val
}
// clamp restricts v to the closed interval [0, 1].
func clamp(v float64) float64 {
	switch {
	case v < 0:
		return 0
	case v > 1:
		return 1
	default:
		return v
	}
}
// Colormap maps z to a color on the jet colormap. z is first normalized
// to [0, 1] over [c.min, c.max]; each channel is the clamped minimum of a
// rising and a falling linear ramp.
//
// Fix: the previous implementation computed the correct channels but held
// the green value in a variable named "blue" and vice versa, then relied
// on a positional color.RGBA literal to re-swap them. The emitted colors
// are unchanged; the channels are now named for what they are and the
// literal uses named fields.
func (c *Jet) Colormap(z float64) color.Color {
	v := BoundedNormalize(z, c.min, c.max)
	fourvalue := 4 * v

	// minRamp returns min(up, down) clamped to [0, 1].
	minRamp := func(up, down float64) float64 {
		if up < down {
			return clamp(up)
		}
		return clamp(down)
	}
	red := minRamp(fourvalue-1.5, -fourvalue+4.5)
	green := minRamp(fourvalue-0.5, -fourvalue+3.5)
	blue := minRamp(fourvalue+0.5, -fourvalue+2.5)

	return color.RGBA{
		R: uint8(255 * red),
		G: uint8(255 * green),
		B: uint8(255 * blue),
		A: 255,
	}
}
// Grayscale linearly maps values between light gray and black.
type Grayscale struct {
	min      float64 // value mapped to the low end of the ramp
	max      float64 // value mapped to the high end of the ramp
	Inverted bool    // Flip the direction of the maximum
}

// SetScale sets the input range [min, max] used for normalization.
func (c *Grayscale) SetScale(min, max float64) {
	c.min = min
	c.max = max
}

// Scale returns the current input range (min, max).
func (c *Grayscale) Scale() (float64, float64) {
	return c.min, c.max
}

// SetMax sets only the upper end of the input range.
func (c *Grayscale) SetMax(val float64) {
	c.max = val
}

// SetMin sets only the lower end of the input range.
func (c *Grayscale) SetMin(val float64) {
	c.min = val
}
// Colormap maps z to a gray level. z is normalized to [0, 1] over
// [c.min, c.max]; by default high values map to dark, unless Inverted is
// set, in which case high values map to light.
func (c *Grayscale) Colormap(z float64) color.Color {
	v := BoundedNormalize(z, c.min, c.max)

	frac := 1 - v
	if c.Inverted {
		frac = v
	}
	// Scale to 90% of full brightness so the lightest end stays gray
	// rather than white.
	g := uint8(frac * (255 * 0.9))
	return color.RGBA{g, g, g, 255}
}
func BlueRed() *Diverging {
d := &Diverging{}
blue := color.RGBA{R: 59, B: 192, G: 76, A: 255}
red := color.RGBA{R: 180, B: 38, G: 4, A: 255}
d.SetColors(blue, red)
//d.Low = blue
//d.high = red
return d
} | colormap.go | 0.879082 | 0.559471 | colormap.go | starcoder |
package bitarray
// ToggleBitAt flips a single bit at the position specified by off in the
// buffer. It panics when off is negative or beyond the buffer length.
func (buf *Buffer) ToggleBitAt(off int) {
	switch {
	case off < 0:
		panicf("ToggleBitAt: negative off %d.", off)
	case buf.nBits <= off:
		panicf("ToggleBitAt: out of range: off=%d >= len=%d.", off, buf.nBits)
	}
	// Translate the logical offset to an absolute bit position, then XOR
	// the single bit: off>>3 selects the byte, off&7 the bit within it
	// (MSB-first, hence the 0x80 mask shifted right).
	off += buf.off
	buf.b[off>>3] ^= byte(0x80) >> (off & 7)
}

// ToggleBitsAt inverts the nBits bits starting at off. It panics on a
// negative offset or count, or when the range exceeds the buffer length.
func (buf *Buffer) ToggleBitsAt(off, nBits int) {
	switch {
	case off < 0:
		panicf("ToggleBitsAt: negative off %d.", off)
	case nBits < 0:
		panicf("ToggleBitsAt: negative nBits %d.", nBits)
	case buf.nBits < off+nBits:
		panicf("ToggleBitsAt: out of range: off=%d + nBits=%d > len=%d.", off, nBits, buf.nBits)
	case nBits == 0:
		// no-op
	default:
		toggleBits(buf.b, buf.off+off, nBits)
	}
}
// AndAt applies a bitwise AND operation with x at the offset off. AND is
// applied only to the range from off to off+x.Len(), and other bits are
// preserved. Panics on a negative offset or when x extends past the end.
func (buf *Buffer) AndAt(off int, x BitArrayer) {
	var bax *BitArray
	if x != nil {
		bax = x.BitArray()
	}
	switch {
	case off < 0:
		panicf("AndAt: negative off %d.", off)
	case buf.nBits < off+bax.Len():
		panicf("AndAt: out of range: off=%d + x.len=%d > len=%d.", off, bax.Len(), buf.nBits)
	case bax.IsZero():
		// no-op
	case bax.b == nil:
		// x has no backing bytes: it is all-zero, so ANDing clears the range.
		clearBits(buf.b, buf.off+off, bax.nBits)
	default:
		andBits(buf.b, bax.b, buf.off+off, 0, bax.nBits)
	}
}

// OrAt applies a bitwise OR operation with x at the offset off. OR is applied
// only to the range from off to off+x.Len(), and other bits are preserved.
// Panics on a negative offset or when x extends past the end.
func (buf *Buffer) OrAt(off int, x BitArrayer) {
	var bax *BitArray
	if x != nil {
		bax = x.BitArray()
	}
	switch {
	case off < 0:
		panicf("OrAt: negative off %d.", off)
	case buf.nBits < off+bax.Len():
		panicf("OrAt: out of range: off=%d + x.len=%d > len=%d.", off, bax.Len(), buf.nBits)
	case bax.IsZero(), bax.b == nil:
		// ORing with all-zero bits changes nothing.
		// no-op
	default:
		orBits(buf.b, bax.b, buf.off+off, 0, bax.nBits)
	}
}

// XorAt applies a bitwise XOR operation with x at the offset off. XOR is
// applied only to the range from off to off+x.Len(), and other bits are
// preserved. Panics on a negative offset or when x extends past the end.
func (buf *Buffer) XorAt(off int, x BitArrayer) {
	var bax *BitArray
	if x != nil {
		bax = x.BitArray()
	}
	switch {
	case off < 0:
		panicf("XorAt: negative off %d.", off)
	case buf.nBits < off+bax.Len():
		panicf("XorAt: out of range: off=%d + x.len=%d > len=%d.", off, bax.Len(), buf.nBits)
	case bax.IsZero(), bax.b == nil:
		// XORing with all-zero bits changes nothing.
		// no-op
	default:
		xorBits(buf.b, bax.b, buf.off+off, 0, bax.nBits)
	}
}
// LeadingZeros returns the number of leading zero bits in the Buffer.
func (buf *Buffer) LeadingZeros() int { return buf.BitArray().LeadingZeros() }
// TrailingZeros returns the number of trailing zero bits in the Buffer.
func (buf *Buffer) TrailingZeros() int { return buf.BitArray().TrailingZeros() }
// OnesCount returns the number of one bits, population count, in the Buffer.
func (buf *Buffer) OnesCount() int { return buf.BitArray().OnesCount() } | buffer_bitwise.go | 0.608361 | 0.652823 | buffer_bitwise.go | starcoder |
package types
import (
"go/ast"
"github.com/redneckbeard/thanos/bst"
)
// Regexp is the Type describing regular-expression values; all method
// lookup is delegated to the shared proto table.
type Regexp struct {
	*proto
}

// RegexpType is the singleton Regexp type registered in ClassRegistry.
var RegexpType = Regexp{newProto("Regexp", "Object", ClassRegistry)}

// RegexpClass is the class object wrapping RegexpType.
var RegexpClass = NewClass("Regexp", "Object", RegexpType, ClassRegistry)

// Equals reports type identity; RegexpType is a singleton, so == suffices.
func (t Regexp) Equals(t2 Type) bool { return t == t2 }

// String names the type for diagnostics.
func (t Regexp) String() string { return "RegexpType" }

// GoType is the Go type regexp values compile to.
func (t Regexp) GoType() string { return "*regexp.Regexp" }

// IsComposite reports that Regexp carries no element types.
func (t Regexp) IsComposite() bool { return false }

// MethodReturnType resolves method m on the proto and asks it for its
// return type given the block and argument types.
func (t Regexp) MethodReturnType(m string, b Type, args []Type) (Type, error) {
	return t.proto.MustResolve(m, false).ReturnType(t, b, args)
}

// BlockArgTypes resolves method m and reports the types its block receives.
func (t Regexp) BlockArgTypes(m string, args []Type) []Type {
	return t.proto.MustResolve(m, false).blockArgs(t, args)
}

// TransformAST resolves method m and delegates the Go AST rewrite to it.
func (t Regexp) TransformAST(m string, rcvr ast.Expr, args []TypeExpr, blk *Block, it bst.IdentTracker) Transform {
	return t.proto.MustResolve(m, false).TransformAST(TypeExpr{t, rcvr}, args, blk, it)
}

// Resolve looks up method m, reporting whether it exists.
func (t Regexp) Resolve(m string) (MethodSpec, bool) {
	return t.proto.Resolve(m, false)
}

// MustResolve looks up method m, panicking (in proto) if it is missing.
func (t Regexp) MustResolve(m string) MethodSpec {
	return t.proto.MustResolve(m, false)
}

// HasMethod reports whether method m is defined on the proto.
func (t Regexp) HasMethod(m string) bool {
	return t.proto.HasMethod(m, false)
}

// Alias registers newMethod as an alias of existingMethod on the proto.
func (t Regexp) Alias(existingMethod, newMethod string) {
	t.proto.MakeAlias(existingMethod, newMethod, false)
}
func init() {
RegexpType.Def("=~", MethodSpec{
ReturnType: func(receiverType Type, blockReturnType Type, args []Type) (Type, error) {
// In reality the match operator returns an int, or nil if there's no match. However, in practical
// use it is relied on for evaluation to a boolean
return BoolType, nil
},
TransformAST: func(rcvr TypeExpr, args []TypeExpr, blk *Block, it bst.IdentTracker) Transform {
return Transform{
Expr: bst.Call(rcvr.Expr, "MatchString", args[0].Expr),
}
},
})
RegexpType.Def("===", MethodSpec{
ReturnType: func(receiverType Type, blockReturnType Type, args []Type) (Type, error) {
// In reality the match operator returns an int, or nil if there's no match. However, in practical
// use it is relied on for evaluation to a boolean
return BoolType, nil
},
TransformAST: func(rcvr TypeExpr, args []TypeExpr, blk *Block, it bst.IdentTracker) Transform {
return Transform{
Expr: bst.Call(rcvr.Expr, "MatchString", args[0].Expr),
}
},
})
} | types/regexp.go | 0.550366 | 0.461441 | regexp.go | starcoder |
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
/* Solution rationale
*
* Consider a subtree t = [3,2,1] of a bigger tree T.
* The two leaves and its parent look, from a
* programmatic perspective, like this:
*
*
* 3
* / \
* / \
* 2 1
* / \ / \
* / \ / \
* nil nil nil nil
*
* Levels: 2
* Nodes: 3
*
* A naive approach is to count the nodes and then derive its depth from that. For example,
* we see that a tree of size 2 to 3 has three levels, 4 to 7 has three levels, and so on.
* However, this assumes that the tree is filled towards completion. That is, a new leaf is
 * added to complete the tree. That assumption does not hold in general, as the
 * counterexample of the degenerate subtree t = [1,2,null,3,null,4,null,5] shows.
*
*
* 1
* / \
* 2 nil
* / \
* 3 nil
* / \
* 4 nil
* / \
* 5 nil
* / \
* nil nil
*
* Levels: 5
* Nodes: 5
*
*
*/
// Iterative approach
//
// maxDepth returns the number of levels in the binary tree rooted at
// root, using breadth-first traversal: each pass over the queue consumes
// exactly one level, so the number of passes equals the tree's depth.
func maxDepth(root *TreeNode) int {
	if root == nil {
		return 0
	}

	depth := 0
	queue := []*TreeNode{root}
	for len(queue) > 0 {
		depth++
		// Consume every node currently queued (one full level) and
		// enqueue their children for the next pass.
		levelSize := len(queue)
		for n := 0; n < levelSize; n++ {
			current := queue[0]
			queue = queue[1:]
			if current.Left != nil {
				queue = append(queue, current.Left)
			}
			if current.Right != nil {
				queue = append(queue, current.Right)
			}
		}
	}
	return depth
}
// @lc code=end
package main
import (
"errors"
"fmt"
)
// GraphType distinguishes directed from undirected graphs.
type GraphType string

const (
	DIRECTED = "DIRECTED"
	UNDIRECTED = "UNDIRECTED"
)

// Node is one cell of an adjacency linked list. A zero-valued Node acts
// as the list terminator (sentinel) rather than a nil pointer.
type Node struct {
	Next *Node // following cell; nil only on the sentinel
	Weight int // edge weight (used by the *WithWeight variants)
	Key int // destination vertex index
}

// AdjacencyList is a graph stored as one linked list of neighbors per
// vertex.
type AdjacencyList struct {
	Vertices int // number of vertices (fixed at construction)
	Edges int // directed edge count; undirected edges count twice
	GraphType GraphType // DIRECTED or UNDIRECTED
	AdjList []*Node // per-vertex neighbor list heads
}
// AddNode recursively appends a node holding value at the end of the list
// and returns the rebuilt head.
//
// The list terminator is a zero-valued sentinel (&Node{}), not nil: the
// base case fires when node.Next == nil, i.e. on the sentinel, which is
// replaced by {Key: value, Next: &Node{}}.
// NOTE(review): the receiver is a value, so each recursion returns a copy
// of the current node with Next re-linked — callers must reassign the
// returned head (AddEdge does).
func (node Node) AddNode(value int) *Node {
	n := node.Next
	if n == nil {
		newNode := &Node{Next: &Node{}, Key: value}
		return newNode
	}
	nd := n.AddNode(value)
	node.Next = nd
	return &node
}

// AddNodeWithWeight is AddNode with an edge weight stored on the new node.
func (node Node) AddNodeWithWeight(value, weight int) *Node {
	n := node.Next
	if n == nil {
		newNode := &Node{Next: &Node{}, Key: value, Weight: weight}
		return newNode
	}
	nd := n.AddNodeWithWeight(value, weight)
	node.Next = nd
	return &node
}

// FindNextNode walks the list starting at node and returns the first node
// whose Key equals key, or an error once the zero-valued sentinel is
// reached.
// NOTE(review): if a chain were ever terminated by a nil pointer instead
// of the zero sentinel, the recursive call on nd would dereference nil —
// confirm lists are only built via AddNode/AddNodeWithWeight.
func (node Node) FindNextNode(key int) (*Node, error) {
	n := node
	if n == (Node{}) {
		return &Node{}, errors.New("Node not found")
	}
	if n.Key == key {
		return &n, nil
	}
	nd := n.Next
	return nd.FindNextNode(key)
}
// Init allocates the adjacency list, one zero-valued sentinel head per
// vertex, and resets the edge counter.
func (G *AdjacencyList) Init() {
	G.AdjList = make([]*Node, G.Vertices)
	G.Edges = 0
	for i := 0; i < G.Vertices; i++ {
		G.AdjList[i] = &Node{}
	}
}

// AddEdge records an edge from vertexOne to vertexTwo; for undirected
// graphs the reverse edge is recorded as well and each direction counts
// separately in Edges. Returns an error for out-of-range vertices.
func (G *AdjacencyList) AddEdge(vertexOne, vertexTwo int) error {
	if vertexOne >= G.Vertices || vertexTwo >= G.Vertices || vertexOne < 0 || vertexTwo < 0 {
		return errors.New("Index out of bound")
	}
	// AddNode returns a rebuilt head, so the slot must be reassigned.
	node := G.AdjList[vertexOne].AddNode(vertexTwo)
	G.AdjList[vertexOne] = node
	G.Edges++
	if G.GraphType == UNDIRECTED {
		node := G.AdjList[vertexTwo].AddNode(vertexOne)
		G.AdjList[vertexTwo] = node
		G.Edges++
	}
	return nil
}

// AddEdgeWithWeight is AddEdge with an edge weight attached to the new
// list node(s).
func (G *AdjacencyList) AddEdgeWithWeight(vertexOne, vertexTwo, weight int) error {
	if vertexOne >= G.Vertices || vertexTwo >= G.Vertices || vertexOne < 0 || vertexTwo < 0 {
		return errors.New("Index out of bound")
	}
	node := G.AdjList[vertexOne].AddNodeWithWeight(vertexTwo, weight)
	G.AdjList[vertexOne] = node
	G.Edges++
	if G.GraphType == UNDIRECTED {
		node := G.AdjList[vertexTwo].AddNodeWithWeight(vertexOne, weight)
		G.AdjList[vertexTwo] = node
		G.Edges++
	}
	return nil
}
// RemoveEdge deletes the edge vertexOne -> vertexTwo and decrements the
// edge counter.
//
// NOTE(review): several issues to confirm with the author:
//   - nodeAdj == (&Node{}) compares pointer identity against a fresh
//     allocation and is therefore always false (likewise the
//     nextNode != (&Node{}) loop guard is always true);
//   - newNodes is a single struct overwritten each iteration, so removing
//     a non-head entry appears to drop the nodes that precede it;
//   - Edges is decremented even when vertexTwo was never found.
func (G *AdjacencyList) RemoveEdge(vertexOne, vertexTwo int) error {
	if vertexOne >= G.Vertices || vertexTwo >= G.Vertices || vertexOne < 0 || vertexTwo < 0 {
		return errors.New("Index out of bounds")
	}
	nodeAdj := G.AdjList[vertexOne]
	if nodeAdj == (&Node{}) {
		return errors.New("Node not found")
	}
	nextNode := nodeAdj
	newNodes := Node{}
	for nextNode != (&Node{}) && nextNode != nil {
		if nextNode.Key != vertexTwo {
			newNodes.Next = nextNode
			newNodes.Key = nextNode.Key
			newNodes.Weight = nextNode.Weight
			nextNode = nextNode.Next
		} else {
			newNodes.Next = nextNode.Next
			newNodes.Key = nextNode.Next.Key
			newNodes.Weight = nextNode.Next.Weight
			G.AdjList[vertexOne] = &newNodes
			G.Edges--
			return nil
		}
	}
	G.Edges--
	return nil
}

// HasEdge reports whether an edge vertexOne -> vertexTwo exists.
// NOTE(review): the nodeAdj == (&Node{}) guard is always false (pointer
// comparison against a fresh allocation); the FindNextNode error is
// discarded and the nil/key check below stands in for it.
func (G *AdjacencyList) HasEdge(vertexOne, vertexTwo int) bool {
	if vertexOne >= G.Vertices || vertexTwo >= G.Vertices || vertexOne < 0 || vertexTwo < 0 {
		return false
	}
	nodeAdj := G.AdjList[vertexOne]
	if nodeAdj == (&Node{}) {
		return false
	}
	node, _ := nodeAdj.FindNextNode(vertexTwo)
	if node != nil && node.Key == vertexTwo {
		return true
	}
	return false
}
// GetGraphType reports whether the graph is DIRECTED or UNDIRECTED.
func (G *AdjacencyList) GetGraphType() GraphType {
	return G.GraphType
}

// GetAdjacencyNodesForVertex returns the set of vertex keys stored in the
// given vertex's neighbor list.
// NOTE(review): the nextNode != (&Node{}) guard is always true (pointer
// comparison), so the loop stops only at nil and the zero sentinel's
// Key (0) is included in the set — verify whether vertex 0 can appear
// spuriously.
func (G *AdjacencyList) GetAdjacencyNodesForVertex(vertex int) map[int]bool {
	if vertex >= G.Vertices || vertex < 0 {
		return map[int]bool{}
	}
	nodeAdj := G.AdjList[vertex]
	nextNode := nodeAdj
	nodes := map[int]bool{}
	for nextNode != (&Node{}) && nextNode != nil {
		nodes[nextNode.Key] = true
		nextNode = nextNode.Next
	}
	return nodes
}

// GetWeightForEdge returns the weight stored for the edge
// vertexOne -> vertexTwo, or an error when the edge cannot be found.
// NOTE(review): on success this returns nodeAdj.Weight — the weight of
// the list HEAD, not of the node located by FindNextNode; this looks like
// a bug (expected node.Weight) — confirm with the author.
func (G *AdjacencyList) GetWeightForEdge(vertexOne, vertexTwo int) (int, error) {
	if vertexOne >= G.Vertices || vertexTwo >= G.Vertices || vertexOne < 0 || vertexTwo < 0 {
		return 0, errors.New("Error getting weight for the vertex")
	}
	nodeAdj := G.AdjList[vertexOne]
	if nodeAdj == (&Node{}) {
		return 0, errors.New("Error getting weight for vertex")
	}
	node, _ := nodeAdj.FindNextNode(vertexTwo)
	if node != nil && node.Key == vertexTwo {
		return nodeAdj.Weight, nil
	}
	return 0, errors.New("Error getting weight for vertex")
}

// GetNumberOfVertices returns the vertex count fixed at construction.
func (G *AdjacencyList) GetNumberOfVertices() int {
	return G.Vertices
}

// GetNumberOfEdges returns the directed edge count (undirected edges
// count twice).
func (G *AdjacencyList) GetNumberOfEdges() int {
	return G.Edges
}

// GetIndegreeForVertext counts nodes in the given vertex's own adjacency
// chain, starting after the head.
// NOTE(review): despite the name ("Indegree", also a typo for "Vertex" —
// exported, so kept for compatibility) this walks the vertex's OUTGOING
// list; the count also includes the zero sentinel — confirm the intended
// semantics.
func (G *AdjacencyList) GetIndegreeForVertext(vertex int) int {
	if vertex >= G.Vertices || vertex < 0 {
		return 0
	}
	nodeAdj := G.AdjList[vertex]
	nextNode := nodeAdj.Next
	length := 0
	for nextNode != (&Node{}) && nextNode != nil {
		length += 1
		nextNode = nextNode.Next
	}
	return length
}
func main() {
var testAdjListDirected = &AdjacencyList{4, 0, DIRECTED, nil}
//var testAdjListUndirected = &AdjacencyList{4, 0, UNDIRECTED, nil}
testAdjListDirected.Init()
testAdjListDirected.AddEdge(2, 1)
//testAdjListUndirected.Init()
err := testAdjListDirected.AddEdge(2, 3)
if err != nil {
fmt.Printf("Error adding edge")
}
if testAdjListDirected.AdjList[2].Key != 1 {
fmt.Printf("Data not found")
}
if testAdjListDirected.AdjList[2].Next.Key != 3 {
fmt.Printf("Data not found at index")
}
} | Graphs/graphs_adjacency_list.go | 0.643441 | 0.42471 | graphs_adjacency_list.go | starcoder |
package gnat
import (
"bytes"
"math/big"
"net"
"strconv"
)
// NetworkNode is the over-the-wire representation of a node.
type NetworkNode struct {
	// ID is a 32 byte unique identifier
	ID []byte

	// IP is the IPv4 address of the node
	IP net.IP

	// Port is the port of the node
	Port int
}

// node is the local, in-memory view of a network peer. It is a separate
// struct so metadata (RTT, last-seen time, ...) can be attached later
// without touching the wire format.
type node struct {
	*NetworkNode
}

// NewNetworkNode builds a NetworkNode for bootstrapping from an IP string
// and a decimal port string. A non-numeric port yields Port == 0
// (strconv.Atoi's zero result); callers are expected to pass validated
// input.
func NewNetworkNode(ip string, port string) *NetworkNode {
	portNum, _ := strconv.Atoi(port)
	return &NetworkNode{
		IP:   net.ParseIP(ip),
		Port: portNum,
	}
}

// newNode wraps an existing NetworkNode in the local representation.
func newNode(networkNode *NetworkNode) *node {
	return &node{NetworkNode: networkNode}
}
// shortList is used to sort a list of arbitrary nodes against a
// comparator ID. Nodes are ordered by XOR distance from the Comparator.
type shortList struct {
	// Nodes are a list of nodes to be compared
	Nodes []*NetworkNode

	// Comparator is the ID to compare to
	Comparator []byte
}
// areNodesEqual reports whether two nodes refer to the same network
// endpoint. When allowNilID is true the node IDs are ignored; otherwise
// both IDs must be non-nil and byte-equal. IP and Port must always match.
//
// Fix: replaced bytes.Compare(..) != 0 with the idiomatic !bytes.Equal
// and collapsed the trailing if/return chain into one expression.
func areNodesEqual(n1 *NetworkNode, n2 *NetworkNode, allowNilID bool) bool {
	if n1 == nil || n2 == nil {
		return false
	}
	if !allowNilID {
		if n1.ID == nil || n2.ID == nil {
			return false
		}
		if !bytes.Equal(n1.ID, n2.ID) {
			return false
		}
	}
	return n1.IP.Equal(n2.IP) && n1.Port == n2.Port
}
// RemoveNode deletes the first node whose ID matches the given node's ID;
// it is a no-op when no ID matches.
//
// Fix (this block): bytes.Compare(..) == 0 replaced with the idiomatic
// bytes.Equal; the inner existence scans now break early; AppendUnique is
// deduplicated by delegating to AppendUniqueNetworkNodes.
func (n *shortList) RemoveNode(node *NetworkNode) {
	for i := 0; i < n.Len(); i++ {
		if bytes.Equal(n.Nodes[i].ID, node.ID) {
			n.Nodes = append(n.Nodes[:i], n.Nodes[i+1:]...)
			return
		}
	}
}

// AppendUniqueNetworkNodes appends each candidate whose ID is not already
// present in the list.
func (n *shortList) AppendUniqueNetworkNodes(nodes []*NetworkNode) {
	for _, candidate := range nodes {
		exists := false
		for _, existing := range n.Nodes {
			if bytes.Equal(existing.ID, candidate.ID) {
				exists = true
				break
			}
		}
		if !exists {
			n.Nodes = append(n.Nodes, candidate)
		}
	}
}

// AppendUnique appends each candidate local node (unwrapped to its
// NetworkNode) whose ID is not already present in the list.
func (n *shortList) AppendUnique(nodes []*node) {
	networkNodes := make([]*NetworkNode, 0, len(nodes))
	for _, v := range nodes {
		networkNodes = append(networkNodes, v.NetworkNode)
	}
	n.AppendUniqueNetworkNodes(networkNodes)
}
// Len reports the number of nodes in the list (sort.Interface).
func (n *shortList) Len() int {
	return len(n.Nodes)
}

// Swap exchanges nodes i and j (sort.Interface).
func (n *shortList) Swap(i, j int) {
	n.Nodes[i], n.Nodes[j] = n.Nodes[j], n.Nodes[i]
}

// Less orders nodes by ascending XOR distance of their IDs from the
// Comparator target (sort.Interface).
func (n *shortList) Less(i, j int) bool {
	iDist := getDistance(n.Nodes[i].ID, n.Comparator)
	jDist := getDistance(n.Nodes[j].ID, n.Comparator)
	if iDist.Cmp(jDist) == -1 {
		return true
	}
	return false
}
// getDistance returns the XOR (Kademlia-style) distance between two node
// IDs, interpreting each as a big-endian unsigned integer.
//
// Fix: removed stray concatenation residue after the closing brace and
// folded the intermediate result variable away.
func getDistance(id1 []byte, id2 []byte) *big.Int {
	a := new(big.Int).SetBytes(id1)
	b := new(big.Int).SetBytes(id2)
	return new(big.Int).Xor(a, b)
}
package bungieapigo
// Data regarding the progress of a Quest for a specific character. Quests are composed of
// multiple steps, each with potentially multiple objectives: this QuestStatus will return
// Objective data for the *currently active* step in this quest.
type DestinyQuestStatus struct {
// The hash identifier for the Quest Item. (Note: Quests are defined as Items, and thus you would
// use this to look up the quest's DestinyInventoryItemDefinition). For information on all
// steps in the quest, you can then examine its DestinyInventoryItemDefinition.setData
// property for Quest Steps (which are *also* items). You can use the Item Definition to display
// human readable data about the overall quest.
QuestHash int `json:"questHash"`
// The hash identifier of the current Quest Step, which is also a
// DestinyInventoryItemDefinition. You can use this to get human readable data about the
// current step and what to do in that step.
StepHash int `json:"stepHash"`
// A step can have multiple objectives. This will give you the progress for each objective in the
// current step, in the order in which they are rendered in-game.
StepObjectives []DestinyObjectiveProgress `json:"stepObjectives"`
// Whether or not the quest is tracked
Tracked bool `json:"tracked"`
// The current Quest Step will be an instanced item in the player's inventory. If you care about
// that, this is the instance ID of that item.
ItemInstanceId int64 `json:"itemInstanceId,string"`
// Whether or not the whole quest has been completed, regardless of whether or not you have
// redeemed the rewards for the quest.
Completed bool `json:"completed"`
// Whether or not you have redeemed rewards for this quest.
Redeemed bool `json:"redeemed"`
// Whether or not you have started this quest.
Started bool `json:"started"`
// If the quest has a related Vendor that you should talk to in order to initiate the quest/earn
// rewards/continue the quest, this will be the hash identifier of that Vendor. Look it up its
// DestinyVendorDefinition.
VendorHash int `json:"vendorHash"`
} | pkg/models/DestinyQuestStatus.go | 0.510741 | 0.478833 | DestinyQuestStatus.go | starcoder |
package runes
import "unicode"
// IndexRune returns the index of the first occurrence of the rune c in s,
// or -1 if c is not present in s.
func IndexRune(s []rune, c rune) int {
	for i := 0; i < len(s); i++ {
		if s[i] == c {
			return i
		}
	}
	return -1
}
// Equal reports whether a and b have the same length and contain the same
// runes. A nil argument is equivalent to an empty slice.
func Equal(a, b []rune) bool {
	if len(a) != len(b) {
		return false
	}
	for i, r := range a {
		if b[i] != r {
			return false
		}
	}
	return true
}
// Index returns the index of the first instance of substr in s, or -1 if
// substr is not present in s. Trivial lengths are handled directly; the
// general case falls through to Rabin-Karp search.
func Index(s, substr []rune) int {
	switch n := len(substr); {
	case n == 0:
		return 0
	case n == 1:
		return IndexRune(s, substr[0])
	case n > len(s):
		return -1
	case n == len(s):
		if Equal(substr, s) {
			return 0
		}
		return -1
	}
	return indexRabinKarp(s, substr)
}
// indexRabinKarp returns the index of the first occurrence of sep in s using
// the Rabin-Karp rolling-hash algorithm, or -1 if sep is not present.
// Callers (Index) guarantee 1 < len(sep) < len(s).
func indexRabinKarp(s, sep []rune) int {
	// Rabin-Karp search
	// hashsep is the hash of the needle; pow is primeRK^len(sep), used to
	// remove the outgoing rune from the rolling hash below.
	hashsep, pow := hashStr(sep)
	n := len(sep)
	// Hash of the first window s[0:n].
	var h uint32
	for i := 0; i < n; i++ {
		h = h*primeRK + uint32(s[i])
	}
	// Hash collisions are possible, so an exact comparison confirms a match.
	if h == hashsep && Equal(s[:n], sep) {
		return 0
	}
	// Slide the window one rune at a time: fold in s[i], drop s[i-n].
	for i := n; i < len(s); {
		h *= primeRK
		h += uint32(s[i])
		h -= pow * uint32(s[i-n])
		i++
		if h == hashsep && Equal(s[i-n:i], sep) {
			return i - n
		}
	}
	return -1
}
// primeRK is the prime base used in Rabin-Karp algorithm.
const primeRK = 16777619
// hashStr returns the hash and the appropriate multiplicative
// factor for use in Rabin-Karp algorithm.
// The hash is the polynomial hash sum(sep[i] * primeRK^(len-1-i)); the
// factor is primeRK^len(sep), needed to remove the oldest rune from a
// rolling hash window.
func hashStr(sep []rune) (uint32, uint32) {
	hash := uint32(0)
	for i := 0; i < len(sep); i++ {
		hash = hash*primeRK + uint32(sep[i])
	}
	// Compute primeRK^len(sep) by binary exponentiation (square-and-multiply),
	// examining the bits of the exponent from least significant upward.
	var pow, sq uint32 = 1, primeRK
	for i := len(sep); i > 0; i >>= 1 {
		if i&1 != 0 {
			pow *= sq
		}
		sq *= sq
	}
	return hash, pow
}
// Map returns a copy of the rune slice s with all its characters modified
// according to the mapping function. If mapping returns a negative value, the character is
// dropped from the slice with no replacement.
//
// Fix: the documented contract (matching strings.Map) says negative mapping
// results drop the rune, but the previous implementation copied them through
// unchanged. None of the in-package callers (ToLower/ToUpper/ToTitle) ever
// return a negative rune, so they are unaffected.
func Map(mapping func(r rune) rune, s []rune) []rune {
	// Pre-size to the common case where nothing is dropped.
	ret := make([]rune, 0, len(s))
	for _, r := range s {
		if nr := mapping(r); nr >= 0 {
			ret = append(ret, nr)
		}
	}
	return ret
}
// ToLower returns a copy of the rune slice s with all Unicode letters mapped to their lower case.
func ToLower(s []rune) []rune { return Map(unicode.ToLower, s) }
// ToUpper returns a copy of the rune slice s with all Unicode letters mapped to their upper case.
func ToUpper(s []rune) []rune { return Map(unicode.ToUpper, s) }
// ToTitle returns a copy of the rune slice s with all Unicode letters mapped to their title case.
func ToTitle(s []rune) []rune { return Map(unicode.ToTitle, s) }
Contains functions that manage the reading and writing of files related to package summarize.
This includes reading and interpreting JSON files as actionable data, memoizing function
results to JSON, and outputting results once the summarization process is complete.
*/
package summarize
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"sort"
"strconv"
"strings"
)
// loadFailures loads a builds file and one or more test failure files. It maps build paths to builds
// and groups test failures by test name.
func loadFailures(buildsFilepath string, testsFilepaths []string) (map[string]build, map[string][]failure, error) {
	const memoMessage string = "loading failed tests"
	builds := make(map[string]build)
	tests := make(map[string][]failure)
	// Try to retrieve memoized results first to avoid another computation.
	// Both caches must deserialize successfully, otherwise recompute everything
	// from the source files below.
	if getMemoizedResults("memo_load_failures-builds.json", "", &builds) &&
		getMemoizedResults("memo_load_failures-tests.json", "", &tests) {
		logInfo("Done (cached) " + memoMessage)
		return builds, tests, nil
	}
	builds, err := loadBuilds(buildsFilepath)
	if err != nil {
		return nil, nil, fmt.Errorf("Could not retrieve builds: %s", err)
	}
	tests, err = loadTests(testsFilepaths)
	if err != nil {
		return nil, nil, fmt.Errorf("Could not retrieve tests: %s", err)
	}
	// Cache the fresh results for future runs (best-effort; failures only warn).
	memoizeResults("memo_load_failures-builds.json", "", builds)
	memoizeResults("memo_load_failures-tests.json", "", tests)
	logInfo("Done " + memoMessage)
	return builds, tests, nil
}
// loadPrevious loads a previous output and returns the 'clustered' field.
func loadPrevious(filepath string) ([]jsonCluster, error) {
	var previous jsonOutput
	if err := getJSON(filepath, &previous); err != nil {
		return nil, fmt.Errorf("Could not get previous results JSON: %s", err)
	}
	return previous.Clustered, nil
}
// loadOwners loads an owners JSON file and returns it.
func loadOwners(filepath string) (map[string][]string, error) {
	var owners map[string][]string
	if err := getJSON(filepath, &owners); err != nil {
		return nil, fmt.Errorf("Could not get owners JSON: %s", err)
	}
	return owners, nil
}
// writeResults outputs the results of clustering to a file.
func writeResults(filepath string, data jsonOutput) error {
	if err := writeJSON(filepath, data); err != nil {
		return fmt.Errorf("Could not write results to disk: %s", err)
	}
	return nil
}
// renderedSliceOutput is the JSON shape written by writeRenderedSlice: the
// filtered clusters plus the build columns they reference.
type renderedSliceOutput struct {
	Clustered []jsonCluster `json:"clustered"`
	Builds    columns       `json:"builds"`
}
// writeRenderedSlice outputs the results of a call to renderSlice() to a file.
func writeRenderedSlice(filepath string, clustered []jsonCluster, builds columns) error {
	payload := renderedSliceOutput{
		Clustered: clustered,
		Builds:    builds,
	}
	if err := writeJSON(filepath, payload); err != nil {
		return fmt.Errorf("Could not write subset to disk: %s", err)
	}
	return nil
}
/*
getMemoizedResults attempts to retrieve memoized function results from the given filepath. If it
succeeds, it places the results into v and returns true. Otherwise, it returns false. Internally,
it calls encoding/json's Unmarshal using v as the second argument. Therefore, v must be a non-nil
pointer.
message is a message that gets printed on success, appended to "Done (cached) ". If it is the empty
string, no message is printed.
*/
func getMemoizedResults(filepath string, message string, v interface{}) (ok bool) {
	// Any read or parse error simply means there is no usable cache; the
	// caller falls back to recomputing.
	err := getJSON(filepath, v)
	if err == nil {
		if message != "" {
			logInfo("Done (cached) " + message)
		}
		return true
	}
	return false
}
/*
memoizeResults saves the results stored in v to a JSON file. v should be a value, not a pointer. It
prints a warning if the results could not be memoized.
message is a message that gets printed on success, appended to "Done ". If it is the empty
string, no message is printed.
*/
func memoizeResults(filepath string, message string, v interface{}) {
	// Memoization is best-effort: on failure, warn and carry on.
	if err := writeJSON(filepath, v); err != nil {
		logWarning("Could not memoize results to '%s': %s", filepath, err)
		return
	}
	// Fix: the previous version fell through to the warning path (with a nil
	// err) whenever the write succeeded but message was empty, logging a bogus
	// "Could not memoize" warning for every successful, silent memoization.
	if message != "" {
		logInfo("Done " + message)
	}
}
/* Functions below this comment are only used within this file as of this commit. */
// jsonBuild represents a build as reported by the JSON. All values are strings.
// This should not be instantiated directly, but rather via the encoding/json package's
// Unmarshal method. This is an intermediary state for the data until it can be put into
// a build object.
type jsonBuild struct {
	Path        string `json:"path"`
	Started     string `json:"started"`      // Unix timestamp, converted to int by asBuild
	Elapsed     string `json:"elapsed"`      // seconds, converted to int by asBuild
	TestsRun    string `json:"tests_run"`    // converted to int by asBuild
	TestsFailed string `json:"tests_failed"` // converted to int by asBuild
	Result      string `json:"result"`
	Executor    string `json:"executor"`
	Job         string `json:"job"`
	Number      string `json:"number"` // converted to int by asBuild
	PR          string `json:"pr"`
	Key         string `json:"key"` // Often nonexistent
}
// asBuild is a factory function that creates a build object from a jsonBuild object, appropriately
// handling all type conversions. Fields that are empty strings in the JSON are
// left at their zero value; non-empty fields that fail to parse return an error.
func (jb *jsonBuild) asBuild() (build, error) {
	// The build object that will be returned, initialized with the values that
	// don't need conversion.
	b := build{
		Path:     jb.Path,
		Result:   jb.Result,
		Executor: jb.Executor,
		Job:      jb.Job,
		PR:       jb.PR,
		Key:      jb.Key,
	}
	// To avoid assignment issues
	var err error
	// started
	if jb.Started != "" {
		b.Started, err = strconv.Atoi(jb.Started)
		if err != nil {
			return build{}, fmt.Errorf("Error converting JSON string '%s' to int for build field 'started': %s", jb.Started, err)
		}
	}
	// elapsed: parsed as float (the JSON may contain fractional seconds),
	// then truncated toward zero to whole seconds.
	if jb.Elapsed != "" {
		tempElapsed, err := strconv.ParseFloat(jb.Elapsed, 32)
		if err != nil {
			return build{}, fmt.Errorf("Error converting JSON string '%s' to float32 for build field 'elapsed': %s", jb.Elapsed, err)
		}
		b.Elapsed = int(tempElapsed)
	}
	// testsRun
	if jb.TestsRun != "" {
		b.TestsRun, err = strconv.Atoi(jb.TestsRun)
		if err != nil {
			return build{}, fmt.Errorf("Error converting JSON string '%s' to int for build field 'testsRun': %s", jb.TestsRun, err)
		}
	}
	// testsFailed
	if jb.TestsFailed != "" {
		b.TestsFailed, err = strconv.Atoi(jb.TestsFailed)
		if err != nil {
			return build{}, fmt.Errorf("Error converting JSON string '%s' to int for build field 'testsFailed': %s", jb.TestsFailed, err)
		}
	}
	// number
	if jb.Number != "" {
		b.Number, err = strconv.Atoi(jb.Number)
		if err != nil {
			return build{}, fmt.Errorf("Error converting JSON string '%s' to int for build field 'number': %s", jb.Number, err)
		}
	}
	return b, nil
}
// loadBuilds parses a JSON file containing build information and returns a map from build paths
// to build objects. Builds missing a start time or build number are skipped.
func loadBuilds(filepath string) (map[string]build, error) {
	// The map
	builds := make(map[string]build)
	// jsonBuilds temporarily stores the builds as they are retrieved from the JSON file
	// until they can be converted to build objects
	jsonBuilds := make([]jsonBuild, 0)
	err := getJSON(filepath, &jsonBuilds)
	if err != nil {
		return nil, fmt.Errorf("Could not get builds JSON: %s", err)
	}
	// Convert the build information to internal build objects and store them in the builds map
	for _, jBuild := range jsonBuilds {
		// Skip builds without a start time or build number
		if jBuild.Started == "" || jBuild.Number == "" {
			continue
		}
		bld, err := jBuild.asBuild()
		if err != nil {
			return nil, fmt.Errorf("Could not create build object from jsonBuild object: %s", err)
		}
		// For PR jobs, derive the PR number from the path. This assumes the
		// PR number is the third-from-last path segment of a "pr-logs" path —
		// TODO confirm against the storage layout that produces these paths.
		if strings.Contains(bld.Path, "pr-logs") {
			parts := strings.Split(bld.Path, "/")
			bld.PR = parts[len(parts)-3]
		}
		builds[bld.Path] = bld
	}
	return builds, nil
}
// jsonFailure represents a test failure as reported by the JSON. All values are strings.
// This should not be instantiated directly, but rather via the encoding/json package's
// Unmarshal method. This is an intermediary state for the data until it can be put into
// a failure object.
type jsonFailure struct {
	Started     string `json:"started"` // Unix timestamp, converted to int by asFailure
	Build       string `json:"build"`
	Name        string `json:"name"`
	FailureText string `json:"failure_text"`
}
// asFailure is a factory function that creates a failure object from the jsonFailure object,
// appropriately handling all type conversions. An empty Started field is left
// at its zero value; a non-empty field that fails to parse returns an error.
func (jf *jsonFailure) asFailure() (failure, error) {
	// The failure object that will be returned, initialized with the values that
	// don't need conversion.
	f := failure{
		Build:       jf.Build,
		Name:        jf.Name,
		FailureText: jf.FailureText,
	}
	// To avoid assignment issues
	var err error
	// started
	if jf.Started != "" {
		f.Started, err = strconv.Atoi(jf.Started)
		if err != nil {
			return failure{}, fmt.Errorf("Error converting JSON string '%s' to int for failure field 'started': %s", jf.Started, err)
		}
	}
	return f, nil
}
// loadTests parses multiple JSON files containing test information for failed tests. It returns a
// map from test names to failure objects, with each test's failures sorted by build.
//
// Fix: the previous version accumulated every file's entries into one shared
// jsonFailure slice but re-ran the conversion loop over the ENTIRE slice after
// each file, so failures from file N were inserted again for every subsequent
// file (duplicated entries). It also deferred file.Close() inside the loop,
// holding every file open until the function returned. Per-file work is now in
// a helper so each file is converted exactly once and closed promptly.
func loadTests(testsFilepaths []string) (map[string][]failure, error) {
	// The map from test name to that test's failures.
	tests := make(map[string][]failure)
	for _, filepath := range testsFilepaths {
		if err := loadTestsFile(filepath, tests); err != nil {
			return nil, err
		}
	}
	// Sort the failures within each test by build
	for _, testSlice := range tests {
		sort.Slice(testSlice, func(i, j int) bool { return testSlice[i].Build < testSlice[j].Build })
	}
	return tests, nil
}

// loadTestsFile reads one newline-delimited JSON file of test failures and
// appends the converted failure objects into tests, grouped by test name.
func loadTestsFile(filepath string, tests map[string][]failure) error {
	file, err := os.Open(filepath)
	if err != nil {
		return fmt.Errorf("Could not open tests file '%s': %s", filepath, err)
	}
	defer file.Close()
	// Read each line in the file as its own JSON object.
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		var jf jsonFailure
		if err := json.Unmarshal(scanner.Bytes(), &jf); err != nil {
			return fmt.Errorf("Could not unmarshal JSON for text '%s': %s", scanner.Text(), err)
		}
		test, err := jf.asFailure()
		if err != nil {
			return fmt.Errorf("Could not create failure object from jsonFailure object: %s", err)
		}
		// append handles the nil slice for a first-seen test name.
		tests[jf.Name] = append(tests[jf.Name], test)
	}
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("Could not read file line by line: %s", err)
	}
	return nil
}
// getJSON opens a JSON file, parses it according to the schema provided by v, and places the results
// into v. Internally, it calls encoding/json's Unmarshal using v as the second argument. Therefore,
// v mut be a non-nil pointer.
func getJSON(filepath string, v interface{}) error {
contents, err := ioutil.ReadFile(filepath)
if err != nil {
return fmt.Errorf("Could not open file '%s': %s", filepath, err)
}
// Decode the JSON into the provided interface
err = json.Unmarshal(contents, v)
if err != nil {
return fmt.Errorf("Could not unmarshal JSON: %s", err)
}
return nil
}
// writeJSON generates JSON according to v and writes the results to filepath.
func writeJSON(filepath string, v interface{}) error {
output, err := json.Marshal(v)
if err != nil {
return fmt.Errorf("Could not encode JSON: %s", err)
}
err = ioutil.WriteFile(filepath, output, 0644)
if err != nil {
return fmt.Errorf("Could not write JSON to file: %s", err)
}
return nil
} | triage/summarize/files.go | 0.811377 | 0.474814 | files.go | starcoder |
package common
import (
"encoding/hex"
"fmt"
"math/big"
"github.com/altair-lab/xoreum/common/math"
)
const (
	// HashLength is the byte length of a Hash (Keccak256 output size).
	HashLength = 32
	// AddressLength is the byte length of an Address.
	AddressLength = 32 // can be changed later
)
var (
	// original
	// Difficulty is the proof-of-work target: a block hash must be below this
	// value to be valid. NOTE(review): the "10" in the trailing comment does
	// not obviously correspond to 2^255 — confirm intended difficulty.
	Difficulty = math.BigPow(2, 256-1) // mining difficulty: 10
	// this is for test
	//Difficulty = math.BigPow(2, 260)
)
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
type Hash [HashLength]byte
// Bytes gets the byte representation of the underlying hash.
func (h Hash) Bytes() []byte { return h[:] }
// BytesToHash sets b to hash.
// If b is larger than len(h), b will be cropped from the left.
func BytesToHash(b []byte) Hash {
	var h Hash
	h.SetBytes(b)
	return h
}
// BytesToAddress converts b to an Address.
// If b is larger than AddressLength, b will be cropped from the left.
func BytesToAddress(b []byte) Address {
	var a Address
	a.SetBytes(b)
	return a
}
// SetBytes sets the hash to the value of b.
// If b is larger than len(h), b will be cropped from the left.
func (h *Hash) SetBytes(b []byte) {
	if len(b) > len(h) {
		// Keep only the rightmost HashLength bytes.
		b = b[len(b)-HashLength:]
	}
	// Right-align b within the hash; leading bytes stay zero for short input.
	copy(h[HashLength-len(b):], b)
}
// Address is a fixed-size account identifier.
type Address [AddressLength]byte
// Bytes gets the byte representation of the underlying hash.
func (a Address) Bytes() []byte { return a[:] }
// SetBytes sets the address to the value of b.
// If b is larger than len(a), b will be cropped from the left.
func (a *Address) SetBytes(b []byte) {
	if len(b) > len(a) {
		b = b[len(b)-AddressLength:]
	}
	copy(a[AddressLength-len(b):], b)
}
// ToBigInt interprets the hash's bytes as a big-endian unsigned integer.
func (h Hash) ToBigInt() *big.Int {
	// big.Int.SetBytes already treats its input as big-endian bytes, so the
	// element-by-element copy loop the previous version built is unnecessary.
	return new(big.Int).SetBytes(h[:])
}
// ToHex returns the hash as a 0x-prefixed, lowercase hexadecimal string.
func (h Hash) ToHex() string {
	// h[:] yields the bytes directly; the previous manual copy loop was
	// redundant.
	hexStr := Bytes2Hex(h[:])
	if len(hexStr) == 0 {
		// Unreachable while HashLength > 0, kept for safety.
		hexStr = "0"
	}
	return "0x" + hexStr
}
// ToHex returns the address as a 0x-prefixed, lowercase hexadecimal string.
func (a Address) ToHex() string {
	// a[:] yields the bytes directly; the previous manual copy loop was
	// redundant.
	hexStr := Bytes2Hex(a[:])
	if len(hexStr) == 0 {
		// Unreachable while AddressLength > 0, kept for safety.
		hexStr = "0"
	}
	return "0x" + hexStr
}
// Bytes2Hex returns the lowercase hexadecimal encoding of d.
func Bytes2Hex(d []byte) string {
	return hex.EncodeToString(d)
}
// HexToHash converts a hex string to a Hash.
// NOTE(review): this is currently a stub — it ignores 's' entirely and always
// returns the zero Hash. TODO: decode the (possibly 0x-prefixed) hex string
// into the hash, mirroring Bytes2Hex/BytesToHash.
func HexToHash(s string) Hash {
	return Hash{}
}
// ToBytes renders v using fmt's default "%v" formatting and returns the
// resulting text as a byte slice.
func ToBytes(v interface{}) []byte {
	formatted := fmt.Sprintf("%v", v)
	return []byte(formatted)
}
// CopyBytes returns an exact copy of the provided bytes.
// A nil input yields nil (not an empty non-nil slice).
func CopyBytes(b []byte) []byte {
	if b == nil {
		return nil
	}
	c := make([]byte, len(b))
	copy(c, b)
	return c
}
package common
import (
s "github.com/uber/cadence/.gen/go/shared"
)
// The XxxPtr helpers below copy their argument and return a pointer to the
// copy; mutating the pointee never affects the caller's original value. They
// exist to populate optional (pointer-typed) fields in generated Thrift types.
// IntPtr makes a copy and returns the pointer to an int.
func IntPtr(v int) *int {
	return &v
}
// Int32Ptr makes a copy and returns the pointer to an int32.
func Int32Ptr(v int32) *int32 {
	return &v
}
// Int64Ptr makes a copy and returns the pointer to an int64.
func Int64Ptr(v int64) *int64 {
	return &v
}
// Uint32Ptr makes a copy and returns the pointer to a uint32.
func Uint32Ptr(v uint32) *uint32 {
	return &v
}
// Uint64Ptr makes a copy and returns the pointer to a uint64.
func Uint64Ptr(v uint64) *uint64 {
	return &v
}
// Float64Ptr makes a copy and returns the pointer to a float64.
func Float64Ptr(v float64) *float64 {
	return &v
}
// BoolPtr makes a copy and returns the pointer to a bool.
func BoolPtr(v bool) *bool {
	return &v
}
// StringPtr makes a copy and returns the pointer to a string.
func StringPtr(v string) *string {
	return &v
}
// The helpers below are the same copy-and-take-address pattern for the
// generated Cadence shared types; each returns a pointer to a fresh copy of
// its argument.
// TaskListPtr makes a copy and returns the pointer to a TaskList.
func TaskListPtr(v s.TaskList) *s.TaskList {
	return &v
}
// ActivityTypePtr makes a copy and returns the pointer to a ActivityType.
func ActivityTypePtr(v s.ActivityType) *s.ActivityType {
	return &v
}
// DecisionTypePtr makes a copy and returns the pointer to a DecisionType.
func DecisionTypePtr(t s.DecisionType) *s.DecisionType {
	return &t
}
// EventTypePtr makes a copy and returns the pointer to a EventType.
func EventTypePtr(t s.EventType) *s.EventType {
	return &t
}
// WorkflowTypePtr makes a copy and returns the pointer to a WorkflowType.
func WorkflowTypePtr(t s.WorkflowType) *s.WorkflowType {
	return &t
}
// TimeoutTypePtr makes a copy and returns the pointer to a TimeoutType.
func TimeoutTypePtr(t s.TimeoutType) *s.TimeoutType {
	return &t
}
// TaskListKindPtr makes a copy and returns the pointer to a TaskListKind.
func TaskListKindPtr(t s.TaskListKind) *s.TaskListKind {
	return &t
}
// TaskListTypePtr makes a copy and returns the pointer to a TaskListType.
func TaskListTypePtr(t s.TaskListType) *s.TaskListType {
	return &t
}
// DecisionTaskFailedCausePtr makes a copy and returns the pointer to a DecisionTaskFailedCause.
func DecisionTaskFailedCausePtr(t s.DecisionTaskFailedCause) *s.DecisionTaskFailedCause {
	return &t
}
// CancelExternalWorkflowExecutionFailedCausePtr makes a copy and returns the pointer to a CancelExternalWorkflowExecutionFailedCause.
func CancelExternalWorkflowExecutionFailedCausePtr(t s.CancelExternalWorkflowExecutionFailedCause) *s.CancelExternalWorkflowExecutionFailedCause {
	return &t
}
// SignalExternalWorkflowExecutionFailedCausePtr makes a copy and returns the pointer to a SignalExternalWorkflowExecutionFailedCause.
func SignalExternalWorkflowExecutionFailedCausePtr(t s.SignalExternalWorkflowExecutionFailedCause) *s.SignalExternalWorkflowExecutionFailedCause {
	return &t
}
// ChildPolicyPtr makes a copy and returns the pointer to a ChildPolicy.
func ChildPolicyPtr(t s.ChildPolicy) *s.ChildPolicy {
	return &t
}
// ChildWorkflowExecutionFailedCausePtr makes a copy and returns the pointer to a ChildWorkflowExecutionFailedCause.
func ChildWorkflowExecutionFailedCausePtr(t s.ChildWorkflowExecutionFailedCause) *s.ChildWorkflowExecutionFailedCause {
	return &t
}
// StringDefault returns value if string pointer is set otherwise default value of string
func StringDefault(v *string) string {
	if v != nil {
		return *v
	}
	return ""
}
// Int32Default returns value if int32 pointer is set otherwise default value of int32
func Int32Default(v *int32) int32 {
	if v != nil {
		return *v
	}
	return 0
}
// Int64Default returns value if int64 pointer is set otherwise default value of int64
func Int64Default(v *int64) int64 {
	if v != nil {
		return *v
	}
	return 0
}
// BoolDefault returns value if bool pointer is set otherwise default value of bool
func BoolDefault(v *bool) bool {
	if v != nil {
		return *v
	}
	return false
}
package main
import (
"github.com/go-gl/gl/v2.1/gl"
"github.com/go-gl/glfw/v3.1/glfw"
"github.com/vova616/chipmunk"
"github.com/vova616/chipmunk/vect"
"math"
"log"
"math/rand"
"os"
"runtime"
"time"
)
var (
ballRadius = 25
ballMass = 1
space *chipmunk.Space
balls []*chipmunk.Shape
staticLines []*chipmunk.Shape
deg2rad = math.Pi / 180
)
// drawCircle draws a circle for the specified radius, rotation angle, and the specified number of sides
func drawCircle(radius float64, sides int) {
	gl.Begin(gl.LINE_LOOP)
	// Walk the circumference in equal angular steps of 2*pi/sides.
	for a := 0.0; a < 2*math.Pi; a += (2 * math.Pi / float64(sides)) {
		gl.Vertex2d(math.Sin(a)*radius, math.Cos(a)*radius)
	}
	// Final vertex at the origin closes the loop through the center, drawing a
	// radius "spoke" that makes the ball's rotation visible as it spins.
	gl.Vertex3f(0, 0, 0)
	gl.End()
}
// draw is the OpenGL draw function: clears the frame, renders the static line
// segments, then renders every ball translated/rotated by its physics body.
func draw() {
	gl.Clear(gl.COLOR_BUFFER_BIT)
	gl.Enable(gl.BLEND)
	gl.Enable(gl.POINT_SMOOTH)
	gl.Enable(gl.LINE_SMOOTH)
	gl.BlendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
	gl.LoadIdentity()
	// Draw the static segments in dark grey, one line per segment.
	gl.Begin(gl.LINES)
	gl.Color3f(.2, .2, .2)
	for i := range staticLines {
		x := staticLines[i].GetAsSegment().A.X
		y := staticLines[i].GetAsSegment().A.Y
		gl.Vertex3f(float32(x), float32(y), 0)
		x = staticLines[i].GetAsSegment().B.X
		y = staticLines[i].GetAsSegment().B.Y
		gl.Vertex3f(float32(x), float32(y), 0)
	}
	gl.End()
	gl.Color4f(.3, .3, 1, .8)
	// draw balls: each ball is positioned/oriented by its physics body via the
	// modelview matrix, saved and restored around every ball.
	for _, ball := range balls {
		gl.PushMatrix()
		pos := ball.Body.Position()
		rot := ball.Body.Angle() * chipmunk.DegreeConst
		gl.Translatef(float32(pos.X), float32(pos.Y), 0.0)
		gl.Rotatef(float32(rot), 0, 0, 1)
		drawCircle(float64(ballRadius), 60)
		gl.PopMatrix()
	}
}
// addBall creates a ball with a random x position and spin, drops it into the
// physics space from above the visible area (y = 600), and tracks its shape.
func addBall() {
	// Random spawn x in [115, 349] — roughly above the static segments built
	// in createBodies; TODO confirm against those coordinates.
	x := rand.Intn(350-115) + 115
	ball := chipmunk.NewCircle(vect.Vector_Zero, float32(ballRadius))
	ball.SetElasticity(0.95)
	body := chipmunk.NewBody(vect.Float(ballMass), ball.Moment(float32(ballMass)))
	body.SetPosition(vect.Vect{vect.Float(x), 600.0})
	// Random initial rotation so the spoke drawn by drawCircle varies per ball.
	body.SetAngle(vect.Float(rand.Float32() * 2 * math.Pi))
	body.AddShape(ball)
	space.AddBody(body)
	balls = append(balls, ball)
}
// step advances the physics engine by dt seconds and removes any balls that
// have fallen below the visible area so they can be garbage collected.
//
// Fix: the previous index-based removal set balls[i] = nil only to overwrite
// it immediately with append(balls[:i], balls[i+1:]...), leaving a stale
// pointer to the last element in the backing array's tail. This version
// filters in place and explicitly zeroes the freed tail.
func step(dt float32) {
	space.Step(vect.Float(dt))
	// Filter in place, reusing the backing array.
	kept := balls[:0]
	for _, b := range balls {
		if b.Body.Position().Y < -100 {
			space.RemoveBody(b.Body)
			continue
		}
		kept = append(kept, b)
	}
	// Zero the tail so removed balls are not retained by the backing array.
	for i := len(kept); i < len(balls); i++ {
		balls[i] = nil
	}
	balls = kept
}
// createBodies sets up the chipmunk space and static bodies
func createBodies() {
	space = chipmunk.NewSpace()
	// Gravity pulls along -Y at 900 units/s^2.
	space.Gravity = vect.Vect{0, -900}
	staticBody := chipmunk.NewBodyStatic()
	// Two fixed line segments the balls bounce off: a slightly sloped shelf
	// and a short vertical wall at its right end.
	staticLines = []*chipmunk.Shape{
		chipmunk.NewSegment(vect.Vect{111.0, 280.0}, vect.Vect{407.0, 246.0}, 0),
		chipmunk.NewSegment(vect.Vect{407.0, 246.0}, vect.Vect{407.0, 343.0}, 0),
	}
	for _, segment := range staticLines {
		segment.SetElasticity(0.6)
		staticBody.AddShape(segment)
	}
	space.AddBody(staticBody)
}
// onResize sets up a simple 2d ortho context based on the window size
func onResize(window *glfw.Window, w, h int) {
	// The incoming w/h are ignored: the ortho projection is built from the
	// window size in screen coordinates, while the viewport uses framebuffer
	// pixels — the two differ on high-DPI displays.
	w, h = window.GetSize() // query window to get screen pixels
	width, height := window.GetFramebufferSize()
	gl.Viewport(0, 0, int32(width), int32(height))
	gl.MatrixMode(gl.PROJECTION)
	gl.LoadIdentity()
	// Origin at bottom-left, one unit per screen pixel.
	gl.Ortho(0, float64(w), 0, float64(h), -1, 1)
	gl.MatrixMode(gl.MODELVIEW)
	gl.LoadIdentity()
	gl.ClearColor(1, 1, 1, 1)
}
func main() {
runtime.LockOSThread()
// initialize glfw
if err := glfw.Init(); err != nil {
log.Fatalln("Failed to initialize GLFW: ", err)
}
defer glfw.Terminate()
// create window
window, err := glfw.CreateWindow(600, 600, os.Args[0], nil, nil)
if err != nil {
log.Fatal(err)
}
window.SetFramebufferSizeCallback(onResize)
window.MakeContextCurrent()
if err := gl.Init(); err != nil {
log.Fatal(err)
}
// set up opengl context
onResize(window, 600, 600)
// set up physics
createBodies()
runtime.LockOSThread()
glfw.SwapInterval(1)
ticksToNextBall := 10
ticker := time.NewTicker(time.Second / 60)
for !window.ShouldClose() {
ticksToNextBall--
if ticksToNextBall == 0 {
ticksToNextBall = rand.Intn(100) + 1
addBall()
}
draw()
step(1.0 / 60.0)
window.SwapBuffers()
glfw.PollEvents()
<-ticker.C // wait up to 1/60th of a second
}
} | examples/glfw3/bouncing_balls/bouncing_balls.go | 0.53048 | 0.403537 | bouncing_balls.go | starcoder |
package discovery
import "fmt"
// Cover is the MQTT discovery configuration payload for a Home Assistant
// cover entity. It is serialized to JSON and published on the announce topic
// so Home Assistant can auto-configure the device.
type Cover struct {
	// A list of MQTT topics subscribed to receive availability (online/offline) updates. Must not be used together with `availability_topic`
	// Default: <no value>
	Availability []Availability `json:"availability,omitempty"`
	// When `availability` is configured, this controls the conditions needed to set the entity to `available`. Valid entries are `all`, `any`, and `latest`. If set to `all`, `payload_available` must be received on all configured availability topics before the entity is marked as online. If set to `any`, `payload_available` must be received on at least one configured availability topic before the entity is marked as online. If set to `latest`, the last `payload_available` or `payload_not_available` received on any configured availability topic controls the availability
	// Default: latest
	AvailabilityMode string `json:"availability_mode,omitempty"`
	// Defines a [template](/docs/configuration/templating/#processing-incoming-data) to extract device's availability from the `availability_topic`. To determine the devices's availability result of this template will be compared to `payload_available` and `payload_not_available`
	// Default: <no value>
	AvailabilityTemplate string `json:"availability_template,omitempty"`
	// The MQTT topic subscribed to to receive birth and LWT messages from the MQTT cover device. If an `availability` topic is not defined, the cover availability state will always be `available`. If an `availability` topic is defined, the cover availability state will be `unavailable` by default. Must not be used together with `availability`
	// Default: <no value>
	AvailabilityTopic string `json:"availability_topic,omitempty"`
	// The MQTT topic to publish commands to control the cover
	// Default: <no value>
	CommandTopic string `json:"command_topic,omitempty"`
	// Information about the device this cover is a part of to tie it into the [device registry](https://developers.home-assistant.io/docs/en/device_registry_index.html). Only works through [MQTT discovery](/docs/mqtt/discovery/) and when [`unique_id`](#unique_id) is set. At least one of identifiers or connections must be present to identify the device
	// Default: <no value>
	Device *Device `json:"device,omitempty"`
	// Sets the [class of the device](/integrations/cover/), changing the device state and icon that is displayed on the frontend
	// Default: <no value>
	DeviceClass string `json:"device_class,omitempty"`
	// Flag which defines if the entity should be enabled when first added
	// Default: true
	EnabledByDefault bool `json:"enabled_by_default,omitempty"`
	// The [category](https://developers.home-assistant.io/docs/core/entity#generic-properties) of the entity
	// Default: None
	EntityCategory string `json:"entity_category,omitempty"`
	// [Icon](/docs/configuration/customizing-devices/#icon) for the entity
	// Default: <no value>
	Icon string `json:"icon,omitempty"`
	// Defines a [template](/docs/configuration/templating/#processing-incoming-data) to extract the JSON dictionary from messages received on the `json_attributes_topic`. Usage example can be found in [MQTT sensor](/integrations/sensor.mqtt/#json-attributes-template-configuration) documentation
	// Default: <no value>
	JsonAttributesTemplate string `json:"json_attributes_template,omitempty"`
	// The MQTT topic subscribed to receive a JSON dictionary payload and then set as sensor attributes. Usage example can be found in [MQTT sensor](/integrations/sensor.mqtt/#json-attributes-topic-configuration) documentation
	// Default: <no value>
	JsonAttributesTopic string `json:"json_attributes_topic,omitempty"`
	// The name of the cover
	// Default: MQTT Cover
	Name string `json:"name,omitempty"`
	// Used instead of `name` for automatic generation of `entity_id
	// Default: <no value>
	ObjectId string `json:"object_id,omitempty"`
	// Flag that defines if switch works in optimistic mode
	// Default: `false` if state or position topic defined, else `true`.
	Optimistic bool `json:"optimistic,omitempty"`
	// The payload that represents the online state
	// Default: online
	PayloadAvailable string `json:"payload_available,omitempty"`
	// The command payload that closes the cover
	// Default: CLOSE
	PayloadClose string `json:"payload_close,omitempty"`
	// The payload that represents the offline state
	// Default: offline
	PayloadNotAvailable string `json:"payload_not_available,omitempty"`
	// The command payload that opens the cover
	// Default: OPEN
	PayloadOpen string `json:"payload_open,omitempty"`
	// The command payload that stops the cover
	// Default: STOP
	PayloadStop string `json:"payload_stop,omitempty"`
	// Number which represents closed position
	// Default: 0
	PositionClosed int `json:"position_closed,omitempty"`
	// Number which represents open position
	// Default: 100
	PositionOpen int `json:"position_open,omitempty"`
	// Defines a [template](/topics/templating/) that can be used to extract the payload for the `position_topic` topic. Within the template the following variables are available: `entity_id`, `position_open`; `position_closed`; `tilt_min`; `tilt_max`. The `entity_id` can be used to reference the entity's attributes with help of the [states](/docs/configuration/templating/#states) template function
	// Default: <no value>
	PositionTemplate string `json:"position_template,omitempty"`
	// The MQTT topic subscribed to receive cover position messages
	// Default: <no value>
	PositionTopic string `json:"position_topic,omitempty"`
	// The maximum QoS level to be used when receiving and publishing messages
	// Default: 0
	Qos int `json:"qos,omitempty"`
	// Defines if published messages should have the retain flag set
	// Default: false
	Retain bool `json:"retain,omitempty"`
	// Defines a [template](/topics/templating/) to define the position to be sent to the `set_position_topic` topic. Incoming position value is available for use in the template `{% raw %}{{ position }}{% endraw %}`. Within the template the following variables are available: `entity_id`, `position`, the target position in percent; `position_open`; `position_closed`; `tilt_min`; `tilt_max`. The `entity_id` can be used to reference the entity's attributes with help of the [states](/docs/configuration/templating/#states) template function
	// Default: <no value>
	SetPositionTemplate string `json:"set_position_template,omitempty"`
	// The MQTT topic to publish position commands to. You need to set position_topic as well if you want to use position topic. Use template if position topic wants different values than within range `position_closed` - `position_open`. If template is not defined and `position_closed != 100` and `position_open != 0` then proper position value is calculated from percentage position
	// Default: <no value>
	SetPositionTopic string `json:"set_position_topic,omitempty"`
	// The payload that represents the closed state
	// Default: closed
	StateClosed string `json:"state_closed,omitempty"`
	// The payload that represents the closing state
	// Default: closing
	StateClosing string `json:"state_closing,omitempty"`
	// The payload that represents the open state
	// Default: open
	StateOpen string `json:"state_open,omitempty"`
	// The payload that represents the opening state
	// Default: opening
	StateOpening string `json:"state_opening,omitempty"`
	// The payload that represents the stopped state (for covers that do not report `open`/`closed` state)
	// Default: stopped
	StateStopped string `json:"state_stopped,omitempty"`
	// The MQTT topic subscribed to receive cover state messages. State topic can only read (`open`, `opening`, `closed`, `closing` or `stopped`) state
	// Default: <no value>
	StateTopic string `json:"state_topic,omitempty"`
	// The value that will be sent on a `close_cover_tilt` command
	// Default: 0
	TiltClosedValue int `json:"tilt_closed_value,omitempty"`
	// Defines a [template](/topics/templating/) that can be used to extract the payload for the `tilt_command_topic` topic. Within the template the following variables are available: `entity_id`, `tilt_position`, the target tilt position in percent; `position_open`; `position_closed`; `tilt_min`; `tilt_max`. The `entity_id` can be used to reference the entity's attributes with help of the [states](/docs/configuration/templating/#states) template function
	// Default: <no value>
	TiltCommandTemplate string `json:"tilt_command_template,omitempty"`
	// The MQTT topic to publish commands to control the cover tilt
	// Default: <no value>
	TiltCommandTopic string `json:"tilt_command_topic,omitempty"`
	// The maximum tilt value
	// Default: 100
	TiltMax int `json:"tilt_max,omitempty"`
	// The minimum tilt value
	// Default: 0
	TiltMin int `json:"tilt_min,omitempty"`
	// The value that will be sent on an `open_cover_tilt` command
	// Default: 100
	TiltOpenedValue int `json:"tilt_opened_value,omitempty"`
	// Flag that determines if tilt works in optimistic mode
	// Default: `true` if `tilt_status_topic` is not defined, else `false`
	TiltOptimistic bool `json:"tilt_optimistic,omitempty"`
	// Defines a [template](/topics/templating/) that can be used to extract the payload for the `tilt_status_topic` topic. Within the template the following variables are available: `entity_id`, `position_open`; `position_closed`; `tilt_min`; `tilt_max`. The `entity_id` can be used to reference the entity's attributes with help of the [states](/docs/configuration/templating/#states) template function
	// Default: <no value>
	TiltStatusTemplate string `json:"tilt_status_template,omitempty"`
	// The MQTT topic subscribed to receive tilt status update values
	// Default: <no value>
	TiltStatusTopic string `json:"tilt_status_topic,omitempty"`
	// An ID that uniquely identifies this cover. If two covers have the same unique ID, Home Assistant will raise an exception
	// Default: <no value>
	UniqueId string `json:"unique_id,omitempty"`
	// Defines a [template](/topics/templating/) that can be used to extract the payload for the `state_topic` topic
	// Default: <no value>
	ValueTemplate string `json:"value_template,omitempty"`
}
// AnnounceTopic returns the topic to announce the discoverable Cover
// Topic has the format below:
// <discovery_prefix>/<component>/<object_id>/config
// 'object_id' is either the UniqueId, the Name, or a hash of the Cover
func (d *Cover) AnnounceTopic(prefix string) string {
topicFormat := "%s/cover/%s/config"
objectID := ""
switch {
case d.UniqueId != "":
objectID = d.UniqueId
case d.Name != "":
objectID = d.Name
default:
objectID = hash(d)
}
return fmt.Sprintf(topicFormat, prefix, objectID)
} | cover.go | 0.850562 | 0.472379 | cover.go | starcoder |
package instasarama
import (
"bytes"
"encoding/hex"
"fmt"
"strings"
)
// The following functions perform the packing and unpacking of the trace context
// according to https://github.com/instana/technical-documentation/tree/master/tracing/specification#kafka
// PackTraceContextHeader packs the trace and span ID into a byte slice to be
// used as (sarama.RecordHeader).Value. The returned slice is always 24 bytes
// long: the first 16 bytes hold the zero-padded trace ID, the last 8 bytes
// the zero-padded span ID.
func PackTraceContextHeader(traceID, spanID string) []byte {
	// Hex uses 2 characters per byte, so odd-length IDs (instana.FormatID()
	// truncates leading zeroes) are left-padded with a single '0'. Without
	// this, hex.Decode() would drop the incomplete trailing byte and corrupt
	// the ID.
	if len(traceID)%2 != 0 {
		traceID = "0" + traceID
	}
	if len(spanID)%2 != 0 {
		spanID = "0" + spanID
	}
	buf := make([]byte, 24)
	// Right-align the trace ID within the first 16 bytes.
	if traceID != "" {
		hex.Decode(buf[16-hex.DecodedLen(len(traceID)):16], []byte(traceID))
	}
	// Right-align the span ID within the last 8 bytes.
	if spanID != "" {
		hex.Decode(buf[24-hex.DecodedLen(len(spanID)):], []byte(spanID))
	}
	return buf
}
// UnpackTraceContextHeader unpacks and returns the trace and span ID, padding
// them with zeroes to 32 and 16 characters correspondingly. It expects the
// provided buffer to have exactly 24 bytes. An all-zero field yields "".
func UnpackTraceContextHeader(val []byte) (string, string, error) {
	if len(val) != 24 {
		return "", "", fmt.Errorf("unexpected value length: want 24, got %d", len(val))
	}
	// Strip the binary zero padding before hex-encoding each field.
	traceID := hex.EncodeToString(bytes.TrimLeft(val[:16], "\000"))
	spanID := hex.EncodeToString(bytes.TrimLeft(val[16:], "\000"))
	// Re-pad non-empty IDs to their canonical widths.
	if traceID != "" && len(traceID) < 32 {
		traceID = strings.Repeat("0", 32-len(traceID)) + traceID
	}
	if spanID != "" && len(spanID) < 16 {
		spanID = strings.Repeat("0", 16-len(spanID)) + spanID
	}
	return traceID, spanID, nil
}
// PackTraceLevelHeader packs the X-INSTANA-L value into a byte slice to be
// used as (sarama.RecordHeader).Value. It returns a 1-byte slice containing
// 0x00 if the passed value is "0" (tracing suppressed), and 0x01 otherwise.
func PackTraceLevelHeader(val string) []byte {
	if val == "0" {
		return []byte{0x00}
	}
	return []byte{0x01}
}
// UnpackTraceLevelHeader returns "1" if the value contains a non-zero byte,
// and "0" otherwise. It expects the provided buffer to have exactly 1 byte.
func UnpackTraceLevelHeader(val []byte) (string, error) {
	if len(val) != 1 {
		return "", fmt.Errorf("unexpected value length: want 1, got %d", len(val))
	}
	if val[0] == 0x00 {
		return "0", nil
	}
	return "1", nil
}
package nmea0183
// DefaultSentances returns a Sentences instance pre-populated with the
// built-in sentence formats and variable definitions from GetDefaultFormats
// and GetDefaultVars.
// NOTE(review): "Sentances" looks like a typo for "Sentences", but the name
// is exported and is kept for backward compatibility.
func DefaultSentances() *Sentences {
	var defaults Sentences
	defaults.formats = GetDefaultFormats()
	defaults.variables = GetDefaultVars()
	return &defaults
}
// GetDefaultVars returns the built-in mapping from variable names to their
// NMEA 0183 field templates. A template is a comma-separated list of field
// codes (e.g. "lat,NS" or "x.x") describing how the raw sentence fields are
// combined into the named value.
func GetDefaultVars() map[string]string{
	vars := map[string]string {
		"arrived_circle": "A",
		"passed_waypt": "A",
		"arrival_radius": "x.x",
		"radius_units":"A",
		"waypt_id": "c--c",
		"ap_status": "A",
		"ap_loran": "A",
		"bearing_to_waypt": "xxx,T",
		"bearing_origin_to_waypt": "xxx,T",
		"bearing_position_to_waypt": "xxx,T",
		"hts": "xxx,T", // Heading to Steer T True or M magnetic
		"ap_mode": "A",
		"faa_mode": "A",
		"nav_status": "A",
		"fix_time": "hhmmss.ss",
		"datetime": "hhmmss,day,month,year,tz",
		"status": "A", // status of fix A = ok ie 1 V = fail ie 0
		"lat": "lat,NS", // formated latitude
		"long": "long,WE", // formated longitude
		"position": "lat,NS,long,WE", //formated lat, long
		"sog": "x.x", // Speed Over Ground float knots
		"tmg": "x.x", // Track Made Good
		"fix_date": "ddmmyy",
		"mag_var": "x.x,w", // Mag Var E positive, W negative
		"day": "DD_day",
		"month": "DD_month",
		"year": "DD_year",
		"tz": "tz_h,tz_m", // Datetime from ZDA if available - tz:m returns minutes part of tx as hh:mm format
		"xte": "x.x,R,N", // Cross Track Error turn R or L eg prefix L12.3N post fix N = Nm
		"acir": "A", // Arrived at way pt circle
		"aper": "A", // Perpendicular passing of way pt
		"bod": "x.x", // Bearing origin to destination
		"bod_true": "T", // Bearing origin to destination True
		"did": "c--c", //Destination Waypoint ID as a str
		"hdm": "x.x,T", // Heading Magnetic
		"dbt": "x.x", // Depth below transducer
		"toff": "-x.x", // Transducer offset -ve from transducer to keel +ve transducer to water line
		"stw": "x.x", // Speed Through Water float knots
		// NOTE(review): GetDefaultFormats references "wd" for the vlw
		// sentence but only "dw" is defined here — confirm which is intended.
		"dw": "x.x", // Water distance since reset float knots
	}
	return vars
}
// GetDefaultFormats returns the built-in mapping from NMEA 0183 sentence
// identifiers (lower case, without the talker prefix) to the ordered list of
// variable names used to decode each sentence's fields. "n/a" marks fields
// that are skipped during parsing.
func GetDefaultFormats() map[string][]string {
	return map[string][]string{
		"aam": {"arrived_circle", "passed_waypt", "arrival_radius", "radius_units", "waypt_id"},
		"apa": {"ap_status", "ap_loran", "xte", "arrived_circle", "passed_waypt", "bearing_to_waypt", "waypt_id"},
		"apb": {"ap_status", "ap_loran", "xte", "arrived_circle", "passed_waypt", "bearing_origin_to_waypt", "waypt_id", "bearing_position_to_waypt", "hts", "ap_mode"},
		"rmc": {"fix_time", "status", "position", "sog", "tmg", "fix_date", "mag_var", "faa_mode", "nav_status"},
		// "zda" previously listed {"time", "day", "month", "year", "tz"};
		// the combined "datetime" variable is used instead.
		"zda": {"datetime"},
		"hdg": {"n/a", "n/a", "n/a", "mag_var"},
		"hdm": {"hdm"},
		"dpt": {"dbt", "toff"},
		"vhm": {"n/a", "n/a", "n/a", "n/a", "stw"},
		// NOTE(review): "wd" has no matching entry in GetDefaultVars (which
		// defines "dw" for water distance) — confirm the intended spelling.
		"vlw": {"n/a", "n/a", "wd"},
	}
}
package list
import (
"testing"
"github.com/calebcase/base/data"
"github.com/calebcase/base/data/eq"
"github.com/calebcase/base/data/monoid"
)
// Class is the type-class constraint for lists of A: an implementation must
// provide the monoid operations over List[A] values.
// NOTE(review): the exact method set comes from monoid.Class, defined in the
// monoid package.
type Class[A any] interface {
	monoid.Class[List[A]]
}
// Type is the canonical Class implementation for List[A]; it embeds the
// monoid instance built by NewType.
type Type[
	A any,
] struct {
	monoid.Type[List[A]]
}
// Ensure Type implements Class.
var _ Class[int] = Type[int]{}
// NewType builds the monoid instance for List[A]: the binary operation is
// list concatenation and the identity element is the empty list.
func NewType[
	A any,
]() Type[A] {
	return Type[A]{
		Type: monoid.NewType(
			func(x, y List[A]) List[A] {
				// Concatenate into a freshly allocated slice so neither
				// argument's backing array is shared or mutated. The
				// original appended onto a new empty literal, silently
				// discarding the capacity reserved by make().
				r := make(List[A], 0, len(x)+len(y))
				r = append(r, x...)
				r = append(r, y...)
				return r
			},
			func() List[A] {
				return List[A]{}
			},
		),
	}
}
// List is a slice-backed list of A.
type List[A any] []A
// Ensure List implements data.Data.
var _ data.Data[int] = List[int]{}
// DEmpty reports whether the list has no elements.
func (l List[A]) DEmpty() bool {
	return len(l) == 0
}
// DValue returns the first element; it panics on an empty list.
func (l List[A]) DValue() A {
	return l[0]
}
// DRest returns the tail of the list, or nil when there is at most one
// element left.
func (l List[A]) DRest() data.Data[A] {
	if len(l) > 1 {
		return l[1:]
	}
	return nil
}
// NewEqualFn returns a list equality checking function given the eq.Class for
// the type A. Two lists are equal when they have the same length and every
// pair of corresponding elements compares equal under e.
func NewEqualFn[A any, LA ~[]A](e eq.Class[A]) func(x, y LA) bool {
	return func(x, y LA) bool {
		if len(x) != len(y) {
			return false
		}
		for i := range x {
			if e.NE(x[i], y[i]) {
				return false
			}
		}
		return true
	}
}
// List transformations
// Map applies fn to every element of xs and returns the results in order.
func Map[A, B any, LA ~[]A](fn func(A) B, xs LA) []B {
	ys := make([]B, len(xs))
	for i := range xs {
		ys[i] = fn(xs[i])
	}
	return ys
}
// Reverse returns a new slice with the elements of xs in opposite order.
func Reverse[A any, LA ~[]A](xs LA) []A {
	n := len(xs)
	ys := make([]A, n)
	for i := n - 1; i >= 0; i-- {
		ys[n-1-i] = xs[i]
	}
	return ys
}
// Intersperse returns xs with v inserted between every pair of adjacent
// elements; the final element is not followed by a separator.
func Intersperse[A any, LA ~[]A](v A, xs LA) []A {
	if len(xs) == 0 {
		return []A{}
	}
	// n elements plus n-1 separators need exactly 2n-1 slots. The original
	// reserved only len+len/2, forcing a reallocation while appending.
	ys := make([]A, 0, 2*len(xs)-1)
	for i, x := range xs {
		if i > 0 {
			ys = append(ys, v)
		}
		ys = append(ys, x)
	}
	return ys
}
// Intercalate inserts xs between every pair of element slices in xss and
// concatenates the result (cf. Haskell's Data.List.intercalate).
func Intercalate[A any, LA ~[]A, LLA ~[]LA](xs LA, xss LLA) []A {
	return Concat(Intersperse(xs, xss))
}
// Transpose swaps rows and columns of xss. Ragged input is supported: output
// row j collects element j from every input row that is long enough.
func Transpose[A any, LA ~[]A, LLA ~[]LA](xss LLA) [][]A {
	result := [][]A{}
	for _, row := range xss {
		for j := range row {
			// Grow the output until column j has a destination row.
			for len(result) <= j {
				result = append(result, []A{})
			}
			result[j] = append(result[j], row[j])
		}
	}
	return result
}
// FoldR right-folds xs with f, using z as the initial accumulator:
// FoldR(f, z, [x0, x1, ..., xn]) == f(x0, f(x1, ... f(xn, z))).
// This iterative form walks the slice from the end, producing the same
// result as the recursive definition without growing the call stack.
func FoldR[A, B any, LA ~[]A](f func(A, B) B, z B, xs LA) B {
	acc := z
	for i := len(xs) - 1; i >= 0; i-- {
		acc = f(xs[i], acc)
	}
	return acc
}
// NonEmptySubsequences returns every non-empty subsequence of la, in the
// same interleaved order as Haskell's Data.List.subsequences (minus the
// empty subsequence): for [1,2] it yields [[1] [2] [1 2]].
func NonEmptySubsequences[A any, LA ~[]A](la LA) [][]A {
	if len(la) == 0 {
		return [][]A{}
	}
	x := la[0]
	xs := la[1:]
	// For each subsequence ys of the tail, emit ys itself followed by x
	// prepended to ys, threading the remainder r through the right fold.
	f := func(ys []A, r [][]A) [][]A {
		m := append([]A{x}, ys...)
		return append([][]A{ys, m}, r...)
	}
	return append([][]A{{x}}, FoldR(f, [][]A{}, NonEmptySubsequences(xs))...)
}
// Subsequences returns every subsequence of xs, starting with the empty one
// (cf. Haskell's Data.List.subsequences).
func Subsequences[A any, LA ~[]A](xs LA) [][]A {
	return append([][]A{{}}, NonEmptySubsequences(xs)...)
}
type T[A any] interface{}
// Concat flattens xss into a single slice containing every element in order.
// The result is always non-nil, even for empty input.
func Concat[A any, LA ~[]A, LLA ~[]LA](xss LLA) []A {
	// Pre-size the result to the total element count so appending never
	// reallocates (the original grew the slice incrementally).
	total := 0
	for _, xs := range xss {
		total += len(xs)
	}
	result := make([]A, 0, total)
	for _, xs := range xss {
		result = append(result, xs...)
	}
	return result
}
// Conform returns a function testing if the implementation abides by its laws.
func Conform[A any, CA Class[A]](c CA) func(t *testing.T, x, y, z List[A]) {
return func(t *testing.T, x, y, z List[A]) {
t.Run("monoid.Conform", func(t *testing.T) {
monoid.Conform[List[A]](c)(t, x, y, z)
})
}
} | data/list/list.go | 0.665737 | 0.643133 | list.go | starcoder |
// Package jp provides holiday definitions for Japan.
package jp
import (
"github.com/rickar/cal/v2"
"github.com/rickar/cal/v2/aa"
"math"
"time"
)
var (
	// Standard Japan weekend substitution rules: a holiday that falls on a
	// Sunday is observed on the following Monday instead.
	weekendAlt = []cal.AltDay{
		{Day: time.Sunday, Offset: 1},
	}
	// NewYear represents New Year's Day on 1-Jan
	NewYear = aa.NewYear.Clone(&cal.Holiday{Type: cal.ObservancePublic, Observed: weekendAlt})
	// ComingOfAgeDay represents Coming of Age Day on the 2nd Monday in January
	ComingOfAgeDay = &cal.Holiday{
		Name:    "Coming of Age Day",
		Type:    cal.ObservancePublic,
		Month:   time.January,
		Weekday: time.Monday,
		Offset:  2,
		Func:    cal.CalcWeekdayOffset,
	}
	// NationalFoundationDay represents National Foundation Day on 11-February
	NationalFoundationDay = &cal.Holiday{
		Name:     "National Foundation Day",
		Type:     cal.ObservancePublic,
		Month:    time.February,
		Day:      11,
		Observed: weekendAlt,
		Func:     cal.CalcDayOfMonth,
	}
	// TheEmperorsBirthday represents The Emperor's Birthday on 23-February
	TheEmperorsBirthday = &cal.Holiday{
		Name:     "The Emperor's Birthday",
		Type:     cal.ObservancePublic,
		Month:    time.February,
		Day:      23,
		Observed: weekendAlt,
		Func: func(h *cal.Holiday, year int) time.Time {
			if year <= 2019 {
				// Emperor Akihito abdicated in 2019.
				// Through 2019 the holiday is computed on 23-December
				// (his birthday) rather than the February date above.
				holiday := *h
				holiday.Month = time.December
				holiday.Day = 23
				return cal.CalcDayOfMonth(&holiday, year)
			}
			return cal.CalcDayOfMonth(h, year)
		},
	}
// VernalEquinoxDay represents Vernal Equinox Day on Around 20-March
VernalEquinoxDay = &cal.Holiday{
Name: "Vernal Equinox Day",
Type: cal.ObservancePublic,
Month: time.March,
Observed: weekendAlt,
Func: func(h *cal.Holiday, year int) time.Time {
holiday := h
holiday.Day = calcVernalEquinoxDate(year)
return cal.CalcDayOfMonth(holiday, year)
},
}
	// ShowaDay represents Showa Day on 29-April
	ShowaDay = &cal.Holiday{
		Name:     "Showa Day",
		Type:     cal.ObservancePublic,
		Month:    time.April,
		Day:      29,
		Observed: weekendAlt,
		Func:     cal.CalcDayOfMonth,
	}
	// ConstitutionMemorialDay represents Constitution Memorial Day on 3-May
	ConstitutionMemorialDay = &cal.Holiday{
		Name:  "Constitution Memorial Day",
		Type:  cal.ObservancePublic,
		Month: time.May,
		Day:   3,
		// A Sunday shifts by 3 days (to 6-May) because 4-May and 5-May are
		// themselves holidays.
		Observed: []cal.AltDay{
			{Day: time.Sunday, Offset: 3},
		},
		Func: cal.CalcDayOfMonth,
	}
	// GreeneryDay represents Greenery Day on 4-May
	GreeneryDay = &cal.Holiday{
		Name:  "Greenery Day",
		Type:  cal.ObservancePublic,
		Month: time.May,
		Day:   4,
		// A Sunday shifts by 2 days (to 6-May) because 5-May is itself a
		// holiday.
		Observed: []cal.AltDay{
			{Day: time.Sunday, Offset: 2},
		},
		Func: cal.CalcDayOfMonth,
	}
	// ChildrensDay represents Children's Day on 5-May
	ChildrensDay = &cal.Holiday{
		Name:     "Children's Day",
		Type:     cal.ObservancePublic,
		Month:    time.May,
		Day:      5,
		Observed: weekendAlt,
		Func:     cal.CalcDayOfMonth,
	}
	// MarineDay represents Marine Day on the 3rd Monday in July
	MarineDay = &cal.Holiday{
		Name:    "Marine Day",
		Type:    cal.ObservancePublic,
		Month:   time.July,
		Weekday: time.Monday,
		Offset:  3,
		Func: func(h *cal.Holiday, year int) time.Time {
			if year == 2020 || year == 2021 {
				// As special arrangement for the 2020 Summer Olympics, the 2020 and 2021 date for Marine Day was moved
				// to the 4th Thursday of July.
				holiday := *h
				holiday.Weekday = time.Thursday
				holiday.Offset = 4
				return cal.CalcWeekdayOffset(&holiday, year)
			}
			return cal.CalcWeekdayOffset(h, year)
		},
	}
	// MountainDay represents Mountain Day on 11-August
	// (a public holiday only since its introduction in 2016, see StartYear).
	MountainDay = &cal.Holiday{
		Name:      "Mountain Day",
		Type:      cal.ObservancePublic,
		Month:     time.August,
		Day:       11,
		Observed:  weekendAlt,
		Func:      cal.CalcDayOfMonth,
		StartYear: 2016,
	}
	// RespectForTheAgedDay represents Respect for the Aged Day on the 3rd Monday in September
	RespectForTheAgedDay = &cal.Holiday{
		Name:    "Respect for the Aged Day",
		Type:    cal.ObservancePublic,
		Month:   time.September,
		Weekday: time.Monday,
		Offset:  3,
		Func:    cal.CalcWeekdayOffset,
	}
	// AutumnalEquinoxDay represents Autumnal Equinox Day on Around 23-September
	AutumnalEquinoxDay = &cal.Holiday{
		Name:     "Autumnal Equinox Day",
		Type:     cal.ObservancePublic,
		Month:    time.September,
		Observed: weekendAlt,
		Func: func(h *cal.Holiday, year int) time.Time {
			// Copy the holiday so the computed day never mutates the
			// shared package-level value.
			holiday := *h
			holiday.Day = calcAutumnalEquinoxDate(year)
			return cal.CalcDayOfMonth(&holiday, year)
		},
	}
	// SportsDay represents Sports Day on the 2nd Monday in October
	SportsDay = &cal.Holiday{
		Name:    "Sports Day",
		Type:    cal.ObservancePublic,
		Month:   time.October,
		Weekday: time.Monday,
		Offset:  2,
		Func: func(h *cal.Holiday, year int) time.Time {
			if year == 2020 || year == 2021 {
				// As special arrangement for the 2020 Summer Olympics, the 2020 and 2021 date for Sports Day was moved
				// to the 4th Friday of July.
				holiday := *h
				holiday.Month = time.July
				holiday.Weekday = time.Friday
				holiday.Offset = 4
				return cal.CalcWeekdayOffset(&holiday, year)
			}
			return cal.CalcWeekdayOffset(h, year)
		},
	}
	// CultureDay represents Culture Day on 3-November
	CultureDay = &cal.Holiday{
		Name:     "Culture Day",
		Type:     cal.ObservancePublic,
		Month:    time.November,
		Day:      3,
		Observed: weekendAlt,
		Func:     cal.CalcDayOfMonth,
	}
// LaborThanksgivingDay represents Labor Thanksgiving Day on 23-November
LaborThanksgivingDay = &cal.Holiday{
Name: "<NAME>",
Type: cal.ObservancePublic,
Month: time.November,
Day: 23,
Observed: weekendAlt,
Func: cal.CalcDayOfMonth,
}
// NationalHolidayBetweenRespectForTheAgedDayAndAutumnalEquinoxDay represents National holiday between Respect for the Aged Day and Autumnal Equinox Day in September
NationalHolidayBetweenRespectForTheAgedDayAndAutumnalEquinoxDay = &cal.Holiday{
Name: "National holiday between Respect for the Aged Day and Autumnal Equinox Day",
Type: cal.ObservancePublic,
Month: time.September,
Func: func(h *cal.Holiday, year int) time.Time {
switch year {
// only dates in September 2009 - 2032 are supported
case 2009:
return time.Date(year, h.Month, 22, 0, 0, 0, 0, cal.DefaultLoc)
case 2015:
return time.Date(year, h.Month, 22, 0, 0, 0, 0, cal.DefaultLoc)
case 2026:
return time.Date(year, h.Month, 22, 0, 0, 0, 0, cal.DefaultLoc)
case 2032:
return time.Date(year, h.Month, 21, 0, 0, 0, 0, cal.DefaultLoc)
default:
return time.Time{}
}
},
}
	// NationalHolidayBetweenShowaDayAndNewEmperorEnthronementDay represents
	// National Holiday Between Showa Day And New Emperor Enthronement Day on 30-April 2019
	// (a one-off holiday; StartYear and EndYear restrict it to 2019 only).
	NationalHolidayBetweenShowaDayAndNewEmperorEnthronementDay = &cal.Holiday{
		Name:      "National Holiday Between Showa Day And New Emperor Enthronement Day",
		Type:      cal.ObservancePublic,
		Month:     time.April,
		Day:       30,
		Func:      cal.CalcDayOfMonth,
		StartYear: 2019,
		EndYear:   2019,
	}
	// TheNewEmperorEnthronementDay represents The New Emperor Enthronement Day on 1-May 2019
	TheNewEmperorEnthronementDay = &cal.Holiday{
		Name:      "New Emperor Enthronement Day",
		Type:      cal.ObservancePublic,
		Month:     time.May,
		Day:       1,
		Func:      cal.CalcDayOfMonth,
		StartYear: 2019,
		EndYear:   2019,
	}
	// NationalHolidayBetweenTheNewEmperorEnthronementDayAndConstitutionMemorialDay represents
	// National holiday between The New Emperor Enthronement Day and Constitution Memorial Day on 2-May 2019
	NationalHolidayBetweenTheNewEmperorEnthronementDayAndConstitutionMemorialDay = &cal.Holiday{
		Name:      "National holiday between New Emperor Enthronement Day and Constitution Memorial Day",
		Type:      cal.ObservancePublic,
		Month:     time.May,
		Day:       2,
		Func:      cal.CalcDayOfMonth,
		StartYear: 2019,
		EndYear:   2019,
	}
	// TheNewEmperorEnthronementCeremony represents The New Emperor Enthronement Day on 22-October 2019
	TheNewEmperorEnthronementCeremony = &cal.Holiday{
		Name:      "The New Emperor Enthronement Ceremony",
		Type:      cal.ObservancePublic,
		Month:     time.October,
		Day:       22,
		Func:      cal.CalcDayOfMonth,
		StartYear: 2019,
		EndYear:   2019,
	}
	// exceptionalNationalHolidays collects the one-off and rule-based extra
	// holidays that do not recur every year.
	exceptionalNationalHolidays = []*cal.Holiday{
		NationalHolidayBetweenRespectForTheAgedDayAndAutumnalEquinoxDay,
		NationalHolidayBetweenShowaDayAndNewEmperorEnthronementDay,
		TheNewEmperorEnthronementDay,
		NationalHolidayBetweenTheNewEmperorEnthronementDayAndConstitutionMemorialDay,
		TheNewEmperorEnthronementCeremony,
	}
	// Holidays provides the full list of Japanese national holidays: the
	// recurring ones plus the exceptional entries above.
	Holidays = append(
		[]*cal.Holiday{
			NewYear,
			ComingOfAgeDay,
			NationalFoundationDay,
			TheEmperorsBirthday,
			VernalEquinoxDay,
			ShowaDay,
			ConstitutionMemorialDay,
			GreeneryDay,
			ChildrensDay,
			MarineDay,
			MountainDay,
			RespectForTheAgedDay,
			AutumnalEquinoxDay,
			SportsDay,
			CultureDay,
			LaborThanksgivingDay,
		},
		exceptionalNationalHolidays...,
	)
)
// calcVernalEquinoxDate approximates the day-of-month (in March) of the
// vernal equinox for the given year. The era offsets cover 1851-2150;
// outside that range no offset is applied.
func calcVernalEquinoxDate(year int) int {
	return equinoxDay(year, 19.8277, 20.8357, 20.8431, 21.8510)
}

// calcAutumnalEquinoxDate approximates the day-of-month (in September) of
// the autumnal equinox for the given year. The era offsets cover 1851-2150;
// outside that range no offset is applied.
func calcAutumnalEquinoxDate(year int) int {
	return equinoxDay(year, 22.2588, 23.2588, 23.2488, 24.2488)
}

// equinoxDay adds the era-specific offset to the shared drift term and
// truncates to a whole day.
func equinoxDay(year int, o1851, o1900, o1980, o2100 float64) int {
	val := calcEquinoxBase(year)
	switch {
	case 1851 <= year && year <= 1899:
		val += o1851
	case 1900 <= year && year <= 1979:
		val += o1900
	case 1980 <= year && year <= 2099:
		val += o1980
	case 2100 <= year && year <= 2150:
		val += o2100
	}
	return int(math.Floor(val))
}

// calcEquinoxBase is the common linear drift term of the equinox
// approximation, anchored at 1980 and corrected for leap years.
func calcEquinoxBase(year int) float64 {
	return 0.242194*float64(year-1980) - math.Floor(float64(year-1980)/4.0)
}
package xmath
// Evolution iterates over the cartesian product of several Sequence
// parameter spaces, applying one combination of values per step.
type Evolution struct {
	// i indexes the next combination to apply.
	i int
	// combinations holds every point of the parameter space.
	combinations [][]float64
	// procedures are the sequences whose target values get updated.
	procedures []*Sequence
}
// NewEvolution builds an Evolution over the given procedures: each
// procedure's full value range is materialised up front and the cartesian
// product of those ranges becomes the iteration space.
func NewEvolution(procedures ...*Sequence) *Evolution {
	// build the parameter space
	parameters := make([][]float64, len(procedures))
	for i, proc := range procedures {
		// Run exhausts the sequence (and resets it), returning its values.
		parameters[i] = proc.Run()
	}
	combinations := CartesianProduct(parameters, 0, len(parameters))
	return &Evolution{
		combinations: combinations,
		procedures: procedures,
	}
}
// Limit returns the amount of iterations defined in this evolution
func (e *Evolution) Limit() int {
	return len(e.combinations)
}
// Current returns the current state of the evolution
// (the number of combinations applied so far).
func (e *Evolution) Current() int {
	return e.i
}
// Next updates the procedures with the next value
// it returns true if there was an update and false if there is nothing more to evolve.
func (e *Evolution) Next() bool {
	if e.i >= len(e.combinations) {
		return false
	}
	// Push each coordinate of the current combination into the matching
	// sequence's target value.
	for i, value := range e.combinations[e.i] {
		e.procedures[i].set(value)
	}
	e.i++
	return true
}
// Sequence steps an externally owned float64 through a series of values
// produced by a Transform, up to a fixed number of steps.
type Sequence struct {
	// initialValue is the first value; it is restored by Reset.
	initialValue float64
	// value points at the externally owned parameter being mutated.
	value *float64
	// limit is the number of steps in the sequence.
	limit int
	// count tracks how many steps have been taken so far.
	count int
	Transform
}
// PerturbationSequence builds a Sequence that sweeps `limit` values of width
// `step` centred around the current *value, each rounded to `rounding`
// decimal places.
func PerturbationSequence(value *float64, step float64, limit, rounding int) *Sequence {
	transform := IncNum(step, rounding)
	// Start half the total span below the current value so the sweep is
	// centred on it.
	initialValue := *value - (step * float64(limit) / 2)
	s := &Sequence{
		initialValue: initialValue,
		value: value,
		limit: limit,
		Transform: transform,
	}
	s.set(initialValue)
	return s
}
// RangeSequence builds a Sequence that sweeps from start towards end in
// `limit` equal steps, each value rounded to `rounding` decimal places.
func RangeSequence(value *float64, start, end float64, limit, rounding int) *Sequence {
	step := (end - start) / float64(limit)
	transform := IncNum(step, rounding)
	s := &Sequence{
		initialValue: start,
		value: value,
		limit: limit,
		Transform: transform,
	}
	s.set(start)
	return s
}
// NewSequence builds a Sequence that starts from the current *value and
// advances it with the supplied transform for at most `limit` steps.
func NewSequence(value *float64, transform Transform, limit int) *Sequence {
	return &Sequence{
		initialValue: *value,
		value: value,
		limit: limit,
		Transform: transform,
	}
}
// Next advances the sequence by one step and reports whether the limit has
// been reached. The very first call leaves the value at its starting point;
// subsequent calls apply the Transform to produce the next value.
func (p *Sequence) Next() bool {
	p.count++
	if p.count > 1 {
		newValue := p.Transform(*p.value)
		p.set(newValue)
	}
	return p.count >= p.limit
}
// set writes newValue through to the externally owned parameter.
func (p *Sequence) set(newValue float64) {
	*p.value = newValue
}
// Reset restores the initial value and rewinds the step counter.
func (p *Sequence) Reset() {
	*p.value = p.initialValue
	p.count = 0
}
// Run exhausts the sequence, collecting every value it produces, and leaves
// the sequence reset to its initial state.
func (p *Sequence) Run() []float64 {
	values := make([]float64, 0)
	var done bool
	for !done {
		done = p.Next()
		values = append(values, *p.value)
		if done {
			p.Reset()
		}
	}
	return values
}
type Transform func(v float64) float64
func IncNum(w float64, rounding int) Transform {
return func(v float64) float64 {
return Round(rounding)(v + w)
}
}
func IncMul(w float64, rounding int) Transform {
return func(v float64) float64 {
return Round(rounding)(v * w)
}
} | oremi/vendor/github.com/drakos74/go-ex-machina/xmath/evolution.go | 0.82485 | 0.531209 | evolution.go | starcoder |
package iso20022
// PaymentInstruction11 is an instruction to pay an amount of money to an
// ultimate beneficiary, on behalf of an originator. This instruction may have
// to be forwarded several times to complete the settlement chain.
// Optional fields carry the `omitempty` JSON tag and are elided when unset.
type PaymentInstruction11 struct {
	// Reference assigned by a sending party to unambiguously identify the payment information block within the message.
	PaymentInformationIdentification *Max35Text `json:"PmtInfId,omitempty"`
	// Specifies the means of payment that will be used to move the amount of money.
	PaymentMethod *PaymentMethod7Code `json:"PmtMtd"`
	// Set of elements used to further specify the type of transaction.
	PaymentTypeInformation *PaymentTypeInformation19 `json:"PmtTpInf,omitempty"`
	// Date at which the initiating party requests the clearing agent to process the payment. If payment by cheque, the date when the cheque must be generated by the bank.
	//
	// Usage: This is the date on which the debtor's account(s) is (are) to be debited.
	RequestedExecutionDate *ISODate `json:"ReqdExctnDt"`
	// Party that owes an amount of money to the (ultimate) creditor.
	Debtor *PartyIdentification43 `json:"Dbtr"`
	// Account used to process charges associated with a transaction.
	DebtorAccount *CashAccount24 `json:"DbtrAcct,omitempty"`
	// Financial institution servicing an account for the debtor.
	DebtorAgent *BranchAndFinancialInstitutionIdentification5 `json:"DbtrAgt"`
	// Ultimate party that owes an amount of money to the (ultimate) creditor.
	UltimateDebtor *PartyIdentification43 `json:"UltmtDbtr,omitempty"`
	// Specifies which party/parties will bear the charges associated with the processing of the payment transaction.
	ChargeBearer *ChargeBearerType1Code `json:"ChrgBr,omitempty"`
	// Payment processes required to transfer cash from the debtor to the creditor.
	CreditTransferTransaction []*CreditTransferTransaction10 `json:"CdtTrfTx"`
}
// SetPaymentInformationIdentification sets the sender-assigned identifier of
// this payment information block.
func (p *PaymentInstruction11) SetPaymentInformationIdentification(value string) {
	p.PaymentInformationIdentification = (*Max35Text)(&value)
}
// SetPaymentMethod sets the means of payment used to move the money.
func (p *PaymentInstruction11) SetPaymentMethod(value string) {
	p.PaymentMethod = (*PaymentMethod7Code)(&value)
}
// AddPaymentTypeInformation allocates and returns the payment type
// information element for further population.
func (p *PaymentInstruction11) AddPaymentTypeInformation() *PaymentTypeInformation19 {
	p.PaymentTypeInformation = new(PaymentTypeInformation19)
	return p.PaymentTypeInformation
}
// SetRequestedExecutionDate sets the date the clearing agent should process
// the payment.
func (p *PaymentInstruction11) SetRequestedExecutionDate(value string) {
	p.RequestedExecutionDate = (*ISODate)(&value)
}
// AddDebtor allocates and returns the debtor identification element.
func (p *PaymentInstruction11) AddDebtor() *PartyIdentification43 {
	p.Debtor = new(PartyIdentification43)
	return p.Debtor
}
// AddDebtorAccount allocates and returns the debtor account element.
func (p *PaymentInstruction11) AddDebtorAccount() *CashAccount24 {
	p.DebtorAccount = new(CashAccount24)
	return p.DebtorAccount
}
// AddDebtorAgent allocates and returns the debtor agent element.
func (p *PaymentInstruction11) AddDebtorAgent() *BranchAndFinancialInstitutionIdentification5 {
	p.DebtorAgent = new(BranchAndFinancialInstitutionIdentification5)
	return p.DebtorAgent
}
// AddUltimateDebtor allocates and returns the ultimate debtor element.
func (p *PaymentInstruction11) AddUltimateDebtor() *PartyIdentification43 {
	p.UltimateDebtor = new(PartyIdentification43)
	return p.UltimateDebtor
}
// SetChargeBearer sets which party/parties bear the processing charges.
func (p *PaymentInstruction11) SetChargeBearer(value string) {
	p.ChargeBearer = (*ChargeBearerType1Code)(&value)
}
func (p *PaymentInstruction11) AddCreditTransferTransaction() *CreditTransferTransaction10 {
newValue := new (CreditTransferTransaction10)
p.CreditTransferTransaction = append(p.CreditTransferTransaction, newValue)
return newValue
} | PaymentInstruction11.go | 0.72952 | 0.427397 | PaymentInstruction11.go | starcoder |
package searchalg
import (
"math"
"math/rand"
"time"
)
const BOLTZMAN_CONSTANT = 8.6173432e-5
// Function is the behavior required of a model whose optimum is searched.
type Function interface {
	// Compute returns the value of the objective function for the problem.
	Compute() float64
	// Reconfigure changes the value(s) of the model, representing another
	// point in the solution domain.
	Reconfigure()
	// Assign copies the values and state from another instance, another
	// point in the solution domain.
	Assign(f Function)
	// IsValid checks if the configuration is a valid point in the solution
	// domain.
	IsValid() bool
	// Clone returns an independent copy of the model.
	Clone() Function
}
// AnnealingContext holds the tunable parameters of a simulated annealing run.
type AnnealingContext struct {
	// The temperature when the process start.
	InitialTemperature float64
	// The temperature when the process finish.
	FinalTemperature float64
	// Cooling percentage after each iteration — presumably a fraction in
	// (0, 1) so the temperature decreases; confirm with callers.
	Cooling float64
	// Number of attempts to lower the temperature.
	Steps int
	// Time to finish, in seconds; the run stops when this wall-clock budget
	// elapses even if the final temperature could not be achieved.
	Deadline int64
}
/*
Given an annealing context and two copies of the model that we want to
optimize, SimulatedAnnealing function run the simulation connecting the
annealing, a physical process, with the behavior of the model implemented
the Function interface.
*/
func SimulatedAnnealing(ctx AnnealingContext, best Function) {
rand.Seed(time.Now().UnixNano())
var passoAtual int
var delta float64
var sorteio float64
var prob float64
last := best.Clone()
energiaInicial := best.Compute() // Pega a energia inicial do sistema
energiaFinal := 0.0
// Calcula o momento de termino T.
fim := time.Now().Local().Add(time.Second * time.Duration(ctx.Deadline))
// Processa o resfriamento do sistema.
for temperatura := ctx.InitialTemperature; fim.After(time.Now()) && temperatura > ctx.FinalTemperature; temperatura = (1 - ctx.Cooling) * temperatura {
// Busca uma configuração para a dada temperatura
//um certo número de vezes.
for passoAtual = ctx.Steps; passoAtual >= 0; passoAtual -= 1 {
best.Reconfigure()
// Calcula a energia atual do sistema.
energiaFinal = best.Compute()
// Calcula a variação de energia.
delta = (energiaFinal - energiaInicial)
// Compute some probability
sorteio = rand.Float64()
// Compute Boltzman probability
prob = math.Exp((-1 * delta) / (BOLTZMAN_CONSTANT * temperatura))
// Verifica se aceita a nova configuração.
// Para uma nova configuração ser aceita além da variação de energia e da probabilidade
// deve atender as restrições do problema.
if (delta <= 0 || sorteio < prob) && best.IsValid() {
if delta != 0 {
energiaInicial = energiaFinal
last.Assign(best)
}
} else {
energiaFinal = energiaInicial
}
}
best.Assign(last)
passoAtual = ctx.Steps
}
} | annealing.go | 0.633183 | 0.545286 | annealing.go | starcoder |
package network
import (
"bytes"
"strings"
"github.com/AnneNamuli/go-stellar/hash"
"github.com/AnneNamuli/go-stellar/support/errors"
"github.com/AnneNamuli/go-stellar/xdr"
)
const (
	// PublicNetworkPassphrase is the pass phrase used for every transaction intended for the public stellar network
	PublicNetworkPassphrase = "Public Global Stellar Network ; September 2015"
	// TestNetworkPassphrase is the pass phrase used for every transaction intended for the SDF-run test network.
	// Restored from a "<PASSWORD>" redaction placeholder; this is the
	// well-known SDF testnet passphrase.
	TestNetworkPassphrase = "Test SDF Network ; September 2015"
)
// ID returns the network ID derived from the provided passphrase. This value
// also happens to be the raw (i.e. not strkey encoded) secret key for the root
// account of the network.
func ID(passphrase string) [32]byte {
	// The network ID is the hash of the raw passphrase bytes (see the
	// hash package for the exact digest used).
	return hash.Hash([]byte(passphrase))
}
// HashTransactionInEnvelope derives the network specific hash for the transaction
// contained in the provided envelope using the network identified by the supplied passphrase.
// The resulting hash is the value that can be signed by stellar secret key to
// authorize the transaction identified by the hash to stellar validators.
func HashTransactionInEnvelope(envelope xdr.TransactionEnvelope, passphrase string) ([32]byte, error) {
	var hash [32]byte
	var err error
	// Dispatch on envelope type: v1, legacy v0 and fee-bump envelopes each
	// carry their transaction in a different field.
	switch envelope.Type {
	case xdr.EnvelopeTypeEnvelopeTypeTx:
		hash, err = HashTransaction(envelope.V1.Tx, passphrase)
	case xdr.EnvelopeTypeEnvelopeTypeTxV0:
		hash, err = HashTransactionV0(envelope.V0.Tx, passphrase)
	case xdr.EnvelopeTypeEnvelopeTypeTxFeeBump:
		hash, err = HashFeeBumpTransaction(envelope.FeeBump.Tx, passphrase)
	default:
		err = errors.New("invalid transaction type")
	}
	return hash, err
}
// HashTransaction derives the network specific hash for the provided
// transaction using the network identified by the supplied passphrase. The
// resulting hash is the value that can be signed by stellar secret key to
// authorize the transaction identified by the hash to stellar validators.
func HashTransaction(tx xdr.Transaction, passphrase string) ([32]byte, error) {
	// Tag the transaction as a plain v1 transaction before hashing.
	taggedTx := xdr.TransactionSignaturePayloadTaggedTransaction{
		Type: xdr.EnvelopeTypeEnvelopeTypeTx,
		Tx: &tx,
	}
	return hashTx(taggedTx, passphrase)
}
// HashFeeBumpTransaction derives the network specific hash for the provided
// fee bump transaction using the network identified by the supplied passphrase. The
// resulting hash is the value that can be signed by stellar secret key to
// authorize the transaction identified by the hash to stellar validators.
func HashFeeBumpTransaction(tx xdr.FeeBumpTransaction, passphrase string) ([32]byte, error) {
	// Tag the transaction as a fee-bump transaction before hashing.
	taggedTx := xdr.TransactionSignaturePayloadTaggedTransaction{
		Type: xdr.EnvelopeTypeEnvelopeTypeTxFeeBump,
		FeeBump: &tx,
	}
	return hashTx(taggedTx, passphrase)
}
// HashTransactionV0 derives the network specific hash for the provided
// legacy transaction using the network identified by the supplied passphrase. The
// resulting hash is the value that can be signed by stellar secret key to
// authorize the transaction identified by the hash to stellar validators.
func HashTransactionV0(tx xdr.TransactionV0, passphrase string) ([32]byte, error) {
	// Promote the raw ed25519 source account to a muxed account so the v0
	// transaction can be re-expressed as a v1 transaction before hashing.
	sa, err := xdr.NewMuxedAccount(xdr.CryptoKeyTypeKeyTypeEd25519, tx.SourceAccountEd25519)
	if err != nil {
		return [32]byte{}, err
	}
	v1Tx := xdr.Transaction{
		SourceAccount: sa,
		Fee: tx.Fee,
		Memo: tx.Memo,
		Operations: tx.Operations,
		SeqNum: tx.SeqNum,
		TimeBounds: tx.TimeBounds,
	}
	return HashTransaction(v1Tx, passphrase)
}
func hashTx(
tx xdr.TransactionSignaturePayloadTaggedTransaction,
passphrase string,
) ([32]byte, error) {
if strings.TrimSpace(passphrase) == "" {
return [32]byte{}, errors.New("empty network passphrase")
}
var txBytes bytes.Buffer
payload := xdr.TransactionSignaturePayload{
NetworkId: ID(passphrase),
TaggedTransaction: tx,
}
_, err := xdr.Marshal(&txBytes, payload)
if err != nil {
return [32]byte{}, errors.Wrap(err, "marshal tx failed")
}
return hash.Hash(txBytes.Bytes()), nil
} | network/main.go | 0.819569 | 0.436502 | main.go | starcoder |
package effects
import (
"math"
"github.com/faiface/beep"
)
type (
	// equalizer wraps a beep.Streamer and applies a chain of filter
	// sections to every buffer streamed through it.
	//
	// This parametric equalizer is based on the GK Nilsen's post at:
	// https://octovoid.com/2017/11/04/coding-a-parametric-equalizer-for-audio-applications/
	equalizer struct {
		streamer beep.Streamer
		sections []section
	}

	// section holds one band's IIR filter coefficients (a, b) and its
	// per-channel input/output history (xPast, yPast). Index 0 of the
	// outer arrays/pairs is the left channel, index 1 the right.
	section struct {
		a, b [2][]float64

		xPast, yPast [][2]float64
	}

	// EqualizerSections is the interface that is passed into NewEqualizer
	EqualizerSections interface {
		sections(fs float64) []section
	}

	// StereoEqualizerSection describes one equalizer band with
	// independent parameters for the left and right channels.
	StereoEqualizerSection struct {
		Left MonoEqualizerSection
		Right MonoEqualizerSection
	}

	// MonoEqualizerSection describes one equalizer band applied
	// identically to both channels.
	MonoEqualizerSection struct {
		// F0 (center frequency) sets the mid-point of the section’s
		// frequency range and is given in Hertz [Hz].
		F0 float64
		// Bf (bandwidth) represents the width of the section across
		// frequency and is measured in Hertz [Hz]. A low bandwidth
		// corresponds to a narrow frequency range meaning that the
		// section will concentrate its operation to only the
		// frequencies close to the center frequency. On the other hand,
		// a high bandwidth yields a section of wide frequency range —
		// affecting a broader range of frequencies surrounding the
		// center frequency.
		Bf float64
		// GB (bandwidth gain) is given in decibels [dB] and represents
		// the level at which the bandwidth is measured. That is, to
		// have a meaningful measure of bandwidth, we must define the
		// level at which it is measured.
		GB float64
		// G0 (reference gain) is given in decibels [dB] and simply
		// represents the level of the section’s offset.
		G0 float64
		// G (boost/cut gain) is given in decibels [dB] and prescribes
		// the effect imposed on the audio loudness for the section’s
		// frequency range. A boost/cut level of 0 dB corresponds to
		// unity (no operation), whereas negative numbers corresponds to
		// cut (volume down) and positive numbers to boost (volume up).
		G float64
	}

	// StereoEqualizerSections implements EqualizerSections and can be passed into NewEqualizer
	StereoEqualizerSections []StereoEqualizerSection

	// MonoEqualizerSections implements EqualizerSections and can be passed into NewEqualizer
	MonoEqualizerSections []MonoEqualizerSection
)
// NewEqualizer returns a beep.Streamer that modifies the stream based on the
// EqualizerSections value that is passed in. The SampleRate (sr) must match
// that of the wrapped Streamer; the coefficients are derived from it once,
// up front.
func NewEqualizer(st beep.Streamer, sr beep.SampleRate, s EqualizerSections) beep.Streamer {
	eq := &equalizer{streamer: st}
	eq.sections = s.sections(float64(sr))
	return eq
}
// sections converts every mono band definition into a ready-to-run filter
// section for the given sample rate.
func (m MonoEqualizerSections) sections(fs float64) []section {
	result := make([]section, 0, len(m))
	for _, band := range m {
		result = append(result, band.section(fs))
	}
	return result
}
// sections converts every stereo band definition into a ready-to-run filter
// section for the given sample rate.
func (m StereoEqualizerSections) sections(fs float64) []section {
	result := make([]section, 0, len(m))
	for _, band := range m {
		result = append(result, band.section(fs))
	}
	return result
}
// Stream streams the wrapped Streamer modified by the Equalizer.
//
// The sections are addressed by index rather than iterated with a
// range-value loop: section.apply has a pointer receiver and mutates the
// section's filter history (xPast/yPast). Ranging by value would hand apply
// a copy, silently discarding that state after every call and resetting the
// filter at each buffer boundary. Only the n samples actually produced by
// the wrapped streamer are filtered, so stale data past n never pollutes
// the filter history.
func (e *equalizer) Stream(samples [][2]float64) (n int, ok bool) {
	n, ok = e.streamer.Stream(samples)
	for i := range e.sections {
		e.sections[i].apply(samples[:n])
	}
	return n, ok
}
// Err propagates the wrapped Streamer's errors; the equalizer itself never
// produces an error of its own.
func (e *equalizer) Err() error {
	return e.streamer.Err()
}
// section converts the user-facing band parameters (dB gains, Hz
// frequencies) into digital biquad filter coefficients. The same
// coefficient slices are shared by the left and right channels.
func (m MonoEqualizerSection) section(fs float64) section {
	// Convert dB levels to linear amplitude and frequencies to
	// normalized angular form (fs/2 is the Nyquist frequency).
	var (
		g0    = math.Pow(10.0, m.G0/20.0) // reference gain, linear
		g     = math.Pow(10.0, m.G/20.0)  // boost/cut gain, linear
		gb    = math.Pow(10.0, m.GB/20.0) // bandwidth gain, linear
		omega = m.F0 * math.Pi / (fs / 2.0)
	)

	beta := math.Tan(m.Bf/2.0*math.Pi/(fs/2.0)) *
		math.Sqrt(math.Abs(gb*gb-g0*g0)) /
		math.Sqrt(math.Abs(g*g-gb*gb))

	// Feed-forward (numerator) coefficients.
	b := []float64{
		(g0 + g*beta) / (1 + beta),
		(-2 * g0 * math.Cos(omega)) / (1 + beta),
		(g0 - g*beta) / (1 + beta),
	}
	// Feedback (denominator) coefficients, normalized so a[0] == 1.
	a := []float64{
		1.0,
		-2 * math.Cos(omega) / (1 + beta),
		(1 - beta) / (1 + beta),
	}
	return section{
		a: [2][]float64{a, a},
		b: [2][]float64{b, b},
	}
}
// section builds a stereo filter section by deriving the left and right
// channel coefficients independently and pairing them.
func (s StereoEqualizerSection) section(fs float64) section {
	var (
		left  = s.Left.section(fs)
		right = s.Right.section(fs)
	)
	return section{
		a: [2][]float64{left.a[0], right.a[0]},
		b: [2][]float64{left.b[0], right.b[0]},
	}
}
// apply runs the IIR difference equation defined by the section's (a, b)
// coefficients over the stereo buffer x in place, using xPast/yPast as the
// filter history for sample positions that precede the current buffer.
//
// NOTE(review): the history is indexed as xPast[len(xPast)-j] and
// yPast[len(yPast)-j-1]; confirm this matches the intended "j-th most
// recent sample of the previous buffer" semantics when buffer sizes vary
// between calls.
func (s *section) apply(x [][2]float64) {
	ord := len(s.a[0]) - 1 // filter order: number of past samples consulted
	np := len(x) - 1
	// Pad very small buffers so at least ord+1 samples are available.
	// NOTE(review): append may reallocate x, in which case the final
	// copy(x, y) writes into the padded copy rather than the caller's
	// buffer — verify output for buffers shorter than the filter order.
	if np < ord {
		x = append(x, make([][2]float64, ord-np)...)
		np = ord
	}
	y := make([][2]float64, len(x))
	// Grow the history buffers so the negative-index branches below
	// always stay in range.
	if len(s.xPast) < len(x) {
		s.xPast = append(s.xPast, make([][2]float64, len(x)-len(s.xPast))...)
	}
	if len(s.yPast) < len(x) {
		s.yPast = append(s.yPast, make([][2]float64, len(x)-len(s.yPast))...)
	}
	for i := 0; i < len(x); i++ {
		// Feed-forward part: b coefficients on current and past inputs.
		// When i-j < 0 the needed input predates this buffer, so it is
		// read from xPast (j >= 1 in that branch, so the index is valid).
		for j := 0; j < ord+1; j++ {
			if i-j < 0 {
				y[i][0] = y[i][0] + s.b[0][j]*s.xPast[len(s.xPast)-j][0]
				y[i][1] = y[i][1] + s.b[1][j]*s.xPast[len(s.xPast)-j][1]
			} else {
				y[i][0] = y[i][0] + s.b[0][j]*x[i-j][0]
				y[i][1] = y[i][1] + s.b[1][j]*x[i-j][1]
			}
		}
		// Feedback part: a coefficients on past outputs (a[0] is the
		// normalization term applied after the loop).
		for j := 0; j < ord; j++ {
			if i-j-1 < 0 {
				y[i][0] = y[i][0] - s.a[0][j+1]*s.yPast[len(s.yPast)-j-1][0]
				y[i][1] = y[i][1] - s.a[1][j+1]*s.yPast[len(s.yPast)-j-1][1]
			} else {
				y[i][0] = y[i][0] - s.a[0][j+1]*y[i-j-1][0]
				y[i][1] = y[i][1] - s.a[1][j+1]*y[i-j-1][1]
			}
		}
		y[i][0] = y[i][0] / s.a[0][0]
		y[i][1] = y[i][1] / s.a[1][0]
	}
	// Retain this buffer's inputs and outputs as history for the next
	// call, then write the filtered result back into x.
	s.xPast = x[:]
	s.yPast = y[:]
	copy(x, y)
}
package bigcache
import (
"bytes"
)
// ringBuf is a fixed-capacity circular byte buffer. begin is the physical
// offset of the oldest byte, size is the number of live bytes, and data is
// the backing array; all offsets wrap modulo len(data).
type ringBuf struct {
	begin int
	size int
	data []byte
}

// newRingBuf allocates a circular buffer with a fixed capacity of size bytes.
func newRingBuf(size int) ringBuf {
	return ringBuf{
		begin: 0,
		size: 0,
		data: make([]byte, size),
	}
}
// append writes data at the current logical end of the buffer, wrapping
// around the backing array if necessary, and grows the logical size. It
// returns the physical offset at which the first byte was written. The
// caller is responsible for ensuring enough free space (see getAvailable);
// the wrap-around copy is delegated to writeAt.
func (r *ringBuf) append(data []byte) int {
	end := r.getEnd()
	r.writeAt(data, end)
	r.size += len(data)
	return end
}
// appendEmpty grows the logical size by n bytes without writing anything;
// the reserved region keeps whatever bytes the backing array already holds.
func (r *ringBuf) appendEmpty(n int) {
	r.size += n
}
// readAt copies len(data) bytes starting at the given physical offset into
// data, wrapping around the end of the backing array when needed. The
// offset is normalized modulo the buffer capacity.
func (r *ringBuf) readAt(data []byte, offset int) {
	pos := offset % len(r.data)
	copied := copy(data, r.data[pos:])
	if copied < len(data) {
		// The read wrapped: fill the remainder from the array start.
		copy(data[copied:], r.data)
	}
}
// writeAt copies data into the buffer starting at the given physical
// offset, wrapping around the end of the backing array when needed. The
// offset is normalized modulo the buffer capacity. Logical size is not
// changed.
func (r *ringBuf) writeAt(data []byte, offset int) {
	pos := offset % len(r.data)
	written := copy(r.data[pos:], data)
	if written < len(data) {
		// The write wrapped: place the remainder at the array start.
		copy(r.data, data[written:])
	}
}
// getBegin returns the physical offset of the oldest live byte.
func (r *ringBuf) getBegin() int {
	return r.begin
}

// getEnd returns the physical offset one past the newest live byte,
// wrapping around the backing array.
func (r *ringBuf) getEnd() int {
	return (r.begin + r.size) % len(r.data)
}

// getAvailable returns the number of free bytes remaining in the buffer.
func (r *ringBuf) getAvailable() int {
	return len(r.data) - r.size
}

// increaseBegin advances the begin pointer by n bytes (with wrap-around)
// without touching the logical size; callers must adjust size themselves.
func (r *ringBuf) increaseBegin(n int) {
	r.begin = (r.begin + n) % len(r.data)
}

// skip discards the oldest n bytes from the buffer.
func (r *ringBuf) skip(n int) {
	r.increaseBegin(n)
	r.size -= n
}
// bytesEqual reports whether the len(data) bytes stored at physical offset
// from (wrapping around the backing array) are identical to data. The
// offset is normalized modulo the buffer capacity.
func (r *ringBuf) bytesEqual(from int, data []byte) bool {
	capacity := len(r.data)
	pos := from % capacity
	if pos+len(data) <= capacity {
		// Contiguous region: a single comparison suffices.
		return bytes.Equal(r.data[pos:pos+len(data)], data)
	}
	// Wrapped region: compare the tail of the array, then the head.
	head := capacity - pos
	return bytes.Equal(r.data[pos:], data[:head]) &&
		bytes.Equal(r.data[:len(data)-head], data[head:])
}
// evacuateContinuousSource copies size bytes from physical offset from
// (a region assumed NOT to wrap) to the logical end of the buffer,
// wrapping the destination if needed.
//
// NOTE(review): if the source and destination regions overlap, the two
// forward copy calls may read already-overwritten bytes — confirm callers
// guarantee non-overlap (or that overlap is benign for the cache's use).
func (r *ringBuf) evacuateContinuousSource(from int, end int, size int) {
	max := len(r.data)
	if end+size > max {
		// Destination wraps: split into a tail write and a head write.
		firstPart := max - end
		secondPart := size - firstPart
		copy(r.data[end:], r.data[from:])
		copy(r.data[:secondPart], r.data[from+firstPart:])
	} else {
		copy(r.data[end:end+size], r.data[from:])
	}
}

// evacuate moves the oldest size bytes to the logical end of the buffer
// (so they become the newest bytes) and advances begin past them. It
// returns the physical offset at which the moved bytes now start. The
// source region is split when it wraps the backing array.
func (r *ringBuf) evacuate(size int) int {
	begin := r.getBegin()
	end := r.getEnd()
	max := len(r.data)
	if begin+size > max {
		// Source wraps: move the tail segment, then the head segment.
		firstPart := max - begin
		secondPart := size - firstPart
		r.evacuateContinuousSource(begin, end, firstPart)
		r.evacuateContinuousSource(0, end+firstPart, secondPart)
	} else {
		r.evacuateContinuousSource(begin, end, size)
	}
	r.increaseBegin(size)
	return end
}
package main
import (
"math"
"sync"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// ZipfGenerator is a random number generator that generates draws from a Zipf
// distribution. Unlike rand.Zipf, this generator supports incrementing the
// imax parameter without performing an expensive recomputation of the
// underlying hidden parameters, which is a pattern used in [1] for efficiently
// generating large volumes of Zipf-distributed records for synthetic data.
// Second, rand.Zipf only supports theta <= 1, we support all values of theta.
type ZipfGenerator struct {
	// The mutable, globally synchronized state (see ZipfGeneratorMu).
	zipfGenMu ZipfGeneratorMu
	// supplied values: skew parameter and inclusive lower bound of the range.
	theta float64
	iMin uint64
	// internally computed values: alpha = 1/(1-theta), zeta2 = zeta(2, theta).
	alpha, zeta2 float64
}
// ZipfGeneratorMu holds the variables which must be globally synced:
// the current inclusive upper bound iMax, the "head" counter handed out by
// IMaxHead, and the hidden parameters eta and zetaN = zeta(iMax+1-iMin, theta)
// that depend on iMax.
type ZipfGeneratorMu struct {
	mu sync.RWMutex
	iMax uint64
	iMaxHead uint64
	eta float64
	zetaN float64
}
// NewZipfGenerator constructs a new ZipfGenerator with the given parameters.
// It returns an error if the parameters are outside the accepted range:
// iMin <= iMax, theta >= 0, and theta != 1 (the generalized harmonic number
// used for normalization is undefined at theta == 1; theta == 0 yields a
// uniform distribution and is accepted).
func NewZipfGenerator(iMin, iMax uint64, theta float64) (*ZipfGenerator, error) {
	if iMin > iMax {
		return nil, errors.Errorf("iMin %d > iMax %d", iMin, iMax)
	}
	if theta < 0.0 || theta == 1.0 {
		// The message now matches the actual validation: the previous
		// text ("0 < theta, and theta != 1") implied theta == 0 was
		// rejected, but the check accepts it.
		return nil, errors.Errorf("theta must be >= 0 and != 1; got %v", theta)
	}
	z := ZipfGenerator{
		iMin: iMin,
		zipfGenMu: ZipfGeneratorMu{
			iMax: iMax,
		},
		theta: theta,
	}
	z.zipfGenMu.mu.Lock()
	defer z.zipfGenMu.mu.Unlock()
	// Compute hidden parameters: zeta(2, theta) and zeta(n, theta) feed
	// the closed-form quantile approximation used by Uint64.
	zeta2, err := computeZetaFromScratch(2, theta)
	if err != nil {
		return nil, errors.Errorf("Could not compute zeta(2,theta): %s", err)
	}
	var zetaN float64
	zetaN, err = computeZetaFromScratch(iMax+1-iMin, theta)
	if err != nil {
		return nil, errors.Errorf("Could not compute zeta(2,%d): %s", iMax, err)
	}
	z.alpha = 1.0 / (1.0 - theta)
	z.zipfGenMu.eta = (1 - math.Pow(2.0/float64(z.zipfGenMu.iMax+1-z.iMin), 1.0-theta)) / (1.0 - zeta2/zetaN)
	z.zipfGenMu.zetaN = zetaN
	z.zeta2 = zeta2
	return &z, nil
}
// computeZetaIncrementally extends a previously computed partial sum
// sum = zeta(oldIMax, theta) to zeta(iMax, theta) by adding only the newly
// covered terms. It errors if the range would shrink, since terms cannot
// be subtracted back out.
func computeZetaIncrementally(oldIMax, iMax uint64, theta float64, sum float64) (float64, error) {
	if iMax < oldIMax {
		return 0, errors.Errorf("Can't increment iMax backwards!")
	}
	for term := oldIMax + 1; term <= iMax; term++ {
		sum += 1.0 / math.Pow(float64(term), theta)
	}
	return sum, nil
}
// computeZetaFromScratch computes the generalized harmonic number
// zeta(n, theta) = (1/1)^theta + (1/2)^theta + ... + (1/n)^theta
// by running the incremental computation from an empty sum.
func computeZetaFromScratch(n uint64, theta float64) (float64, error) {
	sum, err := computeZetaIncrementally(0, n, theta, 0.0)
	if err != nil {
		return sum, errors.Errorf("could not compute zeta: %s", err)
	}
	return sum, nil
}
// Uint64 maps u, a uniform draw in [0, 1), to a value in [iMin, iMax] with
// probabilities following the Zipf distribution, using the precomputed
// eta/zetaN quantile approximation.
func (z *ZipfGenerator) Uint64(u float64) uint64 {
	z.zipfGenMu.mu.RLock()
	defer z.zipfGenMu.mu.RUnlock()

	var result uint64
	switch uz := u * z.zipfGenMu.zetaN; {
	case uz < 1.0:
		result = z.iMin
	case uz < 1.0+math.Pow(0.5, z.theta):
		result = z.iMin + 1
	default:
		spread := float64(z.zipfGenMu.iMax + 1 - z.iMin)
		result = z.iMin + uint64(spread*math.Pow(z.zipfGenMu.eta*u-z.zipfGenMu.eta+1.0, z.alpha))
	}
	log.Debugf("Zip Generator: Uint64[%d, %d] -> %d", z.iMin, z.zipfGenMu.iMax, result)
	return result
}
// IncrementIMax grows the generator's range by one and incrementally
// refreshes the hidden parameters (zetaN, eta) that depend on iMax. It
// returns an error if the incremental zeta computation failed.
func (z *ZipfGenerator) IncrementIMax() error {
	z.zipfGenMu.mu.Lock()
	defer z.zipfGenMu.mu.Unlock()

	zetaN, err := computeZetaIncrementally(
		z.zipfGenMu.iMax, z.zipfGenMu.iMax+1, z.theta, z.zipfGenMu.zetaN)
	if err != nil {
		return errors.Errorf("Could not incrementally compute zeta: %s", err)
	}
	z.zipfGenMu.eta = (1 - math.Pow(2.0/float64(z.zipfGenMu.iMax+1-z.iMin), 1.0-z.theta)) / (1.0 - z.zeta2/zetaN)
	z.zipfGenMu.zetaN = zetaN
	z.zipfGenMu.iMax++
	return nil
}
// IMaxHead returns the current value of IMaxHead, and increments it after.
func (z *ZipfGenerator) IMaxHead() uint64 {
z.zipfGenMu.mu.Lock()
if z.zipfGenMu.iMaxHead < z.zipfGenMu.iMax {
z.zipfGenMu.iMaxHead = z.zipfGenMu.iMax
}
iMaxHead := z.zipfGenMu.iMaxHead
z.zipfGenMu.iMaxHead++
z.zipfGenMu.mu.Unlock()
return iMaxHead
} | ycsb/zipfgenerator.go | 0.709523 | 0.433981 | zipfgenerator.go | starcoder |
package textures
// ImageV0 is the Vers-0 Image Format Description: a collection of data
// defining the pixel format, data type, size, and other miscellaneous
// characteristics of the monolithic block of image data.
type ImageV0 struct {
	// Pixel format specifies the format of the texture image pixel data. Depending on the format, anywhere from one
	// to four elements of data exists per texel.
	// = 0 − No format specified. Texture mapping is not applied.
	// = 1 − A red color component followed by green and blue color components
	// = 2 − A red color component followed by green, blue, and alpha color components
	// = 3 − A single luminance component
	// = 4 − A luminance component followed by an alpha color component.
	// = 5 − A single stencil index.
	// = 6 − A single depth component
	// = 7 − A single red color component
	// = 8 − A single green color component
	// = 9 − A single blue color component
	// = 10 − A single alpha color component
	// = 11 − A blue color component, followed by green and red color components
	// = 12 − A blue color component, followed by green, red, and alpha color components
	PixelFormat uint32
	// Pixel Data Type specifies the data type used to store the per texel data. If the Pixel Format represents a multi
	// component value (e.g. red, green, blue) then each component requires Pixel Data Type number of bytes of storage
	// (e.g. a Pixel Format of “1” with Pixel Data Type of “7” would require 12 bytes of storage for each texel).
	// = 0 − No type specified. Texture mapping is not applied.
	// = 1 − Signed 8-bit integer
	// = 2 − Single-precision 32-bit floating point
	// = 3 − Unsigned 8-bit integer
	// = 4 − Single bits in unsigned 8-bit integers
	// = 5 − Unsigned 16-bit integer
	// = 6 − Signed 16-bit integer
	// = 7 − Unsigned 32-bit integer
	// = 8 − Signed 32-bit integer
	// = 9 − 16-bit floating point according to IEEE-754 format (i.e. 1 sign bit, 5 exponent bits, 10 mantissa bits)
	PixelDataType uint32
	// Dimensionality specifies the number of dimensions the texture image has. Valid values include:
	// = 1 − One-dimensional texture
	// = 2 − Two-dimensional texture
	Dimensionality uint16
	// Width specifies the width dimension (number of texel columns) of the texture image in number of pixels
	Width int32
	// Height specifies the height dimension (number of texel rows) of the texture image in number of pixels.
	// Height is “1” for one-dimensional images.
	Height int32
	// Mipmaps Flag is a flag indicating whether the texture image has mipmaps.
	// = 0 − No mipmaps
	// = 1 − Yes has mipmaps. Image Texel Data is assumed to contain multiple textures, each a mipmap of the base texture
	MipmapsFlag uint32
	// Shared Image Flag is a flag indicating whether this texture image is shareable with other Texture Image Element attributes.
	// = 0 − Image is not shareable with other Texture Image Elements.
	// = 1 − Image is shareable with other Texture Image Elements.
	SharedImageFlag uint32
}
package mathexp
import (
"github.com/grafana/grafana-plugin-sdk-go/data"
"github.com/grafana/grafana/pkg/expr/mathexp/parse"
)
// Results is a container for Value interfaces produced by evaluating an
// expression.
type Results struct {
	Values Values
}

// Values is a slice of Value interfaces.
type Values []Value
// AsDataFrames converts each value to its underlying data frame and stamps
// every frame with the supplied query refID.
func (vals Values) AsDataFrames(refID string) []*data.Frame {
	frames := make([]*data.Frame, len(vals))
	for i := range vals {
		frame := vals[i].AsDataFrame()
		frame.RefID = refID
		frames[i] = frame
	}
	return frames
}
// Value is the interface that holds different types such as a Scalar,
// Series, or Number. All Value implementations should be backed by a
// *data.Frame.
type Value interface {
	Type() parse.ReturnType     // the expression-language type of the value
	Value() interface{}         // the concrete value itself
	GetLabels() data.Labels     // labels attached to the value, if any
	SetLabels(data.Labels)      // replace the value's labels
	GetName() string            // the underlying frame's name
	AsDataFrame() *data.Frame   // the backing data frame
}
// Scalar is the type that holds a single number constant.
// Before returning from an expression it will be wrapped in a
// data frame.
type Scalar struct{ Frame *data.Frame }

// Type returns the Value type and allows it to fulfill the Value interface.
func (s Scalar) Type() parse.ReturnType { return parse.TypeScalar }

// Value returns the actual value allows it to fulfill the Value interface.
func (s Scalar) Value() interface{} { return s }

// GetLabels always returns nil: scalar constants carry no labels.
func (s Scalar) GetLabels() data.Labels { return nil }

// SetLabels is intentionally a no-op: a scalar has no labels to set.
func (s Scalar) SetLabels(ls data.Labels) {}

// GetName returns the name of the underlying frame.
func (s Scalar) GetName() string { return s.Frame.Name }

// AsDataFrame returns the underlying *data.Frame.
func (s Scalar) AsDataFrame() *data.Frame { return s.Frame }

// NewScalar creates a Scalar holding value f, stored as the single row of
// a one-field frame.
func NewScalar(f *float64) Scalar {
	frame := data.NewFrame("",
		data.NewField("Scalar", nil, []*float64{f}),
	)
	return Scalar{frame}
}

// NewScalarResults creates a Results holding a single Scalar.
func NewScalarResults(f *float64) Results {
	return Results{
		Values: []Value{NewScalar(f)},
	}
}

// GetFloat64Value retrieves the single scalar value from the data frame
// (row 0 of field 0); the pointer may be nil for a null scalar.
func (s Scalar) GetFloat64Value() *float64 {
	return s.Frame.At(0, 0).(*float64)
}
// Number holds a labelled single number value.
type Number struct{ Frame *data.Frame }

// Type returns the Value type and allows it to fulfill the Value interface.
func (n Number) Type() parse.ReturnType { return parse.TypeNumberSet }

// Value returns the actual value allows it to fulfill the Value interface.
// Note: it returns a pointer to the method's receiver copy, not to the
// caller's original Number value.
func (n Number) Value() interface{} { return &n }

// GetLabels returns the labels of the number's single field.
func (n Number) GetLabels() data.Labels { return n.Frame.Fields[0].Labels }

// SetLabels replaces the labels on the number's single field.
func (n Number) SetLabels(ls data.Labels) { n.Frame.Fields[0].Labels = ls }

// GetName returns the name of the underlying frame.
func (n Number) GetName() string { return n.Frame.Name }

// AsDataFrame returns the underlying *data.Frame.
func (n Number) AsDataFrame() *data.Frame { return n.Frame }

// SetValue sets the value of the Number to float64 pointer f.
func (n Number) SetValue(f *float64) {
	n.Frame.Set(0, 0, f)
}

// GetFloat64Value retrieves the single scalar value from the data frame
// (row 0 of field 0); the pointer may be nil for a null number.
func (n Number) GetFloat64Value() *float64 {
	return n.Frame.At(0, 0).(*float64)
}

// NewNumber returns a Number backed by a one-row float64 field with the
// given name and labels; the initial value is nil (null).
func NewNumber(name string, labels data.Labels) Number {
	return Number{
		data.NewFrame("",
			data.NewField(name, labels, make([]*float64, 1)),
		),
	}
}
package table
/*
The table file format looks like:
<start_of_file>
[data block 0]
[data block 1]
...
[data block N-1]
[meta block 0]
[meta block 1]
...
[meta block K-1]
[metaindex block]
[index block]
[footer]
<end_of_file>
Each block consists of some data and a 5 byte trailer: a 1 byte block type and
a 4 byte checksum of the compressed data. The block type gives the per-block
compression used; each block is compressed independently. The checksum
algorithm is described in the leveldb/crc package.
The decompressed block data consists of a sequence of key/value entries
followed by a trailer. Each key is encoded as a shared prefix length and a
remainder string. For example, if two adjacent keys are "tweedledee" and
"tweedledum", then the second key would be encoded as {8, "um"}. The shared
prefix length is varint encoded. The remainder string and the value are
encoded as a varint-encoded length followed by the literal contents. To
continue the example, suppose that the key "tweedledum" mapped to the value
"socks". The encoded key/value entry would be: "\x08\x02\x05umsocks".
Every block has a restart interval I. Every I'th key/value entry in that block
is called a restart point, and shares no key prefix with the previous entry.
Continuing the example above, if the key after "tweedledum" was "two", but was
part of a restart point, then that key would be encoded as {0, "two"} instead
of {2, "o"}. If a block has P restart points, then the block trailer consists
of (P+1)*4 bytes: (P+1) little-endian uint32 values. The first P of these
uint32 values are the block offsets of each restart point. The final uint32
value is P itself. Thus, when seeking for a particular key, one can use binary
search to find the largest restart point whose key is <= the key sought.
An index block is a block with N key/value entries. The i'th value is the
encoded block handle of the i'th data block. The i'th key is a separator for
i < N-1, and a successor for i == N-1. The separator between blocks i and i+1
is a key that is >= every key in block i and is < every key i block i+1. The
successor for the final block is a key that is >= every key in block N-1. The
index block restart interval is 1: every entry is a restart point.
The table footer is exactly 48 bytes long:
- the block handle for the metaindex block,
- the block handle for the index block,
- padding to take the two items above up to 40 bytes,
- an 8-byte magic string.
A block handle is an offset and a length; the length does not include the 5
byte trailer. Both numbers are varint-encoded, with no padding between the two
values. The maximum size of an encoded block handle is therefore 20 bytes.
*/
const (
	// blockTrailerLen is the length of the per-block trailer: a 1 byte
	// block type (compression format) plus a 4 byte checksum.
	blockTrailerLen = 5
	// footerLen is the fixed length of the table footer described above.
	footerLen = 48
	// magic is the 8-byte string that ends every table file.
	magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"
	// The block type gives the per-block compression format.
	// These constants are part of the file format and should not be changed.
	// They are different from the db.Compression constants because the latter
	// are designed so that the zero value of the db.Compression type means to
	// use the default compression (which is snappy).
	noCompressionBlockType = 0
	snappyCompressionBlockType = 1
)
package types
import (
"fmt"
"math"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/trace_util_0"
)
// AddUint64 adds uint64 a and b if no overflow, else returns error.
// The trace_util_0.Count calls throughout this file are coverage
// instrumentation and must be preserved as-is.
func AddUint64(a uint64, b uint64) (uint64, error) {
	trace_util_0.Count(_overflow_00000, 0)
	// Overflow iff b exceeds the headroom left above a.
	if math.MaxUint64-a < b {
		trace_util_0.Count(_overflow_00000, 2)
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	trace_util_0.Count(_overflow_00000, 1)
	return a + b, nil
}

// AddInt64 adds int64 a and b if no overflow, otherwise returns error.
func AddInt64(a int64, b int64) (int64, error) {
	trace_util_0.Count(_overflow_00000, 3)
	// Signed addition can only overflow when both operands share a sign.
	if (a > 0 && b > 0 && math.MaxInt64-a < b) ||
		(a < 0 && b < 0 && math.MinInt64-a > b) {
		trace_util_0.Count(_overflow_00000, 5)
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}
	trace_util_0.Count(_overflow_00000, 4)
	return a + b, nil
}

// AddInteger adds uint64 a and int64 b and returns uint64 if no overflow error.
func AddInteger(a uint64, b int64) (uint64, error) {
	trace_util_0.Count(_overflow_00000, 6)
	if b >= 0 {
		trace_util_0.Count(_overflow_00000, 9)
		return AddUint64(a, uint64(b))
	}
	trace_util_0.Count(_overflow_00000, 7)
	// b is negative: the result underflows iff |b| > a. For
	// b == MinInt64, -b wraps but uint64(-b) still yields |b| == 2^63.
	if uint64(-b) > a {
		trace_util_0.Count(_overflow_00000, 10)
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	trace_util_0.Count(_overflow_00000, 8)
	return a - uint64(-b), nil
}

// SubUint64 subtracts uint64 b from a and returns uint64 if no underflow error.
func SubUint64(a uint64, b uint64) (uint64, error) {
	trace_util_0.Count(_overflow_00000, 11)
	if a < b {
		trace_util_0.Count(_overflow_00000, 13)
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	trace_util_0.Count(_overflow_00000, 12)
	return a - b, nil
}
// SubInt64 subtracts int64 b from a and returns the int64 result, or an
// overflow error if the mathematical result does not fit in an int64.
//
// Overflow cases:
//   - a > 0, b < 0 and a-b > MaxInt64 (first clause);
//   - a < 0, b > 0 and a-b < MinInt64 (second clause);
//   - b == MinInt64 and a >= 0: -b is not representable (it wraps to a
//     negative value), so the first clause cannot fire, yet a-b exceeds
//     MaxInt64 for every non-negative a. The previous third clause only
//     caught a == 0 here, silently wrapping for a > 0.
func SubInt64(a int64, b int64) (int64, error) {
	trace_util_0.Count(_overflow_00000, 14)
	if (a > 0 && b < 0 && math.MaxInt64-a < -b) ||
		(a < 0 && b > 0 && math.MinInt64-a > -b) ||
		(a >= 0 && b == math.MinInt64) {
		trace_util_0.Count(_overflow_00000, 16)
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}
	trace_util_0.Count(_overflow_00000, 15)
	return a - b, nil
}
// SubUintWithInt subtracts int64 b from uint64 a and returns uint64 if no
// overflow error. Subtracting a negative b is delegated to unsigned
// addition; uint64(-b) yields |b| even for b == MinInt64.
func SubUintWithInt(a uint64, b int64) (uint64, error) {
	trace_util_0.Count(_overflow_00000, 17)
	if b < 0 {
		trace_util_0.Count(_overflow_00000, 19)
		return AddUint64(a, uint64(-b))
	}
	trace_util_0.Count(_overflow_00000, 18)
	return SubUint64(a, uint64(b))
}

// SubIntWithUint subtracts uint64 b from int64 a and returns uint64 if no
// overflow error. The result underflows whenever a is negative or smaller
// than b.
func SubIntWithUint(a int64, b uint64) (uint64, error) {
	trace_util_0.Count(_overflow_00000, 20)
	if a < 0 || uint64(a) < b {
		trace_util_0.Count(_overflow_00000, 22)
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	trace_util_0.Count(_overflow_00000, 21)
	return uint64(a) - b, nil
}

// MulUint64 multiplies uint64 a and b and returns uint64 if no overflow error.
func MulUint64(a uint64, b uint64) (uint64, error) {
	trace_util_0.Count(_overflow_00000, 23)
	// Overflow iff a exceeds MaxUint64 / b (b > 0 guards the division).
	if b > 0 && a > math.MaxUint64/b {
		trace_util_0.Count(_overflow_00000, 25)
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	trace_util_0.Count(_overflow_00000, 24)
	return a * b, nil
}
// MulInt64 multiplies int64 a and b and returns int64 if no overflow error.
// The product is computed on magnitudes via MulUint64, then the sign is
// reapplied; this lets |MinInt64| (= 2^63) be represented during the
// intermediate step.
func MulInt64(a int64, b int64) (int64, error) {
	trace_util_0.Count(_overflow_00000, 26)
	if a == 0 || b == 0 {
		trace_util_0.Count(_overflow_00000, 32)
		return 0, nil
	}
	trace_util_0.Count(_overflow_00000, 27)
	var (
		res uint64
		err error
		negative = false
	)
	// Select the magnitude product and remember whether signs differ.
	if a > 0 && b > 0 {
		trace_util_0.Count(_overflow_00000, 33)
		res, err = MulUint64(uint64(a), uint64(b))
	} else {
		trace_util_0.Count(_overflow_00000, 34)
		if a < 0 && b < 0 {
			trace_util_0.Count(_overflow_00000, 35)
			res, err = MulUint64(uint64(-a), uint64(-b))
		} else {
			trace_util_0.Count(_overflow_00000, 36)
			if a < 0 && b > 0 {
				trace_util_0.Count(_overflow_00000, 37)
				negative = true
				res, err = MulUint64(uint64(-a), uint64(b))
			} else {
				trace_util_0.Count(_overflow_00000, 38)
				{
					negative = true
					res, err = MulUint64(uint64(a), uint64(-b))
				}
			}
		}
	}
	trace_util_0.Count(_overflow_00000, 28)
	if err != nil {
		trace_util_0.Count(_overflow_00000, 39)
		return 0, errors.Trace(err)
	}
	trace_util_0.Count(_overflow_00000, 29)
	if negative {
		trace_util_0.Count(_overflow_00000, 40)
		// negative result: magnitudes up to 2^63 (== MaxInt64+1) fit,
		// since -2^63 is representable as int64.
		if res > math.MaxInt64+1 {
			trace_util_0.Count(_overflow_00000, 42)
			return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
		}
		trace_util_0.Count(_overflow_00000, 41)
		return -int64(res), nil
	}
	// positive result: the magnitude must not exceed MaxInt64.
	trace_util_0.Count(_overflow_00000, 30)
	if res > math.MaxInt64 {
		trace_util_0.Count(_overflow_00000, 43)
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}
	trace_util_0.Count(_overflow_00000, 31)
	return int64(res), nil
}
// MulInteger multiplies uint64 a and int64 b, and returns uint64 if no
// overflow error. A negative b (with non-zero a) cannot produce an
// unsigned result and is reported as overflow.
func MulInteger(a uint64, b int64) (uint64, error) {
	trace_util_0.Count(_overflow_00000, 44)
	if a == 0 || b == 0 {
		trace_util_0.Count(_overflow_00000, 47)
		return 0, nil
	}
	trace_util_0.Count(_overflow_00000, 45)
	if b < 0 {
		trace_util_0.Count(_overflow_00000, 48)
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
	}
	trace_util_0.Count(_overflow_00000, 46)
	return MulUint64(a, uint64(b))
}

// DivInt64 divides int64 a with b, returns int64 if no overflow error.
// It just checks overflow; if b is zero, a "divide by zero" panic throws.
// The only signed-division overflow is MinInt64 / -1 (result 2^63).
func DivInt64(a int64, b int64) (int64, error) {
	trace_util_0.Count(_overflow_00000, 49)
	if a == math.MinInt64 && b == -1 {
		trace_util_0.Count(_overflow_00000, 51)
		return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
	}
	trace_util_0.Count(_overflow_00000, 50)
	return a / b, nil
}

// DivUintWithInt divides uint64 a with int64 b, returns uint64 if no
// overflow error. It just checks overflow; if b is zero, a "divide by
// zero" panic throws. With negative b the true quotient is negative, so it
// is representable only when it truncates to zero (a < |b|).
func DivUintWithInt(a uint64, b int64) (uint64, error) {
	trace_util_0.Count(_overflow_00000, 52)
	if b < 0 {
		trace_util_0.Count(_overflow_00000, 54)
		if a != 0 && uint64(-b) <= a {
			trace_util_0.Count(_overflow_00000, 56)
			return 0, ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", fmt.Sprintf("(%d, %d)", a, b))
		}
		trace_util_0.Count(_overflow_00000, 55)
		return 0, nil
	}
	trace_util_0.Count(_overflow_00000, 53)
	return a / uint64(b), nil
}

// DivIntWithUint divides int64 a with uint64 b, returns uint64 if no
// overflow error. It just checks overflow; if b is zero, a "divide by
// zero" panic throws. With negative a the true quotient is negative, so it
// is representable only when it truncates to zero (|a| < b).
func DivIntWithUint(a int64, b uint64) (uint64, error) {
	trace_util_0.Count(_overflow_00000, 57)
	if a < 0 {
		trace_util_0.Count(_overflow_00000, 59)
		if uint64(-a) >= b {
			trace_util_0.Count(_overflow_00000, 61)
			return 0, ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("(%d, %d)", a, b))
		}
		trace_util_0.Count(_overflow_00000, 60)
		return 0, nil
	}
	trace_util_0.Count(_overflow_00000, 58)
	return uint64(a) / b, nil
}

// _overflow_00000 names this file for the coverage instrumentation above.
var _overflow_00000 = "types/overflow.go"
package reporting
import (
metrics "github.com/rcrowley/go-metrics"
"github.com/wavefronthq/wavefront-sdk-go/histogram"
)
// Histogram adapts a Wavefront histogram to the go-metrics
// metrics.Histogram interface so it can be registered in a metrics.Registry.
type Histogram struct {
	delegate histogram.Histogram
}

// NewHistogram creates a new Wavefront histogram wrapped as a
// metrics.Histogram.
func NewHistogram(options ...histogram.Option) metrics.Histogram {
	return Histogram{delegate: histogram.New(options...)}
}
// Clear is not supported by the Wavefront delegate; calling it is a
// programming error and panics.
func (h Histogram) Clear() {
	panic("Clear called on a Histogram")
}
// Count returns the total number of samples on this histogram.
func (h Histogram) Count() int64 {
	return int64(h.delegate.Count())
}

// Min returns the minimum Value of samples on this histogram.
// The delegate's float64 minimum is truncated toward zero.
func (h Histogram) Min() int64 {
	return int64(h.delegate.Min())
}

// Max returns the maximum Value of samples on this histogram.
// The delegate's float64 maximum is truncated toward zero.
func (h Histogram) Max() int64 {
	return int64(h.delegate.Max())
}

// Sum returns the sum of all values on this histogram, truncated to int64.
func (h Histogram) Sum() int64 {
	return int64(h.delegate.Sum())
}

// Mean returns the mean value of samples on this histogram.
func (h Histogram) Mean() float64 {
	return h.delegate.Mean()
}

// Update registers a new sample in the histogram.
func (h Histogram) Update(v int64) {
	h.delegate.Update(float64(v))
}

// Sample will panic: per-sample access is not supported by the Wavefront
// delegate.
func (h Histogram) Sample() metrics.Sample {
	panic("Sample not supported")
}
// Snapshot materializes the Wavefront histogram as a go-metrics histogram
// backed by a uniform sample, expanding each centroid into `Count` copies
// of its (truncated) value.
//
// The delegate's Snapshot() is called exactly once so that the sample size
// computed in the first pass is guaranteed to match the values replayed in
// the second pass; the previous implementation called it twice, which
// could disagree if the underlying snapshot rotates between calls.
func (h Histogram) Snapshot() metrics.Histogram {
	distributions := h.delegate.Snapshot()

	c := 0
	for _, distribution := range distributions {
		for _, centroid := range distribution.Centroids {
			c += centroid.Count
		}
	}

	sample := metrics.NewUniformSample(c)
	for _, distribution := range distributions {
		for _, centroid := range distribution.Centroids {
			for i := 0; i < centroid.Count; i++ {
				sample.Update(int64(centroid.Value))
			}
		}
	}
	return metrics.NewHistogram(sample)
}
// StdDev returns the standard deviation of the samples.
// Note: this materializes a full Snapshot, which expands every centroid,
// so it is comparatively expensive.
func (h Histogram) StdDev() float64 {
	return h.Snapshot().StdDev()
}

// Variance returns the variance of the samples.
// Note: like StdDev, this materializes a full Snapshot.
func (h Histogram) Variance() float64 {
	return h.Snapshot().Variance()
}

// Percentile returns the desired percentile estimation (p in [0, 1]),
// answered directly by the Wavefront delegate's quantile sketch.
func (h Histogram) Percentile(p float64) float64 {
	return h.delegate.Quantile(p)
}
// Percentiles returns a slice of arbitrary percentiles of values in the
// sample, one result per requested quantile, in the same order. The
// result slice is pre-sized to len(ps) instead of grown by repeated
// append.
func (h Histogram) Percentiles(ps []float64) []float64 {
	res := make([]float64, len(ps))
	for i, p := range ps {
		res[i] = h.Percentile(p)
	}
	return res
}
// Distributions returns all samples on completed time slices and clears
// them from the histogram (a destructive read on the delegate).
func (h Histogram) Distributions() []histogram.Distribution {
	return h.delegate.Distributions()
}

// Granularity returns the delegate's reporting granularity value.
func (h Histogram) Granularity() histogram.Granularity {
	return h.delegate.Granularity()
}
package moving_average
import (
"github.com/apache/arrow/go/arrow/array"
"github.com/wolffcm/flux/arrow"
"github.com/wolffcm/flux/values"
)
// ArrayContainer wraps an Arrow array.Interface and adapts it to flux
// values, hiding the concrete Arrow array type from callers.
type ArrayContainer struct {
	array array.Interface
}

// NewArrayContainer wraps a in an ArrayContainer.
func NewArrayContainer(a array.Interface) *ArrayContainer {
	return &ArrayContainer{a}
}

// IsNull reports whether the element at index i is null.
func (a *ArrayContainer) IsNull(i int) bool {
	return a.array.IsNull(i)
}

// IsValid reports whether the element at index i is non-null.
func (a *ArrayContainer) IsValid(i int) bool {
	return a.array.IsValid(i)
}

// Len returns the number of elements in the underlying array.
func (a *ArrayContainer) Len() int {
	return a.array.Len()
}
// Value returns the element at index i boxed as a flux values.Value.
// Int64 and Uint64 elements are widened to float64 (matching the moving
// average math, which operates on floats); Binary elements become strings.
// Unsupported array types yield nil.
func (a *ArrayContainer) Value(i int) values.Value {
	// Bind the concrete array in the switch so each case asserts only once,
	// instead of re-asserting a.array inside every branch.
	switch arr := a.array.(type) {
	case *array.Boolean:
		return values.New(arr.Value(i))
	case *array.Int64:
		return values.New(float64(arr.Value(i)))
	case *array.Uint64:
		return values.New(float64(arr.Value(i)))
	case *array.Float64:
		// Already a float64; the original applied a redundant conversion.
		return values.New(arr.Value(i))
	case *array.Binary:
		return values.New(string(arr.Value(i)))
	default:
		return nil
	}
}
// OrigValue returns the element at index i in its original Go type
// (bool, int64, uint64, float64, or string for Binary arrays), without
// the float widening that Value performs. Unsupported types yield nil.
func (a *ArrayContainer) OrigValue(i int) interface{} {
	// Single type assertion via the switch binding (the original asserted
	// twice per case: once in the switch and once in the body).
	switch arr := a.array.(type) {
	case *array.Boolean:
		return arr.Value(i)
	case *array.Int64:
		return arr.Value(i)
	case *array.Uint64:
		return arr.Value(i)
	case *array.Float64:
		return arr.Value(i)
	case *array.Binary:
		return string(arr.Value(i))
	default:
		return nil
	}
}
// Slice returns a new container over the half-open range [i, j) of the
// underlying array, using the arrow slice helper for the concrete type.
// For unsupported array types the returned container holds a nil array.
func (a *ArrayContainer) Slice(i int, j int) *ArrayContainer {
	slice := &ArrayContainer{}
	// Bind the concrete array once in the switch rather than re-asserting
	// a.array in every case body.
	switch arr := a.array.(type) {
	case *array.Boolean:
		slice.array = arrow.BoolSlice(arr, i, j)
	case *array.Int64:
		slice.array = arrow.IntSlice(arr, i, j)
	case *array.Uint64:
		slice.array = arrow.UintSlice(arr, i, j)
	case *array.Float64:
		slice.array = arrow.FloatSlice(arr, i, j)
	case *array.Binary:
		slice.array = arrow.StringSlice(arr, i, j)
	default:
		slice.array = nil
	}
	return slice
}
// Array exposes the wrapped Arrow array.
func (a *ArrayContainer) Array() array.Interface {
	return a.array
}

// Release decrements the underlying array's reference count.
// The container must not be used after Release.
func (a *ArrayContainer) Release() {
	a.array.Release()
}
package ec
import (
"crypto/elliptic"
"math/big"
)
// Koblitz curve math
// http://www.secg.org/sec2-v2.pdf 2.4.1
// https://github.com/mndrix/btcutil/blob/master/secp256k1.go
// https://github.com/btcsuite/btcd/blob/master/btcec/btcec.go
// KoblitzCurve A Koblitz Curve with a=0 (y² = x³ + b over GF(P)).
type KoblitzCurve struct {
	*elliptic.CurveParams
	q *big.Int // (P+1)/4, cached for square-root-by-exponentiation (see QPlus1Div4)
}

var secp256k1 *KoblitzCurve

// Secp265k1 returns the shared secp256k1 curve instance.
// NOTE(review): the function name transposes digits of "secp256k1";
// kept as-is because renaming an exported identifier would break callers.
func Secp265k1() *KoblitzCurve {
	return secp256k1
}

// init populates the package singleton with the SEC2 2.4.1 secp256k1
// domain parameters (P, N, B, G) and precomputes q = (P+1)/4.
func init() {
	secp256k1 = new(KoblitzCurve)
	secp256k1.CurveParams = new(elliptic.CurveParams)
	secp256k1.Name = "secp256k1"
	secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
	secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
	secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
	secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
	secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
	secp256k1.BitSize = 256
	// q = (P+1)/4; P ≡ 3 (mod 4), so y = x^q is a square root of x mod P.
	secp256k1.q = new(big.Int).Div(new(big.Int).Add(secp256k1.P,
		big.NewInt(1)), big.NewInt(4))
}
// Params returns the parameters for the curve.
func (curve *KoblitzCurve) Params() *elliptic.CurveParams {
	// Return the receiver's own parameters. The original returned the
	// package-level singleton's params regardless of the receiver, which
	// silently broke any KoblitzCurve other than the shared secp256k1
	// instance; for that singleton this change is behavior-identical.
	return curve.CurveParams
}
// IsOnCurve reports whether the given (x,y) lies on the curve.
// Because a = 0 for Koblitz curves, the check reduces to
// y² ≡ x³ + b (mod p) — no a·x term is needed.
func (curve *KoblitzCurve) IsOnCurve(x *big.Int, y *big.Int) bool {
	// y^2 mod p = ( x^3 + b) mod p
	y2 := new(big.Int).Mul(y, y)
	y2.Mod(y2, curve.P)
	x3 := new(big.Int).Mul(x, x)
	x3.Mul(x3, x)
	x3.Add(x3, curve.B)
	x3.Mod(x3, curve.P)
	return x3.Cmp(y2) == 0
}

// affineFromJacobian converts a Jacobian point (x, y, z) back to affine
// coordinates: xOut = x/z², yOut = y/z³ (mod P).
// NOTE(review): assumes z != 0 (i.e. not the point at infinity); callers
// must guarantee this.
func (curve *KoblitzCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
	zinv := new(big.Int).ModInverse(z, curve.P)
	zinvsq := new(big.Int).Mul(zinv, zinv)
	xOut = new(big.Int).Mul(x, zinvsq)
	xOut.Mod(xOut, curve.P)
	zinvsq.Mul(zinvsq, zinv) // now z⁻³
	yOut = new(big.Int).Mul(y, zinvsq)
	yOut.Mod(yOut, curve.P)
	return
}

// Add returns the sum of (x1,y1) and (x2,y2), computed by lifting both
// points to Jacobian coordinates (z = 1) and converting back.
func (curve *KoblitzCurve) Add(x1 *big.Int, y1 *big.Int, x2 *big.Int, y2 *big.Int) (x *big.Int, y *big.Int) {
	z := new(big.Int).SetInt64(1)
	return curve.affineFromJacobian(curve.addJacobian(x1, y1, z, x2, y2, z))
}
// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
// (x2, y2, z2) and returns their sum, also in Jacobian form.
//
// NOTE(review): this implements the add-2007-bl formulas, which are not
// valid when both inputs represent the same point (h = r = 0); callers
// must never pass equal points — use doubleJacobian instead. Confirm the
// only caller (ScalarMult) preserves this invariant.
func (curve *KoblitzCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
	// See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
	z1z1 := new(big.Int).Mul(z1, z1) // Z1²
	z1z1.Mod(z1z1, curve.P)
	z2z2 := new(big.Int).Mul(z2, z2) // Z2²
	z2z2.Mod(z2z2, curve.P)
	u1 := new(big.Int).Mul(x1, z2z2) // U1 = X1·Z2²
	u1.Mod(u1, curve.P)
	u2 := new(big.Int).Mul(x2, z1z1) // U2 = X2·Z1²
	u2.Mod(u2, curve.P)
	h := new(big.Int).Sub(u2, u1) // H = U2-U1
	if h.Sign() == -1 {
		h.Add(h, curve.P) // keep H non-negative mod P
	}
	i := new(big.Int).Lsh(h, 1) // I = (2H)²
	i.Mul(i, i)
	j := new(big.Int).Mul(h, i) // J = H·I
	s1 := new(big.Int).Mul(y1, z2)
	s1.Mul(s1, z2z2) // S1 = Y1·Z2³
	s1.Mod(s1, curve.P)
	s2 := new(big.Int).Mul(y2, z1)
	s2.Mul(s2, z1z1) // S2 = Y2·Z1³
	s2.Mod(s2, curve.P)
	r := new(big.Int).Sub(s2, s1) // r = 2·(S2-S1)
	if r.Sign() == -1 {
		r.Add(r, curve.P)
	}
	r.Lsh(r, 1)
	v := new(big.Int).Mul(u1, i) // V = U1·I
	x3 := new(big.Int).Set(r)    // X3 = r²-J-2V
	x3.Mul(x3, x3)
	x3.Sub(x3, j)
	x3.Sub(x3, v)
	x3.Sub(x3, v)
	x3.Mod(x3, curve.P)
	y3 := new(big.Int).Set(r) // Y3 = r·(V-X3)-2·S1·J
	v.Sub(v, x3)
	y3.Mul(y3, v)
	s1.Mul(s1, j)
	s1.Lsh(s1, 1)
	y3.Sub(y3, s1)
	y3.Mod(y3, curve.P)
	z3 := new(big.Int).Add(z1, z2) // Z3 = ((Z1+Z2)²-Z1²-Z2²)·H
	z3.Mul(z3, z3)
	z3.Sub(z3, z1z1)
	if z3.Sign() == -1 {
		z3.Add(z3, curve.P)
	}
	z3.Sub(z3, z2z2)
	if z3.Sign() == -1 {
		z3.Add(z3, curve.P)
	}
	z3.Mul(z3, h)
	z3.Mod(z3, curve.P)
	return x3, y3, z3
}
// Double returns 2*(x,y), computed by lifting the point to Jacobian
// coordinates (z = 1), doubling, and converting back to affine.
func (curve *KoblitzCurve) Double(x1 *big.Int, y1 *big.Int) (x *big.Int, y *big.Int) {
	z1 := new(big.Int).SetInt64(1)
	return curve.affineFromJacobian(curve.doubleJacobian(x1, y1, z1))
}

// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
// returns its double, also in Jacobian form.
func (curve *KoblitzCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
	// See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
	a := new(big.Int).Mul(x, x) //X1²
	b := new(big.Int).Mul(y, y) //Y1²
	c := new(big.Int).Mul(b, b) //B²
	d := new(big.Int).Add(x, b) //X1+B
	d.Mul(d, d)                 //(X1+B)²
	d.Sub(d, a)                 //(X1+B)²-A
	d.Sub(d, c)                 //(X1+B)²-A-C
	d.Mul(d, big.NewInt(2))     //2*((X1+B)²-A-C)
	e := new(big.Int).Mul(big.NewInt(3), a) //3*A
	f := new(big.Int).Mul(e, e)             //E²
	x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D
	x3.Sub(f, x3)                            //F-2*D
	x3.Mod(x3, curve.P)
	y3 := new(big.Int).Sub(d, x3) //D-X3
	y3.Mul(e, y3)                 //E*(D-X3)
	y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C
	y3.Mod(y3, curve.P)
	z3 := new(big.Int).Mul(y, z) //Y1*Z1
	// Z3 = 2*Y1*Z1 (the original comment incorrectly said 3*Y1*Z1; the
	// code's multiplication by 2 matches the dbl-2009-l formula).
	z3.Mul(big.NewInt(2), z3)
	z3.Mod(z3, curve.P)
	return x3, y3, z3
}
// ScalarMult returns k*(x1,y1) where k is a number in big-endian form.
//
// NOTE(review): this is a plain double-and-add ladder whose timing depends
// on the bits of k; it is not constant-time and leaks the scalar through
// side channels — confirm this is acceptable for the intended use.
func (curve *KoblitzCurve) ScalarMult(x1 *big.Int, y1 *big.Int, k []byte) (*big.Int, *big.Int) {
	// We have a slight problem in that the identity of the group (the
	// point at infinity) cannot be represented in (x, y) form on a finite
	// machine. Thus the standard add/double algorithm has to be tweaked
	// slightly: our initial state is not the identity, but x, and we
	// ignore the first true bit in |k|. If we don't find any true bits in
	// |k|, then we return nil, nil, because we cannot return the identity
	// element.
	Bz := new(big.Int).SetInt64(1)
	x := x1
	y := y1
	z := Bz
	seenFirstTrue := false
	// Renamed the loop variable from `byte` — it shadowed the builtin
	// byte type inside the loop body.
	for _, kByte := range k {
		for bitNum := 0; bitNum < 8; bitNum++ {
			if seenFirstTrue {
				x, y, z = curve.doubleJacobian(x, y, z)
			}
			if kByte&0x80 == 0x80 {
				if !seenFirstTrue {
					seenFirstTrue = true
				} else {
					x, y, z = curve.addJacobian(x1, y1, Bz, x, y, z)
				}
			}
			kByte <<= 1
		}
	}
	if !seenFirstTrue {
		return nil, nil
	}
	return curve.affineFromJacobian(x, y, z)
}
// ScalarBaseMult returns k*G, where G is the base point of the group
// and k is an integer in big-endian form.
func (curve *KoblitzCurve) ScalarBaseMult(k []byte) (x *big.Int, y *big.Int) {
	return curve.ScalarMult(curve.Gx, curve.Gy, k)
}

// QPlus1Div4 returns the (Q+1)/4 constant for the curve for use in
// calculating square roots via exponentiation (valid because P ≡ 3 mod 4).
// The value is precomputed once in init.
func (curve *KoblitzCurve) QPlus1Div4() *big.Int {
	return curve.q
}
// Package utf16 implements encoding and decoding of UTF-16 sequences.
package utf16
// The conditions replacementChar==unicode.ReplacementChar and
// maxRune==unicode.MaxRune are verified in the tests.
// Defining them locally avoids this package depending on package unicode.
const (
	replacementChar = '\uFFFD'     // Unicode replacement character
	maxRune         = '\U0010FFFF' // Maximum valid Unicode code point.
)

const (
	// 0xd800-0xdc00 encodes the high 10 bits of a pair.
	// 0xdc00-0xe000 encodes the low 10 bits of a pair.
	// the value is those 20 bits plus 0x10000.
	surr1 = 0xd800
	surr2 = 0xdc00
	surr3 = 0xe000

	// Runes at or above surrSelf need a surrogate pair; below it a rune
	// fits in a single UTF-16 code unit.
	surrSelf = 0x10000
)
// IsSurrogate reports whether r lies in the UTF-16 surrogate range
// (U+D800 through U+DFFF) and can therefore appear in a surrogate pair.
func IsSurrogate(r rune) bool {
	const (
		lo = 0xd800 // surr1: first surrogate code point
		hi = 0xe000 // surr3: one past the last surrogate code point
	)
	return r >= lo && r < hi
}
// DecodeRune returns the UTF-16 decoding of a surrogate pair.
// If the pair is not a valid UTF-16 surrogate pair, DecodeRune returns
// the Unicode replacement code point U+FFFD.
func DecodeRune(r1, r2 rune) rune {
	const (
		surr1 = 0xd800
		surr2 = 0xdc00
		surr3 = 0xe000
	)
	// Guard clause: reject anything that is not (high half, low half).
	if r1 < surr1 || r1 >= surr2 || r2 < surr2 || r2 >= surr3 {
		return '\uFFFD'
	}
	// High 10 bits from r1, low 10 bits from r2, offset by 0x10000.
	return (r1-surr1)<<10 + (r2 - surr2) + 0x10000
}
// EncodeRune returns the UTF-16 surrogate pair r1, r2 for the given rune.
// If the rune is not a valid Unicode code point or does not need encoding,
// EncodeRune returns U+FFFD, U+FFFD.
func EncodeRune(r rune) (r1, r2 rune) {
	const (
		surr1    = 0xd800
		surr2    = 0xdc00
		surr3    = 0xe000
		surrSelf = 0x10000
		maxRune  = '\U0010FFFF'
	)
	// Runes below surrSelf fit in one code unit (this also covers the
	// surrogate range itself); runes above maxRune are invalid.
	if r < surrSelf || r > maxRune || (r >= surr1 && r < surr3) {
		return '\uFFFD', '\uFFFD'
	}
	v := r - surrSelf
	return surr1 + (v>>10)&0x3ff, surr2 + v&0x3ff
}
// Encode returns the UTF-16 encoding of the Unicode code point sequence s.
// Invalid code points (negatives, surrogate halves, values beyond
// U+10FFFF) are encoded as U+FFFD.
func Encode(s []rune) []uint16 {
	const (
		surr1           = 0xd800
		surr2           = 0xdc00
		surr3           = 0xe000
		surrSelf        = 0x10000
		maxRune         = '\U0010FFFF'
		replacementChar = '\uFFFD'
	)
	// Upper bound on the output length: one extra unit per rune that may
	// need a surrogate pair.
	n := len(s)
	for _, v := range s {
		if v >= surrSelf {
			n++
		}
	}
	a := make([]uint16, n)
	n = 0
	for _, v := range s {
		// Invalid code points are replaced before encoding.
		if v < 0 || (v >= surr1 && v < surr3) || v > maxRune {
			v = replacementChar
		}
		if v < surrSelf {
			a[n] = uint16(v)
			n++
			continue
		}
		// Supplementary plane: emit a surrogate pair.
		v -= surrSelf
		a[n] = uint16(surr1 + (v>>10)&0x3ff)
		a[n+1] = uint16(surr2 + v&0x3ff)
		n += 2
	}
	return a[0:n]
}
// Decode returns the Unicode code point sequence represented
// by the UTF-16 encoding s. Unpaired surrogate halves decode to U+FFFD.
func Decode(s []uint16) []rune {
	const (
		surr1           = 0xd800
		surr2           = 0xdc00
		surr3           = 0xe000
		surrSelf        = 0x10000
		replacementChar = '\uFFFD'
	)
	a := make([]rune, len(s))
	n := 0
	for i := 0; i < len(s); i++ {
		r := rune(s[i])
		if r >= surr1 && r < surr2 && i+1 < len(s) &&
			rune(s[i+1]) >= surr2 && rune(s[i+1]) < surr3 {
			// Well-formed pair: combine high and low halves.
			a[n] = (r-surr1)<<10 + (rune(s[i+1]) - surr2) + surrSelf
			i++ // consume the low half too
		} else if r >= surr1 && r < surr3 {
			// Lone surrogate half.
			a[n] = replacementChar
		} else {
			// Ordinary BMP code unit.
			a[n] = r
		}
		n++
	}
	return a[0:n]
}
package vectors
import (
"fmt"
"reflect"
)
// Reflected element types supported out of the box; pass any of these
// (or any other reflect.Type) to NewVector.
var Bool = reflect.TypeOf(true)
var Int = reflect.TypeOf(int(1))
var Int8 = reflect.TypeOf(int8(1))
var Int16 = reflect.TypeOf(int16(1))
var Int32 = reflect.TypeOf(int32(1))
var Int64 = reflect.TypeOf(int64(1))
var Uint = reflect.TypeOf(uint(1))
var Uint8 = reflect.TypeOf(uint8(1))
var Uint16 = reflect.TypeOf(uint16(1))
var Uint32 = reflect.TypeOf(uint32(1))
var Uint64 = reflect.TypeOf(uint64(1))
var Uintptr = reflect.TypeOf(uintptr(1))
var Float32 = reflect.TypeOf(float32(1.0))
var Float64 = reflect.TypeOf(float64(1))
var Complex64 = reflect.TypeOf(complex64(1))
var Complex128 = reflect.TypeOf(complex128(1))

//var Array
//var Chan
//var Func
//var Interface
//var Map
//var Ptr
//var Slice
var String = reflect.TypeOf(string(""))

//var Struct
//var UnsafePointer

// Vector is a dynamically-sized sequence whose element type is fixed at
// construction time. It is backed by a reflect-built slice; writes with a
// mismatched element type panic.
type Vector struct {
	slice  reflect.Value // the underlying []T held as a reflect.Value
	typeof reflect.Type  // the element type T
}

// NewVector creates an empty Vector of element type T and returns it.
func NewVector(T reflect.Type) *Vector {
	return &Vector{
		slice:  reflect.MakeSlice(reflect.SliceOf(T), 0, 0),
		typeof: T,
	}
}

// newVector creates a Vector of element type t with the given length and
// capacity; new slots hold t's zero value.
func newVector(t reflect.Type, len, cap int) *Vector {
	return &Vector{
		slice:  reflect.MakeSlice(reflect.SliceOf(t), len, cap),
		typeof: t,
	}
}

// Get returns the element stored at index.
// Fixed: the original returned the raw reflect.Value wrapper instead of the
// element itself, inconsistent with Pop/PopBack/PopOut.
func (v *Vector) Get(index int) interface{} {
	return v.slice.Index(index).Interface()
}

// Put appends the given elements to the back of the vector.
// Panics if any element's type differs from the vector's element type.
func (v *Vector) Put(elements ...interface{}) {
	for i := range elements {
		if reflect.ValueOf(elements[i]).Type() != v.slice.Type().Elem() {
			panic(fmt.Sprintf("Put: cannot put a %T into a vector of %s", elements[i], v.slice.Type().Elem()))
		}
		v.slice = reflect.Append(v.slice, reflect.ValueOf(elements[i]))
	}
}

// PutFront prepends the given elements (in argument order) to the vector.
// Fixed: the original joined the two slices with reflect.Append, which
// panics because it treats the old slice as a single element; the join
// requires reflect.AppendSlice.
func (v *Vector) PutFront(elements ...interface{}) {
	v2 := newVector(v.typeof, 0, 0)
	for i := range elements {
		if reflect.ValueOf(elements[i]).Type() != v.slice.Type().Elem() {
			panic(fmt.Sprintf("PutFront: cannot put a %T into a vector of %s", elements[i], v.slice.Type().Elem()))
		}
		v2.slice = reflect.Append(v2.slice, reflect.ValueOf(elements[i]))
	}
	v.slice = reflect.AppendSlice(v2.slice, v.slice)
}

// Copy clones the entire Vector (elements are copied into fresh backing
// storage) and returns the clone.
func (v *Vector) Copy() *Vector {
	v2 := newVector(v.typeof, v.slice.Len(), v.slice.Cap())
	reflect.Copy(v2.slice, v.slice)
	return v2
}

// Cut removes the half-open index range [i, j) from the Vector, zeroing
// the vacated tail slots so no stale values are retained.
func (v *Vector) Cut(i, j int) {
	lastItem := v.slice.Len()
	cutLen := j - i
	reflect.Copy(v.slice.Slice(i, lastItem), v.slice.Slice(j, lastItem))
	for n := v.slice.Len() - cutLen; n < v.slice.Len(); n++ {
		v.slice.Index(n).Set(reflect.Zero(v.typeof))
	}
	v.slice = v.slice.Slice(0, v.slice.Len()-cutLen)
}

// Delete removes the element at index i, preserving the order of the
// remaining elements.
func (v *Vector) Delete(i int) {
	reflect.Copy(v.slice.Slice(i, v.slice.Len()), v.slice.Slice(i+1, v.slice.Len()))
	v.slice.Index(v.slice.Len() - 1).Set(reflect.Zero(v.typeof))
	v.slice = v.slice.Slice(0, v.slice.Len()-1)
}

// DeleteNoPreserveOrder removes the element at index i in O(1) by moving
// the last element into its slot; element order is not preserved.
func (v *Vector) DeleteNoPreserveOrder(i int) {
	v.slice.Index(i).Set(v.slice.Index(v.slice.Len() - 1))
	v.slice.Index(v.slice.Len() - 1).Set(reflect.Zero(v.typeof))
	v.slice = v.slice.Slice(0, v.slice.Len()-1)
}

// Expand grows the vector by inserting `indexes` zero-valued slots at
// offset, shifting the existing tail right.
func (v *Vector) Expand(offset, indexes int) {
	// Zero-valued filler of the requested width.
	v2 := newVector(v.typeof, indexes, indexes)
	// Fresh empty base so the rebuild never aliases v's old backing array.
	v3 := newVector(v.typeof, 0, 0)
	bef := v.slice.Slice(0, offset)
	aft := v.slice.Slice(offset, v.slice.Len())
	v.slice = reflect.AppendSlice(v3.slice, bef)
	v.slice = reflect.AppendSlice(v.slice, v2.slice)
	v.slice = reflect.AppendSlice(v.slice, aft)
}

// Extend grows the vector by appending `indexes` zero-valued slots at the
// end. Fixed: the original used reflect.Append (element append), which
// panics when handed a slice; joining slices requires reflect.AppendSlice.
func (v *Vector) Extend(indexes int) {
	v.slice = reflect.AppendSlice(v.slice, newVector(v.typeof, indexes, indexes).slice)
}

// Insert places element at offset, shifting later elements right.
// Fixed: the original grew the slice with reflect.ValueOf(0), which panics
// for any non-int element type; it now appends the element type's zero value.
func (v *Vector) Insert(offset int, element interface{}) {
	if reflect.ValueOf(element).Type() != v.slice.Type().Elem() {
		panic(fmt.Sprintf("Insert: cannot insert a %T into a vector of %s", element, v.slice.Type().Elem()))
	}
	v.slice = reflect.Append(v.slice, reflect.Zero(v.typeof))
	reflect.Copy(v.slice.Slice(offset+1, v.slice.Len()), v.slice.Slice(offset, v.slice.Len()))
	v.slice.Index(offset).Set(reflect.ValueOf(element))
}

// InsertVector splices vec's elements into the vector at offset.
// Panics if vec's element type differs from the vector's element type.
func (v *Vector) InsertVector(offset int, vec *Vector) {
	if vec.typeof != v.slice.Type().Elem() {
		panic(fmt.Sprintf("InsertVector: cannot insert a %T vector into a vector of %s", vec.slice.Interface(), v.slice.Type().Elem()))
	}
	v.slice = reflect.AppendSlice(v.slice.Slice(0, offset), reflect.AppendSlice(vec.slice, v.slice.Slice(offset, v.slice.Len())))
}

// Pop removes the first element from the vector and returns it.
func (v *Vector) Pop() interface{} {
	var x reflect.Value
	x, v.slice = v.slice.Index(0), v.slice.Slice(1, v.slice.Len())
	return x.Interface()
}

// PopBack removes the last element from the vector and returns it.
func (v *Vector) PopBack() interface{} {
	var x reflect.Value
	x, v.slice = v.slice.Index(v.slice.Len()-1), v.slice.Slice(0, v.slice.Len()-1)
	return x.Interface()
}

// PopOut removes the element at index i from the vector and returns it,
// preserving the order of the remaining elements.
func (v *Vector) PopOut(i int) interface{} {
	x := v.slice.Index(i).Interface()
	reflect.Copy(v.slice.Slice(i, v.slice.Len()), v.slice.Slice(i+1, v.slice.Len()))
	v.slice.Index(v.slice.Len() - 1).Set(reflect.Zero(v.typeof))
	v.slice = v.slice.Slice(0, v.slice.Len()-1)
	return x
}

// Push appends a single element to the back of the vector.
// Panics if the element's type differs from the vector's element type.
func (v *Vector) Push(element interface{}) {
	if reflect.ValueOf(element).Type() != v.slice.Type().Elem() {
		panic(fmt.Sprintf("Push: cannot put a %T into a vector of %s", element, v.slice.Type().Elem()))
	}
	v.slice = reflect.Append(v.slice, reflect.ValueOf(element))
}

// PushFront prepends a single element to the front of the vector.
// Fixed: joining the old contents requires reflect.AppendSlice; the
// original's reflect.Append panicked on the slice argument.
func (v *Vector) PushFront(element interface{}) {
	v2 := newVector(v.typeof, 0, 0)
	v2.Push(element)
	v.slice = reflect.AppendSlice(v2.slice, v.slice)
}

// Modify replaces the element at index with a new element of the same type.
// Panics if the element's type differs from the vector's element type.
func (v *Vector) Modify(index int, element interface{}) {
	if reflect.ValueOf(element).Type() != v.slice.Type().Elem() {
		panic(fmt.Sprintf("Modify: cannot change a %T into a type of %s", element, v.slice.Type().Elem()))
	}
	v.slice.Index(index).Set(reflect.ValueOf(element))
}
package neighbors
import (
"fmt"
"runtime"
"sort"
"github.com/pa-m/sklearn/base"
"github.com/pa-m/sklearn/metrics"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/stat"
)
// KNeighborsRegressor is a Regression based on k-nearest neighbors.
// The target is predicted by local interpolation of the targets
// associated of the nearest neighbors in the training set.
type KNeighborsRegressor struct {
	NearestNeighbors
	K int // number of neighbors to interpolate over; must be > 0
	// Weight selects the interpolation weighting: "distance" weights each
	// neighbor by 1/distance, any other value weights them uniformly.
	Weight string
	// Scale — NOTE(review): never read in this file; Fit stores X unscaled
	// despite the Xscaled field name. Confirm whether scaling was intended.
	Scale bool
	Distance Distance // metric used for neighbor search; defaults to EuclideanDistance
	// Runtime members
	Xscaled, Y *mat.Dense
}

// NewKNeighborsRegressor returns an initialized *KNeighborsRegressor
// with K neighbors and the given weighting mode ("uniform" or "distance").
func NewKNeighborsRegressor(K int, Weights string) base.Predicter {
	return &KNeighborsRegressor{NearestNeighbors: *NewNearestNeighbors(), K: K, Weight: Weights}
}

// PredicterClone return a (possibly unfitted) copy of predicter
func (m *KNeighborsRegressor) PredicterClone() base.Predicter {
	clone := *m
	return &clone
}

// IsClassifier returns false for KNeighborsRegressor
func (*KNeighborsRegressor) IsClassifier() bool { return false }

// Fit memorizes the training set: it copies X and Y, defaults the distance
// metric, validates K, and delegates index building to NearestNeighbors.
// Panics if K <= 0.
func (m *KNeighborsRegressor) Fit(Xmatrix, Ymatrix mat.Matrix) base.Fiter {
	X, Y := base.ToDense(Xmatrix), base.ToDense(Ymatrix)
	m.Xscaled = mat.DenseCopyOf(X)
	m.Y = mat.DenseCopyOf(Y)
	if m.Distance == nil {
		m.Distance = EuclideanDistance
	}
	if m.K <= 0 {
		panic(fmt.Errorf("K<=0"))
	}
	m.NearestNeighbors.Fit(X, Y)
	return m
}

// GetNOutputs return Y width
func (m *KNeighborsRegressor) GetNOutputs() int { return m.Y.RawMatrix().Cols }
// Predict fills Ymutable (allocating it when zero-sized) with, for each
// sample of X, the mean of its K nearest training targets — weighted by
// 1/(distance+eps) when Weight == "distance", uniformly otherwise.
func (m *KNeighborsRegressor) Predict(X mat.Matrix, Ymutable mat.Mutable) *mat.Dense {
	Y := base.ToDense(Ymutable)
	nSamples, _ := X.Dims()
	if Y.IsZero() {
		*Y = *mat.NewDense(nSamples, m.GetNOutputs(), nil)
	}
	NFitSamples, _ := m.Xscaled.Dims()
	NX, _ := X.Dims()
	_, outputs := m.Y.Dims()
	NCPU := runtime.NumCPU()
	isWeightDistance := m.Weight == "distance"
	// KNeighbors already returns each sample's K nearest neighbors sorted
	// by distance (row `sample` of distances/indices).
	distances, indices := m.KNeighbors(X, m.K)
	base.Parallelize(NCPU, NX, func(th, start, end int) {
		// NOTE(review): d2 and idx appear to be dead code — d2 is never
		// written (all zeros), so the sort below is a no-op and idx is
		// never read afterwards; neighbor selection comes entirely from
		// the KNeighbors result. Kept as-is; removing them would also
		// orphan the package's "sort" import.
		d2 := make([]float64, NFitSamples)
		idx := make([]int, NFitSamples)
		weights := make([]float64, m.K)
		ys := make([]float64, m.K)
		epsilon := 1e-15 // guards the 1/distance weight against zero distance
		for ik := range weights {
			weights[ik] = 1.
		}
		for sample := start; sample < end; sample++ {
			// sort idx to get first K nearest
			sort.Slice(idx, func(i, j int) bool { return d2[idx[i]] < d2[idx[j]] })
			// set Y(sample,output) to weighted average of K nearest
			for o := 0; o < outputs; o++ {
				for ik := range ys {
					ys[ik] = m.Y.At(int(indices.At(sample, ik)), o)
					if isWeightDistance {
						weights[ik] = 1. / (epsilon + distances.At(sample, ik))
					}
				}
				Y.Set(sample, o, stat.Mean(ys, weights))
			}
		}
	})
	return base.FromDense(Ymutable, Y)
}

// Score computes the coefficient of determination R² of the predictions
// for X against the true values Y.
func (m *KNeighborsRegressor) Score(X, Y mat.Matrix) float64 {
	NSamples, NOutputs := Y.Dims()
	Ypred := mat.NewDense(NSamples, NOutputs, nil)
	m.Predict(X, Ypred)
	return metrics.R2Score(Y, Ypred, nil, "").At(0, 0)
}
package strutil
import (
"math"
"strings"
)
// FillBytes fills dst by repeating pattern from its start.
// An empty (or nil) pattern leaves dst untouched; the original computed
// i % 0 in that case and panicked with a division-by-zero run-time error.
func FillBytes(dst []byte, pattern []byte) {
	if len(pattern) == 0 {
		return
	}
	for i := range dst {
		dst[i] = pattern[i%len(pattern)]
	}
}
// FillByte overwrites every byte of dst with the single byte pattern.
func FillByte(dst []byte, pattern byte) {
	for i := range dst {
		dst[i] = pattern
	}
}
// StrPad method pads the input string with the padString until the resulting string reaches the given length
func StrPad(input string, padLength int, padString string, rightPad bool) (output string) {
var (
inputLength = len(input)
padStringLength = len(padString)
)
if inputLength >= padLength {
return input
}
var (
repeat = int(math.Ceil(float64(1) + (float64(padLength-padStringLength))/float64(padStringLength)))
builder strings.Builder
)
builder.Grow(inputLength + padStringLength*repeat)
if rightPad {
builder.WriteString(input)
for i := 0; i < repeat; i++ {
builder.WriteString(padString)
}
output = builder.String()[:padLength]
} else {
for i := 0; i < repeat; i++ {
builder.WriteString(padString)
}
builder.WriteString(input)
output = builder.String()[builder.Len()-padLength:]
}
return
}
// StrPadSingle pads input with a single character until the result reaches
// padLength: padding goes on the right when rightPad is true, otherwise on
// the left. Inputs already at or beyond padLength are returned unchanged.
func StrPadSingle(input string, padLength int, pad byte, rightPad bool) (output string) {
	if len(input) >= padLength {
		return input
	}
	filler := strings.Repeat(string(pad), padLength-len(input))
	if rightPad {
		return input + filler
	}
	return filler + input
}
// BytesPad pads input with the padData byte pattern until the result
// reaches padLength. rightPad appends the pattern after the input;
// otherwise the result is the trailing padLength bytes of pattern+input.
// An input already at or beyond padLength is returned as a fresh copy.
func BytesPad(input []byte, padLength int, padData []byte, rightPad bool) (output []byte) {
	inputLength := len(input)
	if inputLength >= padLength {
		output = make([]byte, inputLength)
		copy(output, input)
		return
	}
	padDataLength := len(padData)
	// Enough whole repetitions of padData to cover the shortfall.
	repeat := int(math.Ceil(float64(1) + (float64(padLength-padDataLength))/float64(padDataLength)))
	filler := make([]byte, 0, repeat*padDataLength)
	for i := 0; i < repeat; i++ {
		filler = append(filler, padData...)
	}
	if rightPad {
		buf := make([]byte, 0, inputLength+len(filler))
		buf = append(append(buf, input...), filler...)
		output = buf[:padLength]
	} else {
		buf := make([]byte, 0, inputLength+len(filler))
		buf = append(append(buf, filler...), input...)
		output = buf[len(buf)-padLength:]
	}
	return
}
// BytesPadSingle pads input with a single byte until the result reaches
// padLength: padding goes on the right when rightPad is true, otherwise on
// the left. An input already at or beyond padLength is returned as a fresh
// copy. (The original doc comment misnamed this function "BytesPad".)
//
// The result is built in one exactly-sized allocation; the original
// allocated inputLength+padLength bytes and kept a sub-view of it,
// retaining the unused portion of the buffer.
func BytesPadSingle(input []byte, padLength int, pad byte, rightPad bool) (output []byte) {
	inputLength := len(input)
	if inputLength >= padLength {
		output = make([]byte, inputLength)
		copy(output, input)
		return
	}
	output = make([]byte, padLength)
	if rightPad {
		copy(output, input)
		for i := inputLength; i < padLength; i++ {
			output[i] = pad
		}
	} else {
		padCount := padLength - inputLength
		for i := 0; i < padCount; i++ {
			output[i] = pad
		}
		copy(output[padCount:], input)
	}
	return
}
// BytesUnPadSingle strips a run of the pad byte from ONE end of input:
// the right end when rightPad is true, otherwise the left end.
// (The previous comment claimed it trimmed both ends; it never did.)
// If input consists entirely of padding, the returned slice is nil.
// When copyData is false, the result aliases input's backing array;
// set copyData to true to get an independent copy.
func BytesUnPadSingle(input []byte, pad byte, rightPad bool, copyData bool) (output []byte) {
	var (
		limit = len(input)
		offset = 0
	)
	if rightPad {
		// Walk limit back over trailing pad bytes.
		for ; limit > offset; limit-- {
			if input[limit-1] != pad {
				break
			}
		}
	} else {
		// Walk offset forward over leading pad bytes.
		for ; offset < limit; offset++ {
			if input[offset] != pad {
				break
			}
		}
	}
	if limit-offset < 1 {
		return
	}
	if copyData {
		output = make([]byte, limit-offset)
		copy(output, input[offset:limit])
	} else {
		output = input[offset:limit]
	}
	return
}
package f32
import (
"log"
"reflect"
"context"
)
// init registers CSCMatrix with the package's matrix-type registry.
func init() {
	RegisterMatrix(reflect.TypeOf((*CSCMatrix)(nil)).Elem())
}

// CSCMatrix compressed storage by columns (CSC)
type CSCMatrix struct {
	r int // number of rows in the sparse matrix
	c int // number of columns in the sparse matrix
	values []float32 // non-zero entries, column-major, rows ascending within a column
	rows []int // row index of each entry in values (parallel slice)
	colStart []int // colStart[j]..colStart[j+1] delimit column j's entries; len == c+1
}

// NewCSCMatrix returns an empty r×c CSCMatrix.
func NewCSCMatrix(r, c int) *CSCMatrix {
	return newCSCMatrix(r, c, 0)
}

// NewCSCMatrixFromArray returns a CSCMatrix populated from a dense
// row-major [][]float32; zero entries are not stored.
// Assumes data is rectangular with at least one row.
func NewCSCMatrixFromArray(data [][]float32) *CSCMatrix {
	r := len(data)
	c := len(data[0])
	s := newCSCMatrix(r, c, 0)
	for i := 0; i < r; i++ {
		for k := 0; k < c; k++ {
			s.Set(i, k, data[i][k])
		}
	}
	return s
}

// newCSCMatrix builds an r×c matrix with capacity for l stored entries.
func newCSCMatrix(r, c int, l int) *CSCMatrix {
	s := &CSCMatrix{
		r: r,
		c: c,
		values: make([]float32, l),
		rows: make([]int, l),
		colStart: make([]int, c+1),
	}
	return s
}

// Columns the number of columns of the matrix
func (s *CSCMatrix) Columns() int {
	return s.c
}

// Rows the number of rows of the matrix
func (s *CSCMatrix) Rows() int {
	return s.r
}
// Update atomically reads the element at (r, c), applies f to it, and
// stores the result: f(0) is used for an absent element, and a result of
// 0 removes the stored entry (keeping the matrix sparse).
// Panics on out-of-range indices.
func (s *CSCMatrix) Update(r, c int, f func(float32) float32) {
	if r < 0 || r >= s.r {
		log.Panicf("Row '%+v' is invalid", r)
	}
	if c < 0 || c >= s.c {
		log.Panicf("Column '%+v' is invalid", c)
	}
	pointerStart, pointerEnd := s.rowIndex(r, c)
	// Entry exists at pointerStart only if it is inside column c's range
	// and its row index matches.
	if pointerStart < pointerEnd && s.rows[pointerStart] == r {
		value := f(s.values[pointerStart])
		if value == 0 {
			s.remove(pointerStart, c)
		} else {
			s.values[pointerStart] = value
		}
	} else {
		s.insert(pointerStart, r, c, f(0))
	}
}

// At returns the value of a matrix element at r-th, c-th
func (s *CSCMatrix) At(r, c int) (value float32) {
	s.Update(r, c, func(v float32) float32 {
		value = v
		return v
	})
	return
}

// Set sets the value at r-th, c-th of the matrix (0 removes the entry).
func (s *CSCMatrix) Set(r, c int, value float32) {
	s.Update(r, c, func(v float32) float32 {
		return value
	})
}
// ColumnsAt returns column c as a sparse vector of length Rows().
// O(nnz in column c); panics on an out-of-range column.
func (s *CSCMatrix) ColumnsAt(c int) Vector {
	if c < 0 || c >= s.c {
		log.Panicf("Column '%+v' is invalid", c)
	}
	columns := NewSparseVector(s.r)
	start := s.colStart[c]
	end := s.colStart[c+1]
	for i := start; i < end; i++ {
		columns.SetVec(s.rows[i], s.values[i])
	}
	return columns
}

// RowsAt returns row r as a sparse vector of length Columns().
// CSC storage is column-oriented, so this probes every column:
// O(c · log(nnz per column)). Panics on an out-of-range row.
func (s *CSCMatrix) RowsAt(r int) Vector {
	if r < 0 || r >= s.r {
		log.Panicf("Row '%+v' is invalid", r)
	}
	rows := NewSparseVector(s.c)
	for c := range s.colStart[:s.c] {
		pointerStart, pointerEnd := s.rowIndex(r, c)
		if pointerStart < pointerEnd && s.rows[pointerStart] == r {
			rows.SetVec(c, s.values[pointerStart])
		}
	}
	return rows
}

// RowsAtToArray returns row r as a dense []float32 of length Columns()
// (absent entries are 0). Same per-column probing cost as RowsAt.
func (s *CSCMatrix) RowsAtToArray(r int) []float32 {
	if r < 0 || r >= s.Rows() {
		log.Panicf("Row '%+v' is invalid", r)
	}
	rows := make([]float32, s.c)
	for c := range s.colStart[:s.c] {
		pointerStart, pointerEnd := s.rowIndex(r, c)
		if pointerStart < pointerEnd && s.rows[pointerStart] == r {
			rows[c] = s.values[pointerStart]
		}
	}
	return rows
}
// insert splices (r, value) into the parallel values/rows slices at
// pointer and shifts all later columns' start offsets up by one.
// Zero values are not stored. O(nnz) per call due to the slice splice.
func (s *CSCMatrix) insert(pointer, r, c int, value float32) {
	if value == 0 {
		return
	}
	s.rows = append(s.rows[:pointer], append([]int{r}, s.rows[pointer:]...)...)
	s.values = append(s.values[:pointer], append([]float32{value}, s.values[pointer:]...)...)
	for i := c + 1; i <= s.c; i++ {
		s.colStart[i]++
	}
}

// remove deletes the entry at pointer from the parallel slices and shifts
// all later columns' start offsets down by one. O(nnz) per call.
func (s *CSCMatrix) remove(pointer, c int) {
	s.rows = append(s.rows[:pointer], s.rows[pointer+1:]...)
	s.values = append(s.values[:pointer], s.values[pointer+1:]...)
	for i := c + 1; i <= s.c; i++ {
		s.colStart[i]--
	}
}

// rowIndex binary-searches column c for row r. The first return value is
// the index in values/rows where r is stored, or where it would be
// inserted; the second is an exclusive upper bound. Callers test
// "found" as: first < second && s.rows[first] == r.
func (s *CSCMatrix) rowIndex(r, c int) (int, int) {
	start := s.colStart[c]
	end := s.colStart[c+1]
	if start-end == 0 {
		// Empty column: r would be inserted at start.
		return start, end
	}
	if r > s.rows[end-1] {
		// Beyond the column's last stored row: insert at the end.
		return end, end
	}
	for start < end {
		p := (start + end) / 2
		if s.rows[p] > r {
			end = p
		} else if s.rows[p] < r {
			start = p + 1
		} else {
			return p, end
		}
	}
	return start, end
}
// Copy returns a deep copy of the matrix: values, row indices and the
// column-start table are all duplicated, so the clone can be mutated
// independently of the original.
func (s *CSCMatrix) Copy() Matrix {
	matrix := newCSCMatrix(s.r, s.c, len(s.values))
	// Bulk-copy the parallel slices with the builtin copy instead of the
	// original element-by-element loops (which also skipped preallocating
	// colStart's contents via a separate loop).
	copy(matrix.values, s.values)
	copy(matrix.rows, s.rows)
	copy(matrix.colStart, s.colStart)
	return matrix
}
// Scalar multiplication of a matrix by alpha
func (s *CSCMatrix) Scalar(alpha float32) Matrix {
	return Scalar(context.Background(), s, alpha)
}

// Multiply multiplies a matrix by another matrix; the result has
// s.Rows() × m.Columns() shape.
func (s *CSCMatrix) Multiply(m Matrix) Matrix {
	matrix := newCSCMatrix(s.Rows(), m.Columns(), 0)
	MatrixMatrixMultiply(context.Background(), s, m, nil, matrix)
	return matrix
}

// Add addition of a matrix by another matrix
func (s *CSCMatrix) Add(m Matrix) Matrix {
	matrix := s.Copy()
	Add(context.Background(), s, m, nil, matrix)
	return matrix
}

// Subtract subtracts one matrix from another matrix (s - m).
// NOTE(review): the destination is seeded from m.Copy() while Add seeds
// from s.Copy() — confirm the package-level Subtract expects this.
func (s *CSCMatrix) Subtract(m Matrix) Matrix {
	matrix := m.Copy()
	Subtract(context.Background(), s, m, nil, matrix)
	return matrix
}

// Negative the negative of a matrix
func (s *CSCMatrix) Negative() Matrix {
	matrix := s.Copy()
	Negative(context.Background(), s, nil, matrix)
	return matrix
}

// Transpose swaps the rows and columns
func (s *CSCMatrix) Transpose() Matrix {
	matrix := newCSCMatrix(s.c, s.r, 0)
	Transpose(context.Background(), s, nil, matrix)
	return matrix
}

// Equal the two matrices are equal
func (s *CSCMatrix) Equal(m Matrix) bool {
	return Equal(context.Background(), s, m)
}

// NotEqual the two matrices are not equal
func (s *CSCMatrix) NotEqual(m Matrix) bool {
	return NotEqual(context.Background(), s, m)
}
// Size returns the logical (dense) size of the matrix: rows × columns.
func (s *CSCMatrix) Size() int {
	return s.Rows() * s.Columns()
}

// Values the number of non-zero elements in the matrix
func (s *CSCMatrix) Values() int {
	return len(s.values)
}

// Clear removes all elements from a matrix, dropping the old backing
// storage; the matrix dimensions are unchanged.
func (s *CSCMatrix) Clear() {
	s.values = make([]float32, 0)
	s.rows = make([]int, 0)
	s.colStart = make([]int, s.c+1)
}

// Enumerate iterates through all non-zero elements, order is not guaranteed
func (s *CSCMatrix) Enumerate() Enumerate {
	return s.iterator()
}

// iterator builds a fresh iterator positioned before the first entry
// (c starts at -1 so the first next() advances into column 0).
func (s *CSCMatrix) iterator() *cSCMatrixIterator {
	i := &cSCMatrixIterator{
		matrix: s,
		size: len(s.values),
		c: -1,
	}
	return i
}
// cSCMatrixIterator walks the stored entries column by column.
type cSCMatrixIterator struct {
	matrix *CSCMatrix
	size int // total stored entries at construction time
	last int // number of entries already yielded
	c int // current column
	r int // row of the entry most recently yielded
	rIndex int // candidate row index being scanned within the column
	index int // position in matrix.values of the last yielded entry
	pointerStart int // cursor within the current column's entry range
	pointerEnd int // exclusive end of the current column's entry range
}

// HasNext checks the iterator has any more values
func (s *cSCMatrixIterator) HasNext() bool {
	if s.last >= s.size {
		return false
	}
	return true
}

// next advances to the following stored entry, skipping empty columns.
// Callers must only invoke it after HasNext() returned true.
func (s *cSCMatrixIterator) next() {
	// Skip columns with no remaining entries.
	for s.pointerStart == s.pointerEnd {
		s.c++
		s.pointerStart = s.matrix.colStart[s.c]
		s.pointerEnd = s.matrix.colStart[s.c+1]
		s.rIndex = s.matrix.rows[s.pointerStart]
	}
	// Scan forward to the entry whose row matches the candidate index.
	for s.pointerStart < s.pointerEnd {
		if s.matrix.rows[s.pointerStart] == s.rIndex {
			s.index = s.pointerStart
			s.pointerStart++
			s.r = s.rIndex
			s.rIndex++
			s.last++
			return
		}
		s.rIndex++
	}
}

// Next moves the iterator and returns the row, column and value
func (s *cSCMatrixIterator) Next() (int, int, float32) {
	s.next()
	return s.r, s.c, s.matrix.values[s.index]
}
// Map replace each element with the result of applying a function to its value
func (s *CSCMatrix) Map() Map {
t := s.iterator()
i := &cSCMatrixMap{t}
return i
}
type cSCMatrixMap struct {
*cSCMatrixIterator
}
// HasNext reports whether any values remain to be visited.
func (s *cSCMatrixMap) HasNext() bool {
	return s.cSCMatrixIterator.HasNext()
}

// Map advances the iterator and replaces the current element with
// f(row, column, value). A zero result removes the element from the
// sparse storage instead of storing an explicit zero.
func (s *cSCMatrixMap) Map(f func(int, int, float32) float32) {
	s.next()
	value := f(s.r, s.c, s.matrix.values[s.index])
	if value != 0 {
		s.matrix.values[s.index] = value
	} else {
		s.matrix.remove(s.index, s.c)
	}
}
// Element reports whether the tuple at (r, c) exists and its value,
// cast to boolean, is true (i.e. strictly positive). The Update
// callback returns v unchanged, so the matrix is not modified.
func (s *CSCMatrix) Element(r, c int) (b bool) {
	s.Update(r, c, func(v float32) float32 {
		b = v > 0
		return v
	})
	return
} | f32/cscMatrix.go | 0.796292 | 0.49762 | cscMatrix.go | starcoder |
package pgs
// FieldType describes the type of a Field.
type FieldType interface {
// Field returns the parent Field of this type. While two FieldTypes might be
// equivalent, each instance of a FieldType is tied to its Field.
Field() Field
// Name returns the TypeName for this Field, which represents the type of the
// field as it would exist in Go source code.
Name() TypeName
// IsRepeated returns true if and only if the field is marked as "repeated".
// While map fields may be labeled as repeated, this method will not return
// true for them.
IsRepeated() bool
// IsMap returns true if the field is a map type.
IsMap() bool
// IsEnum returns true if the field is a singular enum value. Maps or
// repeated fields containing enums will still return false.
IsEnum() bool
// IsEmbed returns true if the field is a singular message value. Maps or
// repeated fields containing embeds will still return false.
IsEmbed() bool
// IsOptional returns true if the message's syntax is not Proto2 or
// the field is prefixed as optional.
IsOptional() bool
// IsRequired returns true if and only if the field is prefixed as required.
IsRequired() bool
// IsSlice returns true if the field is represented in Go as a slice. This
// method returns true only for repeated and bytes-type fields.
IsSlice() bool
// ProtoType returns the ProtoType value for this field.
ProtoType() ProtoType
// ProtoLabel returns the ProtoLabel value for this field.
ProtoLabel() ProtoLabel
// Imports includes all external packages required by this field.
Imports() []Package
// Enum returns the Enum associated with this FieldType. If IsEnum returns
// false, this value will be nil.
Enum() Enum
// Embed returns the embedded Message associated with this FieldType. If
// IsEmbed returns false, this value will be nil.
Embed() Message
// Element returns the FieldTypeElem representing the element component of
// the type. Nil will be returned if IsRepeated and IsMap return false.
Element() FieldTypeElem
// Key returns the FieldTypeElem representing the key component of the type.
// Nil will be return sif IsMap returns false.
Key() FieldTypeElem
setField(f Field)
toElem() FieldTypeElem
}
// scalarT is the base FieldType implementation for singular scalar
// (non-message, non-enum, non-repeated) proto fields; the other field
// type implementations embed it and override the relevant queries.
type scalarT struct {
	fld  Field
	name TypeName
}
// Most FieldType queries on a scalar have fixed answers; the remainder
// are derived from the underlying field descriptor.
func (s *scalarT) Field() Field               { return s.fld }
func (s *scalarT) IsRepeated() bool           { return false }
func (s *scalarT) IsMap() bool                { return false }
func (s *scalarT) IsEnum() bool               { return false }
func (s *scalarT) IsEmbed() bool              { return false }
func (s *scalarT) Name() TypeName             { return s.name }
func (s *scalarT) IsSlice() bool              { return s.ProtoType().IsSlice() }
func (s *scalarT) ProtoType() ProtoType       { return ProtoType(s.fld.Descriptor().GetType()) }
func (s *scalarT) ProtoLabel() ProtoLabel     { return ProtoLabel(s.fld.Descriptor().GetLabel()) }
func (s *scalarT) Imports() []Package         { return nil }
func (s *scalarT) setField(f Field)           { s.fld = f }
func (s *scalarT) Enum() Enum                 { return nil }
func (s *scalarT) Embed() Message             { return nil }
func (s *scalarT) Element() FieldTypeElem     { return nil }
func (s *scalarT) Key() FieldTypeElem         { return nil }
// IsOptional reports whether the field may be absent: true for syntaxes
// without a required prefix (proto3) or when explicitly labeled optional.
func (s *scalarT) IsOptional() bool {
	return !s.fld.Syntax().SupportsRequiredPrefix() || s.ProtoLabel() == Optional
}

// IsRequired reports whether the field carries an explicit required
// label, which only syntaxes supporting the prefix (proto2) allow.
func (s *scalarT) IsRequired() bool {
	return s.fld.Syntax().SupportsRequiredPrefix() && s.ProtoLabel() == Required
}

// toElem converts this scalar type into the element form used inside
// repeated and map field types.
func (s *scalarT) toElem() FieldTypeElem {
	return &scalarE{
		typ:   s,
		ptype: s.ProtoType(),
		name:  s.name,
	}
}
// enumT is the FieldType for singular enum fields.
type enumT struct {
	*scalarT
	enum Enum
}

func (e *enumT) Enum() Enum   { return e.enum }
func (e *enumT) IsEnum() bool { return true }

// Imports returns the enum's package when it differs from the field's
// own package; same-package enums need no import.
func (e *enumT) Imports() []Package {
	if pkg := e.enum.Package(); pkg.GoName() != e.fld.Package().GoName() {
		return []Package{pkg}
	}
	return nil
}

// toElem wraps the scalar element form with the enum reference.
func (e *enumT) toElem() FieldTypeElem {
	return &enumE{
		scalarE: e.scalarT.toElem().(*scalarE),
		enum:    e.enum,
	}
}
// embedT is the FieldType for singular embedded-message fields.
type embedT struct {
	*scalarT
	msg Message
}

func (e *embedT) Embed() Message { return e.msg }
func (e *embedT) IsEmbed() bool  { return true }

// Imports returns the embedded message's package when it differs from
// the field's own package; same-package embeds need no import.
func (e *embedT) Imports() []Package {
	if pkg := e.msg.Package(); pkg.GoName() != e.fld.Package().GoName() {
		return []Package{pkg}
	}
	return nil
}

// toElem wraps the scalar element form with the message reference.
func (e *embedT) toElem() FieldTypeElem {
	return &embedE{
		scalarE: e.scalarT.toElem().(*scalarE),
		msg:     e.msg,
	}
}
// repT is the FieldType for repeated fields; el describes the element type.
type repT struct {
	*scalarT
	el FieldTypeElem
}

func (r *repT) IsRepeated() bool       { return true }
func (r *repT) Element() FieldTypeElem { return r.el }
func (r *repT) IsSlice() bool          { return true }
func (r *repT) Imports() []Package     { return r.el.Imports() }

// toElem panics: proto does not allow a repeated field to be the element
// of another repeated or map field.
func (r *repT) toElem() FieldTypeElem { panic("cannot convert repeated FieldType to FieldTypeElem") }
// mapT is the FieldType for map fields. It reuses repT for the value
// element and adds the key element; maps are reported as neither
// repeated nor slice despite their repeated-entry wire encoding.
type mapT struct {
	*repT
	key FieldTypeElem
}

func (m *mapT) IsRepeated() bool   { return false }
func (m *mapT) IsMap() bool        { return true }
func (m *mapT) IsSlice() bool      { return false }
func (m *mapT) Key() FieldTypeElem { return m.key }
// Compile-time checks that every implementation satisfies FieldType.
var (
	_ FieldType = (*scalarT)(nil)
	_ FieldType = (*enumT)(nil)
	_ FieldType = (*embedT)(nil)
	_ FieldType = (*repT)(nil)
	_ FieldType = (*mapT)(nil)
) | ev/external/protoc-gen-validate/vendor/github.com/lyft/protoc-gen-star/field_type.go | 0.909868 | 0.46563 | field_type.go | starcoder |
package main
import (
"fmt"
"math"
"sort"
)
/*
The following program is implementation of Algorithm to find the shortest distance between 2 points from a set of points.
Time Complexity: O( n Log n )
*/
// Point is a 2D point with float64 coordinates.
type Point struct {
	X float64
	Y float64
}

// NotEq reports whether a and b are distinct points.
func NotEq(a Point, b Point) bool {
	return a.X != b.X || a.Y != b.Y
}

// MinDistance returns the closest pair among the four points and their
// distance. Pairs made of identical points are skipped; if every pair is
// degenerate the zero Points and +Inf are returned.
//
// Bug fix: the original reset (V1, V2, d) to +Inf when p1 and p2 were
// DIFFERENT (`if NotEq(p1, p2)`), which discarded the p1-p2 pair in the
// common case; the guard is meant to fire when they are identical.
// Also replaces math.Inf(9)/math.Inf(3) style calls with math.Inf(1).
func MinDistance(p1 Point, p2 Point, p3 Point, p4 Point) (V1 Point, V2 Point, d float64) {
	d = math.Inf(1)
	pairs := [6][2]Point{{p1, p2}, {p1, p3}, {p1, p4}, {p2, p3}, {p2, p4}, {p3, p4}}
	for _, pr := range pairs {
		if !NotEq(pr[0], pr[1]) {
			continue // identical points do not form a valid pair
		}
		if dist := PointDistance(pr[0], pr[1]); dist < d {
			V1, V2, d = pr[0], pr[1], dist
		}
	}
	return V1, V2, d
}

// PointDistance returns the Euclidean distance between a and b.
// math.Hypot avoids overflow/underflow of the squared intermediate.
func PointDistance(a Point, b Point) float64 {
	return math.Hypot(a.X-b.X, a.Y-b.Y)
}
// ClosestPoint recursively searches for the closest pair of points in the
// index range [a, b] (1-based, inclusive) using divide and conquer.
// Sx is expected sorted by X and Sy by Y; (v, w, d) carry the best pair
// found so far, and the winning pair plus its distance are returned.
//
// NOTE(review): the single-element base case returns (w, v, d) with the
// carried pair swapped, and both base cases index Sx and Sy at the same
// positions, which assumes the two orderings agree on those slots —
// verify against the standard closest-pair algorithm.
func ClosestPoint(Sx []Point, Sy []Point, a int, b int, v Point, w Point, d float64) (V1 Point, V2 Point, Dist float64) {
	s := b - a + 1
	if s == 1 {
		if d < math.Inf(8) {
			//fmt.Println(w,v,d)
			return w, v, d
		} else {
			//fmt.Println(w,v,math.Inf(8))
			return w, v, math.Inf(8)
		}
	} else if s == 2 {
		// Two points: compare their pairwise minimum with the carried best.
		V1, V2, Dist = MinDistance(Sx[a-1], Sx[b-1], Sy[a-1], Sy[b-1])
		if Dist < d {
			return V1, V2, Dist
		} else {
			return v, w, d
		}
	} else {
		// Recurse on both halves, then check pairs straddling the split.
		mid := (a + b) / 2
		Rv1, Rv2, d1 := ClosestPoint(Sx, Sy, mid+1, b, v, w, d)
		Lv1, Lv2, d2 := ClosestPoint(Sx, Sy, a, mid, v, w, d)
		if d1 > d2 {
			V1, V2, Dist = ClosestSplitPoints(Sx, Sy, a, b, Lv1, Lv2, d2)
		} else {
			V1, V2, Dist = ClosestSplitPoints(Sx, Sy, a, b, Rv1, Rv2, d1)
		}
		return V1, V2, Dist
	}
}
// ClosestSplitPoints scans the vertical strip of half-width d around the
// dividing line for a pair closer than d. Y collects the strip's points
// in Y order; each point is compared with at most its next 6 strip
// neighbours, which suffices by the classic closest-pair packing argument.
// Fixes: `for i, _ := range` idiom, and the candidate distance is now
// computed once per pair instead of twice.
//
// NOTE(review): mid is taken from Sx[s/2] rather than the midpoint of the
// [a, b] sub-range — verify this index against ClosestPoint's recursion.
func ClosestSplitPoints(Sx []Point, Sy []Point, a int, b int, v Point, w Point, d float64) (v1 Point, v2 Point, D float64) {
	s := b - a + 1
	mid := Sx[s/2]
	Y := make([]Point, 0, b-a+1)
	for i := a - 1; i < b; i++ {
		if Sy[i].X >= mid.X-d && Sy[i].X <= mid.X+d {
			Y = append(Y, Sy[i])
		}
	}
	D = d
	for i := range Y {
		for j := 1; j < int(math.Min(7, float64(len(Y)-i))); j++ {
			if dist := PointDistance(Y[i], Y[i+j]); dist < D {
				D = dist
				v1 = Y[i]
				v2 = Y[i+j]
			}
		}
	}
	if D == d {
		return v, w, d
	}
	return v1, v2, D
}
func ShortestDistance() {
var Px = []Point{
{X: 91, Y: 1},
{X: 2, Y: 29},
{X: 12, Y: 6},
{X: 9, Y: 54},
{X: 5, Y: 98},
{X: 34, Y: 12},
{X: 53, Y: 23},
{X: 2, Y: 56},
{X: 4, Y: 12},
}
sort.Slice(Px, func(i, j int) bool {
return Px[i].X < Px[j].X
})
Py := make([]Point, len(Px))
copy(Py, Px)
sort.Slice(Px, func(i, j int) bool {
return Px[i].Y < Px[j].Y
})
fmt.Println(ClosestPoint(Px, Py, 1, len(Px), Point{}, Point{}, math.Inf(3)))
} | shortestDistance.go | 0.549157 | 0.566858 | shortestDistance.go | starcoder |
package giu
import (
"image"
"image/color"
"github.com/ImmortalHax/giu/imgui"
)
// Canvas wraps an imgui draw list and exposes immediate-mode drawing
// primitives using Go image/color types.
type Canvas struct {
	drawlist imgui.DrawList
}

// GetCanvas returns a Canvas bound to the current window's draw list.
func GetCanvas() *Canvas {
	return &Canvas{
		drawlist: imgui.GetWindowDrawList(),
	}
}

// AddLine draws a line from p1 to p2 with the given color and thickness.
func (c *Canvas) AddLine(p1, p2 image.Point, color color.RGBA, thickness float32) {
	c.drawlist.AddLine(ToVec2(p1), ToVec2(p2), ToVec4Color(color), thickness)
}
// CornerFlags selects which corners of a rectangle are rounded.
// NOTE(review): the underscored names mirror the upstream Dear ImGui
// constants; renaming them to Go style would break existing callers.
type CornerFlags int

const (
	CornerFlags_None     CornerFlags = 0
	CornerFlags_TopLeft  CornerFlags = 1 << 0 // 0x1
	CornerFlags_TopRight CornerFlags = 1 << 1 // 0x2
	CornerFlags_BotLeft  CornerFlags = 1 << 2 // 0x4
	CornerFlags_BotRight CornerFlags = 1 << 3 // 0x8
	CornerFlags_Top      CornerFlags = CornerFlags_TopLeft | CornerFlags_TopRight // 0x3
	CornerFlags_Bot      CornerFlags = CornerFlags_BotLeft | CornerFlags_BotRight // 0xC
	CornerFlags_Left     CornerFlags = CornerFlags_TopLeft | CornerFlags_BotLeft  // 0x5
	CornerFlags_Right    CornerFlags = CornerFlags_TopRight | CornerFlags_BotRight // 0xA
	CornerFlags_All      CornerFlags = 0xF // In your function calls you may use ~0 (= all bits sets) instead of ImDrawCornerFlags_All, as a convenience
)
// AddRect draws a rectangle outline from pMin (upper-left) to pMax
// (lower-right); roundingCorners selects which corners use rounding.
// (Parameter names normalized from snake_case to Go MixedCaps —
// call sites are unaffected.)
func (c *Canvas) AddRect(pMin, pMax image.Point, color color.RGBA, rounding float32, roundingCorners CornerFlags, thickness float32) {
	c.drawlist.AddRect(ToVec2(pMin), ToVec2(pMax), ToVec4Color(color), rounding, int(roundingCorners), thickness)
}

// AddRectFilled draws a filled rectangle from pMin to pMax.
func (c *Canvas) AddRectFilled(pMin, pMax image.Point, color color.RGBA, rounding float32, roundingCorners CornerFlags) {
	c.drawlist.AddRectFilled(ToVec2(pMin), ToVec2(pMax), ToVec4Color(color), rounding, int(roundingCorners))
}

// AddText renders text at pos in the given color.
func (c *Canvas) AddText(pos image.Point, color color.RGBA, text string) {
	c.drawlist.AddText(ToVec2(pos), ToVec4Color(color), text)
}

// AddBezierCubic draws a cubic Bezier curve from pos0 to pos1 with
// control points cp0 and cp1, tessellated into numSegments segments.
func (c *Canvas) AddBezierCubic(pos0, cp0, cp1, pos1 image.Point, color color.RGBA, thickness float32, numSegments int) {
	c.drawlist.AddBezierCubic(ToVec2(pos0), ToVec2(cp0), ToVec2(cp1), ToVec2(pos1), ToVec4Color(color), thickness, numSegments)
}

// AddTriangle draws the triangle outline p1-p2-p3.
func (c *Canvas) AddTriangle(p1, p2, p3 image.Point, color color.RGBA, thickness float32) {
	c.drawlist.AddTriangle(ToVec2(p1), ToVec2(p2), ToVec2(p3), ToVec4Color(color), thickness)
}

// AddTriangleFilled draws the filled triangle p1-p2-p3.
func (c *Canvas) AddTriangleFilled(p1, p2, p3 image.Point, color color.RGBA) {
	c.drawlist.AddTriangleFilled(ToVec2(p1), ToVec2(p2), ToVec2(p3), ToVec4Color(color))
}

// AddCircle draws a circle outline.
func (c *Canvas) AddCircle(center image.Point, radius float32, color color.RGBA, thickness float32) {
	c.drawlist.AddCircle(ToVec2(center), radius, ToVec4Color(color), thickness)
}

// AddCircleFilled draws a filled circle.
func (c *Canvas) AddCircleFilled(center image.Point, radius float32, color color.RGBA) {
	c.drawlist.AddCircleFilled(ToVec2(center), radius, ToVec4Color(color))
}

// AddQuad draws the quadrilateral outline p1-p2-p3-p4.
func (c *Canvas) AddQuad(p1, p2, p3, p4 image.Point, color color.RGBA, thickness float32) {
	c.drawlist.AddQuad(ToVec2(p1), ToVec2(p2), ToVec2(p3), ToVec2(p4), ToVec4Color(color), thickness)
}

// AddQuadFilled draws the filled quadrilateral p1-p2-p3-p4.
func (c *Canvas) AddQuadFilled(p1, p2, p3, p4 image.Point, color color.RGBA) {
	c.drawlist.AddQuadFilled(ToVec2(p1), ToVec2(p2), ToVec2(p3), ToVec2(p4), ToVec4Color(color))
}
// Stateful path API: accumulate points with the Path* methods, then
// finish the shape with PathFillConvex() or PathStroke().
// (Parameter names normalized from snake_case to Go MixedCaps.)

// PathClear discards the current path.
func (c *Canvas) PathClear() {
	c.drawlist.PathClear()
}

// PathLineTo appends pos to the current path.
func (c *Canvas) PathLineTo(pos image.Point) {
	c.drawlist.PathLineTo(ToVec2(pos))
}

// PathLineToMergeDuplicate appends pos unless it duplicates the previous
// point in the path.
func (c *Canvas) PathLineToMergeDuplicate(pos image.Point) {
	c.drawlist.PathLineToMergeDuplicate(ToVec2(pos))
}

// PathFillConvex fills the accumulated (convex) path in the given color.
func (c *Canvas) PathFillConvex(color color.RGBA) {
	c.drawlist.PathFillConvex(ToVec4Color(color))
}

// PathStroke outlines the accumulated path, optionally closing it.
func (c *Canvas) PathStroke(color color.RGBA, closed bool, thickness float32) {
	c.drawlist.PathStroke(ToVec4Color(color), closed, thickness)
}

// PathArcTo appends an arc around center between angles aMin and aMax,
// tessellated into numSegments segments.
func (c *Canvas) PathArcTo(center image.Point, radius, aMin, aMax float32, numSegments int) {
	c.drawlist.PathArcTo(ToVec2(center), radius, aMin, aMax, numSegments)
}

// PathArcToFast appends an arc using precomputed angles; aMinOf12 and
// aMaxOf12 are expressed in twelfths of a full circle.
func (c *Canvas) PathArcToFast(center image.Point, radius float32, aMinOf12, aMaxOf12 int) {
	c.drawlist.PathArcToFast(ToVec2(center), radius, aMinOf12, aMaxOf12)
}

// PathBezierCubicCurveTo appends a cubic Bezier towards p3 with control
// points p1 and p2.
func (c *Canvas) PathBezierCubicCurveTo(p1, p2, p3 image.Point, numSegments int) {
	c.drawlist.PathBezierCubicCurveTo(ToVec2(p1), ToVec2(p2), ToVec2(p3), numSegments)
}
// AddImage draws the whole texture into the rectangle (pMin, pMax).
func (c *Canvas) AddImage(texture *Texture, pMin, pMax image.Point) {
	c.drawlist.AddImage(texture.id, ToVec2(pMin), ToVec2(pMax))
}

// AddImageV draws the texture region (uvMin, uvMax) into (pMin, pMax),
// tinted with color.
func (c *Canvas) AddImageV(texture *Texture, pMin, pMax image.Point, uvMin, uvMax image.Point, color color.RGBA) {
	c.drawlist.AddImageV(texture.id, ToVec2(pMin), ToVec2(pMax), ToVec2(uvMin), ToVec2(uvMax), ToVec4Color(color))
} | Canvas.go | 0.675122 | 0.441613 | Canvas.go | starcoder |
package matrix
// Minus returns P - A as a new dense matrix, or ErrorDimensionMismatch
// when the shapes differ. P itself is not modified: it is first
// materialized as a dense copy, which is then mutated and returned.
func (P *PivotMatrix) Minus(A MatrixRO) (Matrix, error) {
	if P.rows != A.Rows() || P.cols != A.Cols() {
		return nil, ErrorDimensionMismatch
	}
	B := P.DenseMatrix()
	B.Subtract(A)
	return B, nil
}

// Plus returns P + A as a new dense matrix, or ErrorDimensionMismatch
// when the shapes differ. P itself is not modified.
func (P *PivotMatrix) Plus(A MatrixRO) (Matrix, error) {
	if P.rows != A.Rows() || P.cols != A.Cols() {
		return nil, ErrorDimensionMismatch
	}
	B := P.DenseMatrix()
	B.Add(A)
	return B, nil
}
// Times returns the matrix product P x A, or ErrorDimensionMismatch when
// the inner dimensions differ. For each output row i it finds the k with
// P.pivots[k] == i and copies row k of A into row i of the result.
// NOTE(review): the inner scan assumes every row index appears in
// P.pivots; a malformed pivot slice would index out of range.
func (P *PivotMatrix) Times(A MatrixRO) (Matrix, error) {
	if P.Cols() != A.Rows() {
		return nil, ErrorDimensionMismatch
	}
	B := Zeros(P.rows, A.Cols())
	for i := 0; i < P.rows; i++ {
		k := 0
		// Locate the source row: the position k whose pivot maps to i.
		for ; i != P.pivots[k]; k++ {
		}
		for j := 0; j < A.Cols(); j++ {
			B.Set(i, j, A.Get(k, j))
		}
	}
	return B, nil
}
// TimesPivot multiplies two pivot (permutation) matrices, composing the
// permutations and multiplying their signs — a streamlined special case
// of Times for when both operands are pivots.
func (P *PivotMatrix) TimesPivot(A *PivotMatrix) (*PivotMatrix, error) {
	if P.rows != A.rows {
		return nil, ErrorDimensionMismatch
	}
	newPivots := make([]int, P.rows)
	newSign := P.pivotSign * A.pivotSign
	for i := 0; i < A.rows; i++ {
		newPivots[i] = P.pivots[A.pivots[i]]
	}
	return MakePivotMatrix(newPivots, newSign), nil
}
// RowPivotDense computes P x A (a row permutation of A) without forming
// the dense product: row si of A is copied to row P.pivots[si] of the
// result. The per-element loop is replaced with the builtin copy, which
// lowers to a single memmove per row.
func (P *PivotMatrix) RowPivotDense(A *DenseMatrix) (*DenseMatrix, error) {
	if P.rows != A.rows {
		return nil, ErrorDimensionMismatch
	}
	B := Zeros(A.rows, A.cols)
	for si := 0; si < A.rows; si++ {
		di := P.pivots[si]
		Astart := si * A.step
		Bstart := di * B.step
		copy(B.elements[Bstart:Bstart+A.cols], A.elements[Astart:Astart+A.cols])
	}
	return B, nil
}
// ColPivotDense computes A x P (a column permutation of A) without
// forming the dense product: column sj of A moves to column
// P.pivots[sj] of the result.
func (P *PivotMatrix) ColPivotDense(A *DenseMatrix) (*DenseMatrix, error) {
	if P.rows != A.cols {
		return nil, ErrorDimensionMismatch
	}
	B := Zeros(A.rows, A.cols)
	for i := 0; i < B.rows; i++ {
		Astart := i * A.step
		Bstart := i * B.step
		for sj := 0; sj < B.cols; sj++ {
			dj := P.pivots[sj]
			B.elements[Bstart+dj] = A.elements[Astart+sj]
		}
	}
	return B, nil
}
// RowPivotSparse computes P x A (a row permutation of A) by relocating
// each stored entry from row si to row P.pivots[si].
func (P *PivotMatrix) RowPivotSparse(A *SparseMatrix) (*SparseMatrix, error) {
	if P.rows != A.rows {
		return nil, ErrorDimensionMismatch
	}
	B := ZerosSparse(A.rows, A.cols)
	for index, value := range A.elements {
		si, j := A.GetRowColIndex(index)
		di := P.pivots[si]
		B.Set(di, j, value)
	}
	return B, nil
}
// ColPivotSparse computes A x P (a column permutation of A) by relocating
// each stored entry from column sj to column P.pivots[sj].
func (P *PivotMatrix) ColPivotSparse(A *SparseMatrix) (*SparseMatrix, error) {
	if P.rows != A.cols {
		return nil, ErrorDimensionMismatch
	}
	B := ZerosSparse(A.rows, A.cols)
	for index, value := range A.elements {
		i, sj := A.GetRowColIndex(index)
		dj := P.pivots[sj]
		B.Set(i, dj, value)
	}
	return B, nil
} | pivot_arithmetic.go | 0.789071 | 0.459986 | pivot_arithmetic.go | starcoder |
package iterables
// SliceIterable iterates over the elements of a slice in order.
type SliceIterable[K any] struct {
	abstractIterable[K]
	data  []K
	index int
}

// NewSliceIterable wraps elements in an Iterable. The embedded
// abstractIterable needs a back-reference to the concrete iterator so
// that its generic helpers (ForEach, Last, Collect) dispatch to the
// Next defined here.
func NewSliceIterable[K any](elements []K) Iterable[K] {
	iter := &SliceIterable[K]{data: elements, index: 0}
	iter.Iterable = iter
	return iter
}
// Next returns the next slice element, or the zero value and false once
// the slice is exhausted.
func (si *SliceIterable[K]) Next() (K, bool) {
	if si.index >= len(si.data) {
		var zero K
		return zero, false
	}
	element := si.data[si.index]
	si.index++
	return element, true
}

// Reset rewinds the iterator to the first element.
func (si *SliceIterable[K]) Reset() {
	si.index = 0
}
// Pair is a single key/value entry produced when iterating a map.
type Pair[K comparable, V any] struct {
	Key   K
	Value V
}
// MapIterable iterates over a map's key/value pairs via a generator
// goroutine, since Go map iteration cannot be suspended directly.
type MapIterable[K comparable, V any] struct {
	abstractIterable[*Pair[K, V]]
	data          map[K]V
	index         int
	generatorChan chan *Pair[K, V]
}

// NewMapIterable wraps data in an Iterable of key/value Pairs.
// Iteration order follows Go's randomized map order.
func NewMapIterable[K comparable, V any](data map[K]V) Iterable[*Pair[K, V]] {
	iter := &MapIterable[K, V]{
		data:          data,
		index:         0,
		generatorChan: make(chan *Pair[K, V]),
	}
	iter.Reset()
	iter.Iterable = iter
	return iter
}

// Next receives the next pair from the generator; ok is false once the
// channel has been closed (map exhausted).
func (mi *MapIterable[K, V]) Next() (*Pair[K, V], bool) {
	res, ok := <-mi.generatorChan
	return res, ok
}

// Reset starts a fresh generator goroutine over the map.
// NOTE(review): the goroutine blocks on an unbuffered channel, so an
// iteration abandoned before exhaustion (or a mid-stream Reset) leaks
// the previous goroutine forever — consider adding a cancellation signal.
func (mi *MapIterable[K, V]) Reset() {
	mi.generatorChan = make(chan *Pair[K, V])
	go func() {
		for k, v := range mi.data {
			mi.generatorChan <- &Pair[K, V]{Key: k, Value: v}
		}
		close(mi.generatorChan)
	}()
}
// MappingIterable lazily applies mapper to each element of a parent
// iterable, yielding values of type K from a parent of type V.
type MappingIterable[K, V any] struct {
	abstractIterable[K]
	parentIterable Iterable[V]
	mapper         func(V) K
}

// Next pulls the next parent element and returns its mapped value, or
// the zero value and false when the parent is exhausted.
func (mi *MappingIterable[K, V]) Next() (K, bool) {
	element, ok := mi.parentIterable.Next()
	if !ok {
		var i K
		return i, false
	}
	return mi.mapper(element), true
}

// Reset rewinds the underlying iterator.
func (mi *MappingIterable[K, V]) Reset() {
	mi.parentIterable.Reset()
}

// Map returns an Iterable that yields mapper(e) for each element e of
// parent, evaluated lazily.
func Map[K, V any](parent Iterable[K], mapper func(K) V) Iterable[V] {
	iter := &MappingIterable[V, K]{
		parentIterable: parent,
		mapper:         mapper,
	}
	iter.Iterable = iter
	return iter
}
// FilterIterable yields only the elements of its parent for which the
// predicate returns true.
type FilterIterable[K any] struct {
	abstractIterable[K]
	parentIterable Iterable[K]
	filter         func(K) bool
}

// Next skips forward to the next element accepted by the predicate,
// returning the zero value and false when the parent is exhausted.
func (fi *FilterIterable[K]) Next() (K, bool) {
	for {
		element, ok := fi.parentIterable.Next()
		if !ok {
			break
		}
		if fi.filter(element) {
			return element, true
		}
	}
	var zero K
	return zero, false
}

// Reset rewinds the underlying iterator.
func (fi *FilterIterable[K]) Reset() {
	fi.parentIterable.Reset()
}

// Filter returns an Iterable producing only the elements of parent that
// satisfy the predicate, evaluated lazily.
func Filter[K any](parent Iterable[K], filter func(K) bool) Iterable[K] {
	fi := &FilterIterable[K]{
		parentIterable: parent,
		filter:         filter,
	}
	fi.Iterable = fi
	return fi
}
// Reduce folds the iterable into a single value: starting from init, the
// accumulator is updated with reducer(acc, element) for each element.
func Reduce[K, V any](elements Iterable[V], init K, reducer func(K, V) K) K {
	acc := init
	elements.ForEach(func(_ int, element V) {
		acc = reducer(acc, element)
	})
	return acc
}
// abstractIterable supplies the generic Iterable helpers; concrete
// iterators embed it and assign themselves to the Iterable field so
// that Next dispatches to the concrete implementation.
type abstractIterable[K any] struct {
	Iterable[K]
}

// ForEach invokes callback with (index, element) for every remaining
// element of the iterator.
func (ai *abstractIterable[K]) ForEach(callback func(int, K)) {
	index := 0
	element, ok := ai.Next()
	for ok {
		callback(index, element)
		element, ok = ai.Next()
		index++
	}
}

// Last drains the iterator and returns its final element; the boolean is
// true when at least one element was produced.
//
// Bug fix: the original returned the terminal ok flag, which is always
// false once the loop exits, so callers could never distinguish an empty
// iterator from a non-empty one.
func (ai *abstractIterable[K]) Last() (K, bool) {
	var last K
	found := false
	for element, ok := ai.Next(); ok; element, ok = ai.Next() {
		last = element
		found = true
	}
	return last, found
}

// Collect drains the iterator into a slice (empty, never nil).
func (ai *abstractIterable[K]) Collect() []K {
	res := []K{}
	ai.ForEach(func(_ int, element K) {
		res = append(res, element)
	})
	return res
}
// Iterable is a resettable pull-style iterator over elements of type K.
type Iterable[K any] interface {
	// Next returns the next element; the boolean is false once exhausted.
	Next() (K, bool)
	// ForEach calls the function with (index, element) for each element.
	ForEach(func(int, K))
	// Last drains the iterator and returns its final element, if any.
	Last() (K, bool)
	// Reset rewinds the iterator to the beginning.
	Reset()
} | pkg/iterables/iterables.go | 0.597725 | 0.440529 | iterables.go | starcoder |
package models
// Currency models a currency-rates API response: Query echoes the
// request parameters and Data maps each currency code (ISO 4217 plus a
// few crypto symbols) to its rate against the base currency.
type Currency struct {
	Query struct {
		Apikey       string `json:"apikey"`
		BaseCurrency string `json:"base_currency"`
		Timestamp    int    `json:"timestamp"`
	} `json:"query"`
	Data struct {
		JPY float64 `json:"JPY"`
		CNY float64 `json:"CNY"`
		CHF float64 `json:"CHF"`
		CAD float64 `json:"CAD"`
		MXN float64 `json:"MXN"`
		INR float64 `json:"INR"`
		BRL float64 `json:"BRL"`
		RUB float64 `json:"RUB"`
		KRW float64 `json:"KRW"`
		IDR float64 `json:"IDR"`
		TRY float64 `json:"TRY"`
		SAR float64 `json:"SAR"`
		SEK float64 `json:"SEK"`
		NGN float64 `json:"NGN"`
		PLN float64 `json:"PLN"`
		ARS float64 `json:"ARS"`
		NOK float64 `json:"NOK"`
		TWD float64 `json:"TWD"`
		IRR float64 `json:"IRR"`
		AED float64 `json:"AED"`
		COP float64 `json:"COP"`
		THB float64 `json:"THB"`
		ZAR float64 `json:"ZAR"`
		DKK float64 `json:"DKK"`
		MYR float64 `json:"MYR"`
		SGD float64 `json:"SGD"`
		ILS float64 `json:"ILS"`
		HKD float64 `json:"HKD"`
		EGP float64 `json:"EGP"`
		PHP float64 `json:"PHP"`
		CLP float64 `json:"CLP"`
		PKR float64 `json:"PKR"`
		IQD float64 `json:"IQD"`
		DZD float64 `json:"DZD"`
		KZT float64 `json:"KZT"`
		QAR float64 `json:"QAR"`
		CZK float64 `json:"CZK"`
		PEN float64 `json:"PEN"`
		RON float64 `json:"RON"`
		VND float64 `json:"VND"`
		BDT float64 `json:"BDT"`
		HUF float64 `json:"HUF"`
		UAH float64 `json:"UAH"`
		AOA float64 `json:"AOA"`
		MAD float64 `json:"MAD"`
		OMR float64 `json:"OMR"`
		CUC float64 `json:"CUC"`
		BYR float64 `json:"BYR"`
		AZN float64 `json:"AZN"`
		LKR float64 `json:"LKR"`
		SDG float64 `json:"SDG"`
		SYP float64 `json:"SYP"`
		MMK float64 `json:"MMK"`
		DOP float64 `json:"DOP"`
		UZS float64 `json:"UZS"`
		KES float64 `json:"KES"`
		GTQ float64 `json:"GTQ"`
		URY float64 `json:"URY"`
		HRV float64 `json:"HRV"`
		MOP float64 `json:"MOP"`
		ETB float64 `json:"ETB"`
		CRC float64 `json:"CRC"`
		TZS float64 `json:"TZS"`
		TMT float64 `json:"TMT"`
		TND float64 `json:"TND"`
		PAB float64 `json:"PAB"`
		LBP float64 `json:"LBP"`
		RSD float64 `json:"RSD"`
		LYD float64 `json:"LYD"`
		GHS float64 `json:"GHS"`
		YER float64 `json:"YER"`
		BOB float64 `json:"BOB"`
		BHD float64 `json:"BHD"`
		CDF float64 `json:"CDF"`
		PYG float64 `json:"PYG"`
		UGX float64 `json:"UGX"`
		SVC float64 `json:"SVC"`
		TTD float64 `json:"TTD"`
		AFN float64 `json:"AFN"`
		NPR float64 `json:"NPR"`
		HNL float64 `json:"HNL"`
		BIH float64 `json:"BIH"`
		BND float64 `json:"BND"`
		ISK float64 `json:"ISK"`
		KHR float64 `json:"KHR"`
		GEL float64 `json:"GEL"`
		MZN float64 `json:"MZN"`
		BWP float64 `json:"BWP"`
		PGK float64 `json:"PGK"`
		JMD float64 `json:"JMD"`
		XAF float64 `json:"XAF"`
		NAD float64 `json:"NAD"`
		ALL float64 `json:"ALL"`
		SSP float64 `json:"SSP"`
		MUR float64 `json:"MUR"`
		MNT float64 `json:"MNT"`
		NIO float64 `json:"NIO"`
		LAK float64 `json:"LAK"`
		MKD float64 `json:"MKD"`
		AMD float64 `json:"AMD"`
		MGA float64 `json:"MGA"`
		XPF float64 `json:"XPF"`
		TJS float64 `json:"TJS"`
		HTG float64 `json:"HTG"`
		BSD float64 `json:"BSD"`
		MDL float64 `json:"MDL"`
		RWF float64 `json:"RWF"`
		KGS float64 `json:"KGS"`
		GNF float64 `json:"GNF"`
		SRD float64 `json:"SRD"`
		SLL float64 `json:"SLL"`
		XOF float64 `json:"XOF"`
		MWK float64 `json:"MWK"`
		FJD float64 `json:"FJD"`
		ERN float64 `json:"ERN"`
		SZL float64 `json:"SZL"`
		GYD float64 `json:"GYD"`
		BIF float64 `json:"BIF"`
		KYD float64 `json:"KYD"`
		MVR float64 `json:"MVR"`
		LSL float64 `json:"LSL"`
		LRD float64 `json:"LRD"`
		CVE float64 `json:"CVE"`
		DJF float64 `json:"DJF"`
		SCR float64 `json:"SCR"`
		SOS float64 `json:"SOS"`
		GMD float64 `json:"GMD"`
		KMF float64 `json:"KMF"`
		STD float64 `json:"STD"`
		XRP float64 `json:"XRP"`
		AUD float64 `json:"AUD"`
		BGN float64 `json:"BGN"`
		BTC float64 `json:"BTC"`
		JOD float64 `json:"JOD"`
		GBP float64 `json:"GBP"`
		ETH float64 `json:"ETH"`
		EUR float64 `json:"EUR"`
		LTC float64 `json:"LTC"`
		NZD float64 `json:"NZD"`
	} `json:"data"`
} | models/Currency.go | 0.596668 | 0.463323 | Currency.go | starcoder |
package aoc2019
import (
"strconv"
"strings"
)
// step is one wire movement: a direction (L/R/U/D) and a length in cells.
type step struct {
	dir string
	len int
}

// point is a grid coordinate relative to the origin (the central port).
type point struct {
	x int
	y int
}
// init registers this day's solver under key "03" with the puzzle runner.
func init() {
	registerFun("03", SolveDay03)
}
// SolveDay03 answers Advent of Code 2019 day 3: part one is the
// Manhattan distance of the wire intersection closest to the origin,
// part two the fewest combined steps to reach any intersection.
// NOTE(review): assumes at least one intersection exists; empty puzzle
// input would panic on intersections[0].
func SolveDay03(input string) (interface{}, interface{}) {
	firstPath, secondPath := parseInput(input)
	var m = make(map[point]int)
	// Trace the first wire, recording the step count at each visited point.
	walk(firstPath, m, false)
	// Trace the second wire read-only, collecting intersections.
	intersections, totalSteps := walk(secondPath, m, true)
	minSteps := totalSteps[0]
	minDistance := calculateDistance(intersections[0])
	for i := 1; i < len(intersections); i++ {
		distance := calculateDistance(intersections[i])
		if minDistance > distance {
			minDistance = distance
		}
		if minSteps > totalSteps[i] {
			minSteps = totalSteps[i]
		}
	}
	return minDistance, minSteps
}
// calculateDistance returns p's Manhattan distance from the origin.
func calculateDistance(p point) int {
	return abs(p.x) + abs(p.y)
}
// abs returns the absolute value of v.
func abs(v int) int {
	if v >= 0 {
		return v
	}
	return -v
}
// walk traces a wire along plan, one grid cell at a time.
//
// When freeze is false, the step count at each newly visited point is
// recorded into walked (first wire). When freeze is true, walked is left
// untouched and every point already present in it is collected as an
// intersection, paired with the combined step counts of both wires.
func walk(plan []step, walked map[point]int, freeze bool) ([]point, []int) {
	steps := 0
	currentPoint := point{x: 0, y: 0}
	var intersections []point
	var totalSteps []int
	for _, step := range plan {
		for i := 0; i < step.len; i++ {
			steps++
			currentPoint = nextPoint(currentPoint, step.dir)
			if walked[currentPoint] > 0 {
				// Crossing a point the other wire reached in walked[...] steps.
				intersections = append(intersections, currentPoint)
				totalSteps = append(totalSteps, walked[currentPoint]+steps)
			} else {
				if !freeze {
					walked[currentPoint] = steps
				}
			}
		}
	}
	return intersections, totalSteps
}
// nextPoint returns the point one grid cell away from currentPoint in
// the given direction (L/R/U/D); an unknown direction returns the point
// unchanged.
func nextPoint(currentPoint point, dir string) point {
	p := point{x: currentPoint.x, y: currentPoint.y}
	switch dir {
	case "L":
		p.x--
	case "R":
		p.x++
	case "U":
		p.y++
	case "D":
		p.y--
	}
	return p
}
// parseInput splits the raw puzzle input into the two wire paths.
// It assumes the input contains at least two newline-separated lines.
func parseInput(rawInput string) ([]step, []step) {
	pathsStr := strings.Split(rawInput, "\n")
	return parsePath(pathsStr[0]), parsePath(pathsStr[1])
}
func parsePath(pathStr string) []step {
path := strings.Split(pathStr, ",")
parsed := make([]step, len(path))
for i, st := range path {
len, _ := strconv.Atoi(st[1:])
parsed[i] = step{dir: st[0:1], len: len}
}
return parsed
} | aoc2019/day03.go | 0.588298 | 0.414129 | day03.go | starcoder |
package faststats
import (
"math"
"sort"
"sync/atomic"
)
// Percentile implements an efficient percentile calculation of
// arbitrary float64 samples.
type Percentile struct {
percentile float64
samples int64
offset int64
values []float64
value uint64 // These bits are really a float64.
}
// NewPercentile returns a Percentile with a given threshold and the
// default window of 256 samples.
func NewPercentile(percentile float64) *Percentile {
	return &Percentile{
		percentile: percentile,
		// 256 samples is fast, and accurate for most distributions.
		values: make([]float64, 0, 256),
	}
}

// NewPercentileWithWindow returns a Percentile with a given threshold
// and window size; larger windows trade memory/CPU for accuracy.
func NewPercentileWithWindow(percentile float64, sampleWindow int) *Percentile {
	return &Percentile{
		percentile: percentile,
		values:     make([]float64, 0, sampleWindow),
	}
}
// Value returns the current value at the stored percentile.
// It is thread-safe, and may be called concurrently with AddSample:
// the float64 is published as raw bits through an atomic uint64.
func (p *Percentile) Value() float64 {
	bits := atomic.LoadUint64(&p.value)
	return math.Float64frombits(bits)
}
// AddSample adds a single float64 sample to the data set.
// It is not thread-safe, and must not be called in parallel.
//
// p.values is kept sorted and steered to straddle the requested
// percentile: p.offset counts samples already discarded below the
// window. Once the window is full, each new sample either shifts the
// window (dropping its lowest or highest entry) or is merely counted in
// the offset, depending on where the percentile target has moved.
// NOTE(review): the rounding helper round() is defined elsewhere in this
// file; behaviour at window boundaries depends on it.
func (p *Percentile) AddSample(sample float64) {
	p.samples++
	if len(p.values) == cap(p.values) {
		// Desired count of discarded samples so the window is centred
		// on the percentile index.
		target := float64(p.samples)*p.percentile - float64(cap(p.values))/2
		offset := round(math.Max(target, 0))
		if sample > p.values[0] {
			if offset > p.offset {
				// Window shifts up: drop the lowest value and insert the sample.
				idx := sort.SearchFloat64s(p.values[1:], sample)
				copy(p.values, p.values[1:idx+1])
				p.values[idx] = sample
				p.offset++
			} else if sample < p.values[len(p.values)-1] {
				// In-place sorted insert; the highest value falls off the end.
				idx := sort.SearchFloat64s(p.values, sample)
				copy(p.values[idx+1:], p.values[idx:])
				p.values[idx] = sample
			}
		} else {
			if offset > p.offset {
				// Sample is below the window and the window should move up
				// anyway: just count it as discarded.
				p.offset++
			} else {
				// Prepend the sample, dropping the highest value.
				copy(p.values[1:], p.values)
				p.values[0] = sample
			}
		}
	} else {
		// Window not yet full: plain sorted insert.
		idx := sort.SearchFloat64s(p.values, sample)
		p.values = p.values[:len(p.values)+1]
		copy(p.values[idx+1:], p.values[idx:])
		p.values[idx] = sample
	}
	// Publish the new percentile value atomically for concurrent readers.
	bits := math.Float64bits(p.values[p.index()])
	atomic.StoreUint64(&p.value, bits)
}
// index maps the global percentile position into the retained window,
// clamping to the last element; round() is a helper defined elsewhere
// in this file.
func (p *Percentile) index() int64 {
	idx := round(float64(p.samples)*p.percentile - float64(p.offset))
	last := int64(len(p.values)) - 1
	if idx > last {
		return last
	}
	return idx
} | percentile.go | 0.832985 | 0.653438 | percentile.go | starcoder |
package collision
import (
"github.com/teomat/mater/vect"
"math"
)
// ArbiterEdge is a node in the doubly linked list of all arbiters
// attached to a body; Other points at the opposing body.
type ArbiterEdge struct {
	Arbiter    *Arbiter
	Next, Prev *ArbiterEdge
	Other      *Body
}
// MaxPoints is the maximum number of ContactPoints a single Arbiter can have.
const MaxPoints = 2

// Arbiter tracks the collision state between one pair of shapes,
// including their current contact points and combined friction.
type Arbiter struct {
	// The two colliding shapes.
	ShapeA, ShapeB *Shape
	// The contact points between the shapes.
	Contacts [MaxPoints]Contact
	// The number of valid entries in Contacts.
	NumContacts int
	// Per-body linked-list nodes for this arbiter.
	nodeA, nodeB *ArbiterEdge
	// Combined friction coefficient of the two shapes.
	Friction float64
	// Used to keep a linked list of all arbiters in a space.
	Next, Prev *Arbiter
}
// newArbiter allocates a zero-valued arbiter.
func newArbiter() *Arbiter {
	return new(Arbiter)
}
// CreateArbiter creates an arbiter between the given shapes.
// If the shapes do not collide, arbiter.NumContacts is zero.
// Shapes are ordered by shape type so collide() sees a predictable
// pairing; friction combines the shapes' values as a geometric mean.
func CreateArbiter(sa, sb *Shape) *Arbiter {
	arb := newArbiter()
	if sa.ShapeType() < sb.ShapeType() {
		arb.ShapeA = sa
		arb.ShapeB = sb
	} else {
		arb.ShapeA = sb
		arb.ShapeB = sa
	}
	arb.NumContacts = collide(&arb.Contacts, arb.ShapeA, arb.ShapeB)
	arb.Friction = math.Sqrt(sa.Friction * sb.Friction)
	arb.nodeA = new(ArbiterEdge)
	arb.nodeB = new(ArbiterEdge)
	return arb
}
// destroy clears the arbiter's shape references and contact state so it
// holds nothing when returned to the space.
func (arb *Arbiter) destroy() {
	arb.ShapeA = nil
	arb.ShapeB = nil
	arb.NumContacts = 0
	arb.Friction = 0
}
// equals reports whether both arbiters reference the same shape pair,
// compared positionally. Simplified from an if-true/return-false chain
// to a direct boolean return.
//
// NOTE(review): CreateArbiter orders shapes by shape type, which does
// not canonicalize pairs of shapes of the SAME type, so the disabled
// swapped-pair comparison below may still matter — confirm before
// relying on positional equality alone.
func (arb1 *Arbiter) equals(arb2 *Arbiter) bool {
	return arb1.ShapeA == arb2.ShapeA && arb1.ShapeB == arb2.ShapeB
	/*if arb1.ShapeA == arb2.ShapeB && arb1.ShapeB == arb1.ShapeA {
		return true
	}*/
}
// update recomputes the contact set for the arbiter's shape pair.
// The commented-out block below is an earlier implementation that merged
// new contacts with old ones (to warm-start the solver); it is preserved
// for reference.
func (arb *Arbiter) update() {
	/*var mergedContacts [MaxPoints]Contact
	for i := 0; i < numNewContacts; i++ {
		cNew := newContacts[i]
		k := -1
		for j := 0; j < arb.NumContacts; j++ {
			cOld := arb.Contacts[j]
			if cNew.Feature.Value() == cOld.Feature.Value() {
				k = j
				break
			}
		}
		if k > -1 {
			cOld := arb.Contacts[k]
			mergedContacts[i] = cNew
			c := mergedContacts[i]
			const warmStarting = false
			if warmStarting {
				c.Pn = cOld.Pn
				c.Pt = cOld.Pt
				c.Pnb = cOld.Pnb
			} else {
				c.Pn = 0
				c.Pt = 0
				c.Pnb = 0
			}
		} else {
			mergedContacts[i] = newContacts[i]
		}
	}
	for i := 0; i < numNewContacts; i++ {
		arb.Contacts[i] = mergedContacts[i]
	}
	arb.NumContacts = numNewContacts*/
	arb.NumContacts = collide(&arb.Contacts, arb.ShapeA, arb.ShapeB)
}
// preStep precomputes per-contact solver data for one simulation step:
// anchor vectors R1/R2 relative to each body, effective normal and
// tangent masses, and the Baumgarte position-correction bias. When
// impulse accumulation is enabled it also warm-starts the bodies with
// the impulses accumulated in previous steps.
func (arb *Arbiter) preStep(inv_dt float64) {
	const allowedPenetration = 0.01
	biasFactor := 0.0
	if Settings.PositionCorrection {
		biasFactor = 0.2
	}
	b1 := arb.ShapeA.Body
	b2 := arb.ShapeB.Body
	for i := 0; i < arb.NumContacts; i++ {
		c := &arb.Contacts[i]
		c.R1 = vect.Sub(c.Position, b1.Transform.Position)
		c.R2 = vect.Sub(c.Position, b2.Transform.Position)
		r1 := c.R1
		r2 := c.R2
		//Precompute normal mass, tangent mass, and bias
		rn1 := vect.Dot(r1, c.Normal)
		rn2 := vect.Dot(r2, c.Normal)
		kNormal := b1.invMass + b2.invMass
		kNormal += b1.invI*(vect.Dot(r1, r1)-rn1*rn1) +
			b2.invI*(vect.Dot(r2, r2)-rn2*rn2)
		c.MassNormal = 1.0 / kNormal
		tangent := vect.CrossVF(c.Normal, 1.0)
		rt1 := vect.Dot(r1, tangent)
		rt2 := vect.Dot(r2, tangent)
		kTangent := b1.invMass + b2.invMass
		kTangent += b1.invI*(vect.Dot(r1, r1)-rt1*rt1) +
			b2.invI*(vect.Dot(r2, r2)-rt2*rt2)
		c.MassTangent = 1.0 / kTangent
		// Bias velocity pushes overlapping bodies apart, ignoring the
		// allowed penetration slop.
		c.Bias = -biasFactor * inv_dt * math.Min(0.0, c.Separation+allowedPenetration)
		if Settings.AccumulateImpulses {
			//Apply normal + friction impulse (warm start)
			P := vect.Add(vect.Mult(c.Normal, c.Pn), vect.Mult(tangent, c.Pt))
			b1.Velocity.Sub(vect.Mult(P, b1.invMass))
			b1.AngularVelocity -= b1.invI * vect.Cross(r1, P)
			b2.Velocity.Add(vect.Mult(P, b2.invMass))
			b2.AngularVelocity += b2.invI * vect.Cross(r2, P)
		}
	}
}
// applyImpulse performs one solver iteration over all contacts: it
// computes and applies the normal impulse (including the position-bias
// term from preStep) and then the friction impulse clamped by the
// Coulomb cone. With accumulation enabled, clamping is applied to the
// running totals so repeated iterations converge stably.
func (arb *Arbiter) applyImpulse() {
	sA := arb.ShapeA
	sB := arb.ShapeB
	b1 := sA.Body
	b2 := sB.Body
	//xfA := b1.Transform
	//xfB := b2.Transform
	for i := 0; i < arb.NumContacts; i++ {
		c := &arb.Contacts[i]
		// Relative velocity at contact
		dv := vect.Vect{}
		{
			t1 := vect.Add(b2.Velocity, vect.CrossFV(b2.AngularVelocity, c.R2))
			t2 := vect.Sub(b1.Velocity, vect.CrossFV(b1.AngularVelocity, c.R1))
			dv = vect.Sub(t1, t2)
		}
		// Compute normal impulse
		vn := vect.Dot(dv, c.Normal)
		dPn := c.MassNormal * (-vn + c.Bias)
		if Settings.AccumulateImpulses {
			// Clamp the accumulated impulse (never attractive overall)
			Pn0 := c.Pn
			c.Pn = math.Max(Pn0+dPn, 0.0)
			dPn = c.Pn - Pn0
		} else {
			dPn = math.Max(dPn, 0.0)
		}
		//Apply contact impulse
		Pn := vect.Mult(c.Normal, dPn)
		b1.Velocity.Sub(vect.Mult(Pn, b1.invMass))
		b1.AngularVelocity -= b1.invI * vect.Cross(c.R1, Pn)
		b2.Velocity.Add(vect.Mult(Pn, b2.invMass))
		b2.AngularVelocity += b2.invI * vect.Cross(c.R2, Pn)
		//Relative velocity at contact (recomputed after the normal impulse)
		{
			t1 := vect.Add(b2.Velocity, vect.CrossFV(b2.AngularVelocity, c.R2))
			t2 := vect.Sub(b1.Velocity, vect.CrossFV(b1.AngularVelocity, c.R1))
			dv = vect.Sub(t1, t2)
		}
		tangent := vect.CrossVF(c.Normal, 1.0)
		vt := vect.Dot(dv, tangent)
		dPt := c.MassTangent * (-vt)
		if Settings.AccumulateImpulses {
			//Compute friction impulse bound from the accumulated normal impulse
			maxPt := arb.Friction * c.Pn
			//Clamp Friction to the Coulomb cone
			oldTangentImpulse := c.Pt
			c.Pt = clamp(oldTangentImpulse+dPt, -maxPt, maxPt)
			dPt = c.Pt - oldTangentImpulse
		} else {
			maxPt := arb.Friction * dPn
			dPt = clamp(dPt, -maxPt, maxPt)
		}
		// Apply contact impulse
		Pt := vect.Mult(tangent, dPt)
		b1.Velocity.Sub(vect.Mult(Pt, b1.invMass))
		b1.AngularVelocity -= b1.invI * vect.Cross(c.R1, Pt)
		b2.Velocity.Add(vect.Mult(Pt, b2.invMass))
		b2.AngularVelocity += b2.invI * vect.Cross(c.R2, Pt)
	}
} | collision/arbiter.go | 0.773473 | 0.520253 | arbiter.go | starcoder |
package midi
import (
"encoding/binary"
"errors"
"fmt"
"io"
)
// timeFormat indicates how delta times in the file are expressed:
// metrical (ticks per quarter note) or SMPTE/MIDI time code.
type timeFormat int

// nextChunkType tells the Decode loop which kind of chunk to parse next.
type nextChunkType int

const (
	// MetricalTF: delta times are expressed as ticks per quarter note.
	MetricalTF timeFormat = iota + 1
	// TimeCodeTF: delta times are SMPTE/MIDI time-code subdivisions.
	TimeCodeTF
)

const (
	eventChunk nextChunkType = iota + 1
	trackChunk
)

/*Decoder Format documented there: http://www.music.mcgill.ca/~ich/classes/mumt306/midiformat.pdf
<Header Chunk> = <chunk type><length><format><ntrks><division>
Division, specifies the meaning of the delta-times.
It has two formats, one for metrical time, and one for time-code-based
time:
	+---+-----------------------------------------+
	| 0 |		ticks per quarter-note            |
	==============================================|
	| 1 | negative SMPTE format | ticks per frame|
	+---+-----------------------+-----------------+
	|15 |14                   8 |7              0 |
If bit 15 of <division> is zero, the bits 14 thru 0 represent the number
of delta time "ticks" which make up a quarter-note. For instance, if
division is 96, then a time interval of an eighth-note between two
events in the file would be 48.
If bit 15 of <division> is a one, delta times in a file correspond
to subdivisions of a second, in a way consistent with SMPTE and MIDI
Time Code. Bits 14 thru 8 contain one of the four values -24, -25, -29,
or -30, corresponding to the four standard SMPTE and MIDI Time Code
formats (-29 corresponds to 30 drop frome), and represents the
number of frames per second. These negative numbers are stored in
two's compliment form. The second byte (stored positive) is the
resolution within a frame: typical values may be 4 (MIDI Time Code
resolution), 8, 10, 80 (bit resolution), or 100. This stream allows
exact specifications of time-code-based tracks, but also allows
milisecond-based tracks by specifying 25|frames/sec and a
resolution of 40 units per frame. If the events in a file are stored
with a bit resolution of thirty-framel time code, the division word
would be E250 hex.
*/
type Decoder struct {
	r            io.Reader // underlying MIDI byte stream
	currentTicks uint64    // running tick counter while decoding events
	Debug        bool      // when set, extra diagnostics may be emitted
	Ch           chan *Track // optional channel on which decoded tracks are delivered
	/*
	   Format describes the tracks format
	   0	-	single-track
	   Format 0 file has a header chunk followed by one track chunk. It
	   is the most interchangable representation of data. It is very useful
	   for a simple single-track player in a program which needs to make
	   synthesizers make sounds, but which is primarily concerened with
	   something else such as mixers or sound effect boxes. It is very
	   desirable to be able to produce such a format, even if your program
	   is track-based, in order to work with these simple programs. On the
	   other hand, perhaps someone will write a format conversion from
	   format 1 to format 0 which might be so easy to use in some setting
	   that it would save you the trouble of putting it into your program.
	   Synchronous multiple tracks means that the tracks will all be vertically synchronous, or in other words,
	   they all start at the same time, and so can represent different parts in one song.
	   1	-	multiple tracks, synchronous
	   Asynchronous multiple tracks do not necessarily start at the same time, and can be completely asynchronous.
	   2	-	multiple tracks, asynchronous
	*/
	Format uint16
	// NumTracks represents the number of tracks in the midi file
	NumTracks uint16
	// TicksPerQuarterNote holds the metrical division (only meaningful
	// when TimeFormat == MetricalTF).
	TicksPerQuarterNote uint16
	// TimeFormat records whether delta times are metrical or time-code based.
	TimeFormat timeFormat
	// Tracks collects the decoded tracks in file order.
	Tracks []*Track
}
// CurrentTrack returns the track currently being decoded (the most
// recently appended one), or nil when the decoder is nil or has no
// tracks yet.
func (d *Decoder) CurrentTrack() *Track {
	if d == nil {
		return nil
	}
	if n := len(d.Tracks); n > 0 {
		return d.Tracks[n-1]
	}
	return nil
}
// Parse is a deprecated API, Decode() should be used instead.
//
// Deprecated: use Decode instead.
func (d *Decoder) Parse() error {
	return d.Decode()
}
// Decode decodes the MIDI file into a structure available from the decoder.
// It reads and validates the header chunk (type, length, format, number of
// tracks, and time division) and then alternates between track-chunk and
// event parsing until EOF.
func (d *Decoder) Decode() error {
	var err error
	var code [4]byte
	var division uint16
	// The header chunk type must match the expected "MThd" identifier.
	if err = binary.Read(d.r, binary.BigEndian, &code); err != nil {
		return err
	}
	if code != headerChunkID {
		return fmt.Errorf("%s - %s", ErrFmtNotSupported, code)
	}
	// The header payload is fixed at 6 bytes: format, ntrks, division.
	var headerSize uint32
	if err = binary.Read(d.r, binary.BigEndian, &headerSize); err != nil {
		return err
	}
	if headerSize != 6 {
		return fmt.Errorf("%s - expected header size to be 6, was %d", ErrFmtNotSupported, headerSize)
	}
	if err = binary.Read(d.r, binary.BigEndian, &d.Format); err != nil {
		return err
	}
	if err = binary.Read(d.r, binary.BigEndian, &d.NumTracks); err != nil {
		return err
	}
	if err = binary.Read(d.r, binary.BigEndian, &division); err != nil {
		return err
	}
	// If bit 15 of <division> is zero, the bits 14 thru 0 represent the number
	// of delta time "ticks" which make up a quarter-note. For instance, if
	// division is 96, then a time interval of an eighth-note between two
	// events in the file would be 48.
	if (division & 0x8000) == 0 {
		d.TicksPerQuarterNote = division & 0x7FFF
		d.TimeFormat = MetricalTF
	} else {
		// Bit 15 set: delta times are SMPTE/MIDI time-code based. Bits
		// 14-8 hold a negative SMPTE frame rate (-24, -25, -29, -30) and
		// the low byte holds the resolution within a frame (see the
		// Decoder doc comment for the full description).
		//
		// NOTE(review): those frame-rate and resolution values are
		// discarded here; only the format flag is recorded. Confirm
		// whether time-code decoding downstream needs them.
		d.TimeFormat = TimeCodeTF
	}
	// The header is immediately followed by the first track chunk; after
	// that, alternate between event parsing and further track chunks
	// until the reader reports EOF.
	_, nextChunk, err := d.parseTrack()
	if err != nil {
		return err
	}
	for err != io.EOF {
		switch nextChunk {
		case eventChunk:
			nextChunk, err = d.parseEvent()
		case trackChunk:
			_, nextChunk, err = d.parseTrack()
		}
		// EOF terminates the loop normally; any other error aborts.
		if err != nil && err != io.EOF {
			return err
		}
	}
	// All done
	return nil
}
// parseTrack reads the next chunk header, verifies it is a track chunk,
// and registers a fresh empty Track on the decoder. It returns the chunk
// size and signals that event parsing should follow.
func (d *Decoder) parseTrack() (uint32, nextChunkType, error) {
	id, size, err := d.IDnSize()
	switch {
	case err != nil:
		return 0, trackChunk, err
	case id != trackChunkID:
		return 0, trackChunk, fmt.Errorf("%s - Expected track chunk ID %s, got %s", ErrUnexpectedData, trackChunkID, id)
	}
	d.Tracks = append(d.Tracks, &Track{Size: size})
	return size, eventChunk, nil
}
// IDnSize returns the next chunk ID (4 bytes) and the following block
// size (big-endian uint32) read from the underlying reader.
func (d *Decoder) IDnSize() ([4]byte, uint32, error) {
	var ID [4]byte
	var blockSize uint32
	if err := binary.Read(d.r, binary.BigEndian, &ID); err != nil {
		return ID, blockSize, err
	}
	// The original compared err != err (always false), silently dropping
	// read failures of the block size; the check must be against nil.
	if err := binary.Read(d.r, binary.BigEndian, &blockSize); err != nil {
		return ID, blockSize, err
	}
	return ID, blockSize, nil
}
// VarLen returns the variable length value at the exact parser location.
// MIDI variable-length quantities carry 7 payload bits per byte; the high
// bit is set on every byte except the last.
func (d *Decoder) VarLen() (val uint32, readBytes uint32, err error) {
	buf := []byte{}
	var lastByte bool
	var n uint32
	for !lastByte {
		b, err := d.ReadByte()
		if err != nil {
			return 0, n, err
		}
		buf = append(buf, b)
		// The final byte of a variable-length quantity has its top bit clear.
		lastByte = (b>>7 == 0x0)
		n++
	}
	val, nUsed := DecodeVarint(buf)
	// NOTE(review): n already counts every byte read into buf, and nUsed
	// is the number of bytes DecodeVarint consumed from that same buffer,
	// so n + nUsed appears to double-count the bytes read. Callers may
	// rely on the current value; confirm intent before changing.
	return val, n + uint32(nUsed), nil
}
// VarLenTxt returns a variable length text string as well as the total
// number of bytes consumed (length prefix plus string bytes).
func (d *Decoder) VarLenTxt() (string, uint32, error) {
	l, n, err := d.VarLen()
	if err != nil {
		return "", n, err
	}
	buf := make([]byte, l)
	// io.ReadFull retries on short reads; a bare Read may legally return
	// fewer bytes than requested without reporting an error, which the
	// previous implementation treated as a hard failure.
	m, err := io.ReadFull(d.r, buf)
	if err == io.ErrUnexpectedEOF {
		// Preserve the historical error message for a truncated string.
		err = errors.New("couldn't read full var length text")
	}
	return string(buf), n + uint32(m), err
}
// ReadByte returns the next single byte from the decoder's reader.
func (d *Decoder) ReadByte() (byte, error) {
	var b byte
	if err := binary.Read(d.r, binary.BigEndian, &b); err != nil {
		return 0, err
	}
	return b, nil
}
// Read reads n bytes from the parser's reader and stores them into the provided dst,
// which must be a pointer to a fixed-size value.
// Values are decoded in big-endian byte order, as required by the MIDI format.
func (d *Decoder) Read(dst interface{}) error {
	return binary.Read(d.r, binary.BigEndian, dst)
}
// Uint7 reads one byte and returns its low 7 bits, discarding the MIDI
// status/continuation bit.
func (d *Decoder) Uint7() (uint8, error) {
	const payloadMask = 0x7f
	b, err := d.ReadByte()
	if err != nil {
		return 0, err
	}
	return b & payloadMask, nil
}
// Uint24 reads 3 bytes and convert them into a uint32
func (d *Decoder) Uint24() (uint32, error) {
bytes := make([]byte, 3)
if err := d.Read(bytes); err != nil {
return 0, err
}
var output uint32
output |= uint32(bytes[2]) << 0
output |= uint32(bytes[1]) << 8
output |= uint32(bytes[0]) << 16
return output, nil
} | decoder.go | 0.716615 | 0.493348 | decoder.go | starcoder |
package parser
import (
"github.com/antlr/antlr4/runtime/Go/antlr"
"github.com/google/cel-go/common"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
// parserHelper assists the CEL parser by handing out unique expression IDs
// and recording each expression's character offset in the source.
type parserHelper struct {
	source    common.Source
	nextID    int64           // next expression ID to assign; starts at 1
	positions map[int64]int32 // expression ID -> character offset in source
}

// newParserHelper creates a helper for the given source with an empty
// position table.
func newParserHelper(source common.Source) *parserHelper {
	return &parserHelper{
		source:    source,
		nextID:    1,
		positions: make(map[int64]int32),
	}
}

// getSourceInfo assembles the proto SourceInfo (source description,
// per-expression positions, and line offsets) for the parsed expression.
func (p *parserHelper) getSourceInfo() *exprpb.SourceInfo {
	return &exprpb.SourceInfo{
		Location:    p.source.Description(),
		Positions:   p.positions,
		LineOffsets: p.source.LineOffsets()}
}
// newLiteral wraps a proto constant into a freshly numbered Expr node
// positioned at the given parse context.
func (p *parserHelper) newLiteral(ctx interface{}, value *exprpb.Constant) *exprpb.Expr {
	exprNode := p.newExpr(ctx)
	exprNode.ExprKind = &exprpb.Expr_ConstExpr{ConstExpr: value}
	return exprNode
}

// newLiteralBool creates a bool constant expression.
func (p *parserHelper) newLiteralBool(ctx interface{}, value bool) *exprpb.Expr {
	return p.newLiteral(ctx,
		&exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: value}})
}

// newLiteralString creates a string constant expression.
func (p *parserHelper) newLiteralString(ctx interface{}, value string) *exprpb.Expr {
	return p.newLiteral(ctx,
		&exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: value}})
}

// newLiteralBytes creates a bytes constant expression.
func (p *parserHelper) newLiteralBytes(ctx interface{}, value []byte) *exprpb.Expr {
	return p.newLiteral(ctx,
		&exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: value}})
}

// newLiteralInt creates an int64 constant expression.
func (p *parserHelper) newLiteralInt(ctx interface{}, value int64) *exprpb.Expr {
	return p.newLiteral(ctx,
		&exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: value}})
}

// newLiteralUint creates a uint64 constant expression.
func (p *parserHelper) newLiteralUint(ctx interface{}, value uint64) *exprpb.Expr {
	return p.newLiteral(ctx, &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: value}})
}

// newLiteralDouble creates a float64 constant expression.
func (p *parserHelper) newLiteralDouble(ctx interface{}, value float64) *exprpb.Expr {
	return p.newLiteral(ctx,
		&exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: value}})
}
// newIdent creates an identifier expression for the given name.
func (p *parserHelper) newIdent(ctx interface{}, name string) *exprpb.Expr {
	exprNode := p.newExpr(ctx)
	exprNode.ExprKind = &exprpb.Expr_IdentExpr{IdentExpr: &exprpb.Expr_Ident{Name: name}}
	return exprNode
}

// newSelect creates a field selection expression (operand.field).
func (p *parserHelper) newSelect(ctx interface{}, operand *exprpb.Expr, field string) *exprpb.Expr {
	exprNode := p.newExpr(ctx)
	exprNode.ExprKind = &exprpb.Expr_SelectExpr{
		SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field}}
	return exprNode
}

// newPresenceTest creates a field presence test (has(operand.field)),
// marked TestOnly so it checks existence rather than selecting the value.
func (p *parserHelper) newPresenceTest(ctx interface{}, operand *exprpb.Expr, field string) *exprpb.Expr {
	exprNode := p.newExpr(ctx)
	exprNode.ExprKind = &exprpb.Expr_SelectExpr{
		SelectExpr: &exprpb.Expr_Select{Operand: operand, Field: field, TestOnly: true}}
	return exprNode
}

// newGlobalCall creates a call expression for a receiverless function.
func (p *parserHelper) newGlobalCall(ctx interface{}, function string, args ...*exprpb.Expr) *exprpb.Expr {
	exprNode := p.newExpr(ctx)
	exprNode.ExprKind = &exprpb.Expr_CallExpr{
		CallExpr: &exprpb.Expr_Call{Function: function, Args: args}}
	return exprNode
}

// newMemberCall creates a call expression with a receiver target
// (target.function(args...)).
func (p *parserHelper) newMemberCall(ctx interface{}, function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
	exprNode := p.newExpr(ctx)
	exprNode.ExprKind = &exprpb.Expr_CallExpr{
		CallExpr: &exprpb.Expr_Call{Function: function, Target: target, Args: args}}
	return exprNode
}
// newList creates a list construction expression from its elements.
func (p *parserHelper) newList(ctx interface{}, elements ...*exprpb.Expr) *exprpb.Expr {
	exprNode := p.newExpr(ctx)
	exprNode.ExprKind = &exprpb.Expr_ListExpr{
		ListExpr: &exprpb.Expr_CreateList{Elements: elements}}
	return exprNode
}

// newMap creates a map construction expression from pre-built entries.
func (p *parserHelper) newMap(ctx interface{}, entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
	exprNode := p.newExpr(ctx)
	exprNode.ExprKind = &exprpb.Expr_StructExpr{
		StructExpr: &exprpb.Expr_CreateStruct{Entries: entries}}
	return exprNode
}

// newMapEntry creates a single key/value entry for use in newMap; the
// entry consumes its own expression ID.
func (p *parserHelper) newMapEntry(ctx interface{}, key *exprpb.Expr, value *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry {
	return &exprpb.Expr_CreateStruct_Entry{
		Id:      p.id(ctx),
		KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{MapKey: key},
		Value:   value}
}

// newObject creates a typed message construction expression from field
// entries.
func (p *parserHelper) newObject(ctx interface{},
	typeName string,
	entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
	exprNode := p.newExpr(ctx)
	exprNode.ExprKind = &exprpb.Expr_StructExpr{
		StructExpr: &exprpb.Expr_CreateStruct{
			MessageName: typeName,
			Entries:     entries}}
	return exprNode
}

// newObjectField creates a single field initializer entry for use in
// newObject; the entry consumes its own expression ID.
func (p *parserHelper) newObjectField(ctx interface{}, field string, value *exprpb.Expr) *exprpb.Expr_CreateStruct_Entry {
	return &exprpb.Expr_CreateStruct_Entry{
		Id:      p.id(ctx),
		KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{FieldKey: field},
		Value:   value}
}
// newComprehension creates a comprehension expression (the expanded form
// of macros such as all/exists/map/filter): iterVar ranges over iterRange,
// accuVar starts at accuInit and is updated by step while condition holds,
// and result is evaluated once iteration ends.
func (p *parserHelper) newComprehension(ctx interface{}, iterVar string,
	iterRange *exprpb.Expr,
	accuVar string,
	accuInit *exprpb.Expr,
	condition *exprpb.Expr,
	step *exprpb.Expr,
	result *exprpb.Expr) *exprpb.Expr {
	exprNode := p.newExpr(ctx)
	exprNode.ExprKind = &exprpb.Expr_ComprehensionExpr{
		ComprehensionExpr: &exprpb.Expr_Comprehension{
			AccuVar:       accuVar,
			AccuInit:      accuInit,
			IterVar:       iterVar,
			IterRange:     iterRange,
			LoopCondition: condition,
			LoopStep:      step,
			Result:        result}}
	return exprNode
}

// newExpr allocates a bare Expr carrying only a freshly assigned ID for
// the given parse context.
func (p *parserHelper) newExpr(ctx interface{}) *exprpb.Expr {
	return &exprpb.Expr{Id: p.id(ctx)}
}
// id records the start location of the given ANTLR context (rule context
// or token) and returns a fresh, unique expression ID. A nil or
// unrecognized context yields -1 without consuming an ID.
func (p *parserHelper) id(ctx interface{}) int64 {
	var token antlr.Token
	// Bind the switched value directly instead of re-asserting the type in
	// each case (staticcheck S1034).
	switch c := ctx.(type) {
	case antlr.ParserRuleContext:
		token = c.GetStart()
	case antlr.Token:
		token = c
	default:
		// This should only happen if the ctx is nil
		return -1
	}
	location := common.NewLocation(token.GetLine(), token.GetColumn())
	id := p.nextID
	// Offset lookup errors are ignored; the zero offset is recorded instead.
	p.positions[id], _ = p.source.LocationOffset(location)
	p.nextID++
	return id
}
func (p *parserHelper) getLocation(id int64) common.Location {
characterOffset := p.positions[id]
location, _ := p.source.OffsetLocation(characterOffset)
return location
} | parser/helper.go | 0.572962 | 0.462352 | helper.go | starcoder |
package types
import (
"bytes"
"fmt"
"sync"
)
// Node is any AST node that can report the identifier usages it contains.
type Node interface {
	Uses() []Usage
}

// Type is a C type that knows its byte size, kind classification, and
// corresponding Go type.
type Type interface {
	Sizeof() int
	Kind() Kind
	GoType() GoType
}

// UnkT returns an opaque placeholder type of the given byte size for types
// whose structure is not known.
func UnkT(size int) Type {
	if size <= 0 {
		panic("size must be specified; set to ptr size, for example")
	}
	return &unkType{size: size}
}

// unkType is an opaque type of a known byte size.
// NOTE(review): isStruct is never read in this chunk, and GoType() is not
// defined here — presumably both are handled elsewhere in the package.
type unkType struct {
	isStruct bool
	size     int
}

// Kind classifies unknown types as Unknown.
func (t *unkType) Kind() Kind {
	return Unknown
}

// Sizeof returns the declared byte size.
func (t *unkType) Sizeof() int {
	return t.size
}
// Cached singletons for the common fixed-width integer types.
var (
	uint8Type  = IntType{size: 1, signed: false}
	uint16Type = IntType{size: 2, signed: false}
	uint32Type = IntType{size: 4, signed: false}
	uint64Type = IntType{size: 8, signed: false}
	int8Type   = IntType{size: 1, signed: true}
	int16Type  = IntType{size: 2, signed: true}
	int32Type  = IntType{size: 4, signed: true}
	int64Type  = IntType{size: 8, signed: true}
)

// UntypedIntT returns an untyped integer type at least minSize bytes wide,
// for values whose concrete integer type has not been pinned yet.
func UntypedIntT(minSize int) IntType {
	if minSize <= 0 {
		panic("size must be specified")
	}
	return IntType{size: minSize, untyped: true}
}

// AsUntypedIntT returns a copy of t flagged as untyped.
func AsUntypedIntT(t IntType) IntType {
	t.untyped = true
	return t
}

// AsTypedIntT returns a copy of t with the untyped flag cleared.
func AsTypedIntT(t IntType) IntType {
	t.untyped = false
	return t
}

// ToPtrType unwraps exp (via Unwrap, presumably stripping named/alias
// layers) and returns it as a PtrType, or nil when exp is not a pointer.
func ToPtrType(exp Type) PtrType {
	if t, ok := Unwrap(exp).(PtrType); ok {
		return t
	}
	return nil
}

// IntT returns a signed integer type of the given byte size, reusing the
// cached values for the standard 1/2/4/8 widths.
func IntT(size int) IntType {
	if size <= 0 {
		panic("size must be specified")
	}
	switch size {
	case 1:
		return int8Type
	case 2:
		return int16Type
	case 4:
		return int32Type
	case 8:
		return int64Type
	}
	return IntType{size: size, signed: true}
}

// UintT is the unsigned counterpart of IntT.
func UintT(size int) IntType {
	if size <= 0 {
		panic("size must be specified")
	}
	switch size {
	case 1:
		return uint8Type
	case 2:
		return uint16Type
	case 4:
		return uint32Type
	case 8:
		return uint64Type
	}
	return IntType{size: size, signed: false}
}
// Cached singletons for the standard float widths.
var (
	float32Type = FloatType{size: 4}
	float64Type = FloatType{size: 8}
)

// FloatT returns a floating point type of the given byte size, reusing the
// cached 4- and 8-byte values.
func FloatT(size int) Type {
	switch size {
	case 4:
		return float32Type
	case 8:
		return float64Type
	}
	return FloatType{size: size}
}

// NilT returns the type of a nil pointer constant of the given size.
// NOTE(review): the zero field of ptrType is left false here, so the
// returned type reports Kind() == UnsafePtr rather than Nil — confirm
// whether zero: true was intended, or whether zero is set elsewhere.
func NilT(size int) PtrType {
	return &ptrType{size: size}
}
// PtrType is a pointer type whose element type can be inspected and
// replaced after construction.
type PtrType interface {
	Type
	Elem() Type
	SetElem(e Type)
	ElemKind() Kind
	ElemSizeof() int
}

// PtrT returns a pointer of the given byte size to elem; a nil elem
// denotes an unsafe (untyped) pointer.
func PtrT(size int, elem Type) PtrType {
	if size <= 0 {
		panic("size must be set")
	} else if size < 4 {
		panic("unlikely")
	}
	return &ptrType{elem: elem, size: size}
}

var _ PtrType = &ptrType{}

type ptrType struct {
	size int
	zero bool // marks the nil pointer constant (see Kind)
	elem Type // nil means unsafe/untyped pointer
}

// Sizeof returns the pointer's own byte size (not the element's).
func (t *ptrType) Sizeof() int {
	return t.size
}

// Kind reports Nil for the nil constant, UnsafePtr for an element-less
// pointer, and Ptr otherwise.
func (t *ptrType) Kind() Kind {
	if t.zero {
		return Nil
	} else if t.elem == nil {
		return UnsafePtr
	}
	return Ptr
}

// Elem returns the pointed-to type (nil for unsafe pointers).
func (t *ptrType) Elem() Type {
	return t.elem
}

// SetElem replaces the pointed-to type.
func (t *ptrType) SetElem(e Type) {
	t.elem = e
}

// ElemKind follows chains of pointers to the first non-pointer element and
// reports its kind; a chain ending in nil is an unsafe pointer.
func (t *ptrType) ElemKind() Kind {
	e := t.elem
	for e != nil {
		p, ok := e.(*ptrType)
		if !ok {
			break
		}
		e = p.elem
	}
	if e == nil {
		return UnsafePtr
	}
	return e.Kind()
}

// ElemSizeof returns the element's byte size; an unsafe pointer's element
// is treated as 1 byte.
func (t *ptrType) ElemSizeof() int {
	if t.elem == nil {
		return 1
	}
	return t.elem.Sizeof()
}
var (
	_ PtrType = namedPtr{}
	_ Named   = namedPtr{}
)

// namedPtr is a named pointer type; embedding *ptrType keeps the name
// satisfying PtrType so callers can still inspect and mutate the element
// type through the named type.
type namedPtr struct {
	name *Ident
	*ptrType
}

// Name returns the type's identifier.
func (t namedPtr) Name() *Ident {
	return t.name
}

// Underlying returns the wrapped pointer type.
func (t namedPtr) Underlying() Type {
	return t.ptrType
}

// SetUnderlying always panics: a named pointer's underlying type is fixed
// at construction.
// NOTE(review): SetUnderlying and Incomplete are not part of the Named
// interface visible in this chunk; presumably they satisfy an extended
// interface defined elsewhere in the package.
func (t namedPtr) SetUnderlying(typ Type) Named {
	panic("trying to change the named type")
}

// Incomplete reports whether the type is a forward declaration; named
// pointers are always complete.
func (t namedPtr) Incomplete() bool {
	return false
}
// Field is a named member of a struct/union or a function argument.
type Field struct {
	Name *Ident
}

// Type returns the C type associated with the field's identifier.
func (f *Field) Type() Type {
	return f.Name.CType(nil)
}

// fieldsOf wraps each type into an unnamed Field, preserving order.
// Extracted to remove the duplicated loop in FuncTT and VarFuncTT.
func fieldsOf(args []Type) []*Field {
	fields := make([]*Field, 0, len(args))
	for _, t := range args {
		fields = append(fields, &Field{Name: NewUnnamed(t)})
	}
	return fields
}

// FuncT creates a non-variadic function type with the given pointer size,
// return type, and named arguments.
func FuncT(ptrSize int, ret Type, args ...*Field) *FuncType {
	return funcT(ptrSize, ret, args, false)
}

// FuncTT is like FuncT but accepts bare argument types instead of fields.
func FuncTT(ptrSize int, ret Type, args ...Type) *FuncType {
	return FuncT(ptrSize, ret, fieldsOf(args)...)
}

// VarFuncT creates a variadic function type with the given named arguments.
func VarFuncT(ptrSize int, ret Type, args ...*Field) *FuncType {
	return funcT(ptrSize, ret, args, true)
}

// VarFuncTT is like VarFuncT but accepts bare argument types.
func VarFuncTT(ptrSize int, ret Type, args ...Type) *FuncType {
	return VarFuncT(ptrSize, ret, fieldsOf(args)...)
}

// checkFields panics if any field lacks a name identifier.
func checkFields(fields []*Field) {
	for _, f := range fields {
		if f.Name == nil {
			panic("nil argument name")
		}
	}
}

// funcT builds the FuncType shared by the public constructors, validating
// the pointer size and defensively copying the argument slice.
func funcT(ptrSize int, ret Type, args []*Field, vari bool) *FuncType {
	if ptrSize <= 0 {
		panic("size must be set")
	} else if ptrSize < 4 {
		panic("unlikely")
	}
	checkFields(args)
	return &FuncType{
		size: ptrSize,
		args: append([]*Field{}, args...),
		ret:  ret,
		vari: vari,
	}
}
// FuncType describes a C function signature.
type FuncType struct {
	ptr  bool // NOTE(review): not read in this chunk; presumably used elsewhere
	size int  // pointer size used when the function is taken by value
	args []*Field
	ret  Type // nil for void
	vari bool // true for variadic functions
}

// Kind classifies function types as Func.
func (t *FuncType) Kind() Kind {
	return Func
}

// Sizeof returns the size of a function value (the pointer size).
func (t *FuncType) Sizeof() int {
	return t.size
}

// Return returns the return type (nil for void).
func (t *FuncType) Return() Type {
	return t.ret
}

// Variadic reports whether the function accepts a variable argument list.
func (t *FuncType) Variadic() bool {
	return t.vari
}

// ArgN returns the number of declared (non-variadic) arguments.
func (t *FuncType) ArgN() int {
	return len(t.args)
}

// Args returns a defensive copy of the argument list.
func (t *FuncType) Args() []*Field {
	return append([]*Field{}, t.args...)
}
// IntType is a fixed-size integer type, optionally untyped (literal-like).
type IntType struct {
	size    int
	signed  bool
	untyped bool
}

// Kind combines the Int/UntypedInt classification with a Signed/Unsigned
// flag.
func (t IntType) Kind() Kind {
	s := Unsigned
	if t.signed {
		s = Signed
	}
	if t.untyped {
		return UntypedInt | s
	}
	return Int | s
}

// Sizeof returns the integer's byte size.
func (t IntType) Sizeof() int {
	return t.size
}

// Signed reports whether the integer type is signed.
func (t IntType) Signed() bool {
	return t.signed
}

// FloatType is a fixed-size floating point type.
type FloatType struct {
	size int
}

// Kind classifies floating point types as Float.
func (t FloatType) Kind() Kind {
	return Float
}

// Sizeof returns the float's byte size.
func (t FloatType) Sizeof() int {
	return t.size
}

// BoolT returns the boolean type.
func BoolT() Type {
	return BoolType{}
}

// BoolType is the one-byte boolean type.
type BoolType struct{}

// Kind classifies boolean types as Bool.
func (t BoolType) Kind() Kind {
	return Bool
}

// Sizeof returns 1: booleans occupy a single byte.
func (t BoolType) Sizeof() int {
	return 1
}
// ArrayT returns a fixed-size array type of the given element type.
func ArrayT(elem Type, size int) Type {
	if size < 0 {
		panic("negative size")
	}
	return ArrayType{
		elem:  elem,
		size:  size,
		slice: false,
	}
}

// SliceT returns a slice (unsized array) type of the given element type.
func SliceT(elem Type) Type {
	return ArrayType{
		elem:  elem,
		size:  0,
		slice: true,
	}
}

// ArrayType represents both fixed-size arrays and slices (slice == true).
type ArrayType struct {
	elem  Type
	size  int
	slice bool
}

// Kind classifies arrays and slices as Array.
func (t ArrayType) Kind() Kind {
	return Array
}

// Elem returns the element type.
func (t ArrayType) Elem() Type {
	return t.elem
}

// Len returns the declared length; slices always report 0.
func (t ArrayType) Len() int {
	if t.slice {
		return 0
	}
	return t.size
}

// IsSlice reports whether this is a slice rather than a fixed-size array.
func (t ArrayType) IsSlice() bool {
	return t.slice
}

// Sizeof returns length times element size.
// NOTE(review): a zero-length array or slice is given the footprint of one
// element — presumably so the type never has zero size; confirm callers
// expect this.
func (t ArrayType) Sizeof() int {
	sz := t.size
	if sz == 0 {
		sz = 1
	}
	return sz * t.elem.Sizeof()
}
// NamedT wraps typ under a C name with no explicit Go name.
func NamedT(name string, typ Type) Named {
	return NamedTGo(name, "", typ)
}

// NamedTGo wraps typ under a C name (and optional Go name). Pointer types
// get the namedPtr wrapper so the result still satisfies PtrType.
func NamedTGo(cname, goname string, typ Type) Named {
	if cname == "" {
		panic("name is not set")
	}
	if typ == nil {
		panic("type is not set")
	}
	switch typ := typ.(type) {
	case *ptrType:
		// Preserve PtrType behavior through the name.
		named := &namedPtr{ptrType: typ}
		named.name = NewIdentGo(cname, goname, named)
		return named
	}
	named := &namedType{typ: typ}
	named.name = NewIdentGo(cname, goname, named)
	return named
}

// Named is a type that carries an identifier in addition to an underlying
// type.
type Named interface {
	Type
	Name() *Ident
	Underlying() Type
}

// namedType is the generic (non-pointer) named type wrapper; all Type
// behavior is delegated to the underlying type.
type namedType struct {
	name *Ident
	typ  Type
}

// String returns the type's name.
func (t *namedType) String() string {
	return t.name.String()
}

// Kind delegates to the underlying type.
func (t *namedType) Kind() Kind {
	return t.typ.Kind()
}

// Name returns the type's identifier.
func (t *namedType) Name() *Ident {
	return t.name
}

// Underlying returns the wrapped type.
func (t *namedType) Underlying() Type {
	return t.typ
}

// Sizeof delegates to the underlying type.
func (t *namedType) Sizeof() int {
	return t.typ.Sizeof()
}
var (
	structMu    sync.RWMutex
	structTypes = make(map[string]*StructType)
	unionTypes  = make(map[string]*StructType)
)

// internStruct returns the canonical *StructType equivalent to s from the
// given cache, storing s when no equivalent type was registered yet.
// Interning makes structurally identical struct/union values
// pointer-comparable. Extracted to remove the duplicated double-checked
// locking in StructT and UnionT.
func internStruct(cache map[string]*StructType, s *StructType) *StructType {
	h := s.hash()
	structMu.RLock()
	t, ok := cache[h]
	structMu.RUnlock()
	if ok {
		return t
	}
	structMu.Lock()
	defer structMu.Unlock()
	// Re-check under the write lock: another goroutine may have interned
	// an equivalent type between the RUnlock and Lock above.
	if t, ok := cache[h]; ok {
		return t
	}
	cache[h] = s
	return s
}

// StructT returns the interned struct type with the given fields.
func StructT(fields []*Field) *StructType {
	checkFields(fields)
	return internStruct(structTypes, &StructType{
		fields: append([]*Field{}, fields...),
		union:  false,
	})
}

// UnionT returns the interned union type with the given fields.
func UnionT(fields []*Field) *StructType {
	checkFields(fields)
	return internStruct(unionTypes, &StructType{
		fields: append([]*Field{}, fields...),
		union:  true,
	})
}
// StructType is an interned struct or union type.
type StructType struct {
	Where  string // optional source location tag
	fields []*Field
	union  bool // true for unions, false for structs
}

// hash builds the interning key from each field's name and the identity
// (pointer) of its type.
// NOTE(review): because %p is used, structurally equal but distinct type
// values hash differently — interning is by type identity, not structure.
func (t *StructType) hash() string {
	buf := bytes.NewBuffer(nil)
	for _, f := range t.fields {
		buf.WriteString(f.Name.Name)
		buf.WriteByte(0)
		fmt.Fprintf(buf, "%p", f.Type())
		buf.WriteByte(0)
	}
	return buf.String()
}

// Fields returns a defensive copy of the member list.
func (t *StructType) Fields() []*Field {
	return append([]*Field{}, t.fields...)
}

// Kind classifies both structs and unions as Struct.
func (t *StructType) Kind() Kind {
	return Struct
}

// Sizeof returns the byte size: the largest member for unions, the plain
// sum of members for structs, and 1 for an empty struct.
// NOTE(review): no alignment padding is applied — confirm callers expect
// a packed layout.
func (t *StructType) Sizeof() int {
	if t.union {
		max := 0
		for _, f := range t.fields {
			if sz := f.Type().Sizeof(); sz > max {
				max = sz
			}
		}
		return max
	}
	n := 0
	for _, f := range t.fields {
		n += f.Type().Sizeof()
	}
	if n == 0 {
		n = 1
	}
	return n
}
package influxdb
import (
"fmt"
"github.com/influxdata/flux/ast"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/influxdb/v2/models"
"github.com/influxdata/influxdb/v2/storage/reads/datatypes"
"github.com/pkg/errors"
)
// ToStoragePredicate will convert a FunctionExpression into a predicate
// that can be sent down to the storage layer. objectName identifies the
// row parameter the expression refers to (e.g. "r").
func ToStoragePredicate(n semantic.Expression, objectName string) (*datatypes.Predicate, error) {
	node, err := toStoragePredicateHelper(n, objectName)
	if err != nil {
		return nil, err
	}
	return &datatypes.Predicate{Root: node}, nil
}
// mergePredicates combines the given predicates into a single predicate
// tree under the provided logical operator. The result is right-nested,
// e.g. a AND (b AND c). At least one predicate is required.
func mergePredicates(op ast.LogicalOperatorKind, predicates ...*datatypes.Predicate) (*datatypes.Predicate, error) {
	if len(predicates) == 0 {
		return nil, errors.New("at least one predicate is needed")
	}
	var value datatypes.Node_Logical
	switch op {
	case ast.AndOperator:
		value = datatypes.Node_LogicalAnd
	case ast.OrOperator:
		value = datatypes.Node_LogicalOr
	default:
		return nil, fmt.Errorf("unknown logical operator %v", op)
	}
	// Nest the predicates backwards. This way we get a tree like this:
	// a AND (b AND c)
	root := predicates[len(predicates)-1].Root
	for i := len(predicates) - 2; i >= 0; i-- {
		root = &datatypes.Node{
			NodeType: datatypes.Node_TypeLogicalExpression,
			Value:    &datatypes.Node_Logical_{Logical: value},
			Children: []*datatypes.Node{
				predicates[i].Root,
				root,
			},
		}
	}
	return &datatypes.Predicate{
		Root: root,
	}, nil
}
// toStoragePredicateHelper recursively lowers a semantic expression into a
// storage predicate node. objectName is the identifier of the row object;
// member accesses on it become tag/field references, literals become
// literal nodes, and logical/binary expressions become interior nodes.
func toStoragePredicateHelper(n semantic.Expression, objectName string) (*datatypes.Node, error) {
	switch n := n.(type) {
	case *semantic.LogicalExpression:
		left, err := toStoragePredicateHelper(n.Left, objectName)
		if err != nil {
			return nil, errors.Wrap(err, "left hand side")
		}
		right, err := toStoragePredicateHelper(n.Right, objectName)
		if err != nil {
			return nil, errors.Wrap(err, "right hand side")
		}
		// Map the operator once and build a single node, mirroring
		// mergePredicates, instead of duplicating the node construction
		// per operator.
		var value datatypes.Node_Logical
		switch n.Operator {
		case ast.AndOperator:
			value = datatypes.Node_LogicalAnd
		case ast.OrOperator:
			value = datatypes.Node_LogicalOr
		default:
			return nil, fmt.Errorf("unknown logical operator %v", n.Operator)
		}
		return &datatypes.Node{
			NodeType: datatypes.Node_TypeLogicalExpression,
			Value:    &datatypes.Node_Logical_{Logical: value},
			Children: []*datatypes.Node{left, right},
		}, nil
	case *semantic.BinaryExpression:
		left, err := toStoragePredicateHelper(n.Left, objectName)
		if err != nil {
			return nil, errors.Wrap(err, "left hand side")
		}
		right, err := toStoragePredicateHelper(n.Right, objectName)
		if err != nil {
			return nil, errors.Wrap(err, "right hand side")
		}
		children := []*datatypes.Node{left, right}
		op, err := toComparisonOperator(n.Operator)
		if err != nil {
			return nil, err
		}
		return &datatypes.Node{
			NodeType: datatypes.Node_TypeComparisonExpression,
			Value:    &datatypes.Node_Comparison_{Comparison: op},
			Children: children,
		}, nil
	case *semantic.StringLiteral:
		return &datatypes.Node{
			NodeType: datatypes.Node_TypeLiteral,
			Value: &datatypes.Node_StringValue{
				StringValue: n.Value,
			},
		}, nil
	case *semantic.IntegerLiteral:
		return &datatypes.Node{
			NodeType: datatypes.Node_TypeLiteral,
			Value: &datatypes.Node_IntegerValue{
				IntegerValue: n.Value,
			},
		}, nil
	case *semantic.BooleanLiteral:
		return &datatypes.Node{
			NodeType: datatypes.Node_TypeLiteral,
			Value: &datatypes.Node_BooleanValue{
				BooleanValue: n.Value,
			},
		}, nil
	case *semantic.FloatLiteral:
		return &datatypes.Node{
			NodeType: datatypes.Node_TypeLiteral,
			Value: &datatypes.Node_FloatValue{
				FloatValue: n.Value,
			},
		}, nil
	case *semantic.RegexpLiteral:
		return &datatypes.Node{
			NodeType: datatypes.Node_TypeLiteral,
			Value: &datatypes.Node_RegexValue{
				RegexValue: n.Value.String(),
			},
		}, nil
	case *semantic.MemberExpression:
		// Sanity check that the object is the objectName identifier
		if ident, ok := n.Object.(*semantic.IdentifierExpression); !ok || ident.Name.Name() != objectName {
			return nil, fmt.Errorf("unknown object %q", n.Object)
		}
		// The special _field/_measurement/_value properties map onto their
		// internal tag/field representations; anything else is a tag key.
		switch n.Property.Name() {
		case datatypes.FieldKey:
			return &datatypes.Node{
				NodeType: datatypes.Node_TypeTagRef,
				Value: &datatypes.Node_TagRefValue{
					TagRefValue: models.FieldKeyTagKey,
				},
			}, nil
		case datatypes.MeasurementKey:
			return &datatypes.Node{
				NodeType: datatypes.Node_TypeTagRef,
				Value: &datatypes.Node_TagRefValue{
					TagRefValue: models.MeasurementTagKey,
				},
			}, nil
		case datatypes.ValueKey:
			return &datatypes.Node{
				NodeType: datatypes.Node_TypeFieldRef,
				Value: &datatypes.Node_FieldRefValue{
					FieldRefValue: datatypes.ValueKey,
				},
			}, nil
		}
		return &datatypes.Node{
			NodeType: datatypes.Node_TypeTagRef,
			Value: &datatypes.Node_TagRefValue{
				TagRefValue: n.Property.Name(),
			},
		}, nil
	case *semantic.DurationLiteral:
		return nil, errors.New("duration literals not supported in storage predicates")
	case *semantic.DateTimeLiteral:
		return nil, errors.New("time literals not supported in storage predicates")
	default:
		return nil, fmt.Errorf("unsupported semantic expression type %T", n)
	}
}
// toComparisonOperator maps a flux AST comparison operator onto the
// storage-layer comparison enum; unsupported operators produce an error.
func toComparisonOperator(o ast.OperatorKind) (datatypes.Node_Comparison, error) {
	switch o {
	case ast.EqualOperator:
		return datatypes.Node_ComparisonEqual, nil
	case ast.NotEqualOperator:
		return datatypes.Node_ComparisonNotEqual, nil
	case ast.RegexpMatchOperator:
		return datatypes.Node_ComparisonRegex, nil
	case ast.NotRegexpMatchOperator:
		return datatypes.Node_ComparisonNotRegex, nil
	case ast.StartsWithOperator:
		return datatypes.Node_ComparisonStartsWith, nil
	case ast.LessThanOperator:
		return datatypes.Node_ComparisonLess, nil
	case ast.LessThanEqualOperator:
		return datatypes.Node_ComparisonLessEqual, nil
	case ast.GreaterThanOperator:
		return datatypes.Node_ComparisonGreater, nil
	case ast.GreaterThanEqualOperator:
		return datatypes.Node_ComparisonGreaterEqual, nil
	default:
		return 0, fmt.Errorf("unknown operator %v", o)
	}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.