code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package csvnewcol

import (
	"errors"
	"strings"
)

// csvReader is the minimal CSV-reading interface this package consumes.
type csvReader interface {
	Read() ([]string, error)
}

// Expression allows generating a new column given a row.
type Expression interface {
	evaluate([]string) string
}

type stringExpr struct {
	str string
}

// evaluate on stringExpr just returns its contained string.
func (se *stringExpr) evaluate(row []string) string {
	return se.str
}

type lookupExpr struct {
	index int
}

// evaluate on lookupExpr returns the corresponding cell in a row.
// Rows shorter than the header yield the literal placeholder "missing".
func (le *lookupExpr) evaluate(row []string) string {
	if le.index >= len(row) {
		return "missing"
	}
	return row[le.index]
}

// CreateExpressions creates a list of expressions based on a string and
// the header row (column names). Text between `{` and `}` is resolved as
// a column lookup; everything else is kept as literal text.
func CreateExpressions(exprString string, cols []string) ([]Expression, error) {
	var exprs []Expression
	openCurlyError := errors.New("`{` found before first one closed")
	closeCurlyError := errors.New("`}` found before opening curly brace")
	currentExpr := ""
	currentIsString := true
	for _, j := range exprString {
		// A `{` either opens a lookup expression or is illegal when one
		// is already open.
		if j == '{' {
			if !currentIsString {
				return exprs, openCurlyError
			}
			exprs = append(exprs, &stringExpr{currentExpr})
			currentExpr = ""
			currentIsString = false
			continue
		}
		if j == '}' {
			if currentIsString {
				return exprs, closeCurlyError
			}
			// Resolve the column name to its index in the header row.
			index := -1
			for ii, jj := range cols {
				if jj == currentExpr {
					index = ii
					break
				}
			}
			if index == -1 {
				return exprs, errors.New("couldn't find column " + currentExpr)
			}
			exprs = append(exprs, &lookupExpr{index})
			currentExpr = ""
			currentIsString = true
			continue
		}
		currentExpr += string(j)
	}
	if !currentIsString {
		return exprs, errors.New("lookup expression opened but never closed")
	}
	// The trailing literal (possibly empty) is always appended.
	exprs = append(exprs, &stringExpr{currentExpr})
	return exprs, nil
}

// CreateNewColumnExprs creates a new cell (slice of expressions) for
// each semicolon-separated item.
func CreateNewColumnExprs(newColString string, cols []string) ([][]Expression, error) {
	var exprs [][]Expression
	if newColString == "" {
		return exprs, nil
	}
	newcells := strings.Split(newColString, ";")
	for _, j := range newcells {
		set, err := CreateExpressions(j, cols)
		if err != nil {
			return exprs, err
		}
		exprs = append(exprs, set)
	}
	return exprs, nil
}

// GenColumns will return newly generated columns from a 2d expression
// slice and a row. Each inner expression slice produces one cell by
// concatenating its evaluated parts.
func GenColumns(newcols [][]Expression, row []string) []string {
	var newcells []string
	for _, col := range newcols {
		var cell string
		for _, expr := range col {
			cell += expr.evaluate(row)
		}
		newcells = append(newcells, cell)
	}
	return newcells
}

// Reader reads a CSV row that may have new columns.
type Reader struct {
	reader csvReader
	exprs  [][]Expression
}

// NewReader creates a reader from a csvReader interface and expression list.
func NewReader(r csvReader, exprs [][]Expression) *Reader {
	return &Reader{r, exprs}
}

// Read reads a line with the added columns appended at the end.
func (r *Reader) Read() ([]string, error) {
	row, err := r.reader.Read()
	if err != nil {
		return []string{}, err
	}
	newCols := GenColumns(r.exprs, row)
	return append(row, newCols...), nil
}
pkg/csvnewcol/csvnewcol.go
0.764364
0.417509
csvnewcol.go
starcoder
package vmath

import (
	"errors"
)

// MatStack4f is a stack of 4x4 matrices, typically used for hierarchical
// transformations. A freshly created stack holds a single identity matrix,
// and the bottom element can never be popped.
type MatStack4f struct {
	stack []Mat4f
}

// NewMatStack4f creates a new matrix stack containing only the identity matrix.
func NewMatStack4f() *MatStack4f {
	return &MatStack4f{
		stack: []Mat4f{Ident4f()},
	}
}

// Size returns the current size of the matrix stack.
func (ms MatStack4f) Size() int {
	return len(ms.stack)
}

// Push stores the current top on the stack by duplicating it.
func (ms *MatStack4f) Push() {
	ms.stack = append(ms.stack, ms.Top())
}

// Pop removes the current top element from the stack.
// Returns an error if the stack contains only one element.
func (ms *MatStack4f) Pop() error {
	if len(ms.stack) > 1 {
		ms.stack = ms.stack[:len(ms.stack)-1]
		return nil
	}
	return errors.New("cannot pop last element from matrix stack")
}

// Top returns the current top element without modifying the stack.
func (ms MatStack4f) Top() Mat4f {
	return ms.stack[len(ms.stack)-1]
}

// Set overwrites the top element with a new matrix.
func (ms *MatStack4f) Set(mat Mat4f) {
	ms.stack[len(ms.stack)-1] = mat
}

// PushSet is equivalent to Push(), Set().
func (ms *MatStack4f) PushSet(mat Mat4f) {
	ms.Push()
	ms.Set(mat)
}

// SetIdent overwrites the top element with the identity matrix.
func (ms *MatStack4f) SetIdent() {
	ms.Set(Ident4f())
}

// PushIdent is equivalent to Push(), SetIdent().
func (ms *MatStack4f) PushIdent() {
	ms.Push()
	ms.SetIdent()
}

// MulRight multiplies the top element with the given matrix.
func (ms *MatStack4f) MulRight(mat Mat4f) {
	ms.Set(ms.Top().Mul(mat))
}

// PushMulRight is equivalent to Push(), MulRight().
func (ms *MatStack4f) PushMulRight(mat Mat4f) {
	ms.Push()
	ms.MulRight(mat)
}

// MulLeft multiplies the given matrix with the top element and overwrites
// the top element with the result.
func (ms *MatStack4f) MulLeft(mat Mat4f) {
	ms.Set(mat.Mul(ms.Top()))
}

// PushMulLeft is equivalent to Push(), MulLeft().
func (ms *MatStack4f) PushMulLeft(mat Mat4f) {
	ms.Push()
	ms.MulLeft(mat)
}
matstack4f.go
0.863046
0.538498
matstack4f.go
starcoder
package leetcode_go

/*
LeetCode 4. Median of Two Sorted Arrays.

Given two sorted arrays nums1 and nums2 of size m and n respectively,
return the median of the two sorted arrays in O(log(m+n)) time.

Examples:
  nums1 = [1,3], nums2 = [2]   -> 2.0
  nums1 = [1,2], nums2 = [3,4] -> 2.5
  nums1 = [],    nums2 = [1]   -> 1.0
*/

import "math"

// findMedianSortedArrays binary-searches a partition of the shorter slice
// such that everything left of the combined partition is <= everything right
// of it; the median then falls out of the four boundary values.
func findMedianSortedArrays(nums1 []int, nums2 []int) float64 {
	// Always partition the shorter slice so the search space is minimal.
	if len(nums1) > len(nums2) {
		return findMedianSortedArrays(nums2, nums1)
	}
	m, n := len(nums1), len(nums2)
	if m == 0 {
		// Only one slice left: read the median off directly.
		return float64(nums2[(n-1)/2]+nums2[n/2]) / 2
	}
	// Cuts are taken over a doubled index space, which handles odd and
	// even total lengths uniformly.
	lo, hi := 0, m*2
	for lo <= hi {
		cut1 := (lo + hi) / 2
		cut2 := m + n - cut1
		l1, r1 := cutBounds(nums1, cut1)
		l2, r2 := cutBounds(nums2, cut2)
		switch {
		case l1 > r2:
			hi = cut1 - 1
		case l2 > r1:
			lo = cut1 + 1
		default:
			return float64(maxInt(l1, l2)+minInt(r1, r2)) / 2
		}
	}
	return -1
}

// cutBounds returns the elements immediately left and right of a virtual
// cut over the doubled index space, substituting +-infinity sentinels at
// the edges of the slice.
func cutBounds(nums []int, cut int) (left, right int) {
	left, right = math.MinInt64, math.MaxInt64
	if cut > 0 {
		left = nums[(cut-1)/2]
	}
	if cut < len(nums)*2 {
		right = nums[cut/2]
	}
	return left, right
}

// maxInt returns the larger of two ints.
func maxInt(a, b int) int {
	if a < b {
		return b
	}
	return a
}

// minInt returns the smaller of two ints.
func minInt(a, b int) int {
	if a > b {
		return b
	}
	return a
}
0004.MedianOfTwoSortedArrays.go
0.837387
0.740714
0004.MedianOfTwoSortedArrays.go
starcoder
package datastore

import (
	"github.com/stretchr/testify/mock"
)

// MockDatastore represents the mocked object.
// It embeds testify's mock.Mock: every method records its arguments via
// m.Called(...) and replays whatever return values the test registered
// with On(...). The Get(i).(T) assertions panic if a test registers a
// return value of the wrong type, which is intentional for test code.
// NOTE(review): Task and Estimate are declared elsewhere in this package.
type MockDatastore struct {
	mock.Mock
}

// CreateSession implements the Datastore interface
func (m *MockDatastore) CreateSession() (string, error) {
	arguments := m.Called()
	return arguments.Get(0).(string), arguments.Error(1)
}

// JoinSession implements the Datastore interface
func (m *MockDatastore) JoinSession(t, n string) error {
	arguments := m.Called(t, n)
	return arguments.Error(0)
}

// LeaveSession implements the Datastore interface
func (m *MockDatastore) LeaveSession(t, n string) error {
	arguments := m.Called(t, n)
	return arguments.Error(0)
}

// RemoveSession implements the Datastore interface
func (m *MockDatastore) RemoveSession(t string) error {
	arguments := m.Called(t)
	return arguments.Error(0)
}

// AddTask implements the Datastore interface
func (m *MockDatastore) AddTask(t, id, s string) error {
	arguments := m.Called(t, id, s)
	return arguments.Error(0)
}

// RemoveTask implements the Datastore interface
func (m *MockDatastore) RemoveTask(t, id string) error {
	arguments := m.Called(t, id)
	return arguments.Error(0)
}

// AddEstimateToTask implements the Datastore interface
func (m *MockDatastore) AddEstimateToTask(t, id string, e, s float64) error {
	arguments := m.Called(t, id, e, s)
	return arguments.Error(0)
}

// RemoveEstimateFromTask implements the Datastore interface
func (m *MockDatastore) RemoveEstimateFromTask(t, id string) error {
	arguments := m.Called(t, id)
	return arguments.Error(0)
}

// GetUsers implements the Datastore interface
func (m *MockDatastore) GetUsers(t string) ([]string, error) {
	arguments := m.Called(t)
	return arguments.Get(0).([]string), arguments.Error(1)
}

// GetTasks implements the Datastore interface
func (m *MockDatastore) GetTasks(t string) ([]Task, error) {
	arguments := m.Called(t)
	return arguments.Get(0).([]Task), arguments.Error(1)
}

// AddEstimate implements the Datastore interface
func (m *MockDatastore) AddEstimate(t string, e Estimate) error {
	arguments := m.Called(t, e)
	return arguments.Error(0)
}

// RemoveEstimate implements the Datastore interface
func (m *MockDatastore) RemoveEstimate(t string, e Estimate) error {
	arguments := m.Called(t, e)
	return arguments.Error(0)
}

// GetEstimates implements the Datastore interface
func (m *MockDatastore) GetEstimates(t string) ([]Estimate, error) {
	arguments := m.Called(t)
	return arguments.Get(0).([]Estimate), arguments.Error(1)
}
backend/pkg/datastore/datastore_mock.go
0.833121
0.540075
datastore_mock.go
starcoder
package scanner // stateT is the state after reading `t`. func stateT(s *scanner, c byte) int { if c == 'r' { s.step = stateTr return scanContinue } return s.error(c, "in literal true (expecting 'r')") } // stateTr is the state after reading `tr`. func stateTr(s *scanner, c byte) int { if c == 'u' { s.step = stateTru return scanContinue } return s.error(c, "in literal true (expecting 'u')") } // stateTru is the state after reading `tru`. func stateTru(s *scanner, c byte) int { if c == 'e' { s.step = stateEndValue return scanContinue } return s.error(c, "in literal true (expecting 'e')") } // stateF is the state after reading `f`. func stateF(s *scanner, c byte) int { if c == 'a' { s.step = stateFa return scanContinue } return s.error(c, "in literal false (expecting 'a')") } // stateFa is the state after reading `fa`. func stateFa(s *scanner, c byte) int { if c == 'l' { s.step = stateFal return scanContinue } return s.error(c, "in literal false (expecting 'l')") } // stateFal is the state after reading `fal`. func stateFal(s *scanner, c byte) int { if c == 's' { s.step = stateFals return scanContinue } return s.error(c, "in literal false (expecting 's')") } // stateFals is the state after reading `fals`. func stateFals(s *scanner, c byte) int { if c == 'e' { s.step = stateEndValue return scanContinue } return s.error(c, "in literal false (expecting 'e')") } // stateN is the state after reading `n`. func stateN(s *scanner, c byte) int { if c == 'u' { s.step = stateNu return scanContinue } return s.error(c, "in literal null (expecting 'u')") } // stateNu is the state after reading `nu`. func stateNu(s *scanner, c byte) int { if c == 'l' { s.step = stateNul return scanContinue } return s.error(c, "in literal null (expecting 'l')") } // stateNul is the state after reading `nul`. func stateNul(s *scanner, c byte) int { if c == 'l' { s.step = stateEndValue return scanContinue } return s.error(c, "in literal null (expecting 'l')") }
scanner/state_keywords.go
0.793426
0.498474
state_keywords.go
starcoder
// Package transform provides translations for opentelemetry-go concepts and
// structures to otlp structures.
package transform

import (
	"errors"

	commonpb "github.com/open-telemetry/opentelemetry-proto/gen/go/common/v1"
	metricpb "github.com/open-telemetry/opentelemetry-proto/gen/go/metrics/v1"

	"go.opentelemetry.io/otel/api/core"
	"go.opentelemetry.io/otel/api/metric"
	export "go.opentelemetry.io/otel/sdk/export/metric"
	"go.opentelemetry.io/otel/sdk/export/metric/aggregator"
)

// ErrUnimplementedAgg is returned when a transformation of an unimplemented
// aggregator is attempted.
var ErrUnimplementedAgg = errors.New("unimplemented aggregator")

// Record transforms a Record into an OTLP Metric. An ErrUnimplementedAgg
// error is returned if the Record Aggregator is not supported.
// Only MinMaxSumCount and Sum aggregators are currently handled.
func Record(r export.Record) (*metricpb.Metric, error) {
	d := r.Descriptor()
	l := r.Labels()
	switch a := r.Aggregator().(type) {
	case aggregator.MinMaxSumCount:
		return minMaxSumCount(d, l, a)
	case aggregator.Sum:
		return sum(d, l, a)
	}
	return nil, ErrUnimplementedAgg
}

// sum transforms a Sum Aggregator into an OTLP Metric.
// NOTE(review): number kinds other than int64/uint64/float64 fall through
// the switch and produce a metric with no data points and a zero-valued
// descriptor type — confirm this is intended.
func sum(desc *metric.Descriptor, labels export.Labels, a aggregator.Sum) (*metricpb.Metric, error) {
	sum, err := a.Sum()
	if err != nil {
		return nil, err
	}
	m := &metricpb.Metric{
		MetricDescriptor: &metricpb.MetricDescriptor{
			Name:        desc.Name(),
			Description: desc.Description(),
			Unit:        string(desc.Unit()),
			Labels:      stringKeyValues(labels.Iter()),
		},
	}
	switch n := desc.NumberKind(); n {
	case core.Int64NumberKind, core.Uint64NumberKind:
		m.MetricDescriptor.Type = metricpb.MetricDescriptor_COUNTER_INT64
		m.Int64DataPoints = []*metricpb.Int64DataPoint{
			{Value: sum.CoerceToInt64(n)},
		}
	case core.Float64NumberKind:
		m.MetricDescriptor.Type = metricpb.MetricDescriptor_COUNTER_DOUBLE
		m.DoubleDataPoints = []*metricpb.DoubleDataPoint{
			{Value: sum.CoerceToFloat64(n)},
		}
	}
	return m, nil
}

// minMaxSumCountValues returns the values of the MinMaxSumCount Aggregator
// as discrete values. On the first failing accessor it returns early with
// that error and zero values for the remaining results.
func minMaxSumCountValues(a aggregator.MinMaxSumCount) (min, max, sum core.Number, count int64, err error) {
	if min, err = a.Min(); err != nil {
		return
	}
	if max, err = a.Max(); err != nil {
		return
	}
	if sum, err = a.Sum(); err != nil {
		return
	}
	if count, err = a.Count(); err != nil {
		return
	}
	return
}

// minMaxSumCount transforms a MinMaxSumCount Aggregator into an OTLP Metric.
// Min and max are encoded as the 0th and 100th percentile of a SUMMARY.
func minMaxSumCount(desc *metric.Descriptor, labels export.Labels, a aggregator.MinMaxSumCount) (*metricpb.Metric, error) {
	min, max, sum, count, err := minMaxSumCountValues(a)
	if err != nil {
		return nil, err
	}
	numKind := desc.NumberKind()
	return &metricpb.Metric{
		MetricDescriptor: &metricpb.MetricDescriptor{
			Name:        desc.Name(),
			Description: desc.Description(),
			Unit:        string(desc.Unit()),
			Type:        metricpb.MetricDescriptor_SUMMARY,
			Labels:      stringKeyValues(labels.Iter()),
		},
		SummaryDataPoints: []*metricpb.SummaryDataPoint{
			{
				Count: uint64(count),
				Sum:   sum.CoerceToFloat64(numKind),
				PercentileValues: []*metricpb.SummaryDataPoint_ValueAtPercentile{
					{
						Percentile: 0.0,
						Value:      min.CoerceToFloat64(numKind),
					},
					{
						Percentile: 100.0,
						Value:      max.CoerceToFloat64(numKind),
					},
				},
			},
		},
	}, nil
}

// stringKeyValues transforms a label iterator into an OTLP StringKeyValues.
func stringKeyValues(iter export.LabelIterator) []*commonpb.StringKeyValue {
	l := iter.Len()
	if l == 0 {
		// TODO: That looks like a pointless allocation in case of
		// no labels, but returning nil from this function makes
		// the test fail.
		return []*commonpb.StringKeyValue{}
	}
	result := make([]*commonpb.StringKeyValue, 0, l)
	for iter.Next() {
		kv := iter.Label()
		result = append(result, &commonpb.StringKeyValue{
			Key:   string(kv.Key),
			Value: kv.Value.Emit(),
		})
	}
	return result
}
exporters/otlp/internal/transform/metric.go
0.701815
0.420659
metric.go
starcoder
package webrtc

import (
	"encoding/json"
	"strings"
)

// SDPType describes the type of an SessionDescription.
type SDPType int

const (
	// SDPTypeOffer indicates that a description MUST be treated as an SDP
	// offer.
	SDPTypeOffer SDPType = iota + 1

	// SDPTypePranswer indicates that a description MUST be treated as an
	// SDP answer, but not a final answer. A description used as an SDP
	// pranswer may be applied as a response to an SDP offer, or an update to
	// a previously sent SDP pranswer.
	SDPTypePranswer

	// SDPTypeAnswer indicates that a description MUST be treated as an SDP
	// final answer, and the offer-answer exchange MUST be considered complete.
	// A description used as an SDP answer may be applied as a response to an
	// SDP offer or as an update to a previously sent SDP pranswer.
	SDPTypeAnswer

	// SDPTypeRollback indicates that a description MUST be treated as
	// canceling the current SDP negotiation and moving the SDP offer and
	// answer back to what it was in the previous stable state. Note the
	// local or remote SDP descriptions in the previous stable state could be
	// null if there has not yet been a successful offer-answer negotiation.
	SDPTypeRollback
)

// This is done this way because of a linter.
const (
	sdpTypeOfferStr    = "offer"
	sdpTypePranswerStr = "pranswer"
	sdpTypeAnswerStr   = "answer"
	sdpTypeRollbackStr = "rollback"
)

// newSDPType maps a raw string to its SDPType, returning SDPType(Unknown)
// for unrecognized input. It is the single source of truth for parsing.
func newSDPType(raw string) SDPType {
	switch raw {
	case sdpTypeOfferStr:
		return SDPTypeOffer
	case sdpTypePranswerStr:
		return SDPTypePranswer
	case sdpTypeAnswerStr:
		return SDPTypeAnswer
	case sdpTypeRollbackStr:
		return SDPTypeRollback
	default:
		return SDPType(Unknown)
	}
}

func (t SDPType) String() string {
	switch t {
	case SDPTypeOffer:
		return sdpTypeOfferStr
	case SDPTypePranswer:
		return sdpTypePranswerStr
	case SDPTypeAnswer:
		return sdpTypeAnswerStr
	case SDPTypeRollback:
		return sdpTypeRollbackStr
	default:
		return ErrUnknownType.Error()
	}
}

// MarshalJSON enables JSON marshaling of a SDPType
func (t SDPType) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.String())
}

// UnmarshalJSON enables JSON unmarshaling of a SDPType.
// Matching is case-insensitive; *t is left untouched on error.
func (t *SDPType) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	// Delegate to newSDPType so the string constants stay the single
	// source of truth instead of duplicating the literals here.
	parsed := newSDPType(strings.ToLower(s))
	if parsed == SDPType(Unknown) {
		return ErrUnknownType
	}
	*t = parsed
	return nil
}
sdptype.go
0.526343
0.413832
sdptype.go
starcoder
package virustotal

import (
	"context"

	virustotal "github.com/VirusTotal/vt-go"
	"github.com/turbot/steampipe-plugin-sdk/grpc/proto"
	"github.com/turbot/steampipe-plugin-sdk/plugin"
	"github.com/turbot/steampipe-plugin-sdk/plugin/transform"
)

// tableVirusTotalDomain defines the Steampipe table "virustotal_domain".
// Every non-key column is extracted from the API response attributes via
// the getAttribute transform (declared elsewhere in this package); unix
// timestamps are further converted with transform.UnixToTimestamp.
func tableVirusTotalDomain(ctx context.Context) *plugin.Table {
	return &plugin.Table{
		Name:        "virustotal_domain",
		Description: "Information and analysis for a domain.",
		List: &plugin.ListConfig{
			Hydrate:    listDomain,
			KeyColumns: plugin.SingleColumn("id"),
		},
		Columns: []*plugin.Column{
			// Top columns
			{Name: "id", Type: proto.ColumnType_STRING, Transform: transform.FromQual("id"), Description: "The domain name to retrieve."},
			// Other columns
			{Name: "categories", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "categories"), Description: "Mapping that relates categorisation services with the category it assigns the domain to. These services are, among others: Alexa, BitDefender, TrendMicro, Websense ThreatSeeker, etc."},
			{Name: "creation_date", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromValue().TransformP(getAttribute, "creation_date").Transform(transform.UnixToTimestamp), Description: "Creation date extracted from the Domain's whois."},
			{Name: "favicon", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "favicon"), Description: "Dictionary including difference hash and md5 hash of the domain's favicon. Only available for premium users."},
			{Name: "jarm", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "jarm"), Description: "JARM is an active Transport Layer Security (TLS) server fingerprint."},
			{Name: "last_analysis_results", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "last_analysis_results"), Description: "Result from URL scanners. dict with scanner name as key and a dict with notes/result from that scanner as value."},
			{Name: "last_analysis_stats", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "last_analysis_stats"), Description: "Number of different results from this scans."},
			{Name: "last_dns_records", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "last_dns_records"), Description: "Domain's DNS records on its last scan."},
			{Name: "last_dns_records_date", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromValue().TransformP(getAttribute, "last_dns_records_date").Transform(transform.UnixToTimestamp), Description: "Date when the dns records list was retrieved by VirusTotal."},
			{Name: "last_https_certificate", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "last_https_certificate"), Description: "SSL Certificate object retrieved last time the domain was analysed."},
			{Name: "last_https_certificate_date", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromValue().TransformP(getAttribute, "last_https_certificate_date").Transform(transform.UnixToTimestamp), Description: "Date when the certificate was retrieved by VirusTotal."},
			{Name: "last_modification_date", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromValue().TransformP(getAttribute, "last_modification_date").Transform(transform.UnixToTimestamp), Description: "Date when any of domain's information was last updated."},
			{Name: "last_update_date", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromValue().TransformP(getAttribute, "last_update_date").Transform(transform.UnixToTimestamp), Description: "Updated date extracted from whois."},
			{Name: "popularity_ranks", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "popularity_ranks"), Description: "Domain's position in popularity ranks such as Alexa, Quantcast, Statvoo, etc."},
			{Name: "registrar", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "registrar"), Description: "Company that registered the domain."},
			{Name: "reputation", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "reputation"), Description: "Domain's score calculated from the votes of the VirusTotal's community."},
			{Name: "tags", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "tags"), Description: "List of representative attributes."},
			{Name: "total_votes", Type: proto.ColumnType_JSON, Transform: transform.FromValue().TransformP(getAttribute, "total_votes"), Description: "Unweighted number of total votes from the community, divided into harmless and malicious."},
			{Name: "whois", Type: proto.ColumnType_STRING, Transform: transform.FromValue().TransformP(getAttribute, "whois"), Description: "WHOIS information as returned from the pertinent whois server."},
			{Name: "whois_date", Type: proto.ColumnType_TIMESTAMP, Transform: transform.FromValue().TransformP(getAttribute, "whois_date").Transform(transform.UnixToTimestamp), Description: "Date of the last update of the whois record in VirusTotal."},
		},
	}
}

// listDomain fetches a single domain object ("domains/<id>") from the
// VirusTotal API and streams each returned item. A not-found error is
// swallowed so an unknown domain yields zero rows rather than a failure.
func listDomain(ctx context.Context, d *plugin.QueryData, _ *plugin.HydrateData) (interface{}, error) {
	conn, err := connect(ctx, d)
	if err != nil {
		plugin.Logger(ctx).Error("virustotal_domain.listDomain", "connection_error", err)
		return nil, err
	}
	quals := d.KeyColumnQuals
	id := quals["id"].GetStringValue()
	u := virustotal.URL("domains/" + id)
	it, err := conn.Iterator(u)
	if err != nil {
		plugin.Logger(ctx).Error("virustotal_domain.listDomain", "query_error", err, "it", it)
		return nil, err
	}
	defer it.Close()
	for it.Next() {
		i := it.Get()
		d.StreamListItem(ctx, i)
	}
	if err := it.Error(); err != nil {
		if !isNotFoundError(err) {
			plugin.Logger(ctx).Error("virustotal_domain.listDomain", "query_error", err, "it", it)
			return nil, err
		}
	}
	return nil, nil
}
virustotal/table_virustotal_domain.go
0.594551
0.430028
table_virustotal_domain.go
starcoder
package solution /* leetcode: https://leetcode.com/problems/implement-magic-dictionary/ */ /* We build a trie data structure. When we search a word, for example: abc We try to seach *bc, a*c, ab* and character at index * != character at index in searchword ==> we found the answer Time complexity: Constructor: O(1) BuildDict: O( len(dictionary) * max(len(word)) Search: O(len(searchword) ^ 2) Space complexity: O(n) where n is number of character i trie. */ type MagicDictionary struct { root *Node } func Constructor() MagicDictionary { return MagicDictionary{ root: &Node{Children: make([]*Node, 26)}, } } func (this *MagicDictionary) BuildDict(dictionary []string) { for _, word := range dictionary { this.root.InsertWord(word) } } func (this *MagicDictionary) Search(searchWord string) bool { for i := 0; i < len(searchWord); i++ { if this.root.Search(searchWord, i, 0) { return true } } // end loop return false } /** * Your MagicDictionary object will be instantiated and called as such: * obj := Constructor(); * obj.BuildDict(dictionary); * param_2 := obj.Search(searchWord); */ type Node struct { Children []*Node End bool } func (n *Node) Search(s string, skipIdx, k int) bool { if k == len(s) { return n.End } if skipIdx == k { for i := 0; i < len(n.Children); i++ { child := n.Children[i] if child != nil && i != n.GetIndex(s[k]) && child.Search(s, skipIdx, k+1) { return true } } // end loop return false } idx := n.GetIndex(s[k]) child := n.Children[idx] if child != nil { return child.Search(s, skipIdx, k+1) } return false } func (n *Node) InsertWord(word string) { curr := n for i := 0; i < len(word); i++ { curr = curr.Add(word[i]) } curr.End = true } func (n *Node) Add(c byte) *Node { idx := n.GetIndex(c) if n.Children[idx] == nil { n.Children[idx] = &Node{Children: make([]*Node, 26)} } return n.Children[idx] } func (n *Node) GetIndex(c byte) int { return int(c - 'a') }
lesson-15/trie/676-implement-magic-dictionary/solution.go
0.810141
0.418875
solution.go
starcoder
package main

import (
	"errors"
	"fmt"
	"strings"

	"github.com/rolfschmidt/advent-of-code-2021/helper"
)

func main() {
	fmt.Println("Part 1", Part1())
	fmt.Println("Part 2", Part2())
}

// Part1 counts lit cells restricted to the -50..50 initialization region.
func Part1() int {
	return Run(false)
}

// Part2 counts lit cells over the full coordinate space.
func Part2() int {
	return Run(true)
}

// IntBetween reports whether value lies in the inclusive range [from, to].
func IntBetween(value int, from int, to int) bool {
	return value >= from && value <= to
}

// CubeRange is an axis-aligned, inclusive cuboid of cells together with the
// state ("on" or "off") the reboot step applies to them.
type CubeRange struct {
	turnOn bool
	x1, x2 int
	y1, y2 int
	z1, z2 int
}

// Volume returns the number of cells covered (bounds are inclusive).
func (cu CubeRange) Volume() int {
	return (cu.x2 - cu.x1 + 1) * (cu.y2 - cu.y1 + 1) * (cu.z2 - cu.z1 + 1)
}

// Overlap returns the intersection of the two cuboids, or an error when they
// do not intersect. The result's state is the negation of cu's state: in the
// inclusion-exclusion sum kept by Run, the intersection cancels the volume
// already counted for cu. (The original four-branch if/else reduced exactly
// to this negation.)
func (cu CubeRange) Overlap(cu2 CubeRange) (CubeRange, error) {
	x1 := helper.IntMax(cu.x1, cu2.x1)
	x2 := helper.IntMin(cu.x2, cu2.x2)
	y1 := helper.IntMax(cu.y1, cu2.y1)
	y2 := helper.IntMin(cu.y2, cu2.y2)
	z1 := helper.IntMax(cu.z1, cu2.z1)
	z2 := helper.IntMin(cu.z2, cu2.z2)
	if x1 > x2 || y1 > y2 || z1 > z2 {
		return CubeRange{}, errors.New("cubes do not overlap")
	}
	return CubeRange{!cu.turnOn, x1, x2, y1, y2, z1, z2}, nil
}

// Run parses input.txt, applies the reboot steps using inclusion-exclusion
// over cuboid intersections, and returns the number of cells that are on.
func Run(Part2 bool) int {
	var cubeRanges []CubeRange
	for _, line := range helper.ReadFile("input.txt") {
		cubeRange := CubeRange{}
		for _, part := range helper.Split(line, ",") {
			values := helper.Split(part, "=")
			// The axis letter is the last rune before the '='.
			varName := string(values[0][len(values[0])-1])
			rangeValue := helper.StringArrayInt(helper.Split(values[1], ".."))
			lo := helper.IntMin(rangeValue[0], rangeValue[1])
			hi := helper.IntMax(rangeValue[0], rangeValue[1])
			switch varName {
			case "x":
				cubeRange.x1, cubeRange.x2 = lo, hi
			case "y":
				cubeRange.y1, cubeRange.y2 = lo, hi
			case "z":
				cubeRange.z1, cubeRange.z2 = lo, hi
			}
		}
		// Part 1 ignores cuboids outside the initialization region.
		if !Part2 && (!IntBetween(cubeRange.x1, -50, 50) || !IntBetween(cubeRange.x2, -50, 50) ||
			!IntBetween(cubeRange.y1, -50, 50) || !IntBetween(cubeRange.y2, -50, 50) ||
			!IntBetween(cubeRange.z1, -50, 50) || !IntBetween(cubeRange.z2, -50, 50)) {
			continue
		}
		cubeRange.turnOn = strings.Count(line, "on") > 0
		cubeRanges = append(cubeRanges, cubeRange)
	}

	var onCubes []CubeRange
	for _, currCube := range cubeRanges {
		// Collect correction cuboids against everything seen so far
		// BEFORE adding the current cuboid.
		var mergedCubes []CubeRange
		for _, onCube := range onCubes {
			if oCube, err := onCube.Overlap(currCube); err == nil {
				mergedCubes = append(mergedCubes, oCube)
			}
		}
		if currCube.turnOn {
			onCubes = append(onCubes, currCube)
		}
		onCubes = append(onCubes, mergedCubes...)
	}

	countNeg, countPos := 0, 0
	for _, onCube := range onCubes {
		if onCube.turnOn {
			countPos += onCube.Volume()
		} else {
			countNeg += onCube.Volume()
		}
	}
	return countPos - countNeg
}
day22/main.go
0.555194
0.458773
main.go
starcoder
package internal

import (
	"fmt"
	"sync"
	"testing"
)

// Tree is the root of a hierarchy of test cases. It adds an ID -> *TestCase
// index on top of the embedded TreeNode.
type Tree struct {
	TreeNode
	testsByID map[string]*TestCase
}

// NewTree returns a new Tree with the provided test cases.
func NewTree(testCases ...TestCase) Tree {
	var tree Tree
	for _, tc := range testCases {
		tree.Insert(tc)
	}
	return tree
}

// DeepEqual returns true if the two trees are equal.
// Only exported fields are compared.
func (tr *Tree) DeepEqual(b Tree) bool {
	return tr.TreeNode.deepEqual(b.TreeNode)
}

// Run the tests for this tree.
func (tr Tree) Run(t *testing.T, ctx Context) {
	tr.run(t, ctx)
}

// Get returns the test case registered under id, or nil if none exists.
func (tr *Tree) Get(id string) *TestCase {
	return tr.testsByID[id]
}

// Insert adds testCase to the tree (placed by its Path) and returns a
// pointer to the stored copy. If a test case with the same ID is already
// present, that existing one is returned unchanged.
//
// NOTE(review): the returned pointer (also kept in testsByID) points into a
// TreeNode's Tests slice; a later append to that same slice can reallocate
// the backing array and leave stale pointers behind. Confirm that inserts
// into a node never happen after its pointers have been handed out.
func (tr *Tree) Insert(testCase TestCase) *TestCase {
	// Lazily initialize the index maps exactly once (Once comes from the
	// embedded TreeNode).
	tr.Once.Do(func() {
		tr.Index = map[string]int{}
		tr.testsByID = map[string]*TestCase{}
	})
	tc := tr.Get(testCase.ID)
	if tc != nil {
		return tc
	}
	tc = tr.TreeNode.insert(testCase, testCase.Path()...)
	tr.testsByID[tc.ID] = tc
	return tc
}

// TreeNode organizes the TestCases in a tree structure.
type TreeNode struct {
	sync.Once
	Index map[string]int // child step name -> position in Steps/Nodes
	Steps []string       // ordered child step names
	Nodes []TreeNode     // child nodes, parallel to Steps
	Tests []TestCase     // tests attached directly to this node
}

// deepEqual compares two nodes field by field, recursing into children.
// The embedded sync.Once is deliberately not compared.
func (tr *TreeNode) deepEqual(b TreeNode) bool {
	if len(tr.Index) != len(b.Index) {
		return false
	}
	for ak, av := range tr.Index {
		if bv, ok := b.Index[ak]; !ok || av != bv {
			return false
		}
	}
	if len(tr.Steps) != len(b.Steps) {
		return false
	}
	for i := range tr.Steps {
		if tr.Steps[i] != b.Steps[i] {
			return false
		}
	}
	if len(tr.Tests) != len(b.Tests) {
		return false
	}
	for i := range tr.Tests {
		if !tr.Tests[i].deepEqual(b.Tests[i]) {
			return false
		}
	}
	if len(tr.Nodes) != len(b.Nodes) {
		return false
	}
	for i := range tr.Nodes {
		if !tr.Nodes[i].deepEqual(b.Nodes[i]) {
			return false
		}
	}
	return true
}

// insert walks down path, creating intermediate child nodes as needed, and
// stores testCase at the final node. When exactly one path element remains
// it becomes the test's Name. A pointer to the stored copy is returned.
func (tr *TreeNode) insert(testCase TestCase, path ...string) *TestCase {
	tr.Once.Do(func() {
		tr.Index = map[string]int{}
	})
	if len(path) < 2 {
		if len(path) == 1 {
			testCase.Name = path[0]
		}
		tr.Tests = append(tr.Tests, testCase)
		return &tr.Tests[len(tr.Tests)-1]
	} else {
		// Create the child node for this step on first sight.
		if _, ok := tr.Index[path[0]]; !ok {
			tr.Index[path[0]] = len(tr.Nodes)
			tr.Nodes = append(tr.Nodes, TreeNode{})
			tr.Steps = append(tr.Steps, path[0])
		}
		return tr.Nodes[tr.Index[path[0]]].insert(testCase, path[1:]...)
	}
}

// run executes the subtree: one t.Run per child step, then one t.Run per
// test attached directly to this node.
func (tr TreeNode) run(t *testing.T, ctx Context) {
	// Descend into any possible children.
	for i, s := range tr.Steps {
		i, s := i, s // per-iteration copies captured by the closure
		t.Run(s, func(t *testing.T) {
			tr.Nodes[i].run(t, ctx)
		})
	}
	// Run this node's tests.
	for i := range tr.Tests {
		tc := tr.Tests[i]
		t.Run(tc.Name, func(t *testing.T) {
			// Assert the expected leak, escape, move decisions match.
			for _, lm := range tc.Matches {
				if s := lm.Regexp.FindString(ctx.BuildOutput); s == "" {
					t.Error(getBuildOutputErr(lm, s))
				}
			}
			// Assert the expected leak, escape, move decisions do not match.
			for _, lm := range tc.Natches {
				if s := lm.Regexp.FindString(ctx.BuildOutput); s != "" {
					t.Error(getBuildOutputErr(lm, s))
				}
			}
			// Find the benchmark function.
			if benchFn, ok := ctx.Benchmarks[tc.ID]; !ok {
				// Missing benchmarks are logged, not failed, and only
				// when a benchmark registry was supplied at all.
				if ctx.Benchmarks != nil {
					t.Logf("benchmark function not registered for %s", tc.ID)
				}
			} else {
				// Assert the expected allocs and bytes match.
				r := testing.Benchmark(benchFn)
				if ea, aa := tc.AllocOp, r.AllocsPerOp(); !ea.Eq(aa) {
					t.Errorf("exp.alloc=%d, act.alloc=%d", ea, aa)
				}
				if eb, ab := tc.BytesOp, r.AllocedBytesPerOp(); !eb.Eq(ab) {
					t.Errorf("exp.bytes=%d, act.bytes=%d", eb, ab)
				}
			}
		})
	}
}

// Failure-message templates for build-output assertions.
const expectedBuildOutputNotFound = `error: build optimization
reason: not found
regexp: %s
source: %s
`

const expectedBuildOutputWasFound = `error: build optimization
reason: was found
output: %s
regexp: %s
source: %s
`

// getBuildOutputErr formats the failure message for a LineMatcher: the
// "not found" template when found is empty, the "was found" one otherwise.
func getBuildOutputErr(lm LineMatcher, found string) string {
	if found == "" {
		return fmt.Sprintf(
			expectedBuildOutputNotFound,
			lm.Regexp.String(),
			lm.Source,
		)
	}
	return fmt.Sprintf(
		expectedBuildOutputWasFound,
		found,
		lm.Regexp.String(),
		lm.Source,
	)
}
internal/tree.go
0.70304
0.493164
tree.go
starcoder
package interpreter

import (
	"fmt"
	"image"
	"reflect"
)

// Rect is the interpreter's rectangle value, backed by image.Rectangle.
type Rect image.Rectangle

// Compare yields Number(0) when other is an equal rect; any other operand
// is incomparable and yields (nil, nil).
func (r Rect) Compare(other Value) (Value, error) {
	o, ok := other.(Rect)
	if !ok || r != o {
		return nil, nil
	}
	return Number(0), nil
}

// Add is not defined for rects.
func (r Rect) Add(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: rect + %s Not supported", reflect.TypeOf(other))
}

// Sub is not defined for rects.
func (r Rect) Sub(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: rect - %s Not supported", reflect.TypeOf(other))
}

// Mul is not defined for rects.
func (r Rect) Mul(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: rect * %s Not supported", reflect.TypeOf(other))
}

// Div is not defined for rects.
func (r Rect) Div(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: rect / %s Not supported", reflect.TypeOf(other))
}

// Mod is not defined for rects.
func (r Rect) Mod(other Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: rect %% %s Not supported", reflect.TypeOf(other))
}

// In reports whether r lies inside another rect (min edges inclusive,
// max edges strictly inside).
func (r Rect) In(other Value) (Value, error) {
	o, ok := other.(Rect)
	if !ok {
		return nil, fmt.Errorf("type mismatch: rect In %s Not supported", reflect.TypeOf(other))
	}
	contained := r.Min.X >= o.Min.X && r.Min.Y >= o.Min.Y &&
		r.Max.X < o.Max.X && r.Max.Y < o.Max.Y
	return Boolean(contained), nil
}

// Neg is not defined for rects.
func (r Rect) Neg() (Value, error) {
	return nil, fmt.Errorf("type mismatch: -rect Not supported")
}

// Not is not defined for rects.
func (r Rect) Not() (Value, error) {
	return nil, fmt.Errorf("type mismatch: 'Not rect' Not supported")
}

// At is not defined for rects.
func (r Rect) At(bitmap BitmapContext) (Value, error) {
	return nil, fmt.Errorf("type mismatch: @rect Not supported")
}

// Property exposes the rect's named attributes (position, size, edges).
func (r Rect) Property(ident string) (Value, error) {
	switch ident {
	case "x", "left":
		return Number(r.Min.X), nil
	case "y", "top":
		return Number(r.Min.Y), nil
	case "w", "width":
		return Number(r.Max.X - r.Min.X), nil
	case "h", "height":
		return Number(r.Max.Y - r.Min.Y), nil
	case "right":
		return Number(r.Max.X), nil
	case "bottom":
		return Number(r.Max.Y), nil
	}
	return baseProperty(r, ident)
}

// PrintStr renders the rect as position plus size.
func (r Rect) PrintStr() string {
	w := r.Max.X - r.Min.X
	h := r.Max.Y - r.Min.Y
	return fmt.Sprintf("rect(x:%d, y:%d, w:%d, h:%d)", r.Min.X, r.Min.Y, w, h)
}

// Iterate visits every contained point in row-major order, stopping at the
// first error returned by visit.
func (r Rect) Iterate(visit func(Value) error) error {
	for y := r.Min.Y; y < r.Max.Y; y++ {
		for x := r.Min.X; x < r.Max.X; x++ {
			if err := visit(Point{x, y}); err != nil {
				return err
			}
		}
	}
	return nil
}

// Index is not defined for rects.
func (r Rect) Index(index Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: rect[Index] Not supported")
}

// IndexRange is not defined for rects.
func (r Rect) IndexRange(lower, upper Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: rect[lower..upper] Not supported")
}

// IndexAssign is not defined for rects.
func (r Rect) IndexAssign(index Value, val Value) error {
	return fmt.Errorf("type mismatch: rect[%s] Not supported", reflect.TypeOf(index))
}

// RuntimeTypeName returns the interpreter-visible type name.
func (r Rect) RuntimeTypeName() string {
	return "rect"
}

// Concat is not defined for rects.
func (r Rect) Concat(val Value) (Value, error) {
	return nil, fmt.Errorf("type mismatch: rect :: [%s] Not supported", reflect.TypeOf(val))
}
internal/interpreter/rect.go
0.823754
0.469824
rect.go
starcoder
package move import ( "github.com/kemokemo/kuronan-dash/internal/view" ) // NewKuronaVc returns a new VelocityController for Kurona. func NewKuronaVc() *KuronaVc { return &KuronaVc{ scrollV: &view.Vector{X: 0.0, Y: 0.0}, charaPosV: &view.Vector{X: 0.0, Y: 0.0}, charaDrawV: &view.Vector{X: 0.0, Y: 0.0}, gravity: 1.2, jumpV0: -10.3, dropV0: 0.5, } } // KuronaVc is VelocityController of Kurona. Please create via 'NewKuronaVc' method. type KuronaVc struct { scrollV *view.Vector charaPosV *view.Vector charaDrawV *view.Vector gravity float64 jumpV0 float64 dropV0 float64 currentState State prevState State elapsed float64 deltaX, deltaY float64 } func (kvc *KuronaVc) SetState(s State) { kvc.prevState = kvc.currentState kvc.currentState = s if kvc.prevState == s { kvc.elapsed += elapsedStep } else { kvc.elapsed = 0.0 } } // GetVelocity returns the velocity to scroll the field parts and to update the character position. func (kvc *KuronaVc) GetVelocity() (*view.Vector, *view.Vector, *view.Vector) { kvc.decideVbyState() kvc.updateVelocity() return kvc.scrollV, kvc.charaPosV, kvc.charaDrawV } // TODO: キャラクターごとに個性を出す部分 // ダッシュから歩きに変わる時のX方向の速度の落ち方、上がり方もキャラごとに特性が出せたら素敵。 func (kvc *KuronaVc) decideVbyState() { switch kvc.currentState { case Walk: kvc.deltaX = 1.0 kvc.deltaY = 0.0 case Dash: kvc.deltaX = 2.0 kvc.deltaY = 0.0 case Ascending: kvc.deltaX = 0.6 kvc.deltaY = kvc.jumpV0 + kvc.gravity*kvc.elapsed case Descending: kvc.deltaX = 0.6 kvc.deltaY = kvc.dropV0 + kvc.gravity*kvc.elapsed // todo: 試験的に上限の落下速度を導入 if kvc.deltaY > 9.0 { kvc.deltaY = 9.0 } default: // Don't move kvc.deltaX = 0.0 kvc.deltaY = 0.0 } } // updateVelocity updates all velocities. Please pass me the data for charaPosV. func (kvc *KuronaVc) updateVelocity() { kvc.charaPosV.X = kvc.deltaX kvc.charaPosV.Y = kvc.deltaY kvc.charaDrawV.X = 0.0 kvc.charaDrawV.Y = kvc.deltaY kvc.scrollV.X = -kvc.deltaX kvc.scrollV.Y = 0.0 }
internal/move/kurona_vc.go
0.512937
0.416085
kurona_vc.go
starcoder
package main

import (
	"math"
	"sort"

	. "github.com/9d77v/leetcode/pkg/algorithm/math"
	. "github.com/9d77v/leetcode/pkg/algorithm/unionfind"
)

/*
Problem: Min Cost to Connect All Points (LeetCode 1584)

Given a points array on a 2D plane where points[i] = [xi, yi], the cost of
connecting points [xi, yi] and [xj, yj] is their Manhattan distance
|xi - xj| + |yi - yj|. Return the minimum total cost to connect all points,
where "connected" means exactly one simple path exists between any two.

Constraints:
1 <= points.length <= 1000
-10^6 <= xi, yi <= 10^6
All points are pairwise distinct.

Source: LeetCode
Link: https://leetcode-cn.com/problems/min-cost-to-connect-all-points
*/

/*
Approach 1: Kruskal's algorithm over all O(n²) edges.
Time:  O(n² log n)
Space: O(n²)
Measured: 392 ms, 27 MB
*/
func minCostConnectPointsFunc1(points [][]int) int {
	n := len(points)
	if n <= 1 {
		return 0
	}
	edges := getEdges(points)
	sortByLen(edges)
	return calcCost(edges, n)
}

// getEdges builds every pairwise edge as {i, j, manhattan distance}.
func getEdges(points [][]int) [][3]int {
	edges := make([][3]int, 0, len(points)*(len(points)-1))
	n := len(points)
	for i := 0; i < n-1; i++ {
		for j := i + 1; j < n; j++ {
			edges = append(edges, [3]int{i, j, dist(points[i], points[j])})
		}
	}
	return edges
}

// dist returns the Manhattan distance between two points.
func dist(x, y []int) int {
	return Abs(x[0]-y[0]) + Abs(x[1]-y[1])
}

// sortByLen sorts edges by ascending length (third component).
func sortByLen(edges [][3]int) {
	sort.Slice(edges, func(i, j int) bool {
		return edges[i][2] < edges[j][2]
	})
}

// calcCost runs Kruskal: scan edges in ascending length, keep each edge
// that unions two distinct components, and stop after n-1 acceptances.
func calcCost(edges [][3]int, n int) (cost int) {
	uf := NewUnionFind(n)
	left := n - 1
	for _, edge := range edges {
		if uf.Union(edge[0], edge[1]) {
			cost += edge[2]
			left--
			if left == 0 {
				break
			}
		}
	}
	return
}

/*
Approach 2: Kruskal on a sparse candidate-edge set.
For each point only O(1) candidate neighbors per octant are generated via a
Fenwick-tree sweep; repeating the sweep under coordinate swaps/negations
covers all octants, which is sufficient for a Manhattan-distance MST.
Time:  O(n log n)
Space: O(n)
Measured: 16 ms, 6.5 MB
*/
func minCostConnectPointsFunc2(points [][]int) int {
	n := len(points)
	edges := getEdges2(points, n)
	sortByLen(edges)
	return calcCost(edges, n)
}

func getEdges2(points [][]int, n int) [][3]int {
	// Append each point's original index so it survives sorting.
	for i, p := range points {
		points[i] = append(p, i)
	}
	edges := [][3]int{}
	build := func() {
		// Sweep order: ascending x, then ascending y.
		sort.Slice(points, func(i, j int) bool {
			a, b := points[i], points[j]
			return a[0] < b[0] || a[0] == b[0] && a[1] < b[1]
		})
		// Coordinate-compress the key y-x.
		type pair struct{ v, i int }
		ps := make([]pair, n)
		for i, p := range points {
			ps[i] = pair{p[1] - p[0], i}
		}
		sort.Slice(ps, func(i, j int) bool { return ps[i].v < ps[j].v })
		kth := make([]int, n)
		k := 1
		kth[ps[0].i] = k
		for i := 1; i < n; i++ {
			if ps[i].v != ps[i-1].v {
				k++
			}
			kth[ps[i].i] = k
		}
		// Sweep right-to-left: for each point, query the best already-seen
		// candidate in its octant and record one candidate edge.
		t := newFenwickTree(k + 1)
		for i := n - 1; i >= 0; i-- {
			p := points[i]
			pos := kth[i]
			if j := t.query(pos); j != -1 {
				q := points[j]
				edges = append(edges, [3]int{p[2], q[2], dist(p, q)})
			}
			t.update(pos, p[0]+p[1], i)
		}
	}
	build()
	// Re-run the sweep under swaps/negations to cover the other octants.
	for _, p := range points {
		p[0], p[1] = p[1], p[0]
	}
	build()
	for _, p := range points {
		p[0] = -p[0]
	}
	build()
	for _, p := range points {
		p[0], p[1] = p[1], p[0]
	}
	build()
	return edges
}

// fenwickTree stores, per compressed position, the minimal x+y key seen so
// far together with the index of the point achieving it.
type fenwickTree struct {
	tree, idRec []int
}

func newFenwickTree(n int) *fenwickTree {
	tree := make([]int, n)
	idRec := make([]int, n)
	for i := range tree {
		tree[i], idRec[i] = math.MaxInt64, -1
	}
	return &fenwickTree{tree, idRec}
}

// update records point id with key val at position pos (prefix direction).
func (f *fenwickTree) update(pos, val, id int) {
	for ; pos > 0; pos &= pos - 1 {
		if val < f.tree[pos] {
			f.tree[pos], f.idRec[pos] = val, id
		}
	}
}

// query returns the id of the minimal-key point at positions >= pos, or -1
// when none has been recorded yet.
func (f *fenwickTree) query(pos int) int {
	minVal, minID := math.MaxInt64, -1
	for ; pos < len(f.tree); pos += pos & -pos {
		if f.tree[pos] < minVal {
			minVal, minID = f.tree[pos], f.idRec[pos]
		}
	}
	return minID
}
internal/leetcode/1584.min-cost-to-connect-all-points/main.go
0.529263
0.435061
main.go
starcoder
// Package wkt provides unmarshalling of Well-Known Text geometry strings
// into orb geometry types.
package wkt

import (
	"errors"
	"strconv"
	"strings"

	"github.com/paulmach/orb"
)

var (
	// ErrNotWKT is returned when unmarshalling WKT and the data is not valid.
	ErrNotWKT = errors.New("wkt: invalid data")

	// ErrIncorrectGeometry is returned when unmarshalling WKT data into the wrong type.
	// For example, unmarshaling linestring data into a point.
	ErrIncorrectGeometry = errors.New("wkt: incorrect geometry")

	// ErrUnsupportedGeometry is returned when geometry type is not supported by this lib.
	ErrUnsupportedGeometry = errors.New("wkt: unsupported geometry")
)

// UnmarshalPoint returns the point represented by the wkt string.
// Will return ErrIncorrectGeometry if the wkt is not a point.
func UnmarshalPoint(s string) (p orb.Point, err error) {
	geom, err := Unmarshal(s)
	if err != nil {
		return orb.Point{}, err
	}
	g, ok := geom.(orb.Point)
	if !ok {
		return orb.Point{}, ErrIncorrectGeometry
	}
	return g, nil
}

// UnmarshalMultiPoint returns the multi-point represented by the wkt string.
// Will return ErrIncorrectGeometry if the wkt is not a multi-point.
func UnmarshalMultiPoint(s string) (p orb.MultiPoint, err error) {
	geom, err := Unmarshal(s)
	if err != nil {
		return nil, err
	}
	g, ok := geom.(orb.MultiPoint)
	if !ok {
		return nil, ErrIncorrectGeometry
	}
	return g, nil
}

// UnmarshalLineString returns the linestring represented by the wkt string.
// Will return ErrIncorrectGeometry if the wkt is not a linestring.
func UnmarshalLineString(s string) (p orb.LineString, err error) {
	geom, err := Unmarshal(s)
	if err != nil {
		return nil, err
	}
	g, ok := geom.(orb.LineString)
	if !ok {
		return nil, ErrIncorrectGeometry
	}
	return g, nil
}

// UnmarshalMultiLineString returns the multi-linestring represented by the wkt string.
// Will return ErrIncorrectGeometry if the wkt is not a multi-linestring.
func UnmarshalMultiLineString(s string) (p orb.MultiLineString, err error) {
	geom, err := Unmarshal(s)
	if err != nil {
		return nil, err
	}
	g, ok := geom.(orb.MultiLineString)
	if !ok {
		return nil, ErrIncorrectGeometry
	}
	return g, nil
}

// UnmarshalPolygon returns the polygon represented by the wkt string.
// Will return ErrIncorrectGeometry if the wkt is not a polygon.
func UnmarshalPolygon(s string) (p orb.Polygon, err error) {
	geom, err := Unmarshal(s)
	if err != nil {
		return nil, err
	}
	g, ok := geom.(orb.Polygon)
	if !ok {
		return nil, ErrIncorrectGeometry
	}
	return g, nil
}

// UnmarshalMultiPolygon returns the multi-polygon represented by the wkt string.
// Will return ErrIncorrectGeometry if the wkt is not a multi-polygon.
func UnmarshalMultiPolygon(s string) (p orb.MultiPolygon, err error) {
	geom, err := Unmarshal(s)
	if err != nil {
		return nil, err
	}
	g, ok := geom.(orb.MultiPolygon)
	if !ok {
		return nil, ErrIncorrectGeometry
	}
	return g, nil
}

// UnmarshalCollection returns the geometry collection represented by the wkt string.
// Will return ErrIncorrectGeometry if the wkt is not a geometry collection.
func UnmarshalCollection(s string) (p orb.Collection, err error) { geom, err := Unmarshal(s) if err != nil { return orb.Collection{}, err } g, ok := geom.(orb.Collection) if !ok { return nil, ErrIncorrectGeometry } return g, nil } // trimSpaceBrackets trim space and brackets func trimSpaceBrackets(s string) string { s = strings.Trim(s, " ") if s[0] == '(' { s = s[1:] } if s[len(s)-1] == ')' { s = s[:len(s)-1] } return strings.Trim(s, " ") } // parsePoint pase point by (x y) func parsePoint(s string) (p orb.Point, err error) { ps := strings.Split(s, " ") if len(ps) != 2 { return orb.Point{}, ErrNotWKT } x, err := strconv.ParseFloat(ps[0], 64) if err != nil { return orb.Point{}, err } y, err := strconv.ParseFloat(ps[1], 64) if err != nil { return orb.Point{}, err } return orb.Point{x, y}, nil } // splitGeometryCollection split GEOMETRYCOLLECTION to more geometry func splitGeometryCollection(s string) (r []string) { r = make([]string, 0) stack := make([]rune, 0) l := len(s) for i, v := range s { if !strings.Contains(string(stack), "(") { stack = append(stack, v) continue } if v >= 'A' && v < 'Z' { t := string(stack) r = append(r, t[:len(t)-1]) stack = make([]rune, 0) stack = append(stack, v) continue } if i == l-1 { r = append(r, string(stack)) continue } stack = append(stack, v) } return } // Unmarshal return a geometry by parsing the WKT string. 
func Unmarshal(s string) (geom orb.Geometry, err error) {
	// Normalize: WKT keywords are matched upper-case, outer spaces dropped.
	s = strings.ToUpper(strings.Trim(s, " "))
	// The cases are ordered so longer keywords (MULTIPOINT, MULTIPOLYGON)
	// are tested before the keywords they contain (POINT, POLYGON).
	switch {
	case strings.Contains(s, "GEOMETRYCOLLECTION"):
		if s == "GEOMETRYCOLLECTION EMPTY" {
			return orb.Collection{}, nil
		}
		s = strings.Replace(s, "GEOMETRYCOLLECTION", "", -1)
		c := orb.Collection{}
		ms := splitGeometryCollection(s)
		if len(ms) == 0 {
			// NOTE(review): err is still nil here, so an unparseable
			// collection body returns (nil, nil) — looks unintended;
			// confirm callers handle a nil geometry without error.
			return nil, err
		}
		// Recursively unmarshal each member geometry.
		for _, v := range ms {
			if len(v) == 0 {
				continue
			}
			g, err := Unmarshal(v)
			if err != nil {
				return nil, err
			}
			c = append(c, g)
		}
		geom = c
	case strings.Contains(s, "MULTIPOINT"):
		if s == "MULTIPOINT EMPTY" {
			return orb.MultiPoint{}, nil
		}
		s = strings.Replace(s, "MULTIPOINT", "", -1)
		s = trimSpaceBrackets(s)
		ps := strings.Split(s, ",")
		mp := orb.MultiPoint{}
		for _, p := range ps {
			tp, err := parsePoint(trimSpaceBrackets(p))
			if err != nil {
				return nil, err
			}
			mp = append(mp, tp)
		}
		geom = mp
	case strings.Contains(s, "POINT"):
		s = strings.Replace(s, "POINT", "", -1)
		tp, err := parsePoint(trimSpaceBrackets(s))
		if err != nil {
			return nil, err
		}
		geom = tp
	case strings.Contains(s, "MULTILINESTRING"):
		if s == "MULTILINESTRING EMPTY" {
			return orb.MultiLineString{}, nil
		}
		s = strings.Replace(s, "MULTILINESTRING", "", -1)
		ml := orb.MultiLineString{}
		// Member linestrings are separated by "),(" at this nesting level.
		for _, l := range strings.Split(trimSpaceBrackets(s), "),(") {
			tl := orb.LineString{}
			for _, p := range strings.Split(trimSpaceBrackets(l), ",") {
				tp, err := parsePoint(trimSpaceBrackets(p))
				if err != nil {
					return nil, err
				}
				tl = append(tl, tp)
			}
			ml = append(ml, tl)
		}
		geom = ml
	case strings.Contains(s, "LINESTRING"):
		if s == "LINESTRING EMPTY" {
			return orb.LineString{}, nil
		}
		s = strings.Replace(s, "LINESTRING", "", -1)
		s = trimSpaceBrackets(s)
		ps := strings.Split(s, ",")
		ls := orb.LineString{}
		for _, p := range ps {
			tp, err := parsePoint(trimSpaceBrackets(p))
			if err != nil {
				return nil, err
			}
			ls = append(ls, tp)
		}
		geom = ls
	case strings.Contains(s, "MULTIPOLYGON"):
		if s == "MULTIPOLYGON EMPTY" {
			return orb.MultiPolygon{}, nil
		}
		s = strings.Replace(s, "MULTIPOLYGON", "", -1)
		mpol := orb.MultiPolygon{}
		// Polygons are separated by ")),((", rings within one by "),(".
		for _, ps := range strings.Split(trimSpaceBrackets(s), ")),((") {
			pol := orb.Polygon{}
			for _, ls := range strings.Split(trimSpaceBrackets(ps), "),(") {
				ring := orb.Ring{}
				for _, p := range strings.Split(ls, ",") {
					tp, err := parsePoint(trimSpaceBrackets(p))
					if err != nil {
						return nil, err
					}
					ring = append(ring, tp)
				}
				pol = append(pol, ring)
			}
			mpol = append(mpol, pol)
		}
		geom = mpol
	case strings.Contains(s, "POLYGON"):
		if s == "POLYGON EMPTY" {
			return orb.Polygon{}, nil
		}
		s = strings.Replace(s, "POLYGON", "", -1)
		s = trimSpaceBrackets(s)
		// Rings (outer boundary plus holes) are separated by "),(".
		rs := strings.Split(s, "),(")
		pol := make(orb.Polygon, 0, len(rs))
		for _, r := range rs {
			ps := strings.Split(trimSpaceBrackets(r), ",")
			ring := orb.Ring{}
			for _, p := range ps {
				tp, err := parsePoint(trimSpaceBrackets(p))
				if err != nil {
					return nil, err
				}
				ring = append(ring, tp)
			}
			pol = append(pol, ring)
		}
		geom = pol
	default:
		return nil, ErrUnsupportedGeometry
	}
	return
}
vendor/github.com/paulmach/orb/encoding/wkt/unmarshal.go
0.68637
0.430686
unmarshal.go
starcoder
package faststats

import (
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
	"time"
)

// RollingCounter uses a slice of buckets to keep track of counts of an event over time with a sliding window
type RollingCounter struct {
	// The len(buckets) is constant and not mutable
	// The values of the individual buckets are atomic, so they do not take the mutex
	buckets []AtomicInt64
	// Neither of these need to be locked (atomic operations)
	rollingSum AtomicInt64 // sum of the buckets currently inside the window
	totalSum   AtomicInt64 // all-time event count, never decremented

	rollingBucket RollingBuckets
}

// NewRollingCounter initializes a rolling counter with a bucket width and # of buckets
func NewRollingCounter(bucketWidth time.Duration, numBuckets int, now time.Time) RollingCounter {
	ret := RollingCounter{
		buckets: make([]AtomicInt64, numBuckets),
		rollingBucket: RollingBuckets{
			NumBuckets:  numBuckets,
			BucketWidth: bucketWidth,
			StartTime:   now,
		},
	}
	return ret
}

// Compile-time interface checks.
var _ json.Marshaler = &RollingCounter{}
var _ json.Unmarshaler = &RollingCounter{}
var _ fmt.Stringer = &RollingCounter{}

// jsonCounter is the wire representation used by MarshalJSON/UnmarshalJSON.
type jsonCounter struct {
	Buckets       []AtomicInt64
	RollingSum    *AtomicInt64
	TotalSum      *AtomicInt64
	RollingBucket *RollingBuckets
}

// MarshalJSON JSON encodes a counter.  It is thread safe.
func (r *RollingCounter) MarshalJSON() ([]byte, error) {
	return json.Marshal(jsonCounter{
		Buckets:       r.buckets,
		RollingSum:    &r.rollingSum,
		TotalSum:      &r.totalSum,
		RollingBucket: &r.rollingBucket,
	})
}

// UnmarshalJSON stores the previous JSON encoding.  Note, this is *NOT* thread safe.
func (r *RollingCounter) UnmarshalJSON(b []byte) error {
	var into jsonCounter
	if err := json.Unmarshal(b, &into); err != nil {
		return err
	}
	r.buckets = into.Buckets
	r.rollingSum = *into.RollingSum
	r.totalSum = *into.TotalSum
	r.rollingBucket = *into.RollingBucket
	return nil
}

// String for debugging
func (r *RollingCounter) String() string {
	return r.StringAt(time.Now())
}

// StringAt converts the counter to a string at a given time.
func (r *RollingCounter) StringAt(now time.Time) string {
	b := r.GetBuckets(now)
	parts := make([]string, 0, len(r.buckets))
	for _, v := range b {
		parts = append(parts, strconv.FormatInt(v, 10))
	}
	return fmt.Sprintf("rolling_sum=%d total_sum=%d parts=(%s)", r.RollingSumAt(now), r.TotalSum(), strings.Join(parts, ","))
}

// Inc adds a single event to the current bucket
func (r *RollingCounter) Inc(now time.Time) {
	r.totalSum.Add(1)
	// With zero buckets only the all-time total is tracked.
	if len(r.buckets) == 0 {
		return
	}
	// Advance rotates expired buckets out (clearing them via clearBucket)
	// and returns the index of the bucket covering `now`, or < 0 if `now`
	// falls outside the window.
	idx := r.rollingBucket.Advance(now, r.clearBucket)
	if idx < 0 {
		return
	}
	r.buckets[idx].Add(1)
	r.rollingSum.Add(1)
}

// RollingSumAt returns the total number of events in the rolling time window
func (r *RollingCounter) RollingSumAt(now time.Time) int64 {
	r.rollingBucket.Advance(now, r.clearBucket)
	return r.rollingSum.Get()
}

// RollingSum returns the total number of events in the rolling time window (With time time.Now())
func (r *RollingCounter) RollingSum() int64 {
	r.rollingBucket.Advance(time.Now(), r.clearBucket)
	return r.rollingSum.Get()
}

// TotalSum returns the total number of events of all time
func (r *RollingCounter) TotalSum() int64 {
	return r.totalSum.Get()
}

// GetBuckets returns a copy of the buckets in order backwards in time
func (r *RollingCounter) GetBuckets(now time.Time) []int64 {
	r.rollingBucket.Advance(now, r.clearBucket)
	// Start at the most recent bucket and walk backwards, wrapping around
	// the circular buffer.
	startIdx := int(r.rollingBucket.LastAbsIndex.Get() % int64(r.rollingBucket.NumBuckets))
	ret := make([]int64, r.rollingBucket.NumBuckets)
	for i := 0; i < r.rollingBucket.NumBuckets; i++ {
		idx := startIdx - i
		if idx < 0 {
			idx += r.rollingBucket.NumBuckets
		}
		ret[i] = r.buckets[idx].Get()
	}
	return ret
}

// clearBucket zeroes one bucket and removes its old count from the rolling
// sum (atomically, via Swap so concurrent Incs are not lost).
func (r *RollingCounter) clearBucket(idx int) {
	toDec := r.buckets[idx].Swap(0)
	r.rollingSum.Add(-toDec)
}

// Reset the counter to all zero values.
func (r *RollingCounter) Reset(now time.Time) {
	r.rollingBucket.Advance(now, r.clearBucket)
	for i := 0; i < r.rollingBucket.NumBuckets; i++ {
		r.clearBucket(i)
	}
}
v3/faststats/rolling_counter.go
0.68637
0.424591
rolling_counter.go
starcoder
package webtest

import (
	"bytes"
	"context"
	"io/ioutil"
	"net/http"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// HandlerForTest implement the function signature used to check the req/resp
type HandlerForTest = func(t *testing.T, resp *http.Response)

const (
	_notEqualHeader = `assertion failed for the %q header: %q != %q`
	_ttlTest        = 5 // per-request timeout, in seconds
)

// Body fetch and assert that the body of the http.Response is the same than expected
func Body(t *testing.T, expected string, resp *http.Response) {
	t.Helper()
	require.Equal(t, expected, fetchBody(t, resp))
}

// BodyContains asserts that the response body contains the expected substring.
func BodyContains(t *testing.T, expected string, resp *http.Response) {
	t.Helper()
	require.Contains(t, fetchBody(t, resp), expected)
}

// BodyDiffere fetch and assert that the body of the http.Response differ than expected
func BodyDiffere(t *testing.T, expected string, resp *http.Response) {
	t.Helper()
	require.NotEqual(t, expected, fetchBody(t, resp))
}

// StatusCode assert the status code of the response
func StatusCode(t *testing.T, expected int, resp *http.Response) {
	t.Helper()
	require.Equal(t, expected, resp.StatusCode)
}

// Header asserts that the given key is present in the response headers with
// the expected first value; it reports whether the assertion held.
//
// Fixed: the original evaluated out[0] inside the require.Equalf arguments
// even when the header was absent or empty, panicking with an
// index-out-of-range instead of failing the assertion.
func Header(t *testing.T, key, val string, resp *http.Response) bool {
	t.Helper()
	out, ok := resp.Header[key]
	got := ""
	if len(out) > 0 {
		got = out[0]
	}
	if !ok || got != val {
		require.Equalf(t, val, out, _notEqualHeader, key, got, val)
		return false
	}
	return true
}

// Headers asserts every provided {key, value} pair against the response
// headers, stopping at the first mismatch.
func Headers(t *testing.T, resp *http.Response, kv ...[2]string) bool {
	t.Helper()
	for i := range kv {
		if !Header(t, kv[i][0], kv[i][1], resp) {
			return false
		}
	}
	return true
}

// DeleteAndTestAPI sends a DELETE to url then runs the test handler.
func DeleteAndTestAPI(t *testing.T, url string, handler HandlerForTest) {
	t.Helper()
	resp := deleteAPI(t, url)
	defer resp.Body.Close()
	handler(t, resp)
}

// RequestAndTestAPI request an API then run the test handler
func RequestAndTestAPI(t *testing.T, url string, handler HandlerForTest) {
	t.Helper()
	resp := requestAPI(t, url)
	defer resp.Body.Close()
	handler(t, resp)
}

// PushAndTestAPI post to an API then run the test handler
// The sub method try to send an `application/json` encoded content
func PushAndTestAPI(t *testing.T, path string, content []byte, handler HandlerForTest, headers ...[2]string) {
	t.Helper()
	resp := pushAPI(t, path, content, headers...)
	defer resp.Body.Close()
	handler(t, resp)
}

// FetchBody returns the whole response body as a string.
func FetchBody(t *testing.T, resp *http.Response) string {
	return fetchBody(t, resp)
}

// fetchBody drains and closes the response body, failing the test on a
// read error.
func fetchBody(t *testing.T, resp *http.Response) string {
	tmp, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Fatalf("error fetching the body response : %s", err.Error())
	}
	defer resp.Body.Close()
	return string(tmp)
}

// prepReq performs a body-less HTTP request with the test timeout applied.
func prepReq(t *testing.T, url, method string) *http.Response {
	var (
		client   = &http.Client{}
		ctx, cl  = context.WithTimeout(context.Background(), time.Second*_ttlTest)
		req, err = http.NewRequestWithContext(ctx, method, url, nil)
	)
	defer cl()
	if err != nil {
		t.Fatalf("error requesting the api : %s", err.Error())
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("error requesting the api : %s", err.Error())
	}
	return resp
}

func requestAPI(t *testing.T, url string) *http.Response {
	return prepReq(t, url, http.MethodGet)
}

func deleteAPI(t *testing.T, url string) *http.Response {
	return prepReq(t, url, http.MethodDelete)
}

// pushAPI POSTs content to url. Content-Type defaults to application/json
// unless explicit headers are provided.
func pushAPI(t *testing.T, url string, content []byte, headers ...[2]string) *http.Response {
	var (
		client   = &http.Client{}
		ctx, cl  = context.WithTimeout(context.Background(), time.Second*_ttlTest)
		req, err = http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewBuffer(content))
	)
	defer cl()
	if err != nil {
		t.Fatalf("can't post the new request : %s", err.Error())
	}
	if len(headers) == 0 {
		req.Header.Set("Content-Type", "application/json")
	} else {
		for i := range headers {
			req.Header.Set(headers[i][0], headers[i][1])
		}
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatalf("error requesting the api : %s", err.Error())
	}
	return resp
}
webtest/web.go
0.622918
0.459743
web.go
starcoder
package subnetmath

import (
	"bytes"
	"math"
	"math/big"
	"net"
	"sync"
)

// Buffer bundles reusable big.Int scratch values (and a scratch address
// array) behind a mutex so repeated comparisons and calculations avoid
// per-call allocations. The suffixes Alpha..Echo only distinguish which
// scratch value a helper writes into.
type Buffer struct {
	mtx           *sync.Mutex
	bigIntAlpha   *big.Int
	bigIntBravo   *big.Int
	bigIntCharlie *big.Int
	bigIntDelta   *big.Int
	bigIntEcho    *big.Int
	ipSubZero     [16]byte
}

// NewBuffer returns a Buffer with all scratch values allocated.
func NewBuffer() *Buffer {
	return &Buffer{
		mtx:           &sync.Mutex{},
		bigIntAlpha:   new(big.Int),
		bigIntBravo:   new(big.Int),
		bigIntCharlie: new(big.Int),
		bigIntDelta:   new(big.Int),
		bigIntEcho:    new(big.Int),
		ipSubZero:     [16]byte{},
	}
}

// memoizedBigExp2 caches 2^i for i in [0, 128), used for address counts of
// IPv6-sized networks.
// NOTE(review): a /0 IPv6 network would need index 128 (bits-ones = 128),
// which is out of range here — confirm such masks cannot reach
// addressCountCharlieDelta.
var memoizedBigExp2 [128]*big.Int

func init() {
	// bigTwo is declared elsewhere in this package.
	for i := range memoizedBigExp2 {
		memoizedBigExp2[i] = new(big.Int).Exp(bigTwo, big.NewInt(int64(i)), nil)
	}
}

// NetworkComesBefore returns a bool with regards to numerical network order.
// Note that IPv4 networks come before IPv6 networks.
func (b *Buffer) NetworkComesBefore(first, second *net.IPNet) bool {
	if first != nil && second != nil {
		// Equal base addresses tie-break on prefix length: the shorter
		// prefix (larger network) comes first.
		if first.IP.Equal(second.IP) {
			firstMask, _ := first.Mask.Size()
			secondMask, _ := second.Mask.Size()
			if firstMask < secondMask {
				return true
			}
			return false
		}
		return b.AddressComesBefore(first.IP, second.IP)
	}
	return false
}

// AddressComesBefore returns a bool with regards to numerical address order.
// Note that IPv4 addresses come before IPv6 addresses.
func (b *Buffer) AddressComesBefore(firstIP, secondIP net.IP) bool {
	// Mixed families order IPv4 first without touching the big.Ints.
	if firstIP.To4() == nil && secondIP.To4() != nil {
		return false
	} else if firstIP.To4() != nil && secondIP.To4() == nil {
		return true
	}
	// The mutex guards the shared Alpha/Bravo scratch values.
	b.mtx.Lock()
	defer b.mtx.Unlock()
	if b.addrToIntAlpha(firstIP).Cmp(b.addrToIntBravo(secondIP)) < 0 {
		return true
	}
	return false
}

// NetworkContainsSubnet validates that the network is a valid supernet
func (b *Buffer) NetworkContainsSubnet(network *net.IPNet, subnet *net.IPNet) bool {
	if network != nil && subnet != nil {
		b.mtx.Lock()
		defer b.mtx.Unlock()
		// Containment check on integer ranges: supernet start <= subnet
		// start and supernet end >= subnet end.
		supernetInt := b.addrToIntAlpha(network.IP)
		subnetInt := b.addrToIntBravo(subnet.IP)
		if supernetInt.Cmp(subnetInt) <= 0 {
			supernetInt.Add(supernetInt, b.addressCountCharlieDelta(network))
			subnetInt.Add(subnetInt, b.addressCountCharlieDelta(subnet))
			if supernetInt.Cmp(subnetInt) >= 0 {
				return true
			}
		}
	}
	return false
}

// addrToIntAlpha loads address into the Alpha scratch big.Int.
// Caller must hold b.mtx.
func (b *Buffer) addrToIntAlpha(address net.IP) *big.Int {
	v4addr := address.To4()
	if v4addr != nil {
		b.bigIntAlpha.SetBytes(v4addr)
	} else {
		b.bigIntAlpha.SetBytes(address.To16())
	}
	return b.bigIntAlpha
}

// addrToIntBravo loads address into the Bravo scratch big.Int.
// Caller must hold b.mtx.
func (b *Buffer) addrToIntBravo(address net.IP) *big.Int {
	v4addr := address.To4()
	if v4addr != nil {
		b.bigIntBravo.SetBytes(v4addr)
	} else {
		b.bigIntBravo.SetBytes(address.To16())
	}
	return b.bigIntBravo
}

// addressCountCharlieDelta returns the number of addresses in network:
// IPv4-sized masks use the Charlie scratch value (math.Exp2 is exact for
// exponents up to 32); larger masks return a shared memoized 2^i, which
// callers must treat as read-only.
func (b *Buffer) addressCountCharlieDelta(network *net.IPNet) *big.Int {
	if network != nil {
		ones, bits := network.Mask.Size()
		if bits <= 32 {
			return b.bigIntCharlie.SetInt64(int64(math.Exp2(float64(bits - ones))))
		}
		return memoizedBigExp2[bits-ones]
	}
	return nil
}

// nextNetworkEcho returns a copy of network whose IP is advanced just past
// the end of network (i.e. the first address of the next adjacent network
// of the same size).
// NOTE(review): unlike the other helpers, this mutates the Echo scratch
// value without taking b.mtx — confirm it is never called concurrently.
func (b *Buffer) nextNetworkEcho(network *net.IPNet) *net.IPNet {
	if network != nil {
		nextNetwork := DuplicateNetwork(network)
		v4addr := network.IP.To4()
		if v4addr != nil {
			b.bigIntEcho.SetBytes(v4addr)
		} else {
			b.bigIntEcho.SetBytes(network.IP.To16())
		}
		b.bigIntEcho.Add(b.bigIntEcho, b.addressCountCharlieDelta(network))
		nextNetwork.IP = IntToAddr(b.bigIntEcho)
		return nextNetwork
	}
	return nil
}

// FindInbetweenSubnets returns a slice
of subnets given a range of IP addresses. // Note that the delimiter 'stop' is inclusive. In other words, it will be included in the result. func (b *Buffer) FindInbetweenSubnets(start, stop net.IP) []*net.IPNet { if sameAddrType(start, stop) && b.AddressComesBefore(start, stop) { var subnets []*net.IPNet maskBits := maskBitLength(start) current := DuplicateAddr(start) stopInt := b.addrToIntAlpha(stop) for { currentSubnet := &net.IPNet{ IP: current, Mask: make(net.IPMask, maskBits/8), } for ones := 1; ones <= maskBits; ones++ { currentSubnet.Mask = recreateMask(currentSubnet.Mask, ones, maskBits) increment := b.addressCountCharlieDelta(currentSubnet) addressInt := b.addrToIntBravo(currentSubnet.IP) addressInt.Add(addressInt, increment) addressInt.Sub(addressInt, bigOne) if addressInt.Cmp(stopInt) > 0 { continue } if b.SubnetZeroAddr(currentSubnet.IP, currentSubnet).Equal(currentSubnet.IP) { break } } subnets = append(subnets, currentSubnet) current = b.nextNetworkEcho(currentSubnet).IP if b.AddressComesBefore(current, start) { break } if b.AddressComesBefore(stop, current) && !current.Equal(stop) { break } } return subnets } return nil } func allFF(b []byte) bool { for _, c := range b { if c != 0xff { return false } } return true } var v4InV6Prefix = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff} func applyMaskDirectly(ip net.IP, mask net.IPMask) net.IP { if len(mask) == net.IPv6len && len(ip) == net.IPv4len && allFF(mask[:12]) { mask = mask[12:] } if len(mask) == net.IPv4len && len(ip) == net.IPv6len && bytes.Equal(ip[:12], v4InV6Prefix) { ip = ip[12:] } if len(ip) == len(mask) { for i := 0; i < len(ip); i++ { ip[i] = ip[i] & mask[i] } return ip } return nil } // SubnetZeroAddr returns the subnet zero address func (b *Buffer) SubnetZeroAddr(address net.IP, network *net.IPNet) net.IP { if network != nil { b.mtx.Lock() defer b.mtx.Unlock() for i := 0; i < len(address); i++ { b.ipSubZero[i] = address[i] } return applyMaskDirectly(b.ipSubZero[:len(address)], 
network.Mask) } return nil } func recreateMask(mask net.IPMask, ones, bits int) net.IPMask { if bits != 8*net.IPv4len && bits != 8*net.IPv6len { return nil } if ones < 0 || ones > bits { return nil } l := bits / 8 n := uint(ones) for i := 0; i < l; i++ { if n >= 8 { mask[i] = 0xff n -= 8 continue } mask[i] = ^byte(0xff >> n) n = 0 } return mask }
buffered.go
0.620047
0.415788
buffered.go
starcoder
package bits import ( "encoding/binary" "fmt" "regexp" "strings" "sync" tmmath "github.com/supragya/TendermintConnector/chains/cosmos/libs/math" tmrand "github.com/supragya/TendermintConnector/chains/cosmos/libs/rand" tmprotobits "github.com/supragya/TendermintConnector/chains/cosmos/proto/tendermint/libs/bits" ) // BitArray is a thread-safe implementation of a bit array. type BitArray struct { mtx sync.Mutex Bits int `json:"bits"` // NOTE: persisted via reflect, must be exported Elems []uint64 `json:"elems"` // NOTE: persisted via reflect, must be exported } // NewBitArray returns a new bit array. // It returns nil if the number of bits is zero. func NewBitArray(bits int) *BitArray { if bits <= 0 { return nil } return &BitArray{ Bits: bits, Elems: make([]uint64, (bits+63)/64), } } // Size returns the number of bits in the bitarray func (bA *BitArray) Size() int { if bA == nil { return 0 } return bA.Bits } // GetIndex returns the bit at index i within the bit array. // The behavior is undefined if i >= bA.Bits func (bA *BitArray) GetIndex(i int) bool { if bA == nil { return false } bA.mtx.Lock() defer bA.mtx.Unlock() return bA.getIndex(i) } func (bA *BitArray) getIndex(i int) bool { if i >= bA.Bits { return false } return bA.Elems[i/64]&(uint64(1)<<uint(i%64)) > 0 } // SetIndex sets the bit at index i within the bit array. // The behavior is undefined if i >= bA.Bits func (bA *BitArray) SetIndex(i int, v bool) bool { if bA == nil { return false } bA.mtx.Lock() defer bA.mtx.Unlock() return bA.setIndex(i, v) } func (bA *BitArray) setIndex(i int, v bool) bool { if i >= bA.Bits { return false } if v { bA.Elems[i/64] |= (uint64(1) << uint(i%64)) } else { bA.Elems[i/64] &= ^(uint64(1) << uint(i%64)) } return true } // Copy returns a copy of the provided bit array. 
func (bA *BitArray) Copy() *BitArray { if bA == nil { return nil } bA.mtx.Lock() defer bA.mtx.Unlock() return bA.copy() } func (bA *BitArray) copy() *BitArray { c := make([]uint64, len(bA.Elems)) copy(c, bA.Elems) return &BitArray{ Bits: bA.Bits, Elems: c, } } func (bA *BitArray) copyBits(bits int) *BitArray { c := make([]uint64, (bits+63)/64) copy(c, bA.Elems) return &BitArray{ Bits: bits, Elems: c, } } // Or returns a bit array resulting from a bitwise OR of the two bit arrays. // If the two bit-arrys have different lengths, Or right-pads the smaller of the two bit-arrays with zeroes. // Thus the size of the return value is the maximum of the two provided bit arrays. func (bA *BitArray) Or(o *BitArray) *BitArray { if bA == nil && o == nil { return nil } if bA == nil && o != nil { return o.Copy() } if o == nil { return bA.Copy() } bA.mtx.Lock() o.mtx.Lock() c := bA.copyBits(tmmath.MaxInt(bA.Bits, o.Bits)) smaller := tmmath.MinInt(len(bA.Elems), len(o.Elems)) for i := 0; i < smaller; i++ { c.Elems[i] |= o.Elems[i] } bA.mtx.Unlock() o.mtx.Unlock() return c } // And returns a bit array resulting from a bitwise AND of the two bit arrays. // If the two bit-arrys have different lengths, this truncates the larger of the two bit-arrays from the right. // Thus the size of the return value is the minimum of the two provided bit arrays. func (bA *BitArray) And(o *BitArray) *BitArray { if bA == nil || o == nil { return nil } bA.mtx.Lock() o.mtx.Lock() defer func() { bA.mtx.Unlock() o.mtx.Unlock() }() return bA.and(o) } func (bA *BitArray) and(o *BitArray) *BitArray { c := bA.copyBits(tmmath.MinInt(bA.Bits, o.Bits)) for i := 0; i < len(c.Elems); i++ { c.Elems[i] &= o.Elems[i] } return c } // Not returns a bit array resulting from a bitwise Not of the provided bit array. 
func (bA *BitArray) Not() *BitArray { if bA == nil { return nil // Degenerate } bA.mtx.Lock() defer bA.mtx.Unlock() return bA.not() } func (bA *BitArray) not() *BitArray { c := bA.copy() for i := 0; i < len(c.Elems); i++ { c.Elems[i] = ^c.Elems[i] } return c } // Sub subtracts the two bit-arrays bitwise, without carrying the bits. // Note that carryless subtraction of a - b is (a and not b). // The output is the same as bA, regardless of o's size. // If bA is longer than o, o is right padded with zeroes func (bA *BitArray) Sub(o *BitArray) *BitArray { if bA == nil || o == nil { // TODO: Decide if we should do 1's complement here? return nil } bA.mtx.Lock() o.mtx.Lock() // output is the same size as bA c := bA.copyBits(bA.Bits) // Only iterate to the minimum size between the two. // If o is longer, those bits are ignored. // If bA is longer, then skipping those iterations is equivalent // to right padding with 0's smaller := tmmath.MinInt(len(bA.Elems), len(o.Elems)) for i := 0; i < smaller; i++ { // &^ is and not in golang c.Elems[i] &^= o.Elems[i] } bA.mtx.Unlock() o.mtx.Unlock() return c } // IsEmpty returns true iff all bits in the bit array are 0 func (bA *BitArray) IsEmpty() bool { if bA == nil { return true // should this be opposite? } bA.mtx.Lock() defer bA.mtx.Unlock() for _, e := range bA.Elems { if e > 0 { return false } } return true } // IsFull returns true iff all bits in the bit array are 1. func (bA *BitArray) IsFull() bool { if bA == nil { return true } bA.mtx.Lock() defer bA.mtx.Unlock() // Check all elements except the last for _, elem := range bA.Elems[:len(bA.Elems)-1] { if (^elem) != 0 { return false } } // Check that the last element has (lastElemBits) 1's lastElemBits := (bA.Bits+63)%64 + 1 lastElem := bA.Elems[len(bA.Elems)-1] return (lastElem+1)&((uint64(1)<<uint(lastElemBits))-1) == 0 } // PickRandom returns a random index for a set bit in the bit array. // If there is no such value, it returns 0, false. 
// It uses the global randomness in `random.go` to get this index. func (bA *BitArray) PickRandom() (int, bool) { if bA == nil { return 0, false } bA.mtx.Lock() trueIndices := bA.getTrueIndices() bA.mtx.Unlock() if len(trueIndices) == 0 { // no bits set to true return 0, false } return trueIndices[tmrand.Intn(len(trueIndices))], true } func (bA *BitArray) getTrueIndices() []int { trueIndices := make([]int, 0, bA.Bits) curBit := 0 numElems := len(bA.Elems) // set all true indices for i := 0; i < numElems-1; i++ { elem := bA.Elems[i] if elem == 0 { curBit += 64 continue } for j := 0; j < 64; j++ { if (elem & (uint64(1) << uint64(j))) > 0 { trueIndices = append(trueIndices, curBit) } curBit++ } } // handle last element lastElem := bA.Elems[numElems-1] numFinalBits := bA.Bits - curBit for i := 0; i < numFinalBits; i++ { if (lastElem & (uint64(1) << uint64(i))) > 0 { trueIndices = append(trueIndices, curBit) } curBit++ } return trueIndices } // String returns a string representation of BitArray: BA{<bit-string>}, // where <bit-string> is a sequence of 'x' (1) and '_' (0). // The <bit-string> includes spaces and newlines to help people. // For a simple sequence of 'x' and '_' characters with no spaces or newlines, // see the MarshalJSON() method. // Example: "BA{_x_}" or "nil-BitArray" for nil. func (bA *BitArray) String() string { return bA.StringIndented("") } // StringIndented returns the same thing as String(), but applies the indent // at every 10th bit, and twice at every 50th bit. 
func (bA *BitArray) StringIndented(indent string) string { if bA == nil { return "nil-BitArray" } bA.mtx.Lock() defer bA.mtx.Unlock() return bA.stringIndented(indent) } func (bA *BitArray) stringIndented(indent string) string { lines := []string{} bits := "" for i := 0; i < bA.Bits; i++ { if bA.getIndex(i) { bits += "x" } else { bits += "_" } if i%100 == 99 { lines = append(lines, bits) bits = "" } if i%10 == 9 { bits += indent } if i%50 == 49 { bits += indent } } if len(bits) > 0 { lines = append(lines, bits) } return fmt.Sprintf("BA{%v:%v}", bA.Bits, strings.Join(lines, indent)) } // Bytes returns the byte representation of the bits within the bitarray. func (bA *BitArray) Bytes() []byte { bA.mtx.Lock() defer bA.mtx.Unlock() numBytes := (bA.Bits + 7) / 8 bytes := make([]byte, numBytes) for i := 0; i < len(bA.Elems); i++ { elemBytes := [8]byte{} binary.LittleEndian.PutUint64(elemBytes[:], bA.Elems[i]) copy(bytes[i*8:], elemBytes[:]) } return bytes } // Update sets the bA's bits to be that of the other bit array. // The copying begins from the begin of both bit arrays. func (bA *BitArray) Update(o *BitArray) { if bA == nil || o == nil { return } bA.mtx.Lock() o.mtx.Lock() copy(bA.Elems, o.Elems) o.mtx.Unlock() bA.mtx.Unlock() } // MarshalJSON implements json.Marshaler interface by marshaling bit array // using a custom format: a string of '-' or 'x' where 'x' denotes the 1 bit. func (bA *BitArray) MarshalJSON() ([]byte, error) { if bA == nil { return []byte("null"), nil } bA.mtx.Lock() defer bA.mtx.Unlock() bits := `"` for i := 0; i < bA.Bits; i++ { if bA.getIndex(i) { bits += `x` } else { bits += `_` } } bits += `"` return []byte(bits), nil } var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`) // UnmarshalJSON implements json.Unmarshaler interface by unmarshaling a custom // JSON description. func (bA *BitArray) UnmarshalJSON(bz []byte) error { b := string(bz) if b == "null" { // This is required e.g. 
for encoding/json when decoding // into a pointer with pre-allocated BitArray. bA.Bits = 0 bA.Elems = nil return nil } // Validate 'b'. match := bitArrayJSONRegexp.FindStringSubmatch(b) if match == nil { return fmt.Errorf("bitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b) } bits := match[1] // Construct new BitArray and copy over. numBits := len(bits) bA2 := NewBitArray(numBits) for i := 0; i < numBits; i++ { if bits[i] == 'x' { bA2.SetIndex(i, true) } } *bA = *bA2 //nolint:govet return nil } // ToProto converts BitArray to protobuf func (bA *BitArray) ToProto() *tmprotobits.BitArray { if bA == nil || len(bA.Elems) == 0 { return nil } return &tmprotobits.BitArray{ Bits: int64(bA.Bits), Elems: bA.Elems, } } // FromProto sets a protobuf BitArray to the given pointer. func (bA *BitArray) FromProto(protoBitArray *tmprotobits.BitArray) { if protoBitArray == nil { bA = nil return } bA.Bits = int(protoBitArray.Bits) if len(protoBitArray.Elems) > 0 { bA.Elems = protoBitArray.Elems } }
chains/cosmos/libs/bits/bit_array.go
0.649245
0.407304
bit_array.go
starcoder
package storetest import ( "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/mattermost/mattermost-server/v5/model" "github.com/mattermost/mattermost-server/v5/store" ) func TestPreferenceStore(t *testing.T, ss store.Store) { t.Run("PreferenceSave", func(t *testing.T) { testPreferenceSave(t, ss) }) t.Run("PreferenceGet", func(t *testing.T) { testPreferenceGet(t, ss) }) t.Run("PreferenceGetCategory", func(t *testing.T) { testPreferenceGetCategory(t, ss) }) t.Run("PreferenceGetAll", func(t *testing.T) { testPreferenceGetAll(t, ss) }) t.Run("PreferenceDeleteByUser", func(t *testing.T) { testPreferenceDeleteByUser(t, ss) }) t.Run("PreferenceDelete", func(t *testing.T) { testPreferenceDelete(t, ss) }) t.Run("PreferenceDeleteCategory", func(t *testing.T) { testPreferenceDeleteCategory(t, ss) }) t.Run("PreferenceDeleteCategoryAndName", func(t *testing.T) { testPreferenceDeleteCategoryAndName(t, ss) }) t.Run("PreferenceCleanupFlagsBatch", func(t *testing.T) { testPreferenceCleanupFlagsBatch(t, ss) }) } func testPreferenceSave(t *testing.T, ss store.Store) { id := model.NewId() preferences := model.Preferences{ { UserId: id, Category: model.PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW, Name: model.NewId(), Value: "value1a", }, { UserId: id, Category: model.PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW, Name: model.NewId(), Value: "value1b", }, } err := ss.Preference().Save(&preferences) require.Nil(t, err, "saving preference returned error") for _, preference := range preferences { data, _ := ss.Preference().Get(preference.UserId, preference.Category, preference.Name) require.Equal(t, data.ToJson(), preference.ToJson(), "got incorrect preference after first Save") } preferences[0].Value = "value2a" preferences[1].Value = "value2b" err = ss.Preference().Save(&preferences) require.Nil(t, err, "saving preference returned error") for _, preference := range preferences { data, _ := ss.Preference().Get(preference.UserId, preference.Category, 
preference.Name) require.Equal(t, data.ToJson(), preference.ToJson(), "got incorrect preference after second Save") } } func testPreferenceGet(t *testing.T, ss store.Store) { userId := model.NewId() category := model.PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW name := model.NewId() preferences := model.Preferences{ { UserId: userId, Category: category, Name: name, }, { UserId: userId, Category: category, Name: model.NewId(), }, { UserId: userId, Category: model.NewId(), Name: name, }, { UserId: model.NewId(), Category: category, Name: name, }, } err := ss.Preference().Save(&preferences) require.Nil(t, err) data, err := ss.Preference().Get(userId, category, name) require.Nil(t, err) require.Equal(t, preferences[0].ToJson(), data.ToJson(), "got incorrect preference") // make sure getting a missing preference fails _, err = ss.Preference().Get(model.NewId(), model.NewId(), model.NewId()) require.NotNil(t, err, "no error on getting a missing preference") } func testPreferenceGetCategory(t *testing.T, ss store.Store) { userId := model.NewId() category := model.PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW name := model.NewId() preferences := model.Preferences{ { UserId: userId, Category: category, Name: name, }, // same user/category, different name { UserId: userId, Category: category, Name: model.NewId(), }, // same user/name, different category { UserId: userId, Category: model.NewId(), Name: name, }, // same name/category, different user { UserId: model.NewId(), Category: category, Name: name, }, } err := ss.Preference().Save(&preferences) require.Nil(t, err) preferencesByCategory, err := ss.Preference().GetCategory(userId, category) require.Nil(t, err) require.Equal(t, 2, len(preferencesByCategory), "got the wrong number of preferences") require.True( t, ((preferencesByCategory[0] == preferences[0] && preferencesByCategory[1] == preferences[1]) || (preferencesByCategory[0] == preferences[1] && preferencesByCategory[1] == preferences[0])), "got incorrect preferences", ) // 
make sure getting a missing preference category doesn't fail preferencesByCategory, err = ss.Preference().GetCategory(model.NewId(), model.NewId()) require.Nil(t, err) require.Equal(t, 0, len(preferencesByCategory), "shouldn't have got any preferences") } func testPreferenceGetAll(t *testing.T, ss store.Store) { userId := model.NewId() category := model.PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW name := model.NewId() preferences := model.Preferences{ { UserId: userId, Category: category, Name: name, }, // same user/category, different name { UserId: userId, Category: category, Name: model.NewId(), }, // same user/name, different category { UserId: userId, Category: model.NewId(), Name: name, }, // same name/category, different user { UserId: model.NewId(), Category: category, Name: name, }, } err := ss.Preference().Save(&preferences) require.Nil(t, err) result, err := ss.Preference().GetAll(userId) require.Nil(t, err) require.Equal(t, 3, len(result), "got the wrong number of preferences") for i := 0; i < 3; i++ { assert.Falsef(t, result[0] != preferences[i] && result[1] != preferences[i] && result[2] != preferences[i], "got incorrect preferences") } } func testPreferenceDeleteByUser(t *testing.T, ss store.Store) { userId := model.NewId() category := model.PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW name := model.NewId() preferences := model.Preferences{ { UserId: userId, Category: category, Name: name, }, // same user/category, different name { UserId: userId, Category: category, Name: model.NewId(), }, // same user/name, different category { UserId: userId, Category: model.NewId(), Name: name, }, // same name/category, different user { UserId: model.NewId(), Category: category, Name: name, }, } err := ss.Preference().Save(&preferences) require.Nil(t, err) err = ss.Preference().PermanentDeleteByUser(userId) require.Nil(t, err) } func testPreferenceDelete(t *testing.T, ss store.Store) { preference := model.Preference{ UserId: model.NewId(), Category: 
model.PREFERENCE_CATEGORY_DIRECT_CHANNEL_SHOW, Name: model.NewId(), Value: "value1a", } err := ss.Preference().Save(&model.Preferences{preference}) require.Nil(t, err) preferences, err := ss.Preference().GetAll(preference.UserId) require.Nil(t, err) assert.Len(t, preferences, 1, "should've returned 1 preference") err = ss.Preference().Delete(preference.UserId, preference.Category, preference.Name) require.Nil(t, err) preferences, err = ss.Preference().GetAll(preference.UserId) require.Nil(t, err) assert.Len(t, preferences, 0, "should've returned no preferences") } func testPreferenceDeleteCategory(t *testing.T, ss store.Store) { category := model.NewId() userId := model.NewId() preference1 := model.Preference{ UserId: userId, Category: category, Name: model.NewId(), Value: "value1a", } preference2 := model.Preference{ UserId: userId, Category: category, Name: model.NewId(), Value: "value1a", } err := ss.Preference().Save(&model.Preferences{preference1, preference2}) require.Nil(t, err) preferences, err := ss.Preference().GetAll(userId) require.Nil(t, err) assert.Len(t, preferences, 2, "should've returned 2 preferences") err = ss.Preference().DeleteCategory(userId, category) require.Nil(t, err) preferences, err = ss.Preference().GetAll(userId) require.Nil(t, err) assert.Len(t, preferences, 0, "should've returned no preferences") } func testPreferenceDeleteCategoryAndName(t *testing.T, ss store.Store) { category := model.NewId() name := model.NewId() userId := model.NewId() userId2 := model.NewId() preference1 := model.Preference{ UserId: userId, Category: category, Name: name, Value: "value1a", } preference2 := model.Preference{ UserId: userId2, Category: category, Name: name, Value: "value1a", } err := ss.Preference().Save(&model.Preferences{preference1, preference2}) require.Nil(t, err) preferences, err := ss.Preference().GetAll(userId) require.Nil(t, err) assert.Len(t, preferences, 1, "should've returned 1 preference") preferences, err = 
ss.Preference().GetAll(userId2) require.Nil(t, err) assert.Len(t, preferences, 1, "should've returned 1 preference") err = ss.Preference().DeleteCategoryAndName(category, name) require.Nil(t, err) preferences, err = ss.Preference().GetAll(userId) require.Nil(t, err) assert.Len(t, preferences, 0, "should've returned no preference") preferences, err = ss.Preference().GetAll(userId2) require.Nil(t, err) assert.Len(t, preferences, 0, "should've returned no preference") } func testPreferenceCleanupFlagsBatch(t *testing.T, ss store.Store) { category := model.PREFERENCE_CATEGORY_FLAGGED_POST userId := model.NewId() o1 := &model.Post{} o1.ChannelId = model.NewId() o1.UserId = userId o1.Message = "zz" + model.NewId() + "AAAAAAAAAAA" o1.CreateAt = 1000 o1, err := ss.Post().Save(o1) require.Nil(t, err) preference1 := model.Preference{ UserId: userId, Category: category, Name: o1.Id, Value: "true", } preference2 := model.Preference{ UserId: userId, Category: category, Name: model.NewId(), Value: "true", } err = ss.Preference().Save(&model.Preferences{preference1, preference2}) require.Nil(t, err) _, err = ss.Preference().CleanupFlagsBatch(10000) assert.Nil(t, err) _, err = ss.Preference().Get(userId, category, preference1.Name) assert.Nil(t, err) _, err = ss.Preference().Get(userId, category, preference2.Name) assert.NotNil(t, err) }
store/storetest/preference_store.go
0.524151
0.431884
preference_store.go
starcoder
package statistics

import (
	"math"
	"math/bits"
	"sort"

	"github.com/pingcap/errors"
	"github.com/pingcap/parser/ast"
	"github.com/pingcap/parser/mysql"
	"github.com/pingcap/tidb/expression"
	planutil "github.com/pingcap/tidb/planner/util"
	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/ranger"
)

// If one condition can't be calculated, we will assume that the selectivity of this condition is 0.8.
const selectionFactor = 0.8

// StatsNode is used for calculating selectivity.
type StatsNode struct {
	Tp int
	ID int64
	// mask is a bit pattern whose ith bit will indicate whether the ith expression is covered by this index/column.
	mask int64
	// Ranges contains all the Ranges we got.
	Ranges []*ranger.Range
	// Selectivity indicates the Selectivity of this column/index.
	Selectivity float64
	// numCols is the number of columns contained in the index or column(which is always 1).
	numCols int
	// partCover indicates whether the bit in the mask is for a full cover or partial cover. It is only true
	// when the condition is a DNF expression on index, and the expression is not totally extracted as access condition.
	partCover bool
}

// The type of the StatsNode.
const (
	IndexType = iota
	PkType
	ColType
)

// compareType orders stats node types so that ColType sorts before PkType,
// which sorts before IndexType (used to make the greedy selection stable).
func compareType(l, r int) int {
	if l == r {
		return 0
	}
	if l == ColType {
		return -1
	}
	if l == PkType {
		return 1
	}
	if r == ColType {
		return 1
	}
	return -1
}

// MockStatsNode is only used for test.
func MockStatsNode(id int64, m int64, num int) *StatsNode {
	return &StatsNode{ID: id, mask: m, numCols: num}
}

const unknownColumnID = math.MinInt64

// getConstantColumnID receives two expressions and if one of them is column and another is constant, it returns the
// ID of the column.
func getConstantColumnID(e []expression.Expression) int64 {
	if len(e) != 2 {
		return unknownColumnID
	}
	col, ok1 := e[0].(*expression.Column)
	_, ok2 := e[1].(*expression.Constant)
	if ok1 && ok2 {
		return col.ID
	}
	col, ok1 = e[1].(*expression.Column)
	_, ok2 = e[0].(*expression.Constant)
	if ok1 && ok2 {
		return col.ID
	}
	return unknownColumnID
}

// pseudoSelectivity estimates the selectivity of exprs when no usable
// statistics are available, based only on the shape of the conditions
// (equality vs. range) and unique-key information.
func pseudoSelectivity(coll *HistColl, exprs []expression.Expression) float64 {
	minFactor := selectionFactor
	colExists := make(map[string]bool)
	for _, expr := range exprs {
		fun, ok := expr.(*expression.ScalarFunction)
		if !ok {
			continue
		}
		colID := getConstantColumnID(fun.GetArgs())
		if colID == unknownColumnID {
			continue
		}
		switch fun.FuncName.L {
		case ast.EQ, ast.NullEQ, ast.In:
			minFactor = math.Min(minFactor, 1.0/pseudoEqualRate)
			col, ok := coll.Columns[colID]
			if !ok {
				continue
			}
			colExists[col.Info.Name.L] = true
			if mysql.HasUniKeyFlag(col.Info.Flag) {
				// An equality on a unique key matches at most one row.
				return 1.0 / float64(coll.Count)
			}
		case ast.GE, ast.GT, ast.LE, ast.LT:
			minFactor = math.Min(minFactor, 1.0/pseudoLessRate)
			// FIXME: To resolve the between case.
		}
	}
	if len(colExists) == 0 {
		return minFactor
	}
	// use the unique key info: if some unique index is fully covered by
	// equality conditions, the result is at most one row.
	for _, idx := range coll.Indices {
		if !idx.Info.Unique {
			continue
		}
		unique := true
		for _, col := range idx.Info.Columns {
			if !colExists[col.Name.L] {
				unique = false
				break
			}
		}
		if unique {
			return 1.0 / float64(coll.Count)
		}
	}
	return minFactor
}

// isColEqCorCol checks if the expression is a eq function that one side is correlated column and another is column.
// If so, it will return the column's reference. Otherwise return nil instead.
func isColEqCorCol(filter expression.Expression) *expression.Column {
	f, ok := filter.(*expression.ScalarFunction)
	if !ok || f.FuncName.L != ast.EQ {
		return nil
	}
	if c, ok := f.GetArgs()[0].(*expression.Column); ok {
		if _, ok := f.GetArgs()[1].(*expression.CorrelatedColumn); ok {
			return c
		}
	}
	if c, ok := f.GetArgs()[1].(*expression.Column); ok {
		if _, ok := f.GetArgs()[0].(*expression.CorrelatedColumn); ok {
			return c
		}
	}
	return nil
}

// Selectivity is a function calculate the selectivity of the expressions.
// The definition of selectivity is (row count after filter / row count before filter).
// And exprs must be CNF now, in other words, `exprs[0] and exprs[1] and ... and exprs[len - 1]` should be held when you call this.
// Currently the time complexity is o(n^2).
func (coll *HistColl) Selectivity(ctx sessionctx.Context, exprs []expression.Expression, filledPaths []*planutil.AccessPath) (float64, []*StatsNode, error) {
	// If table's count is zero or conditions are empty, we should return 100% selectivity.
	if coll.Count == 0 || len(exprs) == 0 {
		return 1, nil, nil
	}
	// TODO: If len(exprs) is bigger than 63, we could use bitset structure to replace the int64.
	// This will simplify some code and speed up if we use this rather than a boolean slice.
	if len(exprs) > 63 || (len(coll.Columns) == 0 && len(coll.Indices) == 0) {
		return pseudoSelectivity(coll, exprs), nil, nil
	}
	ret := 1.0
	var nodes []*StatsNode
	sc := ctx.GetSessionVars().StmtCtx

	remainedExprs := make([]expression.Expression, 0, len(exprs))

	// Deal with the correlated column.
	for _, expr := range exprs {
		c := isColEqCorCol(expr)
		if c == nil {
			remainedExprs = append(remainedExprs, expr)
			continue
		}
		// Look the histogram up once and reuse it for both the validity
		// check and the NDV estimate.
		colHist := coll.Columns[c.UniqueID]
		if colHist == nil || colHist.IsInvalid(sc, coll.Pseudo) {
			ret *= 1.0 / pseudoEqualRate
			continue
		}
		if colHist.NDV > 0 {
			ret *= 1 / float64(colHist.NDV)
		} else {
			ret *= 1.0 / pseudoEqualRate
		}
	}

	extractedCols := make([]*expression.Column, 0, len(coll.Columns))
	extractedCols = expression.ExtractColumnsFromExpressions(extractedCols, remainedExprs, nil)
	for id, colInfo := range coll.Columns {
		col := expression.ColInfo2Col(extractedCols, colInfo.Info)
		if col != nil {
			maskCovered, ranges, _, err := getMaskAndRanges(ctx, remainedExprs, ranger.ColumnRangeType, nil, nil, col)
			if err != nil {
				return 0, nil, errors.Trace(err)
			}
			nodes = append(nodes, &StatsNode{Tp: ColType, ID: id, mask: maskCovered, Ranges: ranges, numCols: 1})
			if colInfo.IsHandle {
				nodes[len(nodes)-1].Tp = PkType
				var cnt float64
				cnt, err = coll.GetRowCountByIntColumnRanges(sc, id, ranges)
				if err != nil {
					return 0, nil, errors.Trace(err)
				}
				nodes[len(nodes)-1].Selectivity = cnt / float64(coll.Count)
				continue
			}
			cnt, err := coll.GetRowCountByColumnRanges(sc, id, ranges)
			if err != nil {
				return 0, nil, errors.Trace(err)
			}
			nodes[len(nodes)-1].Selectivity = cnt / float64(coll.Count)
		}
	}
	id2Paths := make(map[int64]*planutil.AccessPath)
	for _, path := range filledPaths {
		if path.IsIntHandlePath {
			continue
		}
		id2Paths[path.Index.ID] = path
	}
	for id, idxInfo := range coll.Indices {
		idxCols := expression.FindPrefixOfIndex(extractedCols, coll.Idx2ColumnIDs[id])
		if len(idxCols) > 0 {
			lengths := make([]int, 0, len(idxCols))
			for i := 0; i < len(idxCols); i++ {
				lengths = append(lengths, idxInfo.Info.Columns[i].Length)
			}
			maskCovered, ranges, partCover, err := getMaskAndRanges(ctx, remainedExprs, ranger.IndexRangeType, lengths, id2Paths[idxInfo.ID], idxCols...)
			if err != nil {
				return 0, nil, errors.Trace(err)
			}
			cnt, err := coll.GetRowCountByIndexRanges(sc, id, ranges)
			if err != nil {
				return 0, nil, errors.Trace(err)
			}
			selectivity := cnt / float64(coll.Count)
			nodes = append(nodes, &StatsNode{
				Tp:          IndexType,
				ID:          id,
				mask:        maskCovered,
				Ranges:      ranges,
				numCols:     len(idxInfo.Info.Columns),
				Selectivity: selectivity,
				partCover:   partCover,
			})
		}
	}
	usedSets := GetUsableSetsByGreedy(nodes)
	// Initialize the mask with the full set.
	mask := (int64(1) << uint(len(remainedExprs))) - 1
	for _, set := range usedSets {
		mask &^= set.mask
		ret *= set.Selectivity
		// If `partCover` is true, it means that the conditions are in DNF form, and only part
		// of the DNF expressions are extracted as access conditions, so besides from the selectivity
		// of the extracted access conditions, we multiply another selectionFactor for the residual
		// conditions.
		if set.partCover {
			ret *= selectionFactor
		}
	}
	// If there's still conditions which cannot be calculated, we will multiply a selectionFactor.
	if mask > 0 {
		ret *= selectionFactor
	}
	return ret, nodes, nil
}

// getMaskAndRanges builds the ranges for one column or index prefix and
// returns a bit mask telling which of exprs became access conditions.
// For DNF conditions on an index it sets only bit 0 and reports partial
// cover through the third return value.
func getMaskAndRanges(ctx sessionctx.Context, exprs []expression.Expression, rangeType ranger.RangeType, lengths []int, cachedPath *planutil.AccessPath, cols ...*expression.Column) (mask int64, ranges []*ranger.Range, partCover bool, err error) {
	sc := ctx.GetSessionVars().StmtCtx
	isDNF := false
	var accessConds, remainedConds []expression.Expression
	switch rangeType {
	case ranger.ColumnRangeType:
		accessConds = ranger.ExtractAccessConditionsForColumn(exprs, cols[0].UniqueID)
		ranges, err = ranger.BuildColumnRange(accessConds, sc, cols[0].RetType, types.UnspecifiedLength)
	case ranger.IndexRangeType:
		if cachedPath != nil {
			ranges, accessConds, remainedConds, isDNF = cachedPath.Ranges, cachedPath.AccessConds, cachedPath.TableFilters, cachedPath.IsDNFCond
			break
		}
		var res *ranger.DetachRangeResult
		res, err = ranger.DetachCondAndBuildRangeForIndex(ctx, exprs, cols, lengths)
		// Check the error before touching res: on failure res may be nil.
		if err != nil {
			return 0, nil, false, err
		}
		ranges, accessConds, remainedConds, isDNF = res.Ranges, res.AccessConds, res.RemainedConds, res.IsDNFCond
	default:
		panic("should never be here")
	}
	if err != nil {
		return 0, nil, false, err
	}
	if isDNF && len(accessConds) > 0 {
		mask |= 1
		return mask, ranges, len(remainedConds) > 0, nil
	}
	for i := range exprs {
		for j := range accessConds {
			if exprs[i].Equal(ctx, accessConds[j]) {
				mask |= 1 << uint64(i)
				break
			}
		}
	}
	return mask, ranges, false, nil
}

// GetUsableSetsByGreedy will select the indices and pk used for calculate selectivity by greedy algorithm.
func GetUsableSetsByGreedy(nodes []*StatsNode) (newBlocks []*StatsNode) {
	sort.Slice(nodes, func(i int, j int) bool {
		if r := compareType(nodes[i].Tp, nodes[j].Tp); r != 0 {
			return r < 0
		}
		return nodes[i].ID < nodes[j].ID
	})
	marked := make([]bool, len(nodes))
	mask := int64(math.MaxInt64)
	for {
		// Choose the index that covers most.
		bestID, bestCount, bestTp, bestNumCols, bestMask := -1, 0, ColType, 0, int64(0)
		for i, set := range nodes {
			if marked[i] {
				continue
			}
			curMask := set.mask & mask
			if curMask != set.mask {
				marked[i] = true
				continue
			}
			// Renamed from `bits` so the local does not shadow the
			// imported math/bits package.
			curCount := bits.OnesCount64(uint64(curMask))
			// This set cannot cover any thing, just skip it.
			if curCount == 0 {
				marked[i] = true
				continue
			}
			// We greedy select the stats info based on:
			// (1): The stats type, always prefer the primary key or index.
			// (2): The number of expression that it covers, the more the better.
			// (3): The number of columns that it contains, the less the better.
			if (bestTp == ColType && set.Tp != ColType) || bestCount < curCount || (bestCount == curCount && bestNumCols > set.numCols) {
				bestID, bestCount, bestTp, bestNumCols, bestMask = i, curCount, set.Tp, set.numCols, curMask
			}
		}
		if bestCount == 0 {
			break
		}

		// Update the mask, remove the bit that nodes[bestID].mask has.
		mask &^= bestMask

		newBlocks = append(newBlocks, nodes[bestID])
		marked[bestID] = true
	}
	return
}
statistics/selectivity.go
0.55254
0.428652
selectivity.go
starcoder
// Package d06 solves Advent of Code 2019 day 6 (orbit maps).
package d06

import (
	"regexp"
	"strings"

	"github.com/jzimbel/adventofcode-go/solutions"
)

// intermediate data structure to make building the tree easier:
// maps a body's label to the labels of its direct satellites.
type orbitMap map[string][]string

// standard tree structure with awareness of its parent and depth from the root
type tree struct {
	label    string
	depth    uint
	parent   *tree
	children []*tree
}

// allows for fast access to any node in a tree by its label
type flatTree map[string]*tree

const (
	centerOfMass string = "COM"
	you          string = "YOU"
	santa        string = "SAN"
)

// pattern matches one input line of the form "BASE)SATELLITE".
var pattern = regexp.MustCompile(`(.+)\)(.+)`)

// makeFlatTree creates a flatTree that points to nodes in an underlying tree.
func makeFlatTree(om orbitMap) flatTree {
	ft := make(flatTree, len(om))
	var buildTree func(string, uint, *tree) *tree
	buildTree = func(label string, depth uint, parent *tree) *tree {
		t := tree{
			label:    label,
			children: make([]*tree, 0, len(om[label])),
			depth:    depth,
			parent:   parent,
		}
		for i := range om[label] {
			t.children = append(t.children, buildTree(om[label][i], depth+1, &t))
		}
		ft[label] = &t
		return &t
	}
	buildTree(centerOfMass, 0, nil)
	return ft
}

// sumDepths totals the depth of every node, i.e. the number of direct and
// indirect orbits in the map.
func (ft flatTree) sumDepths() (count uint) {
	for _, t := range ft {
		count += t.depth
	}
	return
}

// getAncestors returns the chain of parents from t up to the root,
// closest parent first.
func (t *tree) getAncestors() []*tree {
	ancestors := make([]*tree, t.depth)
	current := t
	for i := uint(0); i < t.depth; i++ {
		current = current.parent
		ancestors[i] = current
	}
	return ancestors
}

// getCommonAncestor returns the deepest node that is an ancestor of both
// t1 and t2.
func getCommonAncestor(t1 *tree, t2 *tree) (common *tree) {
	ancestors1 := t1.getAncestors()
	ancestors2 := t2.getAncestors()
	// put the second list of ancestors into a set for faster existence checking between the two lists
	compareSet := make(map[*tree]struct{}, len(ancestors2))
	for _, ancestor := range ancestors2 {
		compareSet[ancestor] = struct{}{}
	}
	for _, ancestor := range ancestors1 {
		if _, ok := compareSet[ancestor]; ok {
			common = ancestor
			break
		}
	}
	return
}

// parseInput converts the raw puzzle input into an orbitMap.
func parseInput(input string) (om orbitMap) {
	lines := strings.Split(input, "\n")
	om = make(orbitMap, len(lines))
	for _, line := range lines {
		matches := pattern.FindStringSubmatch(line)
		// Skip lines that don't match, e.g. the empty final element that
		// strings.Split produces when the input ends with a newline.
		// Indexing a nil match slice would panic.
		if len(matches) != 3 {
			continue
		}
		base, satellite := matches[1], matches[2]
		// append on a nil slice allocates, so no existence check is needed.
		om[base] = append(om[base], satellite)
	}
	return
}

// part1 answers part 1: the total number of direct and indirect orbits.
func part1(ft flatTree) uint {
	return ft.sumDepths()
}

// part2 answers part 2: the number of orbital transfers between the bodies
// that YOU and SAN orbit, via their common ancestor.
func part2(ft flatTree) uint {
	common := getCommonAncestor(ft[you].parent, ft[santa].parent)
	return (ft[you].parent.depth - common.depth) + (ft[santa].parent.depth - common.depth)
}

// Solve provides the day 6 puzzle solution.
func Solve(input string) (*solutions.Solution, error) {
	ft := makeFlatTree(parseInput(input))
	return &solutions.Solution{Part1: part1(ft), Part2: part2(ft)}, nil
}
solutions/y2019/d06/solution.go
0.694613
0.44071
solution.go
starcoder
// Package swisstopo implements the approximate conversion formulas between
// the Swiss LV03 (CH1903) projection coordinates and WGS84 published by the
// Swiss Federal Office of Topography.
package swisstopo

import (
	"math"
)

// CHtoWGSheight converts an LV03 height to an approximate WGS84 ellipsoidal
// height, given the LV03 military coordinates y (east) and x (north).
func CHtoWGSheight(y float64, x float64, h float64) float64 {
	// Converts military to civil and to unit = 1000km
	// Auxiliary values (% Bern)
	yAux := (y - 600000) / 1000000
	xAux := (x - 200000) / 1000000

	// Process height
	return (h + 49.55) - (12.60 * yAux) - (22.64 * xAux)
}

// CHtoWGSlat converts LV03 y/x coordinates to WGS84 latitude (decimal degrees).
func CHtoWGSlat(y float64, x float64) float64 {
	// Converts military to civil and to unit = 1000km
	// Auxiliary values (% Bern)
	yAux := (y - 600000) / 1000000
	xAux := (x - 200000) / 1000000

	// Process lat
	lat := (16.9023892 + (3.238272 * xAux)) -
		(0.270978 * math.Pow(yAux, 2)) -
		(0.002528 * math.Pow(xAux, 2)) -
		(0.0447 * math.Pow(yAux, 2) * xAux) -
		(0.0140 * math.Pow(xAux, 3))

	// Unit 10000" to 1" and converts seconds to degrees (dec)
	return (lat * 100) / 36
}

// CHtoWGSlng converts LV03 y/x coordinates to WGS84 longitude (decimal degrees).
func CHtoWGSlng(y float64, x float64) float64 {
	// Converts military to civil and to unit = 1000km
	// Auxiliary values (% Bern)
	yAux := (y - 600000) / 1000000
	xAux := (x - 200000) / 1000000

	// Process long
	lng := (2.6779094 + (4.728982 * yAux) +
		(0.791484 * yAux * xAux) +
		(0.1306 * yAux * math.Pow(xAux, 2))) -
		(0.0436 * math.Pow(yAux, 3))

	// Unit 10000" to 1" and converts seconds to degrees (dec)
	return (lng * 100) / 36
}

// DecToSexAngle converts a decimal angle in degrees to a sexagesimal angle
// expressed as total seconds of arc.
func DecToSexAngle(dec float64) float64 {
	deg := math.Floor(dec)
	min := math.Floor((dec - deg) * 60)
	sec := (((dec - deg) * 60) - min) * 60
	return sec + min*60.0 + deg*3600.0
}

// LV03toWGS84 converts LV03 east/north/height to WGS84 and returns a slice
// containing latitude, longitude, and height (in that order).
func LV03toWGS84(east float64, north float64, height float64) []float64 {
	return []float64{
		CHtoWGSlat(east, north),
		CHtoWGSlng(east, north),
		CHtoWGSheight(east, north, height),
	}
}

// WGS84toLV03 converts WGS84 latitude/longitude/ellipsoidal height to LV03
// and returns a slice containing east, north, and height (in that order).
func WGS84toLV03(latitude float64, longitude float64, ellHeight float64) []float64 {
	return []float64{
		WGStoCHy(latitude, longitude),
		WGStoCHx(latitude, longitude),
		WGStoCHh(latitude, longitude, ellHeight),
	}
}

// WGStoCHh converts a WGS84 lat/long (decimal degrees) and ellipsoidal height
// to an approximate LV03 height.
func WGStoCHh(lat float64, lng float64, h float64) float64 {
	// Converts dec degrees to sex seconds
	lat = DecToSexAngle(lat)
	lng = DecToSexAngle(lng)

	// Auxiliary values (% Bern)
	latAux := (lat - 169028.66) / 10000
	lngAux := (lng - 26782.5) / 10000

	// Process h
	return (h - 49.55) + (2.73 * lngAux) + (6.94 * latAux)
}

// WGStoCHx converts a WGS84 lat/long (decimal degrees) to the LV03 x (north)
// coordinate.
func WGStoCHx(lat float64, lng float64) float64 {
	// Converts dec degrees to sex seconds
	lat = DecToSexAngle(lat)
	lng = DecToSexAngle(lng)

	// Auxiliary values (% Bern)
	latAux := (lat - 169028.66) / 10000
	lngAux := (lng - 26782.5) / 10000

	// Process X
	x := ((200147.07 +
		(308807.95 * latAux) +
		(3745.25 * math.Pow(lngAux, 2)) +
		(76.63 * math.Pow(latAux, 2))) -
		(194.56 * math.Pow(lngAux, 2) * latAux)) +
		(119.79 * math.Pow(latAux, 3))
	return x
}

// WGStoCHy converts a WGS84 lat/long (decimal degrees) to the LV03 y (east)
// coordinate.
func WGStoCHy(lat float64, lng float64) float64 {
	// Converts dec degrees to sex seconds
	lat = DecToSexAngle(lat)
	lng = DecToSexAngle(lng)

	// Auxiliary values (% Bern)
	latAux := (lat - 169028.66) / 10000
	lngAux := (lng - 26782.5) / 10000

	// Process Y
	y := (600072.37 + (211455.93 * lngAux)) -
		(10938.51 * lngAux * latAux) -
		(0.36 * lngAux * math.Pow(latAux, 2)) -
		(44.54 * math.Pow(lngAux, 3))
	return y
}
scripts/go/WGS84_CH1903.go
0.81283
0.519034
WGS84_CH1903.go
starcoder
package iso20022 // Cash movements from or to a fund as a result of investment funds transactions, eg, subscriptions or redemptions. type EstimatedFundCashForecast3 struct { // Unique technical identifier for an instance of a fund cash forecast within a fund cash forecast report as assigned by the issuer of the report. Identification *Max35Text `xml:"Id"` // Date and, if required, the time, at which the price has been applied. TradeDateTime *DateAndDateTimeChoice `xml:"TradDtTm"` // Previous date and time at which a price was applied. PreviousTradeDateTime *DateAndDateTimeChoice `xml:"PrvsTradDtTm"` // Investment fund class to which the cash flow is related. FinancialInstrumentDetails *FinancialInstrument9 `xml:"FinInstrmDtls"` // Estimated total value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class. EstimatedTotalNAV *ActiveOrHistoricCurrencyAndAmount `xml:"EstmtdTtlNAV,omitempty"` // Previous value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class. PreviousTotalNAV *ActiveOrHistoricCurrencyAndAmount `xml:"PrvsTtlNAV,omitempty"` // Estimated total number of investment fund class units that have been issued. EstimatedTotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"EstmtdTtlUnitsNb,omitempty"` // Previous value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class. PreviousTotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"PrvsTtlUnitsNb,omitempty"` // Rate of change of the net asset value. EstimatedTotalNAVChangeRate *PercentageRate `xml:"EstmtdTtlNAVChngRate,omitempty"` // Currency of the investment fund class. InvestmentCurrency []*ActiveOrHistoricCurrencyCode `xml:"InvstmtCcy,omitempty"` // Indicates whether the estimated net cash flow is exceptional. 
ExceptionalNetCashFlowIndicator *YesNoIndicator `xml:"XcptnlNetCshFlowInd"` // Estimated cash movements into the fund as a result of investment funds transactions, eg, subscriptions or switch-in. EstimatedCashInForecastDetails []*CashInForecast4 `xml:"EstmtdCshInFcstDtls,omitempty"` // Estimated cash movements out of the fund as a result of investment funds transactions, eg, redemptions or switch-out. EstimatedCashOutForecastDetails []*CashOutForecast4 `xml:"EstmtdCshOutFcstDtls,omitempty"` // Net cash movements to a fund as a result of investment funds transactions. EstimatedNetCashForecastDetails []*NetCashForecast2 `xml:"EstmtdNetCshFcstDtls,omitempty"` } func (e *EstimatedFundCashForecast3) SetIdentification(value string) { e.Identification = (*Max35Text)(&value) } func (e *EstimatedFundCashForecast3) AddTradeDateTime() *DateAndDateTimeChoice { e.TradeDateTime = new(DateAndDateTimeChoice) return e.TradeDateTime } func (e *EstimatedFundCashForecast3) AddPreviousTradeDateTime() *DateAndDateTimeChoice { e.PreviousTradeDateTime = new(DateAndDateTimeChoice) return e.PreviousTradeDateTime } func (e *EstimatedFundCashForecast3) AddFinancialInstrumentDetails() *FinancialInstrument9 { e.FinancialInstrumentDetails = new(FinancialInstrument9) return e.FinancialInstrumentDetails } func (e *EstimatedFundCashForecast3) SetEstimatedTotalNAV(value, currency string) { e.EstimatedTotalNAV = NewActiveOrHistoricCurrencyAndAmount(value, currency) } func (e *EstimatedFundCashForecast3) SetPreviousTotalNAV(value, currency string) { e.PreviousTotalNAV = NewActiveOrHistoricCurrencyAndAmount(value, currency) } func (e *EstimatedFundCashForecast3) AddEstimatedTotalUnitsNumber() *FinancialInstrumentQuantity1 { e.EstimatedTotalUnitsNumber = new(FinancialInstrumentQuantity1) return e.EstimatedTotalUnitsNumber } func (e *EstimatedFundCashForecast3) AddPreviousTotalUnitsNumber() *FinancialInstrumentQuantity1 { e.PreviousTotalUnitsNumber = new(FinancialInstrumentQuantity1) return 
e.PreviousTotalUnitsNumber } func (e *EstimatedFundCashForecast3) SetEstimatedTotalNAVChangeRate(value string) { e.EstimatedTotalNAVChangeRate = (*PercentageRate)(&value) } func (e *EstimatedFundCashForecast3) AddInvestmentCurrency(value string) { e.InvestmentCurrency = append(e.InvestmentCurrency, (*ActiveOrHistoricCurrencyCode)(&value)) } func (e *EstimatedFundCashForecast3) SetExceptionalNetCashFlowIndicator(value string) { e.ExceptionalNetCashFlowIndicator = (*YesNoIndicator)(&value) } func (e *EstimatedFundCashForecast3) AddEstimatedCashInForecastDetails() *CashInForecast4 { newValue := new (CashInForecast4) e.EstimatedCashInForecastDetails = append(e.EstimatedCashInForecastDetails, newValue) return newValue } func (e *EstimatedFundCashForecast3) AddEstimatedCashOutForecastDetails() *CashOutForecast4 { newValue := new (CashOutForecast4) e.EstimatedCashOutForecastDetails = append(e.EstimatedCashOutForecastDetails, newValue) return newValue } func (e *EstimatedFundCashForecast3) AddEstimatedNetCashForecastDetails() *NetCashForecast2 { newValue := new (NetCashForecast2) e.EstimatedNetCashForecastDetails = append(e.EstimatedNetCashForecastDetails, newValue) return newValue }
EstimatedFundCashForecast3.go
0.860266
0.534612
EstimatedFundCashForecast3.go
starcoder
package rpc

import (
	"encoding/binary"
	"math"
	"time"

	"github.com/ebay/beam/logentry"
)

// TypePrefix returns a byte slice that contain a prefix of the encoding that contains
// the type. This will contain the type indicator, and for types that have units, will
// also contain the units ID.
func (o KGObject) TypePrefix() []byte {
	switch o.ValueType() {
	case KtNil:
		return []byte{byte(KtNil)}
	case KtString, KtKID:
		// Strings and KIDs carry no units: the prefix is just the type byte.
		return []byte(o.value[:1])
	default:
		// All other types embed an 8-byte units ID after the type byte.
		return []byte(o.value[:9])
	}
}

// ValueType returns the contained KGObject type. An empty payload is KtNil.
func (o KGObject) ValueType() KGObjectType {
	if len(o.value) == 0 {
		return KtNil
	}
	return KGObjectType(o.value[0])
}

// IsType returns true if this KGObject contains the indicated type
func (o KGObject) IsType(t KGObjectType) bool {
	return t == o.ValueType()
}

// ValBool returns the contained bool value if the type is KtBool
// otherwise it returns false
func (o KGObject) ValBool() bool {
	// TODO: Why does Bool has a units field?
	// Byte 9 (after the type byte and 8-byte units ID) holds the flag.
	return o.IsType(KtBool) && o.value[9] > 0
}

// ValInt64 returns the contained Int64 value if the type is KtInt64
// otherwise it returns 0
func (o KGObject) ValInt64() int64 {
	if !o.IsType(KtInt64) {
		return 0
	}
	// The stored form has the sign bit flipped so encoded values sort
	// correctly; flip it back here.
	return int64(binary.BigEndian.Uint64([]byte(o.value[9:17])) ^ maskMsbOnly)
}

// ValFloat64 returns the contained Float64 value if the type is KtFloat64.
// otherwise it returns 0
func (o KGObject) ValFloat64() float64 {
	if !o.IsType(KtFloat64) {
		return 0
	}
	u := binary.BigEndian.Uint64([]byte(o.value[9:17]))
	// Undo the sort-order transform: values stored with the MSB set had
	// only that bit flipped; the rest had every bit flipped.
	if u&maskMsbOnly != 0 {
		u ^= maskMsbOnly
	} else {
		u ^= maskAllBits
	}
	return math.Float64frombits(u)
}

// ValString returns the contained string value if the type is KtString
// otherwise it returns ""
// Don't get this confused with String() which returns a human readable
// representation of the Object.
func (o KGObject) ValString() string {
	if !o.IsType(KtString) {
		return ""
	}
	// The payload sits between the leading type byte and the trailing
	// 8-byte language ID.
	return string(o.value[1 : len(o.value)-9])
}

// ValKID returns the contained KID value if the type is KtKID
// otherwise it returns 0
func (o KGObject) ValKID() uint64 {
	if !o.IsType(KtKID) {
		return 0
	}
	return binary.BigEndian.Uint64([]byte(o.value[1:]))
}

// ValTimestamp returns the contained Timestamp if the type is KtTimestamp
// otherwise it returns an empty/zero value KGTimestamp
func (o KGObject) ValTimestamp() logentry.KGTimestamp {
	if !o.IsType(KtTimestamp) {
		return logentry.KGTimestamp{}
	}
	// Layout after the type byte + units ID: 2-byte year, then single
	// bytes for month/day/hour/minute/second, a 4-byte nanosecond count,
	// and finally the precision byte.
	year := int(binary.BigEndian.Uint16([]byte(o.value[9:11])))
	month := time.Month(o.value[11])
	day := int(o.value[12])
	hour := int(o.value[13])
	minute := int(o.value[14])
	second := int(o.value[15])
	nanos := int(binary.BigEndian.Uint32([]byte(o.value[16:20])))
	return logentry.KGTimestamp{
		Precision: logentry.TimestampPrecision(o.value[20]),
		Value:     time.Date(year, month, day, hour, minute, second, nanos, time.UTC),
	}
}

// LangID returns the Language ID if the contained type is KtString,
// otherwise it returns 0
func (o KGObject) LangID() uint64 {
	if !o.IsType(KtString) {
		return 0
	}
	// The language ID occupies the final 8 bytes of the encoding.
	return binary.BigEndian.Uint64([]byte(o.value[len(o.value)-8:]))
}

// UnitID returns the Units ID for the contained types that have units (bool, int, float, timestamp)
// otherwise it returns 0
func (o KGObject) UnitID() uint64 {
	switch o.ValueType() {
	case KtString, KtKID, KtNil:
		return 0
	default:
		return binary.BigEndian.Uint64([]byte(o.value[1:9]))
	}
}
src/github.com/ebay/beam/rpc/kgobject_accessors.go
0.64232
0.41834
kgobject_accessors.go
starcoder
package internal var metricsFile = &File{ Name: "metrics", imports: []string{ `otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"`, }, testImports: []string{ `"testing"`, ``, `"github.com/stretchr/testify/assert"`, ``, `otlpmetrics "go.opentelemetry.io/collector/internal/data/opentelemetry-proto-gen/metrics/v1"`, }, structs: []baseStruct{ resourceMetricsSlice, resourceMetrics, instrumentationLibraryMetricsSlice, instrumentationLibraryMetrics, metricSlice, metric, metricDescriptor, int64DataPointSlice, int64DataPoint, doubleDataPointSlice, doubleDataPoint, histogramDataPointSlice, histogramDataPoint, histogramBucketSlice, histogramBucket, histogramBucketExemplar, summaryDataPointSlice, summaryDataPoint, summaryValueAtPercentileSlice, summaryValueAtPercentile, }, } var resourceMetricsSlice = &sliceStruct{ structName: "ResourceMetricsSlice", element: resourceMetrics, } var resourceMetrics = &messageStruct{ structName: "ResourceMetrics", description: "// InstrumentationLibraryMetrics is a collection of metrics from a LibraryInstrumentation.", originFullName: "otlpmetrics.ResourceMetrics", fields: []baseField{ resourceField, &sliceField{ fieldMame: "InstrumentationLibraryMetrics", originFieldName: "InstrumentationLibraryMetrics", returnSlice: instrumentationLibraryMetricsSlice, }, }, } var instrumentationLibraryMetricsSlice = &sliceStruct{ structName: "InstrumentationLibraryMetricsSlice", element: instrumentationLibraryMetrics, } var instrumentationLibraryMetrics = &messageStruct{ structName: "InstrumentationLibraryMetrics", description: "// InstrumentationLibraryMetrics is a collection of metrics from a LibraryInstrumentation.", originFullName: "otlpmetrics.InstrumentationLibraryMetrics", fields: []baseField{ instrumentationLibraryField, &sliceField{ fieldMame: "Metrics", originFieldName: "Metrics", returnSlice: metricSlice, }, }, } var metricSlice = &sliceStruct{ structName: "MetricSlice", element: metric, } var metric = 
&messageStruct{ structName: "Metric", description: "// Metric represents one metric as a collection of datapoints.\n" + "// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/master/opentelemetry/proto/metrics/v1/metrics.proto#L96", originFullName: "otlpmetrics.Metric", fields: []baseField{ &messageField{ fieldName: "MetricDescriptor", originFieldName: "MetricDescriptor", returnMessage: metricDescriptor, }, &sliceField{ fieldMame: "Int64DataPoints", originFieldName: "Int64DataPoints", returnSlice: int64DataPointSlice, }, &sliceField{ fieldMame: "DoubleDataPoints", originFieldName: "DoubleDataPoints", returnSlice: doubleDataPointSlice, }, &sliceField{ fieldMame: "HistogramDataPoints", originFieldName: "HistogramDataPoints", returnSlice: histogramDataPointSlice, }, &sliceField{ fieldMame: "SummaryDataPoints", originFieldName: "SummaryDataPoints", returnSlice: summaryDataPointSlice, }, }, } var metricDescriptor = &messageStruct{ structName: "MetricDescriptor", description: "// MetricDescriptor is the descriptor of a metric.", originFullName: "otlpmetrics.MetricDescriptor", fields: []baseField{ nameField, &primitiveField{ fieldMame: "Description", originFieldName: "Description", returnType: "string", defaultVal: `""`, testVal: `"test_description"`, }, &primitiveField{ fieldMame: "Unit", originFieldName: "Unit", returnType: "string", defaultVal: `""`, testVal: `"1"`, }, &primitiveTypedField{ fieldMame: "Type", originFieldName: "Type", returnType: "MetricType", rawType: "otlpmetrics.MetricDescriptor_Type", defaultVal: "MetricTypeUnspecified", testVal: "MetricTypeGaugeInt64", }, labelsField, }, } var int64DataPointSlice = &sliceStruct{ structName: "Int64DataPointSlice", element: int64DataPoint, } var int64DataPoint = &messageStruct{ structName: "Int64DataPoint", description: "// Int64DataPoint is a single data point in a timeseries that describes the time-varying values of a int64 metric.", originFullName: 
"otlpmetrics.Int64DataPoint", fields: []baseField{ labelsField, startTimeField, timeField, valueInt64Field, }, } var doubleDataPointSlice = &sliceStruct{ structName: "DoubleDataPointSlice", element: doubleDataPoint, } var doubleDataPoint = &messageStruct{ structName: "DoubleDataPoint", description: "// DoubleDataPoint is a single data point in a timeseries that describes the time-varying value of a double metric.", originFullName: "otlpmetrics.DoubleDataPoint", fields: []baseField{ labelsField, startTimeField, timeField, valueFloat64Field, }, } var histogramDataPointSlice = &sliceStruct{ structName: "HistogramDataPointSlice", element: histogramDataPoint, } var histogramDataPoint = &messageStruct{ structName: "HistogramDataPoint", description: "// HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram.", originFullName: "otlpmetrics.HistogramDataPoint", fields: []baseField{ labelsField, startTimeField, timeField, countField, sumField, &sliceField{ fieldMame: "Buckets", originFieldName: "Buckets", returnSlice: histogramBucketSlice, }, explicitBoundsField, }, } var histogramBucketSlice = &sliceStruct{ structName: "HistogramBucketSlice", element: histogramBucket, } var histogramBucket = &messageStruct{ structName: "HistogramBucket", description: "// HistogramBucket contains values for a histogram bucket.", originFullName: "otlpmetrics.HistogramDataPoint_Bucket", fields: []baseField{ countField, &messageField{ fieldName: "Exemplar", originFieldName: "Exemplar", returnMessage: histogramBucketExemplar, }, }, } var histogramBucketExemplar = &messageStruct{ structName: "HistogramBucketExemplar", description: "// HistogramBucketExemplar are example points that may be used to annotate aggregated Histogram values.\n" + "// They are metadata that gives information about a particular value added to a Histogram bucket.", originFullName: "otlpmetrics.HistogramDataPoint_Bucket_Exemplar", fields: []baseField{ timeField, 
valueFloat64Field, &sliceField{ fieldMame: "Attachments", originFieldName: "Attachments", returnSlice: stringMap, }, }, } var summaryDataPointSlice = &sliceStruct{ structName: "SummaryDataPointSlice", element: summaryDataPoint, } var summaryDataPoint = &messageStruct{ structName: "SummaryDataPoint", description: "// SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary metric.", originFullName: "otlpmetrics.SummaryDataPoint", fields: []baseField{ labelsField, startTimeField, timeField, countField, sumField, &sliceField{ fieldMame: "ValueAtPercentiles", originFieldName: "PercentileValues", returnSlice: summaryValueAtPercentileSlice, }, }, } var summaryValueAtPercentileSlice = &sliceStruct{ structName: "SummaryValueAtPercentileSlice", element: summaryValueAtPercentile, } var summaryValueAtPercentile = &messageStruct{ structName: "SummaryValueAtPercentile", description: "// SummaryValueAtPercentile represents the value at a given percentile of a distribution.", originFullName: "otlpmetrics.SummaryDataPoint_ValueAtPercentile", fields: []baseField{ percentileField, valueFloat64Field, }, } var labelsField = &sliceField{ fieldMame: "LabelsMap", originFieldName: "Labels", returnSlice: stringMap, } var countField = &primitiveField{ fieldMame: "Count", originFieldName: "Count", returnType: "uint64", defaultVal: "uint64(0)", testVal: "uint64(17)", } var sumField = &primitiveField{ fieldMame: "Sum", originFieldName: "Sum", returnType: "float64", defaultVal: "float64(0.0)", testVal: "float64(17.13)", } var valueInt64Field = &primitiveField{ fieldMame: "Value", originFieldName: "Value", returnType: "int64", defaultVal: "int64(0)", testVal: "int64(-17)", } var valueFloat64Field = &primitiveField{ fieldMame: "Value", originFieldName: "Value", returnType: "float64", defaultVal: "float64(0.0)", testVal: "float64(17.13)", } var percentileField = &primitiveField{ fieldMame: "Percentile", originFieldName: "Percentile", returnType: 
"float64", defaultVal: "float64(0.0)", testVal: "float64(0.90)", } var explicitBoundsField = &primitiveField{ fieldMame: "ExplicitBounds", originFieldName: "ExplicitBounds", returnType: "[]float64", defaultVal: "[]float64(nil)", testVal: "[]float64{1, 2, 3}", }
cmd/pdatagen/internal/metrics_structs.go
0.70028
0.470372
metrics_structs.go
starcoder
package binarytree import ( "fmt" "strings" "github.com/ianadiwibowo/central-park/datastructures/queue" ) type BinaryTree struct { Root *BinaryTreeNode } type BinaryTreeNode struct { Value int Left *BinaryTreeNode Right *BinaryTreeNode } // NewBinaryTree creates a new empty binary tree func NewBinaryTree() *BinaryTree { return &BinaryTree{} } // SetRoot assign a new value's node as the tree root func (b *BinaryTree) SetRoot(value int) { b.Root = &BinaryTreeNode{ Value: value, } } // InsertLeft create a new value's node and put it as left child of parent's node func (b *BinaryTree) InsertLeft(value int, parent int) { parentNode := b.Find(parent) if parentNode != nil { parentNode.Left = &BinaryTreeNode{ Value: value, } } } // InsertRight create a new value's node and put it as right child of parent's node func (b *BinaryTree) InsertRight(value int, parent int) { parentNode := b.Find(parent) if parentNode != nil { parentNode.Right = &BinaryTreeNode{ Value: value, } } } // Find returns value's node func (b *BinaryTree) Find(value int) *BinaryTreeNode { return findInOrder(b.Root, value) } func findInOrder(currentNode *BinaryTreeNode, value int) *BinaryTreeNode { if currentNode != nil { leftNode := findInOrder(currentNode.Left, value) if leftNode != nil { return leftNode } if currentNode.Value == value { return currentNode } rightNode := findInOrder(currentNode.Right, value) if rightNode != nil { return rightNode } } return nil } // Height returns the tree height from the root func (b *BinaryTree) Height() int { return maxHeightPreOrder(b.Root, 0) } func maxHeightPreOrder(currentNode *BinaryTreeNode, maxHeight int) int { if currentNode != nil { leftHeight := maxHeightPreOrder(currentNode.Left, maxHeight+1) rightHeight := maxHeightPreOrder(currentNode.Right, maxHeight+1) if leftHeight > maxHeight { maxHeight = leftHeight } if rightHeight > maxHeight { maxHeight = rightHeight } return maxHeight } return 0 } // FindCompletePaths find all complete paths from root to all 
leaves func (b *BinaryTree) FindCompletePaths() [][]int { return findCompletePaths(b.Root, []int{}, [][]int{}) } func findCompletePaths(currentNode *BinaryTreeNode, path []int, prevResults [][]int) [][]int { if currentNode != nil { updatedPath := make([]int, len(path)+1) copy(updatedPath, append(path, currentNode.Value)) if currentNode.IsLeaf() { updatedPrevResults := make([][]int, len(prevResults)+1) copy(updatedPrevResults, append(prevResults, updatedPath)) return updatedPrevResults } prevResults = findCompletePaths(currentNode.Left, updatedPath, prevResults) prevResults = findCompletePaths(currentNode.Right, updatedPath, prevResults) } return prevResults } // TraversePreOrder returns the pre-ordered (depth-first) binary tree values func (b *BinaryTree) TraversePreOrder() (result []int) { var traverse func(currentNode *BinaryTreeNode) traverse = func(currentNode *BinaryTreeNode) { if currentNode == nil { return } result = append(result, currentNode.Value) traverse(currentNode.Left) traverse(currentNode.Right) } traverse(b.Root) return result } // TraverseInOrder returns the in-ordered (depth-first) binary tree values func (b *BinaryTree) TraverseInOrder() (result []int) { var traverse func(currentNode *BinaryTreeNode) traverse = func(currentNode *BinaryTreeNode) { if currentNode == nil { return } traverse(currentNode.Left) result = append(result, currentNode.Value) traverse(currentNode.Right) } traverse(b.Root) return result } // TraversePostOrder returns the post-ordered (depth-first) binary tree values func (b *BinaryTree) TraversePostOrder() (result []int) { var traverse func(currentNode *BinaryTreeNode) traverse = func(currentNode *BinaryTreeNode) { if currentNode == nil { return } traverse(currentNode.Left) traverse(currentNode.Right) result = append(result, currentNode.Value) } traverse(b.Root) return result } // TraverseLevelOrder returns the breadth-first-ordered binary tree values func (b *BinaryTree) TraverseLevelOrder() (result []int) { queue := 
queue.NewQueue() queue.Enqueue(b.Root) for !queue.IsEmpty() { currentNode := queue.Dequeue().(*BinaryTreeNode) result = append(result, currentNode.Value) if currentNode.Left != nil { queue.Enqueue(currentNode.Left) } if currentNode.Right != nil { queue.Enqueue(currentNode.Right) } } return result } // IsLeaf checks whether a binary tree node is leaf (true) or not (false) func (btn *BinaryTreeNode) IsLeaf() bool { return btn.Left == nil && btn.Right == nil } // PrintInverseLevelOrder returns the level-ordered (breadth first) human-readable format of the binary tree, but inverted func (b *BinaryTree) PrintInverseLevelOrder() (printout string) { queue := queue.NewQueue() queue.Enqueue(b.Root) for !queue.IsEmpty() { currentNode := queue.Dequeue().(*BinaryTreeNode) printout = fmt.Sprintf("%v %v", printout, currentNode.Value) if currentNode.Right != nil { queue.Enqueue(currentNode.Right) } if currentNode.Left != nil { queue.Enqueue(currentNode.Left) } } return fmt.Sprintf("[%v]", strings.TrimSpace(printout)) } // FindParent searches for a node whose left or right child have a certain value func (b *BinaryTree) FindParent(value int) *BinaryTreeNode { return b.findParent(b.Root, value) } func (b *BinaryTree) findParent(currentNode *BinaryTreeNode, value int) *BinaryTreeNode { queue := queue.NewQueue() queue.Enqueue(b.Root) for !queue.IsEmpty() { currentNode := queue.Dequeue().(*BinaryTreeNode) if currentNode.Left != nil { if currentNode.Left.Value == value { return currentNode } queue.Enqueue(currentNode.Left) } if currentNode.Right != nil { if currentNode.Right.Value == value { return currentNode } queue.Enqueue(currentNode.Right) } } return nil }
datastructures/binarytree/binarytree.go
0.784649
0.460653
binarytree.go
starcoder
package imaging import ( "image" "image/color" ) // Clone returns a copy of the given image. func Clone(img image.Image) *image.NRGBA { dstBounds := img.Bounds().Sub(img.Bounds().Min) dst := image.NewNRGBA(dstBounds) switch src := img.(type) { case *image.NRGBA: copyNRGBA(dst, src) case *image.NRGBA64: copyNRGBA64(dst, src) case *image.RGBA: copyRGBA(dst, src) case *image.RGBA64: copyRGBA64(dst, src) case *image.Gray: copyGray(dst, src) case *image.Gray16: copyGray16(dst, src) case *image.YCbCr: copyYCbCr(dst, src) case *image.Paletted: copyPaletted(dst, src) default: copyImage(dst, src) } return dst } func copyNRGBA(dst *image.NRGBA, src *image.NRGBA) { srcMinX := src.Rect.Min.X srcMinY := src.Rect.Min.Y dstW := dst.Rect.Dx() dstH := dst.Rect.Dy() rowSize := dstW * 4 parallel(dstH, func(partStart, partEnd int) { for dstY := partStart; dstY < partEnd; dstY++ { di := dst.PixOffset(0, dstY) si := src.PixOffset(srcMinX, srcMinY+dstY) copy(dst.Pix[di:di+rowSize], src.Pix[si:si+rowSize]) } }) } func copyNRGBA64(dst *image.NRGBA, src *image.NRGBA64) { srcMinX := src.Rect.Min.X srcMinY := src.Rect.Min.Y dstW := dst.Rect.Dx() dstH := dst.Rect.Dy() parallel(dstH, func(partStart, partEnd int) { for dstY := partStart; dstY < partEnd; dstY++ { di := dst.PixOffset(0, dstY) si := src.PixOffset(srcMinX, srcMinY+dstY) for dstX := 0; dstX < dstW; dstX++ { dst.Pix[di+0] = src.Pix[si+0] dst.Pix[di+1] = src.Pix[si+2] dst.Pix[di+2] = src.Pix[si+4] dst.Pix[di+3] = src.Pix[si+6] di += 4 si += 8 } } }) } func copyRGBA(dst *image.NRGBA, src *image.RGBA) { srcMinX := src.Rect.Min.X srcMinY := src.Rect.Min.Y dstW := dst.Rect.Dx() dstH := dst.Rect.Dy() parallel(dstH, func(partStart, partEnd int) { for dstY := partStart; dstY < partEnd; dstY++ { di := dst.PixOffset(0, dstY) si := src.PixOffset(srcMinX, srcMinY+dstY) for dstX := 0; dstX < dstW; dstX++ { a := src.Pix[si+3] dst.Pix[di+3] = a switch a { case 0: dst.Pix[di+0] = 0 dst.Pix[di+1] = 0 dst.Pix[di+2] = 0 case 0xff: dst.Pix[di+0] = 
src.Pix[si+0] dst.Pix[di+1] = src.Pix[si+1] dst.Pix[di+2] = src.Pix[si+2] default: var tmp uint16 tmp = uint16(src.Pix[si+0]) * 0xff / uint16(a) dst.Pix[di+0] = uint8(tmp) tmp = uint16(src.Pix[si+1]) * 0xff / uint16(a) dst.Pix[di+1] = uint8(tmp) tmp = uint16(src.Pix[si+2]) * 0xff / uint16(a) dst.Pix[di+2] = uint8(tmp) } di += 4 si += 4 } } }) } func copyRGBA64(dst *image.NRGBA, src *image.RGBA64) { srcMinX := src.Rect.Min.X srcMinY := src.Rect.Min.Y dstW := dst.Rect.Dx() dstH := dst.Rect.Dy() parallel(dstH, func(partStart, partEnd int) { for dstY := partStart; dstY < partEnd; dstY++ { di := dst.PixOffset(0, dstY) si := src.PixOffset(srcMinX, srcMinY+dstY) for dstX := 0; dstX < dstW; dstX++ { a := src.Pix[si+6] dst.Pix[di+3] = a switch a { case 0: dst.Pix[di+0] = 0 dst.Pix[di+1] = 0 dst.Pix[di+2] = 0 case 0xff: dst.Pix[di+0] = src.Pix[si+0] dst.Pix[di+1] = src.Pix[si+2] dst.Pix[di+2] = src.Pix[si+4] default: var tmp uint16 tmp = uint16(src.Pix[si+0]) * 0xff / uint16(a) dst.Pix[di+0] = uint8(tmp) tmp = uint16(src.Pix[si+2]) * 0xff / uint16(a) dst.Pix[di+1] = uint8(tmp) tmp = uint16(src.Pix[si+4]) * 0xff / uint16(a) dst.Pix[di+2] = uint8(tmp) } di += 4 si += 8 } } }) } func copyGray(dst *image.NRGBA, src *image.Gray) { srcMinX := src.Rect.Min.X srcMinY := src.Rect.Min.Y dstW := dst.Rect.Dx() dstH := dst.Rect.Dy() parallel(dstH, func(partStart, partEnd int) { for dstY := partStart; dstY < partEnd; dstY++ { di := dst.PixOffset(0, dstY) si := src.PixOffset(srcMinX, srcMinY+dstY) for dstX := 0; dstX < dstW; dstX++ { c := src.Pix[si] dst.Pix[di+0] = c dst.Pix[di+1] = c dst.Pix[di+2] = c dst.Pix[di+3] = 0xff di += 4 si++ } } }) } func copyGray16(dst *image.NRGBA, src *image.Gray16) { srcMinX := src.Rect.Min.X srcMinY := src.Rect.Min.Y dstW := dst.Rect.Dx() dstH := dst.Rect.Dy() parallel(dstH, func(partStart, partEnd int) { for dstY := partStart; dstY < partEnd; dstY++ { di := dst.PixOffset(0, dstY) si := src.PixOffset(srcMinX, srcMinY+dstY) for dstX := 0; dstX < dstW; 
dstX++ { c := src.Pix[si] dst.Pix[di+0] = c dst.Pix[di+1] = c dst.Pix[di+2] = c dst.Pix[di+3] = 0xff di += 4 si += 2 } } }) } func copyYCbCr(dst *image.NRGBA, src *image.YCbCr) { srcMinX := src.Rect.Min.X srcMinY := src.Rect.Min.Y dstW := dst.Rect.Dx() dstH := dst.Rect.Dy() parallel(dstH, func(partStart, partEnd int) { for dstY := partStart; dstY < partEnd; dstY++ { srcY := srcMinY + dstY di := dst.PixOffset(0, dstY) for dstX := 0; dstX < dstW; dstX++ { srcX := srcMinX + dstX siy := (srcY-srcMinY)*src.YStride + (srcX - srcMinX) var sic int switch src.SubsampleRatio { case image.YCbCrSubsampleRatio444: sic = (srcY-srcMinY)*src.CStride + (srcX - srcMinX) case image.YCbCrSubsampleRatio422: sic = (srcY-srcMinY)*src.CStride + (srcX/2 - srcMinX/2) case image.YCbCrSubsampleRatio420: sic = (srcY/2-srcMinY/2)*src.CStride + (srcX/2 - srcMinX/2) case image.YCbCrSubsampleRatio440: sic = (srcY/2-srcMinY/2)*src.CStride + (srcX - srcMinX) default: sic = src.COffset(srcX, srcY) } y := int32(src.Y[siy]) cb := int32(src.Cb[sic]) - 128 cr := int32(src.Cr[sic]) - 128 r := (y<<16 + 91881*cr + 1<<15) >> 16 if r > 255 { r = 255 } else if r < 0 { r = 0 } g := (y<<16 - 22554*cb - 46802*cr + 1<<15) >> 16 if g > 255 { g = 255 } else if g < 0 { g = 0 } b := (y<<16 + 116130*cb + 1<<15) >> 16 if b > 255 { b = 255 } else if b < 0 { b = 0 } dst.Pix[di+0] = uint8(r) dst.Pix[di+1] = uint8(g) dst.Pix[di+2] = uint8(b) dst.Pix[di+3] = 255 di += 4 } } }) } func copyPaletted(dst *image.NRGBA, src *image.Paletted) { srcMinX := src.Rect.Min.X srcMinY := src.Rect.Min.Y dstW := dst.Rect.Dx() dstH := dst.Rect.Dy() plen := len(src.Palette) pnew := make([]color.NRGBA, plen) for i := 0; i < plen; i++ { pnew[i] = color.NRGBAModel.Convert(src.Palette[i]).(color.NRGBA) } parallel(dstH, func(partStart, partEnd int) { for dstY := partStart; dstY < partEnd; dstY++ { di := dst.PixOffset(0, dstY) si := src.PixOffset(srcMinX, srcMinY+dstY) for dstX := 0; dstX < dstW; dstX++ { c := pnew[src.Pix[si]] dst.Pix[di+0] = c.R 
dst.Pix[di+1] = c.G dst.Pix[di+2] = c.B dst.Pix[di+3] = c.A di += 4 si++ } } }) } func copyImage(dst *image.NRGBA, src image.Image) { srcMinX := src.Bounds().Min.X srcMinY := src.Bounds().Min.Y dstW := dst.Bounds().Dx() dstH := dst.Bounds().Dy() parallel(dstH, func(partStart, partEnd int) { for dstY := partStart; dstY < partEnd; dstY++ { di := dst.PixOffset(0, dstY) for dstX := 0; dstX < dstW; dstX++ { c := color.NRGBAModel.Convert(src.At(srcMinX+dstX, srcMinY+dstY)).(color.NRGBA) dst.Pix[di+0] = c.R dst.Pix[di+1] = c.G dst.Pix[di+2] = c.B dst.Pix[di+3] = c.A di += 4 } } }) } // toNRGBA converts any image type to *image.NRGBA with min-point at (0, 0). func toNRGBA(img image.Image) *image.NRGBA { if img, ok := img.(*image.NRGBA); ok && img.Bounds().Min.Eq(image.ZP) { return img } return Clone(img) }
vendor/github.com/disintegration/imaging/clone.go
0.625667
0.578389
clone.go
starcoder
package models import ( "encoding/json" "fmt" "strconv" ) // Features a Feature result set type Features []Feature func (a Features) Len() int { return len(a) } func (a Features) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a Features) Less(i, j int) bool { return a[i].Key < a[j].Key } // FeatureType accepted feature types type FeatureType string const ( // Percentile percentile `FeatureType` Percentile FeatureType = "percentile" // Boolean boolean `FeatureType` Boolean FeatureType = "boolean" // Invalid invalid `FeatureType` Invalid FeatureType = "invalid" // FeatureScope scoping for feature keys FeatureScope = "features" ) // ParseValueAndFeatureType string to type helper func ParseValueAndFeatureType(v string) (interface{}, FeatureType) { b, err := strconv.ParseBool(v) if err == nil && v != "0" && v != "1" { return b, Boolean } f, err := strconv.ParseFloat(v, 64) if err == nil { return f, Percentile } i, err := strconv.ParseInt(v, 10, 64) if err == nil { return i, Percentile } return nil, Invalid } // Feature KV model for feature flags type Feature struct { FeatureType FeatureType `json:"feature_type"` Key string `json:"key"` Namespace string `json:"namespace"` Scope string `json:"scope"` Value interface{} `json:"value"` Comment string `json:"comment"` UpdatedBy string `json:"updated_by"` } // GetScope scope accessor func (f *Feature) GetScope() string { if f.Scope == "" { f.Scope = DefaultScope } return f.Scope } // GetNamespace formats the fully scoped namespace func (f *Feature) GetNamespace() string { return fmt.Sprintf("%s/%s", f.Namespace, FeatureScope) } // ScopedKey expanded key with namespace and scope func (f *Feature) ScopedKey() string { return fmt.Sprintf("%s/%s/%s", f.GetNamespace(), f.GetScope(), f.Key) } // NewFeature create a Feature func NewFeature(name string, value interface{}, comment string, user string, scope string, ns string) (f *Feature) { var ft FeatureType switch value.(type) { case float64: ft = Percentile case bool: ft = Boolean 
} f = &Feature{ Key: name, Value: value, FeatureType: ft, Comment: comment, UpdatedBy: user, Scope: scope, Namespace: ns, } return } // FloatValue cast Value to float64 func (f *Feature) FloatValue() float64 { return f.Value.(float64) } // BoolValue cast Value to bool func (f *Feature) BoolValue() bool { return f.Value.(bool) } // ToJSON marshal feature to json func (f *Feature) ToJSON() ([]byte, error) { return json.Marshal(f) }
models/feature.go
0.774071
0.405508
feature.go
starcoder
package etree import ( "strconv" "strings" ) /* A Path is an object that represents an optimized version of an XPath-like search string. Although path strings are XPath-like, only the following limited syntax is supported: . Selects the current element .. Selects the parent of the current element * Selects all child elements // Selects all descendants of the current element tag Selects all child elements with the given tag [#] Selects the element of the given index (1-based, negative starts from the end) [@attrib] Selects all elements with the given attribute [@attrib='val'] Selects all elements with the given attribute set to val [tag] Selects all elements with a child element named tag [tag='val'] Selects all elements with a child element named tag and text matching val [text()] Selects all elements with non-empty text [text()='val'] Selects all elements whose text matches val Examples: Select the title elements of all descendant book elements having a 'category' attribute of 'WEB': //book[@category='WEB']/title Select the first book element with a title child containing the text 'Great Expectations': .//book[title='Great Expectations'][1] Starting from the current element, select all children of book elements with an attribute 'language' set to 'english': ./book/*[@language='english'] Starting from the current element, select all children of book elements containing the text 'special': ./book/*[text()='special'] Select all descendant book elements whose title element has an attribute 'language' set to 'french': //book/title[@language='french']/.. */ type Path struct { segments []segment } // ErrPath is returned by path functions when an invalid etree path is provided. type ErrPath string // Error returns the string describing a path error. func (err ErrPath) Error() string { return "etree: " + string(err) } // CompilePath creates an optimized version of an XPath-like string that // can be used to query elements in an element tree. 
func CompilePath(path string) (Path, error) { var comp compiler segments := comp.parsePath(path) if comp.err != ErrPath("") { return Path{nil}, comp.err } return Path{segments}, nil } // MustCompilePath creates an optimized version of an XPath-like string that // can be used to query elements in an element tree. Panics if an error // occurs. Use this function to create Paths when you know the path is // valid (i.e., if it's hard-coded). func MustCompilePath(path string) Path { p, err := CompilePath(path) if err != nil { panic(err) } return p } // A segment is a portion of a path between "/" characters. // It contains one selector and zero or more [filters]. type segment struct { sel selector filters []filter } func (seg *segment) apply(e *Element, p *pather) { seg.sel.apply(e, p) for _, f := range seg.filters { f.apply(p) } } // A selector selects XML elements for consideration by the // path traversal. type selector interface { apply(e *Element, p *pather) } // A filter pares down a list of candidate XML elements based // on a path filter in [brackets]. type filter interface { apply(p *pather) } // A pather is helper object that traverses an element tree using // a Path object. It collects and deduplicates all elements matching // the path query. type pather struct { queue fifo results []*Element inResults map[*Element]bool candidates []*Element scratch []*Element // used by filters } // A node represents an element and the remaining path segments that // should be applied against it by the pather. type node struct { e *Element segments []segment } func newPather() *pather { return &pather{ results: make([]*Element, 0), inResults: make(map[*Element]bool), candidates: make([]*Element, 0), scratch: make([]*Element, 0), } } // traverse follows the path from the element e, collecting // and then returning all elements that match the path's selectors // and filters. 
func (p *pather) traverse(e *Element, path Path) []*Element { for p.queue.add(node{e, path.segments}); p.queue.len() > 0; { p.eval(p.queue.remove().(node)) } return p.results } // eval evalutes the current path node by applying the remaining // path's selector rules against the node's element. func (p *pather) eval(n node) { p.candidates = p.candidates[0:0] seg, remain := n.segments[0], n.segments[1:] seg.apply(n.e, p) if len(remain) == 0 { for _, c := range p.candidates { if in := p.inResults[c]; !in { p.inResults[c] = true p.results = append(p.results, c) } } } else { for _, c := range p.candidates { p.queue.add(node{c, remain}) } } } // A compiler generates a compiled path from a path string. type compiler struct { err ErrPath } // parsePath parses an XPath-like string describing a path // through an element tree and returns a slice of segment // descriptors. func (c *compiler) parsePath(path string) []segment { // If path starts or ends with //, fix it if strings.HasPrefix(path, "//") { path = "." + path } if strings.HasSuffix(path, "//") { path = path + "*" } // Paths cannot be absolute if strings.HasPrefix(path, "/") { c.err = ErrPath("paths cannot be absolute.") return nil } // Split path into segment objects var segments []segment for _, s := range splitPath(path) { segments = append(segments, c.parseSegment(s)) if c.err != ErrPath("") { break } } return segments } func splitPath(path string) []string { pieces := make([]string, 0) start := 0 inquote := false for i := 0; i+1 <= len(path); i++ { if path[i] == '\'' { inquote = !inquote } else if path[i] == '/' && !inquote { pieces = append(pieces, path[start:i]) start = i + 1 } } return append(pieces, path[start:]) } // parseSegment parses a path segment between / characters. 
func (c *compiler) parseSegment(path string) segment { pieces := strings.Split(path, "[") seg := segment{ sel: c.parseSelector(pieces[0]), filters: make([]filter, 0), } for i := 1; i < len(pieces); i++ { fpath := pieces[i] if fpath[len(fpath)-1] != ']' { c.err = ErrPath("path has invalid filter [brackets].") break } seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1])) } return seg } // parseSelector parses a selector at the start of a path segment. func (c *compiler) parseSelector(path string) selector { switch path { case ".": return new(selectSelf) case "..": return new(selectParent) case "*": return new(selectChildren) case "": return new(selectDescendants) default: return newSelectChildrenByTag(path) } } // parseFilter parses a path filter contained within [brackets]. func (c *compiler) parseFilter(path string) filter { if len(path) == 0 { c.err = ErrPath("path contains an empty filter expression.") return nil } // Filter contains [@attr='val'], [text()='val'], or [tag='val']? eqindex := strings.Index(path, "='") if eqindex >= 0 { rindex := nextIndex(path, "'", eqindex+2) if rindex != len(path)-1 { c.err = ErrPath("path has mismatched filter quotes.") return nil } switch { case path[0] == '@': return newFilterAttrVal(path[1:eqindex], path[eqindex+2:rindex]) case strings.HasPrefix(path, "text()"): return newFilterTextVal(path[eqindex+2 : rindex]) default: return newFilterChildText(path[:eqindex], path[eqindex+2:rindex]) } } // Filter contains [@attr], [N], [tag] or [text()] switch { case path[0] == '@': return newFilterAttr(path[1:]) case path == "text()": return newFilterText() case isInteger(path): pos, _ := strconv.Atoi(path) switch { case pos > 0: return newFilterPos(pos - 1) default: return newFilterPos(pos) } default: return newFilterChild(path) } } // selectSelf selects the current element into the candidate list. 
type selectSelf struct{} func (s *selectSelf) apply(e *Element, p *pather) { p.candidates = append(p.candidates, e) } // selectParent selects the element's parent into the candidate list. type selectParent struct{} func (s *selectParent) apply(e *Element, p *pather) { if e.parent != nil { p.candidates = append(p.candidates, e.parent) } } // selectChildren selects the element's child elements into the // candidate list. type selectChildren struct{} func (s *selectChildren) apply(e *Element, p *pather) { for _, c := range e.Child { if c, ok := c.(*Element); ok { p.candidates = append(p.candidates, c) } } } // selectDescendants selects all descendant child elements // of the element into the candidate list. type selectDescendants struct{} func (s *selectDescendants) apply(e *Element, p *pather) { var queue fifo for queue.add(e); queue.len() > 0; { e := queue.remove().(*Element) p.candidates = append(p.candidates, e) for _, c := range e.Child { if c, ok := c.(*Element); ok { queue.add(c) } } } } // selectChildrenByTag selects into the candidate list all child // elements of the element having the specified tag. type selectChildrenByTag struct { space, tag string } func newSelectChildrenByTag(path string) *selectChildrenByTag { s, l := spaceDecompose(path) return &selectChildrenByTag{s, l} } func (s *selectChildrenByTag) apply(e *Element, p *pather) { for _, c := range e.Child { if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag { p.candidates = append(p.candidates, c) } } } // filterPos filters the candidate list, keeping only the // candidate at the specified index. 
type filterPos struct { index int } func newFilterPos(pos int) *filterPos { return &filterPos{pos} } func (f *filterPos) apply(p *pather) { if f.index >= 0 { if f.index < len(p.candidates) { p.scratch = append(p.scratch, p.candidates[f.index]) } } else { if -f.index <= len(p.candidates) { p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index]) } } p.candidates, p.scratch = p.scratch, p.candidates[0:0] } // filterAttr filters the candidate list for elements having // the specified attribute. type filterAttr struct { space, key string } func newFilterAttr(str string) *filterAttr { s, l := spaceDecompose(str) return &filterAttr{s, l} } func (f *filterAttr) apply(p *pather) { for _, c := range p.candidates { for _, a := range c.Attr { if spaceMatch(f.space, a.Space) && f.key == a.Key { p.scratch = append(p.scratch, c) break } } } p.candidates, p.scratch = p.scratch, p.candidates[0:0] } // filterAttrVal filters the candidate list for elements having // the specified attribute with the specified value. type filterAttrVal struct { space, key, val string } func newFilterAttrVal(str, value string) *filterAttrVal { s, l := spaceDecompose(str) return &filterAttrVal{s, l, value} } func (f *filterAttrVal) apply(p *pather) { for _, c := range p.candidates { for _, a := range c.Attr { if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value { p.scratch = append(p.scratch, c) break } } } p.candidates, p.scratch = p.scratch, p.candidates[0:0] } // filterText filters the candidate list for elements having text. type filterText struct{} func newFilterText() *filterText { return &filterText{} } func (f *filterText) apply(p *pather) { for _, c := range p.candidates { if c.Text() != "" { p.scratch = append(p.scratch, c) } } p.candidates, p.scratch = p.scratch, p.candidates[0:0] } // filterTextVal filters the candidate list for elements having // text equal to the specified value. 
type filterTextVal struct { val string } func newFilterTextVal(value string) *filterTextVal { return &filterTextVal{value} } func (f *filterTextVal) apply(p *pather) { for _, c := range p.candidates { if c.Text() == f.val { p.scratch = append(p.scratch, c) } } p.candidates, p.scratch = p.scratch, p.candidates[0:0] } // filterChild filters the candidate list for elements having // a child element with the specified tag. type filterChild struct { space, tag string } func newFilterChild(str string) *filterChild { s, l := spaceDecompose(str) return &filterChild{s, l} } func (f *filterChild) apply(p *pather) { for _, c := range p.candidates { for _, cc := range c.Child { if cc, ok := cc.(*Element); ok && spaceMatch(f.space, cc.Space) && f.tag == cc.Tag { p.scratch = append(p.scratch, c) } } } p.candidates, p.scratch = p.scratch, p.candidates[0:0] } // filterChildText filters the candidate list for elements having // a child element with the specified tag and text. type filterChildText struct { space, tag, text string } func newFilterChildText(str, text string) *filterChildText { s, l := spaceDecompose(str) return &filterChildText{s, l, text} } func (f *filterChildText) apply(p *pather) { for _, c := range p.candidates { for _, cc := range c.Child { if cc, ok := cc.(*Element); ok && spaceMatch(f.space, cc.Space) && f.tag == cc.Tag && f.text == cc.Text() { p.scratch = append(p.scratch, c) } } } p.candidates, p.scratch = p.scratch, p.candidates[0:0] }
pkg/terraform/exec/vendor/github.com/beevik/etree/path.go
0.777596
0.500549
path.go
starcoder
* (https://github.com/sipa/bips/blob/bip-schnorr/bip-schnorr.mediawiki). */ package secp256k1 /* #include <stdlib.h> #include "include/secp256k1_schnorrsig.h" static unsigned char** makeBytesArray(int size) { return !size ? NULL : calloc(sizeof(unsigned char*), size); } static void setBytesArray(unsigned char** a, unsigned char* v, int i) { if(a) a[i] = v; } static unsigned char* getBytesArray(unsigned char** a, int i) { return !a ? NULL : a[i]; } static void freeBytesArray(unsigned char** a) { if(a) free(a); } static secp256k1_schnorrsig** makeSchnorrsigArray(int size) { return !size ? NULL : calloc(sizeof(secp256k1_schnorrsig*), size); } static void setSchnorrsigArray(secp256k1_schnorrsig** a, secp256k1_schnorrsig* v, int i) { if (a) a[i] = v; } static void freeSchnorrsigArray(secp256k1_schnorrsig** a) { if(a) free(a); } static secp256k1_pubkey** makePubkeyArray(int size) { return !size ? NULL : calloc(sizeof(secp256k1_pubkey*), size); } static void setPubkeyArray(secp256k1_pubkey **a, secp256k1_pubkey *pubkey, int n) { if (a) a[n] = pubkey; } static void freePubkeyArray(secp256k1_pubkey **a) { if (a) free(a); } */ //#cgo CFLAGS: -I${SRCDIR}/secp256k1-zkp -I${SRCDIR}/secp256k1-zkp/src import "C" import ( "errors" ) /** Pointer to opaque data structure that holds a parsed Schnorr signature. * * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. * If you need to convert to a format suitable for storage, transmission, or * comparison, use the `secp256k1_schnorrsig_serialize` and * `secp256k1_schnorrsig_parse` functions. 
*/ type Schnorrsig struct { c *C.secp256k1_schnorrsig } const ( ErrorSchnorrsigSize string = "Schnorr signature data expected length is 64 bytes" ErrorSchnorrsigParse string = "Unable to parse the data as a Schnorr signature" ErrorSchnorrsigCount string = "Number of elements differ in input arrays" ErrorSchnorrsigSerialize string = "Unable to serialize the data as a Schnorr signature" ErrorSchnorrsigSign string = "Error creating Schnorr signature" ErrorSchnorrsigVerify string = "Error verifying Schnorr signature" ) func newSchnorrsig() *Schnorrsig { return &Schnorrsig{ &C.secp256k1_schnorrsig{}, } } /** Parse sequence of bytes as a schnorrsig object. * * Returns: 1 if input contains a valid schnorr signature * Args: ctx: a secp256k1 context object. * In: data: 64-byte serialized data * Out: status, *Schnorrsig, error * * The signature is serialized in the form R||s, where R is a 32-byte public * key (x-coordinate only; the y-coordinate is considered to be the unique * y-coordinate satisfying the curve equation that is a quadratic residue) * and s is a 32-byte big-endian scalar. * * After the call, sig will always be initialized. If parsing failed or the * encoded numbers are out of range, signature validation with it is * guaranteed to fail for every message and public key. */ func SchnorrsigParse( context *Context, data []byte, ) ( schnorrsig *Schnorrsig, failure error, ) { schnorrsig = newSchnorrsig() if 1 != int( C.secp256k1_schnorrsig_parse( context.ctx, schnorrsig.c, cBuf(data))) { return nil, errors.New(ErrorSchnorrsigParse) } return schnorrsig, nil } /** Serialize Schnorr signature into byte sequence. * * Returns: 1 on success, 0 on failure * Args: ctx: a secp256k1 context object. 
* In: Schnorrsig a Schnorr signature object * Out: status, data, error: 64-byte byte array */ func SchnorrsigSerialize( context *Context, schnorrsig *Schnorrsig, ) ( []byte, error, ) { var data [64]byte if 1 != int( C.secp256k1_schnorrsig_serialize( context.ctx, cBuf(data[:]), schnorrsig.c)) { return nil, errors.New(ErrorSchnorrsigSerialize) } return data[:], nil } /** Create a Schnorr signature * * In: ctx: pointer to a context object * hash32: 32-byte message hash being signed * seckey: 32-byte secret key // TODO: * noncefunc: optional custom nonce generation function, the default one is secp256k1_nonce_function_bipschnorr // * nonceseed: optional seed data for the custom nonce generation function * * Out: schnorrsig: pointer to resulting Schnorr signature * noncenegated: non-zero if signing algorithm negated the nonce * * Returns: 1: Success * 0: Failure */ /* DEFAULT NONCE FUNCTION: * This nonce function is described in BIP-schnorr * (https://github.com/sipa/bips/blob/bip-schnorr/bip-schnorr.mediawiki) */ // FYI: code of the default function /* static int secp256k1_nonce_function_bipschnorr(unsigned char* nonce32, const unsigned char* msg32, const unsigned char* key32, const unsigned char* algo16, void* data, unsigned int counter) { secp256k1_sha256 sha; (void)data; (void)counter; VERIFY_CHECK(counter == 0); // Hash x||msg as per the spec secp256k1_sha256_initialize(&sha); secp256k1_sha256_write(&sha, key32, 32); secp256k1_sha256_write(&sha, msg32, 32); // Hash in algorithm, which is not in the spec, but may be critical to // users depending on it to avoid nonce reuse across algorithms. 
if (algo16 != NULL) { secp256k1_sha256_write(&sha, algo16, 16); } secp256k1_sha256_finalize(&sha, nonce32); return 1; } */ func SchnorrsigSign( context *Context, hash32 [32]byte, seckey [32]byte, // noncefunc *NonceGenerator, // nonceseed []byte, ) ( schnorrsig *Schnorrsig, noncenegated bool, err error, ) { schnorrsig = newSchnorrsig() var noncenegatedint C.int if 1 != int( C.secp256k1_schnorrsig_sign( context.ctx, schnorrsig.c, &noncenegatedint, cBuf(hash32[:]), cBuf(seckey[:]), nil, nil)) { return nil, false, errors.New(ErrorSchnorrsigSign) } noncenegated = 1 == int(noncenegatedint) return } /** Verify a Schnorr signature. * * Returns: 1: correct signature * 0: incorrect or unparseable signature * Args: ctx: a secp256k1 context object, initialized for verification. * In: sig: the signature being verified (cannot be NULL) * msg32: the 32-byte message hash being verified (cannot be NULL) * pubkey: pointer to a public key to verify with (cannot be NULL) */ func SchnorrsigVerify( context *Context, schnorrsig *Schnorrsig, msg []byte, pubkey *PublicKey, ) ( err error, ) { if 1 != C.secp256k1_schnorrsig_verify( context.ctx, schnorrsig.c, cBuf(msg), pubkey.pk) { return errors.New(ErrorSchnorrsigVerify) } return } /** Verifies a set of Schnorr signatures. * * Returns 1 if all succeeded, 0 otherwise. In particular, returns 1 if n_sigs is 0. * * Args: ctx: a secp256k1 context object, initialized for verification. * scratch: scratch space used for the multiexponentiation * In: sig: array of signatures, or NULL if there are no signatures * msg32: array of messages, or NULL if there are no signatures * pk: array of public keys, or NULL if there are no signatures * n_sigs: number of signatures in above arrays. Must be smaller than * 2^31 and smaller than half the maximum size_t value. Must be 0 * if above arrays are NULL. 
*/ func SchnorrsigVerifyBatch( context *Context, scratch *ScratchSpace, sig []*Schnorrsig, data [][32]byte, pubkey []*PublicKey, ) ( err error, ) { sl := len(sig) dl := len(data) kl := len(pubkey) if sl != dl || sl != kl { return errors.New(ErrorSchnorrsigCount) } ss := C.makeSchnorrsigArray(C.int(sl)) ds := C.makeBytesArray(C.int(sl)) ks := C.makePubkeyArray(C.int(sl)) for i := 0; i < sl; i++ { C.setSchnorrsigArray(ss, sig[i].c, C.int(i)) C.setBytesArray(ds, cBuf(data[i][:]), C.int(i)) C.setPubkeyArray(ks, pubkey[i].pk, C.int(i)) } defer C.freeSchnorrsigArray(ss) defer C.freeBytesArray(ds) defer C.freePubkeyArray(ks) if 1 != int( C.secp256k1_schnorrsig_verify_batch( context.ctx, scratch.scr, ss, ds, ks, C.size_t(sl))) { return errors.New(ErrorSchnorrsigVerify) } return } /* SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_schnorrsig_verify_batch( const secp256k1_context* ctx, secp256k1_scratch_space* scratch, const secp256k1_schnorrsig* const* sig, const unsigned char* const* msg32, const secp256k1_pubkey* const* pk, size_t n_sigs ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); */
schnorrsig.go
0.734024
0.444263
schnorrsig.go
starcoder
package constfold import ( "github.com/VKCOM/noverify/src/meta" ) // Plus performs arithmetic "+". func Plus(x, y meta.ConstValue) meta.ConstValue { switch x.Type { case meta.Integer: if y.Type == meta.Integer { return meta.NewIntConst(x.GetInt() + y.GetInt()) } case meta.Float: if y.Type == meta.Float { return meta.NewFloatConst(x.GetFloat() + y.GetFloat()) } } return meta.UnknownValue } // Minus performs arithmetic "-". func Minus(x, y meta.ConstValue) meta.ConstValue { switch x.Type { case meta.Integer: if y.Type == meta.Integer { return meta.NewIntConst(x.GetInt() - y.GetInt()) } case meta.Float: if y.Type == meta.Float { return meta.NewFloatConst(x.GetFloat() - y.GetFloat()) } } return meta.UnknownValue } // Mul performs arithmetic "*". func Mul(x, y meta.ConstValue) meta.ConstValue { switch x.Type { case meta.Integer: if y.Type == meta.Integer { return meta.NewIntConst(x.GetInt() * y.GetInt()) } case meta.Float: if y.Type == meta.Float { return meta.NewFloatConst(x.GetFloat() * y.GetFloat()) } } return meta.UnknownValue } // Concat performs string "." operation. func Concat(x, y meta.ConstValue) meta.ConstValue { v1, ok1 := x.ToString() v2, ok2 := y.ToString() if ok1 && ok2 { return meta.NewStringConst(v1 + v2) } return meta.UnknownValue } // Or performs logical "||". // Also works for "or" operator. func Or(x, y meta.ConstValue) meta.ConstValue { v1, ok1 := x.ToBool() v2, ok2 := y.ToBool() if ok1 && ok2 { return meta.NewBoolConst(v1 || v2) } return meta.UnknownValue } // And performs logical "&&". // Also works for "and" operator. func And(x, y meta.ConstValue) meta.ConstValue { v1, ok1 := x.ToBool() v2, ok2 := y.ToBool() if ok1 && ok2 { return meta.NewBoolConst(v1 && v2) } return meta.UnknownValue } // BitOr performs bitwise "|". func BitOr(x, y meta.ConstValue) meta.ConstValue { v1, ok1 := x.ToInt() v2, ok2 := y.ToInt() if ok1 && ok2 { return meta.NewIntConst(v1 | v2) } return meta.UnknownValue } // BitAnd performs bitwise "&". 
func BitAnd(x, y meta.ConstValue) meta.ConstValue { v1, ok1 := x.ToInt() v2, ok2 := y.ToInt() if ok1 && ok2 { return meta.NewIntConst(v1 & v2) } return meta.UnknownValue }
src/constfold/binary_op.go
0.647352
0.430686
binary_op.go
starcoder
package cornellbox import ( "math/rand" "github.com/peterstace/grayt/scene" . "github.com/peterstace/grayt/scene/dsl" "github.com/peterstace/grayt/xmath" ) func SphereTree() scene.Scene { cam := CornellCam(1.3) cam.LookingAt = Vect(0.5, 0.25, -0.5) cam.FieldOfViewInRadians *= 0.95 cam.AspectWide = 2 cam.AspectHigh = 1 return scene.Scene{ Camera: cam, Objects: []scene.Object{ scene.Object{ Material: scene.Material{Colour: White, Emittance: 5}, Surface: CornellCeilingLight(), }, scene.Object{ Material: scene.Material{Colour: White}, Surface: MergeSurfaces( CornellFloor, CornellBackWall, CornellCeiling, tree(), ), }, scene.Object{ Material: scene.Material{Colour: Red}, Surface: CornellLeftWall, }, scene.Object{ Material: scene.Material{Colour: Green}, Surface: CornellRightWall, }, }, } } type sphere struct { c xmath.Vector r float64 } func tree() scene.Surface { root := sphere{xmath.Vect(0.5, 0, -0.5), 0.2} spheres := new([]sphere) *spheres = append(*spheres, root) recurse(spheres, root, 9) var surf scene.Surface for _, s := range *spheres { surf.Spheres = append(surf.Spheres, scene.Sphere{s.c, s.r}) } return surf } const radiusScaleDown = 0.7 func recurse(spheres *[]sphere, parent sphere, level int) { if level == 0 { return } child1, child2 := findChildren(spheres, parent) *spheres = append(*spheres, child1) *spheres = append(*spheres, child2) recurse(spheres, child1, level-1) recurse(spheres, child2, level-1) } func findChildren(spheres *[]sphere, parent sphere) (sphere, sphere) { var child1, child2 sphere for true { child1 = createChild(parent) child2 = createChild(parent) if !isValidChild(child1, parent, spheres) { continue } if !isValidChild(child2, parent, spheres) { continue } if spheresIntersect(child1, child2) { continue } break } return child1, child2 } // TODO: this should be reset at the start of each new scene generation var rnd = rand.New(rand.NewSource(0)) func createChild(parent sphere) sphere { rndUnit := xmath.Vector{rnd.NormFloat64(), 
rnd.NormFloat64(), rnd.NormFloat64()}.Unit() return sphere{ parent.c.Add(rndUnit.Scale(parent.r)), radiusScaleDown * parent.r, } } func isValidChild(child, parent sphere, spheres *[]sphere) bool { // Check for intersection with other spheres (ignore the parent). for _, s := range *spheres { if s.c == parent.c && s.r == parent.r { continue } if spheresIntersect(s, child) { return false } } // Check for wall/floor/ceiling intersection. return true && child.c.X > child.r && child.c.X < 1.0-child.r && child.c.Y > child.r && child.c.Y < 1.0-child.r && child.c.Z < -child.r && child.c.Z > -1.0+child.r } func spheresIntersect(s1, s2 sphere) bool { return s1.c.Sub(s2.c).Length() < s1.r+s2.r }
scene/cornellbox/spheretree.go
0.677794
0.409752
spheretree.go
starcoder
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix import "time" // TimespecToNsec converts a Timespec value into a number of // nanoseconds since the Unix epoch. func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } // NsecToTimespec takes a number of nanoseconds since the Unix epoch // and returns the corresponding Timespec value. func NsecToTimespec(nsec int64) Timespec { sec := nsec / 1e9 nsec = nsec % 1e9 if nsec < 0 { nsec += 1e9 sec-- } return setTimespec(sec, nsec) } // TimeToTimespec converts t into a Timespec. // On some 32-bit systems the range of valid Timespec values are smaller // than that of time.Time values. So if t is out of the valid range of // Timespec, it returns a zero Timespec and ERANGE. func TimeToTimespec(t time.Time) (Timespec, error) { sec := t.Unix() nsec := int64(t.Nanosecond()) ts := setTimespec(sec, nsec) // Currently all targets have either int32 or int64 for Timespec.Sec. // If there were a new target with floating point type for it, we have // to consider the rounding error. if int64(ts.Sec) != sec { return Timespec{}, ERANGE } return ts, nil } // TimevalToNsec converts a Timeval value into a number of nanoseconds // since the Unix epoch. func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } // NsecToTimeval takes a number of nanoseconds since the Unix epoch // and returns the corresponding Timeval value. func NsecToTimeval(nsec int64) Timeval { nsec += 999 // round up to microsecond usec := nsec % 1e9 / 1e3 sec := nsec / 1e9 if usec < 0 { usec += 1e6 sec-- } return setTimeval(sec, usec) } // Unix returns ts as the number of seconds and nanoseconds elapsed since the // Unix epoch. func (ts *Timespec) Unix() (sec int64, nsec int64) { return int64(ts.Sec), int64(ts.Nsec) } // Unix returns tv as the number of seconds and nanoseconds elapsed since the // Unix epoch. 
func (tv *Timeval) Unix() (sec int64, nsec int64) { return int64(tv.Sec), int64(tv.Usec) * 1000 } // Nano returns ts as the number of nanoseconds elapsed since the Unix epoch. func (ts *Timespec) Nano() int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } // Nano returns tv as the number of nanoseconds elapsed since the Unix epoch. func (tv *Timeval) Nano() int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 }
vendor/golang.org/x/sys/unix/timestruct.go
0.834204
0.527012
timestruct.go
starcoder
package shcrypto import ( "bytes" "crypto/rand" "io" "math/big" bn256 "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" gocmp "github.com/google/go-cmp/cmp" "github.com/pkg/errors" ) var ( zeroG1 *bn256.G1 zeroG2 *bn256.G2 ) // Polynomial represents a polynomial over Z_q. type Polynomial []*big.Int // Gammas is a sequence of G2 points based on a polynomial. type Gammas []*bn256.G2 func init() { zeroG1 = new(bn256.G1).ScalarBaseMult(big.NewInt(0)) zeroG2 = new(bn256.G2).ScalarBaseMult(big.NewInt(0)) } // NewPolynomial creates a new polynomial from the given coefficients. It verifies the number and // range of them. func NewPolynomial(coefficients []*big.Int) (*Polynomial, error) { if len(coefficients) == 0 { return nil, errors.Errorf("no coefficients given") } for i, v := range coefficients { if v.Sign() < 0 { return nil, errors.Errorf("coefficient %d is negative (%d)", i, v) } if v.Cmp(bn256.Order) >= 0 { return nil, errors.Errorf("coefficient %d is too big (%d)", i, v) } } p := Polynomial(coefficients) return &p, nil } // Degree returns the degree of the polynomial. func (p *Polynomial) Degree() uint64 { return uint64(len(*p)) - 1 } // Degree returns the degree of the underlying polynomial. func (g *Gammas) Degree() uint64 { return uint64(len(*g)) - 1 } func (g Gammas) Equal(g2 Gammas) bool { gs := []*bn256.G2(g) gs2 := []*bn256.G2(g2) if len(gs) != len(gs2) { return false } for i := range gs { if !EqualG2(gs[i], gs2[i]) { return false } } return true } // ZeroGammas returns the zero value for gammas. func ZeroGammas(degree uint64) *Gammas { points := []*bn256.G2{} for i := uint64(0); i < degree+1; i++ { points = append(points, new(bn256.G2).Set(zeroG2)) } gammas := Gammas(points) return &gammas } // DegreeFromThreshold returns the degree polynomials should have for the given threshold. func DegreeFromThreshold(threshold uint64) uint64 { return threshold - 1 } // Eval evaluates the polynomial at the given coordinate. 
func (p *Polynomial) Eval(x *big.Int) *big.Int { // uses Horner's method res := new(big.Int).Set((*p)[p.Degree()]) for i := int(p.Degree()) - 1; i >= 0; i-- { res.Mul(res, x) res.Add(res, (*p)[i]) res.Mod(res, bn256.Order) } return res } // EvalForKeyper evaluates the polynomial at the position designated for the given keyper. func (p *Polynomial) EvalForKeyper(keyperIndex int) *big.Int { x := KeyperX(keyperIndex) return p.Eval(x) } // ValidEval checks if the given value is a valid polynomial evaluation, i.e., if it is in Z_q. func ValidEval(v *big.Int) bool { if v.Sign() < 0 { return false } if v.Cmp(bn256.Order) >= 0 { return false } return true } // Gammas computes the gamma values for a given polynomial. func (p *Polynomial) Gammas() *Gammas { gammas := Gammas{} for _, c := range *p { gamma := new(bn256.G2).ScalarBaseMult(c) gammas = append(gammas, gamma) } return &gammas } // Pi computes the pi value at the given x coordinate. func (g *Gammas) Pi(xi *big.Int) *bn256.G2 { xiToJ := big.NewInt(1) res := new(bn256.G2).Set(zeroG2) for _, gamma := range *g { p := new(bn256.G2).ScalarMult(gamma, xiToJ) res = new(bn256.G2).Add(res, p) xiToJ.Mul(xiToJ, xi) xiToJ.Mod(xiToJ, bn256.Order) } return res } // GobEncode encodes a Gammas value. See https://golang.org/pkg/encoding/gob/#GobEncoder func (g *Gammas) GobEncode() ([]byte, error) { buff := bytes.Buffer{} if g != nil { for _, g2 := range *g { buff.Write(g2.Marshal()) } } return buff.Bytes(), nil } // GobDecode decodes a Gammas value. See https://golang.org/pkg/encoding/gob/#GobDecoder func (g *Gammas) GobDecode(data []byte) error { var err error for len(data) > 0 { g2 := new(bn256.G2) data, err = g2.Unmarshal(data) if err != nil { return err } *g = append(*g, g2) } return nil } // KeyperX computes the x value assigned to the keyper identified by its index. 
func KeyperX(keyperIndex int) *big.Int { keyperIndexBig := big.NewInt(int64(keyperIndex)) return new(big.Int).Add(big.NewInt(1), keyperIndexBig) } // EqualG1 checks if two points on G1 are equal. func EqualG1(p1, p2 *bn256.G1) bool { p1Bytes := new(bn256.G1).Set(p1).Marshal() p2Bytes := new(bn256.G1).Set(p2).Marshal() return bytes.Equal(p1Bytes, p2Bytes) } var G1Comparer = gocmp.Comparer(EqualG1) // EqualG2 checks if two points on G2 are equal. func EqualG2(p1, p2 *bn256.G2) bool { p1Bytes := new(bn256.G2).Set(p1).Marshal() p2Bytes := new(bn256.G2).Set(p2).Marshal() return bytes.Equal(p1Bytes, p2Bytes) } var G2Comparer = gocmp.Comparer(EqualG2) // EqualGT checks if two points on GT are equal. func EqualGT(p1, p2 *bn256.GT) bool { p1Bytes := new(bn256.GT).Set(p1).Marshal() p2Bytes := new(bn256.GT).Set(p2).Marshal() return bytes.Equal(p1Bytes, p2Bytes) } var GTComparer = gocmp.Comparer(EqualGT) // VerifyPolyEval checks that the evaluation of a polynomial is consistent with the public gammas. func VerifyPolyEval(keyperIndex int, polyEval *big.Int, gammas *Gammas, threshold uint64) bool { if gammas.Degree() != threshold-1 { return false } rhs := new(bn256.G2).ScalarBaseMult(polyEval) lhs := gammas.Pi(KeyperX(keyperIndex)) return EqualG2(lhs, rhs) } // RandomPolynomial generates a random polynomial of given degree. func RandomPolynomial(r io.Reader, degree uint64) (*Polynomial, error) { coefficients := []*big.Int{} for i := uint64(0); i < degree+1; i++ { c, err := rand.Int(r, bn256.Order) if err != nil { return nil, errors.WithStack(err) } coefficients = append(coefficients, c) } return NewPolynomial(coefficients) }
shlib/shcrypto/feldman.go
0.802517
0.724256
feldman.go
starcoder
package iso20022 // Chain of parties involved in the settlement of a transaction, including receipts and deliveries, book transfers, treasury deals, or other activities, resulting in the movement of a security or amount of money from one account to another. type ReceivingPartiesAndAccount8 struct { // Party that buys goods or services, or a financial instrument. ReceiverDetails *InvestmentAccount24 `xml:"RcvrDtls,omitempty"` // Party that acts on behalf of the buyer of securities when the buyer does not have a direct relationship with the receiving agent. ReceiversCustodianDetails *PartyIdentificationAndAccount5 `xml:"RcvrsCtdnDtls,omitempty"` // Party that the Receiver's custodian uses to effect the receipt of a security, when the Receiver's custodian does not have a direct relationship with the Receiver agent. ReceiversIntermediaryDetails *PartyIdentificationAndAccount5 `xml:"RcvrsIntrmyDtls,omitempty"` // Party that receives securities from the delivering agent via the place of settlement, eg, securities central depository. ReceivingAgentDetails *PartyIdentificationAndAccount4 `xml:"RcvgAgtDtls"` // Identifies the securities settlement system to be used. SecuritiesSettlementSystem *Max35Text `xml:"SctiesSttlmSys,omitempty"` // Place where settlement of the securities takes place. 
PlaceOfSettlementDetails *PartyIdentification21 `xml:"PlcOfSttlmDtls,omitempty"` } func (r *ReceivingPartiesAndAccount8) AddReceiverDetails() *InvestmentAccount24 { r.ReceiverDetails = new(InvestmentAccount24) return r.ReceiverDetails } func (r *ReceivingPartiesAndAccount8) AddReceiversCustodianDetails() *PartyIdentificationAndAccount5 { r.ReceiversCustodianDetails = new(PartyIdentificationAndAccount5) return r.ReceiversCustodianDetails } func (r *ReceivingPartiesAndAccount8) AddReceiversIntermediaryDetails() *PartyIdentificationAndAccount5 { r.ReceiversIntermediaryDetails = new(PartyIdentificationAndAccount5) return r.ReceiversIntermediaryDetails } func (r *ReceivingPartiesAndAccount8) AddReceivingAgentDetails() *PartyIdentificationAndAccount4 { r.ReceivingAgentDetails = new(PartyIdentificationAndAccount4) return r.ReceivingAgentDetails } func (r *ReceivingPartiesAndAccount8) SetSecuritiesSettlementSystem(value string) { r.SecuritiesSettlementSystem = (*Max35Text)(&value) } func (r *ReceivingPartiesAndAccount8) AddPlaceOfSettlementDetails() *PartyIdentification21 { r.PlaceOfSettlementDetails = new(PartyIdentification21) return r.PlaceOfSettlementDetails }
ReceivingPartiesAndAccount8.go
0.662687
0.457016
ReceivingPartiesAndAccount8.go
starcoder
package gosang import ( "bytes" "image" "image/color" "io" "github.com/pkg/errors" ) // offsetedReader implements io.Reader combining io.ReaderAt and offset. type offsetedReader struct { r io.ReaderAt offset int64 } func (or *offsetedReader) Read(p []byte) (int, error) { n, err := or.r.ReadAt(p, or.offset) or.offset += int64(n) return n, err } func advanceWriter(w io.Writer, n int) error { b := []byte{0} for n > 0 { c := n if c > 8192 { c = 8192 } if _, err := w.Write(bytes.Repeat(b, c)); err != nil { return errors.Wrap(err, "failed to write empty bytes") } n -= c } return nil } func rgbaAt(img image.Image, x, y int) (r, g, b, a uint8) { switch p := img.At(x, y).(type) { case color.NRGBA: r, g, b, a = p.R, p.G, p.B, p.A default: tr, tg, tb, ta := p.RGBA() r, g, b, a = uint8(tr), uint8(tg), uint8(tb), uint8(ta) } return } // sprite8Palette is a color palette used by 8-bit color sprites. var sprite8Palette = color.Palette{ color.RGBA{0x00, 0x00, 0x00, 0xff}, color.RGBA{0x34, 0x5f, 0x2c, 0xff}, color.RGBA{0x34, 0x51, 0x2c, 0xff}, color.RGBA{0x34, 0x4a, 0x3f, 0xff}, color.RGBA{0x2c, 0x3f, 0x37, 0xff}, color.RGBA{0x2c, 0x42, 0x34, 0xff}, color.RGBA{0x2c, 0x42, 0x37, 0xff}, color.RGBA{0x29, 0x54, 0x25, 0xff}, color.RGBA{0x25, 0x4a, 0x29, 0xff}, color.RGBA{0x29, 0x3b, 0x29, 0xff}, color.RGBA{0x25, 0x34, 0x30, 0xff}, color.RGBA{0x25, 0x34, 0x37, 0xff}, color.RGBA{0x21, 0x34, 0x34, 0xff}, color.RGBA{0x21, 0x42, 0x1e, 0xff}, color.RGBA{0x1e, 0x34, 0x1e, 0xff}, color.RGBA{0x1a, 0x34, 0x29, 0xff}, color.RGBA{0xd1, 0xaf, 0x74, 0xff}, color.RGBA{0x88, 0x4d, 0x1e, 0xff}, color.RGBA{0x77, 0x5f, 0x37, 0xff}, color.RGBA{0xa8, 0x88, 0x54, 0xff}, color.RGBA{0x99, 0x7b, 0x4a, 0xff}, color.RGBA{0xbd, 0x9c, 0x5f, 0xff}, color.RGBA{0x4a, 0x3b, 0x13, 0xff}, color.RGBA{0x4d, 0x3f, 0x16, 0xff}, color.RGBA{0x3b, 0x29, 0x0b, 0xff}, color.RGBA{0x2c, 0x4d, 0x2c, 0xff}, color.RGBA{0x42, 0x74, 0x37, 0xff}, color.RGBA{0x42, 0x74, 0x34, 0xff}, color.RGBA{0x42, 0x6a, 0x3b, 0xff}, color.RGBA{0x3f, 
0x66, 0x3b, 0xff}, color.RGBA{0x3b, 0x5b, 0x34, 0xff}, color.RGBA{0x37, 0x5f, 0x30, 0xff}, color.RGBA{0xd1, 0x9c, 0x6d, 0xff}, color.RGBA{0xb2, 0x77, 0x1a, 0xff}, color.RGBA{0xd1, 0xbd, 0xb2, 0xff}, color.RGBA{0xc3, 0x95, 0x21, 0xff}, color.RGBA{0xb8, 0x7e, 0x16, 0xff}, color.RGBA{0xc9, 0xac, 0x70, 0xff}, color.RGBA{0xcc, 0xbd, 0xb5, 0xff}, color.RGBA{0xc3, 0x9f, 0x51, 0xff}, color.RGBA{0xb8, 0x74, 0x13, 0xff}, color.RGBA{0xc0, 0x8f, 0x1a, 0xff}, color.RGBA{0xc0, 0x99, 0x46, 0xff}, color.RGBA{0xcc, 0xb5, 0x92, 0xff}, color.RGBA{0xb2, 0x66, 0x0b, 0xff}, color.RGBA{0xac, 0x51, 0x07, 0xff}, color.RGBA{0x99, 0x30, 0x04, 0xff}, color.RGBA{0x8f, 0x21, 0x04, 0xff}, color.RGBA{0x77, 0x0b, 0x00, 0xff}, color.RGBA{0x85, 0x7e, 0x70, 0xff}, color.RGBA{0x88, 0x77, 0x63, 0xff}, color.RGBA{0xb2, 0xa8, 0x9c, 0xff}, color.RGBA{0x8c, 0x82, 0x70, 0xff}, color.RGBA{0x9f, 0x95, 0x85, 0xff}, color.RGBA{0xb2, 0xa8, 0x9c, 0xff}, color.RGBA{0xbb, 0xb8, 0xaf, 0xff}, color.RGBA{0x92, 0x8f, 0x82, 0xff}, color.RGBA{0x99, 0x92, 0x88, 0xff}, color.RGBA{0xbb, 0xaf, 0x9f, 0xff}, color.RGBA{0x8c, 0x7b, 0x6a, 0xff}, color.RGBA{0x8f, 0x82, 0x6d, 0xff}, color.RGBA{0x37, 0x3b, 0x66, 0xff}, color.RGBA{0x34, 0x37, 0x58, 0xff}, color.RGBA{0x2c, 0x30, 0x51, 0xff}, color.RGBA{0x92, 0x8c, 0x7b, 0xff}, color.RGBA{0x3b, 0x30, 0x30, 0xff}, color.RGBA{0x29, 0x16, 0x07, 0xff}, color.RGBA{0x5f, 0x54, 0x4d, 0xff}, color.RGBA{0x00, 0xaf, 0x00, 0xff}, color.RGBA{0xaf, 0x00, 0x00, 0xff}, color.RGBA{0xaf, 0xaf, 0x00, 0xff}, color.RGBA{0x63, 0x58, 0x4d, 0xff}, color.RGBA{0x3f, 0x37, 0x25, 0xff}, color.RGBA{0x5b, 0x58, 0x4a, 0xff}, color.RGBA{0x4d, 0x4a, 0x21, 0xff}, color.RGBA{0x58, 0x46, 0x25, 0xff}, color.RGBA{0x9c, 0x88, 0x6d, 0xff}, color.RGBA{0x82, 0x74, 0x51, 0xff}, color.RGBA{0x74, 0x63, 0x42, 0xff}, color.RGBA{0x6d, 0x5f, 0x3f, 0xff}, color.RGBA{0x8f, 0x7e, 0x5f, 0xff}, color.RGBA{0x54, 0x4a, 0x37, 0xff}, color.RGBA{0x6d, 0x63, 0x54, 0xff}, color.RGBA{0x7b, 0x70, 0x5f, 0xff}, color.RGBA{0x4d, 0x4a, 0x3b, 0xff}, 
color.RGBA{0x77, 0x6d, 0x63, 0xff}, color.RGBA{0x7e, 0x6a, 0x66, 0xff}, color.RGBA{0x70, 0x6a, 0x5f, 0xff}, color.RGBA{0x6a, 0x63, 0x58, 0xff}, color.RGBA{0x3b, 0x37, 0x29, 0xff}, color.RGBA{0x5b, 0x54, 0x42, 0xff}, color.RGBA{0x4a, 0x46, 0x3b, 0xff}, color.RGBA{0x6a, 0x63, 0x51, 0xff}, color.RGBA{0x70, 0x6a, 0x5b, 0xff}, color.RGBA{0x4d, 0x42, 0x30, 0xff}, color.RGBA{0x42, 0x3b, 0x29, 0xff}, color.RGBA{0x5f, 0x54, 0x42, 0xff}, color.RGBA{0x42, 0x1e, 0x1a, 0xff}, color.RGBA{0x46, 0x34, 0x13, 0xff}, color.RGBA{0x54, 0x42, 0x1a, 0xff}, color.RGBA{0x66, 0x42, 0x3b, 0xff}, color.RGBA{0x51, 0x34, 0x2c, 0xff}, color.RGBA{0x82, 0x58, 0x4d, 0xff}, color.RGBA{0x74, 0x58, 0x30, 0xff}, color.RGBA{0x85, 0x46, 0x1e, 0xff}, color.RGBA{0x8f, 0x6d, 0x30, 0xff}, color.RGBA{0x85, 0x5f, 0x29, 0xff}, color.RGBA{0x99, 0x5b, 0x25, 0xff}, color.RGBA{0x6a, 0x3b, 0x1a, 0xff}, color.RGBA{0x66, 0x3b, 0x1a, 0xff}, color.RGBA{0x70, 0x37, 0x1a, 0xff}, color.RGBA{0x5f, 0x37, 0x16, 0xff}, color.RGBA{0x77, 0x46, 0x1a, 0xff}, color.RGBA{0x66, 0x13, 0x13, 0xff}, color.RGBA{0x4d, 0x25, 0x1e, 0xff}, color.RGBA{0xce, 0xb5, 0x99, 0xff}, color.RGBA{0x5b, 0x30, 0x25, 0xff}, color.RGBA{0x6a, 0x3b, 0x2c, 0xff}, color.RGBA{0x70, 0x3f, 0x30, 0xff}, color.RGBA{0x77, 0x42, 0x34, 0xff}, color.RGBA{0x85, 0x4d, 0x51, 0xff}, color.RGBA{0x8c, 0x51, 0x42, 0xff}, color.RGBA{0x92, 0x58, 0x4d, 0xff}, color.RGBA{0x99, 0x63, 0x4d, 0xff}, color.RGBA{0xce, 0xc0, 0x9f, 0xff}, color.RGBA{0xa8, 0x74, 0x5b, 0xff}, color.RGBA{0xb5, 0x85, 0x6a, 0xff}, color.RGBA{0xb8, 0x8c, 0x70, 0xff}, color.RGBA{0x66, 0x8c, 0x8c, 0xff}, color.RGBA{0x5f, 0x82, 0x7e, 0xff}, color.RGBA{0x54, 0x70, 0x77, 0xff}, color.RGBA{0x51, 0x77, 0x70, 0xff}, color.RGBA{0x4d, 0x66, 0x6d, 0xff}, color.RGBA{0x4a, 0x6a, 0x70, 0xff}, color.RGBA{0x42, 0x66, 0x66, 0xff}, color.RGBA{0x4a, 0x6a, 0x66, 0xff}, color.RGBA{0x4a, 0x58, 0x7e, 0xff}, color.RGBA{0x3f, 0x4a, 0x70, 0xff}, color.RGBA{0x3f, 0x63, 0x5f, 0xff}, color.RGBA{0x3f, 0x58, 0x5f, 0xff}, color.RGBA{0x37, 
0x42, 0x66, 0xff}, color.RGBA{0x34, 0x4a, 0x5b, 0xff}, color.RGBA{0x30, 0x37, 0x5b, 0xff}, color.RGBA{0x25, 0x2c, 0x46, 0xff}, color.RGBA{0xb5, 0xc0, 0xcc, 0xff}, color.RGBA{0x34, 0x42, 0x5f, 0xff}, color.RGBA{0x30, 0x3f, 0x5b, 0xff}, color.RGBA{0x77, 0x5f, 0x51, 0xff}, color.RGBA{0x58, 0x4a, 0x3b, 0xff}, color.RGBA{0x51, 0x42, 0x34, 0xff}, color.RGBA{0x58, 0x46, 0x37, 0xff}, color.RGBA{0xaf, 0x99, 0x4d, 0xff}, color.RGBA{0xac, 0x95, 0x4a, 0xff}, color.RGBA{0x63, 0x54, 0x13, 0xff}, color.RGBA{0x7b, 0x6a, 0x13, 0xff}, color.RGBA{0x85, 0x70, 0x13, 0xff}, color.RGBA{0x54, 0x5f, 0x5f, 0xff}, color.RGBA{0x3f, 0x4a, 0x4a, 0xff}, color.RGBA{0x85, 0xb5, 0xaf, 0xff}, color.RGBA{0x70, 0x9c, 0x9c, 0xff}, color.RGBA{0x4a, 0x51, 0x58, 0xff}, color.RGBA{0x7b, 0x29, 0x1a, 0xff}, color.RGBA{0x88, 0x34, 0x21, 0xff}, color.RGBA{0x9c, 0x42, 0x29, 0xff}, color.RGBA{0xac, 0x4d, 0x34, 0xff}, color.RGBA{0xbb, 0x63, 0x3f, 0xff}, color.RGBA{0xc9, 0x85, 0x58, 0xff}, color.RGBA{0x82, 0x2c, 0x1e, 0xff}, color.RGBA{0x92, 0x3b, 0x25, 0xff}, color.RGBA{0xa2, 0x46, 0x2c, 0xff}, color.RGBA{0xb5, 0x5b, 0x3b, 0xff}, color.RGBA{0xc3, 0x70, 0x4a, 0xff}, color.RGBA{0xd1, 0xa5, 0x6d, 0xff}, color.RGBA{0xd4, 0xd4, 0xb8, 0xff}, color.RGBA{0xd4, 0xc9, 0x9f, 0xff}, color.RGBA{0xd4, 0xc0, 0x92, 0xff}, color.RGBA{0xd4, 0xb5, 0x88, 0xff}, color.RGBA{0xd4, 0xaf, 0x82, 0xff}, color.RGBA{0xd4, 0xc6, 0x95, 0xff}, color.RGBA{0xc9, 0x7b, 0x51, 0xff}, color.RGBA{0xce, 0x8c, 0x5f, 0xff}, color.RGBA{0xd1, 0xa5, 0x74, 0xff}, color.RGBA{0xd1, 0xaf, 0x74, 0xff}, color.RGBA{0xd1, 0x9c, 0x6d, 0xff}, color.RGBA{0x34, 0x51, 0x37, 0xff}, color.RGBA{0x51, 0x25, 0x04, 0xff}, color.RGBA{0x4a, 0x21, 0x04, 0xff}, color.RGBA{0x42, 0x1e, 0x00, 0xff}, color.RGBA{0x37, 0x1a, 0x00, 0xff}, color.RGBA{0x34, 0x16, 0x00, 0xff}, color.RGBA{0x2c, 0x13, 0x00, 0xff}, color.RGBA{0x1e, 0x0b, 0x00, 0xff}, color.RGBA{0x63, 0x2c, 0x04, 0xff}, color.RGBA{0x51, 0x66, 0x85, 0xff}, color.RGBA{0x4a, 0x58, 0x7b, 0xff}, color.RGBA{0x46, 0x6d, 0x66, 0xff}, 
color.RGBA{0x42, 0x51, 0x74, 0xff}, color.RGBA{0x3b, 0x46, 0x66, 0xff}, color.RGBA{0x37, 0x4d, 0x5b, 0xff}, color.RGBA{0x2c, 0x37, 0x54, 0xff}, color.RGBA{0x25, 0x2c, 0x4d, 0xff}, color.RGBA{0x1e, 0x21, 0x46, 0xff}, color.RGBA{0x1a, 0x1e, 0x42, 0xff}, color.RGBA{0x1a, 0x1e, 0x42, 0xff}, color.RGBA{0x16, 0x1a, 0x3b, 0xff}, color.RGBA{0x16, 0x16, 0x37, 0xff}, color.RGBA{0x0f, 0x0f, 0x34, 0xff}, color.RGBA{0x58, 0x29, 0x04, 0xff}, color.RGBA{0x25, 0x25, 0x25, 0xff}, color.RGBA{0x46, 0x46, 0x46, 0xff}, color.RGBA{0x6d, 0x6d, 0x6d, 0xff}, color.RGBA{0x8f, 0x8f, 0x8f, 0xff}, color.RGBA{0x25, 0x13, 0x00, 0xff}, color.RGBA{0x46, 0x2c, 0x00, 0xff}, color.RGBA{0x6d, 0x4d, 0x00, 0xff}, color.RGBA{0x8f, 0x5f, 0x00, 0xff}, color.RGBA{0x25, 0x00, 0x25, 0xff}, color.RGBA{0x3f, 0x00, 0x3f, 0xff}, color.RGBA{0x5b, 0x00, 0x5b, 0xff}, color.RGBA{0x77, 0x00, 0x77, 0xff}, color.RGBA{0x00, 0x25, 0x25, 0xff}, color.RGBA{0x00, 0x3f, 0x3f, 0xff}, color.RGBA{0x00, 0x5b, 0x5b, 0xff}, color.RGBA{0x00, 0x77, 0x77, 0xff}, color.RGBA{0x25, 0x25, 0x00, 0xff}, color.RGBA{0x46, 0x46, 0x00, 0xff}, color.RGBA{0x6d, 0x6d, 0x00, 0xff}, color.RGBA{0x8f, 0x8f, 0x00, 0xff}, color.RGBA{0x00, 0x25, 0x00, 0xff}, color.RGBA{0x00, 0x3f, 0x00, 0xff}, color.RGBA{0x00, 0x5b, 0x00, 0xff}, color.RGBA{0x00, 0x77, 0x00, 0xff}, color.RGBA{0x25, 0x00, 0x00, 0xff}, color.RGBA{0x4a, 0x00, 0x00, 0xff}, color.RGBA{0x6d, 0x00, 0x00, 0xff}, color.RGBA{0x8f, 0x00, 0x00, 0xff}, color.RGBA{0x00, 0x00, 0x25, 0xff}, color.RGBA{0x00, 0x00, 0x4a, 0xff}, color.RGBA{0x00, 0x00, 0x6d, 0xff}, color.RGBA{0x00, 0x00, 0x8f, 0xff}, color.RGBA{0xc9, 0xc9, 0xc9, 0xff}, color.RGBA{0x0f, 0x0f, 0x0f, 0xff}, color.RGBA{0x1e, 0x1e, 0x1e, 0xff}, color.RGBA{0x2c, 0x2c, 0x2c, 0xff}, color.RGBA{0x3f, 0x3f, 0x3f, 0xff}, color.RGBA{0x4d, 0x4d, 0x4d, 0xff}, color.RGBA{0x5b, 0x5b, 0x5b, 0xff}, color.RGBA{0x6a, 0x6a, 0x6a, 0xff}, color.RGBA{0x7b, 0x7b, 0x7b, 0xff}, color.RGBA{0x88, 0x88, 0x88, 0xff}, color.RGBA{0x95, 0x95, 0x95, 0xff}, color.RGBA{0xa2, 
0xa2, 0xa2, 0xff}, color.RGBA{0xb2, 0xb2, 0xb2, 0xff}, color.RGBA{0xbd, 0xbd, 0xbd, 0xff}, color.RGBA{0xd4, 0xc0, 0xd4, 0xff}, color.RGBA{0xd4, 0xd4, 0xd4, 0xff}, }
util.go
0.576065
0.436922
util.go
starcoder
// Package giu: plotting widgets wrapping the imgui-go ImPlot API.
package giu

import (
	"image"

	"github.com/ianling/imgui-go"
)

// PlotWidget is implemented by anything that can draw itself inside an
// active plot canvas (between ImPlotBegin and ImPlotEnd).
type PlotWidget interface {
	Plot()
}

// ImPlotYAxis selects which Y axis a set of ticks or a series belongs to.
type ImPlotYAxis int

const (
	ImPlotYAxisLeft          ImPlotYAxis = 0 // left (default)
	ImPlotYAxisFirstOnRight  ImPlotYAxis = 1 // first on right side
	ImPlotYAxisSecondOnRight ImPlotYAxis = 2 // second on right side
)

// PlotTicker is one custom axis tick: a position along the axis and the
// label rendered at that position.
type PlotTicker struct {
	Position float64
	Label    string
}

// PlotCanvasWidget is a builder for an ImPlot canvas. Configure it with
// the chained setters and render it with Build.
type PlotCanvasWidget struct {
	title                            string
	xLabel                           string
	yLabel                           string
	width                            int
	height                           int
	flags                            imgui.ImPlotFlags
	xFlags, yFlags, y2Flags, y3Flags imgui.ImPlotAxisFlags
	y2Label                          string
	y3Label                          string
	xMin, xMax, yMin, yMax           float64
	axisLimitCondition               ExecCondition
	xTicksValue, yTicksValue         []float64
	xTicksLabel, yTicksLabel         []string
	xTicksShowDefault                bool
	yTicksShowDefault                bool
	yTicksYAxis                      ImPlotYAxis
	plots                            []PlotWidget
}

// Plot creates a PlotCanvasWidget with the given title and defaults:
// auto width (-1), axis limits [0,10], default ticks shown, limits
// applied once (ConditionOnce).
func Plot(title string) *PlotCanvasWidget {
	return &PlotCanvasWidget{
		title:              title,
		xLabel:             "",
		yLabel:             "",
		width:              -1,
		height:             0,
		flags:              imgui.ImPlotFlags_None,
		xFlags:             imgui.ImPlotAxisFlags_None,
		yFlags:             imgui.ImPlotAxisFlags_None,
		y2Flags:            imgui.ImPlotAxisFlags_NoGridLines,
		y3Flags:            imgui.ImPlotAxisFlags_NoGridLines,
		y2Label:            "",
		y3Label:            "",
		xMin:               0,
		xMax:               10,
		yMin:               0,
		yMax:               10,
		xTicksShowDefault:  true,
		yTicksShowDefault:  true,
		yTicksYAxis:        0,
		axisLimitCondition: ConditionOnce,
	}
}

// AxisLimits sets the axis ranges and the condition controlling when
// they are (re)applied.
func (p *PlotCanvasWidget) AxisLimits(xmin, xmax, ymin, ymax float64, cond ExecCondition) *PlotCanvasWidget {
	p.xMin = xmin
	p.xMax = xmax
	p.yMin = ymin
	p.yMax = ymax
	p.axisLimitCondition = cond
	return p
}

// XTicks installs custom ticks on the X axis; showDefault keeps the
// automatically generated ticks as well. An empty slice is a no-op.
func (p *PlotCanvasWidget) XTicks(ticks []PlotTicker, showDefault bool) *PlotCanvasWidget {
	length := len(ticks)
	if length == 0 {
		return p
	}
	values := make([]float64, length)
	labels := make([]string, length)
	for i, t := range ticks {
		values[i] = t.Position
		labels[i] = t.Label
	}
	p.xTicksValue = values
	p.xTicksLabel = labels
	p.xTicksShowDefault = showDefault
	return p
}

// YTicks installs custom ticks on the Y axis identified by yAxis;
// showDefault keeps the automatic ticks as well. An empty slice is a no-op.
func (p *PlotCanvasWidget) YTicks(ticks []PlotTicker, showDefault bool, yAxis ImPlotYAxis) *PlotCanvasWidget {
	length := len(ticks)
	if length == 0 {
		return p
	}
	values := make([]float64, length)
	labels := make([]string, length)
	for i, t := range ticks {
		values[i] = t.Position
		labels[i] = t.Label
	}
	p.yTicksValue = values
	p.yTicksLabel = labels
	p.yTicksShowDefault = showDefault
	p.yTicksYAxis = yAxis
	return p
}

// Flags sets the canvas-level ImPlot flags.
func (p *PlotCanvasWidget) Flags(flags imgui.ImPlotFlags) *PlotCanvasWidget {
	p.flags = flags
	return p
}

// XAxeFlags sets the X axis flags.
func (p *PlotCanvasWidget) XAxeFlags(flags imgui.ImPlotAxisFlags) *PlotCanvasWidget {
	p.xFlags = flags
	return p
}

// YAxeFlags sets the flags for the three possible Y axes.
func (p *PlotCanvasWidget) YAxeFlags(yFlags, y2Flags, y3Flags imgui.ImPlotAxisFlags) *PlotCanvasWidget {
	p.yFlags = yFlags
	p.y2Flags = y2Flags
	p.y3Flags = y3Flags
	return p
}

// Plots sets the series to be drawn inside the canvas.
func (p *PlotCanvasWidget) Plots(plots ...PlotWidget) *PlotCanvasWidget {
	p.plots = plots
	return p
}

// Size sets the canvas size in pixels.
func (p *PlotCanvasWidget) Size(width, height int) *PlotCanvasWidget {
	p.width = width
	p.height = height
	return p
}

// Build renders the canvas and every attached plot. Nothing is drawn
// when no plots have been added.
func (p *PlotCanvasWidget) Build() {
	if len(p.plots) > 0 {
		imgui.ImPlotSetNextPlotLimits(p.xMin, p.xMax, p.yMin, p.yMax, imgui.Condition(p.axisLimitCondition))
		if len(p.xTicksValue) > 0 {
			imgui.ImPlotSetNextPlotTicksX(p.xTicksValue, p.xTicksLabel, p.xTicksShowDefault)
		}
		if len(p.yTicksValue) > 0 {
			imgui.ImPlotSetNextPlotTicksY(p.yTicksValue, p.yTicksLabel, p.yTicksShowDefault, int(p.yTicksYAxis))
		}
		if imgui.ImPlotBegin(p.title, p.xLabel, p.yLabel, ToVec2(image.Pt(p.width, p.height)), p.flags, p.xFlags, p.yFlags, p.y2Flags, p.y3Flags, p.y2Label, p.y3Label) {
			for _, plot := range p.plots {
				plot.Plot()
			}
			imgui.ImPlotEnd()
		}
	}
}

// PlotBarWidget draws a vertical bar series.
type PlotBarWidget struct {
	title  string
	data   []float64
	width  float64
	shift  float64
	offset int
}

// PlotBar creates a bar series with bar width 0.2 and no shift/offset.
func PlotBar(title string, data []float64) *PlotBarWidget {
	return &PlotBarWidget{
		title:  title,
		data:   data,
		width:  0.2,
		shift:  0,
		offset: 0,
	}
}

// Width sets the bar width in plot units.
func (p *PlotBarWidget) Width(width float64) *PlotBarWidget {
	p.width = width
	return p
}

// Shift sets the horizontal shift applied to every bar.
func (p *PlotBarWidget) Shift(shift float64) *PlotBarWidget {
	p.shift = shift
	return p
}

// Offset sets the index of the first data point to draw.
func (p *PlotBarWidget) Offset(offset int) *PlotBarWidget {
	p.offset = offset
	return p
}

// Plot draws the series; must be called between ImPlotBegin/ImPlotEnd.
func (p *PlotBarWidget) Plot() {
	imgui.ImPlotBars(p.title, p.data, p.width, p.shift, p.offset)
}

// PlotBarHWidget draws a horizontal bar series.
type PlotBarHWidget struct {
	title  string
	data   []float64
	height float64
	shift  float64
	offset int
}

// PlotBarH creates a horizontal bar series with bar height 0.2.
func PlotBarH(title string, data []float64) *PlotBarHWidget {
	return &PlotBarHWidget{
		title:  title,
		data:   data,
		height: 0.2,
		shift:  0,
		offset: 0,
	}
}

// Height sets the bar height in plot units.
func (p *PlotBarHWidget) Height(height float64) *PlotBarHWidget {
	p.height = height
	return p
}

// Shift sets the vertical shift applied to every bar.
func (p *PlotBarHWidget) Shift(shift float64) *PlotBarHWidget {
	p.shift = shift
	return p
}

// Offset sets the index of the first data point to draw.
func (p *PlotBarHWidget) Offset(offset int) *PlotBarHWidget {
	p.offset = offset
	return p
}

// Plot draws the series; must be called between ImPlotBegin/ImPlotEnd.
func (p *PlotBarHWidget) Plot() {
	imgui.ImPlotBarsH(p.title, p.data, p.height, p.shift, p.offset)
}

// PlotLineWidget draws a line series over implicit X values
// (x = x0 + i*xScale).
type PlotLineWidget struct {
	title      string
	values     []float64
	xScale, x0 float64
	offset     int
}

// PlotLine creates a line series with xScale 1 and x0 0.
func PlotLine(title string, values []float64) *PlotLineWidget {
	return &PlotLineWidget{
		title:  title,
		values: values,
		xScale: 1,
		x0:     0,
		offset: 0,
	}
}

// XScale sets the spacing between consecutive X values.
func (p *PlotLineWidget) XScale(scale float64) *PlotLineWidget {
	p.xScale = scale
	return p
}

// X0 sets the X value of the first data point.
func (p *PlotLineWidget) X0(x0 float64) *PlotLineWidget {
	p.x0 = x0
	return p
}

// Offset sets the index of the first data point to draw.
func (p *PlotLineWidget) Offset(offset int) *PlotLineWidget {
	p.offset = offset
	return p
}

// Plot draws the series; must be called between ImPlotBegin/ImPlotEnd.
func (p *PlotLineWidget) Plot() {
	imgui.ImPlotLine(p.title, p.values, p.xScale, p.x0, p.offset)
}

// PlotLineXYWidget draws a line series with explicit X and Y values.
type PlotLineXYWidget struct {
	title  string
	xs, ys []float64
	offset int
}

// PlotLineXY creates a line series from paired x/y slices.
func PlotLineXY(title string, xvalues, yvalues []float64) *PlotLineXYWidget {
	return &PlotLineXYWidget{
		title:  title,
		xs:     xvalues,
		ys:     yvalues,
		offset: 0,
	}
}

// Offset sets the index of the first data point to draw.
func (p *PlotLineXYWidget) Offset(offset int) *PlotLineXYWidget {
	p.offset = offset
	return p
}

// Plot draws the series; must be called between ImPlotBegin/ImPlotEnd.
func (p *PlotLineXYWidget) Plot() {
	imgui.ImPlotLineXY(p.title, p.xs, p.ys, p.offset)
}

// PlotPieChartWidget draws a pie chart centered at (x, y).
type PlotPieChartWidget struct {
	labels        []string
	values        []float64
	x, y, radius  float64
	normalize     bool
	labelFormat   string
	angle0        float64
}

// PlotPieChart creates a pie chart with label format "%.1f" and a
// starting angle of 90 degrees.
func PlotPieChart(labels []string, values []float64, x, y, radius float64) *PlotPieChartWidget {
	return &PlotPieChartWidget{
		labels:      labels,
		values:      values,
		x:           x,
		y:           y,
		radius:      radius,
		normalize:   false,
		labelFormat: "%.1f",
		angle0:      90,
	}
}

// Normalize controls whether values are normalized to sum to 1.
func (p *PlotPieChartWidget) Normalize(n bool) *PlotPieChartWidget {
	p.normalize = n
	return p
}

// LabelFormat sets the printf-style format used for slice labels.
func (p *PlotPieChartWidget) LabelFormat(fmtStr string) *PlotPieChartWidget {
	p.labelFormat = fmtStr
	return p
}

// Angle0 sets the starting angle of the first slice, in degrees.
func (p *PlotPieChartWidget) Angle0(a float64) *PlotPieChartWidget {
	p.angle0 = a
	return p
}

// Plot draws the chart; must be called between ImPlotBegin/ImPlotEnd.
func (p *PlotPieChartWidget) Plot() {
	imgui.ImPlotPieChart(p.labels, p.values, p.x, p.y, p.radius, p.normalize, p.labelFormat, p.angle0)
}

// PlotScatterWidget draws a scatter series over implicit X values.
type PlotScatterWidget struct {
	label      string
	values     []float64
	xscale, x0 float64
	offset     int
}

// PlotScatter creates a scatter series with xscale 1 and x0 0.
func PlotScatter(label string, values []float64) *PlotScatterWidget {
	return &PlotScatterWidget{
		label:  label,
		values: values,
		xscale: 1,
		x0:     0,
		offset: 0,
	}
}

// XScale sets the spacing between consecutive X values.
func (p *PlotScatterWidget) XScale(s float64) *PlotScatterWidget {
	p.xscale = s
	return p
}

// X0 sets the X value of the first data point.
func (p *PlotScatterWidget) X0(x float64) *PlotScatterWidget {
	p.x0 = x
	return p
}

// Offset sets the index of the first data point to draw.
func (p *PlotScatterWidget) Offset(offset int) *PlotScatterWidget {
	p.offset = offset
	return p
}

// Plot draws the series; must be called between ImPlotBegin/ImPlotEnd.
func (p *PlotScatterWidget) Plot() {
	imgui.ImPlotScatter(p.label, p.values, p.xscale, p.x0, p.offset)
}

// PlotScatterXYWidget draws a scatter series with explicit X and Y values.
type PlotScatterXYWidget struct {
	label  string
	xs, ys []float64
	offset int
}

// PlotScatterXY creates a scatter series from paired x/y slices.
func PlotScatterXY(label string, xs, ys []float64) *PlotScatterXYWidget {
	return &PlotScatterXYWidget{
		label:  label,
		xs:     xs,
		ys:     ys,
		offset: 0,
	}
}

// Offset sets the index of the first data point to draw.
func (p *PlotScatterXYWidget) Offset(offset int) *PlotScatterXYWidget {
	p.offset = offset
	return p
}

// Plot draws the series; must be called between ImPlotBegin/ImPlotEnd.
func (p *PlotScatterXYWidget) Plot() {
	imgui.ImPlotScatterXY(p.label, p.xs, p.ys, p.offset)
}
Plot.go
0.763307
0.477859
Plot.go
starcoder
package go2linq // Reimplementing LINQ to Objects: Part 34 - SequenceEqual // https://codeblog.jonskeet.uk/2011/01/14/reimplementing-linq-to-objects-part-34-sequenceequal/ // https://docs.microsoft.com/dotnet/api/system.linq.enumerable.sequenceequal // SequenceEqual determines whether two sequences are equal by comparing the elements using reflect.DeepEqual. // 'first' and 'second' must not be based on the same Enumerator, otherwise use SequenceEqualSelf instead. func SequenceEqual[Source any](first, second Enumerator[Source]) (bool, error) { if first == nil || second == nil { return false, ErrNilSource } return SequenceEqualEq(first, second, nil) } // SequenceEqualMust is like SequenceEqual but panics in case of error. func SequenceEqualMust[Source any](first, second Enumerator[Source]) bool { r, err := SequenceEqual(first, second) if err != nil { panic(err) } return r } // SequenceEqualSelf determines whether two sequences are equal by comparing the elements using reflect.DeepEqual. // 'first' and 'second' may be based on the same Enumerator. // 'first' must have real Reset method. 'second' is enumerated immediately. func SequenceEqualSelf[Source any](first, second Enumerator[Source]) (bool, error) { if first == nil || second == nil { return false, ErrNilSource } sl2 := Slice(second) first.Reset() return SequenceEqual(first, NewOnSliceEn(sl2...)) } // SequenceEqualSelfMust is like SequenceEqualSelf but panics in case of error. func SequenceEqualSelfMust[Source any](first, second Enumerator[Source]) bool { r, err := SequenceEqualSelf(first, second) if err != nil { panic(err) } return r } // SequenceEqualEq determines whether two sequences are equal by comparing their elements using a specified Equaler. // If 'eq' is nil reflect.DeepEqual is used. // 'first' and 'second' must not be based on the same Enumerator, otherwise use SequenceEqualEqSelf instead. 
func SequenceEqualEq[Source any](first, second Enumerator[Source], eq Equaler[Source]) (bool, error) { if first == nil || second == nil { return false, ErrNilSource } if eq == nil { eq = EqualerFunc[Source](DeepEqual[Source]) } for first.MoveNext() { if !second.MoveNext() { return false, nil } if !eq.Equal(first.Current(), second.Current()) { return false, nil } } if second.MoveNext() { return false, nil } return true, nil } // SequenceEqualEqMust is like SequenceEqualEq but panics in case of error. func SequenceEqualEqMust[Source any](first, second Enumerator[Source], eq Equaler[Source]) bool { r, err := SequenceEqualEq(first, second, eq) if err != nil { panic(err) } return r } // SequenceEqualEqSelf determines whether two sequences are equal by comparing their elements using a specified Equaler. // If 'eq' is nil reflect.DeepEqual is used. // 'first' and 'second' may be based on the same Enumerator. // 'first' must have real Reset method. 'second' is enumerated immediately. func SequenceEqualEqSelf[Source any](first, second Enumerator[Source], eq Equaler[Source]) (bool, error) { if first == nil || second == nil { return false, ErrNilSource } sl2 := Slice(second) first.Reset() return SequenceEqualEq(first, NewOnSliceEn(sl2...), eq) } // SequenceEqualEqSelfMust is like SequenceEqualEqSelf but panics in case of error. func SequenceEqualEqSelfMust[Source any](first, second Enumerator[Source], eq Equaler[Source]) bool { r, err := SequenceEqualEqSelf(first, second, eq) if err != nil { panic(err) } return r }
sequenceequal.go
0.895788
0.627966
sequenceequal.go
starcoder
package squares

import (
	"image"
	"image/color"
	"io"

	svg "github.com/ajstarks/svgo"

	"github.com/taironas/tinygraphs/draw"
)

// RandomGrid builds a grid image with colors selected at random for each quadrant.
func RandomGrid(m *image.RGBA, colors []color.RGBA, xSquares int, prob float64) {
	bounds := m.Bounds().Size()
	quadrant := bounds.X / xSquares
	columnColors := make(map[int]color.RGBA)
	currentColumn := 0
	for x := 0; x < bounds.X; x++ {
		if column := x / quadrant; column != currentColumn {
			// Entering a new column of quadrants: forget the previous
			// column's colors so each column is colored independently.
			columnColors = make(map[int]color.RGBA)
			currentColumn = column
		}
		for y := 0; y < bounds.Y; y++ {
			row := y / quadrant
			if _, picked := columnColors[row]; !picked {
				columnColors[row] = draw.RandomColorFromArrayWithFreq(colors, prob)
			}
			m.Set(x, y, columnColors[row])
		}
	}
}

// RandomGridSVG writes an SVG grid with colors selected at random for each quadrant.
func RandomGridSVG(w io.Writer, colors []color.RGBA, width, height, xSquares int, prob float64) {
	canvas := svg.New(w)
	canvas.Start(width, height)
	side := xSquares
	quadrantSize := width / side
	rowColors := make(map[int]color.RGBA)
	for yQ := 0; yQ < side; yQ++ {
		y := yQ * quadrantSize
		// Each row of quadrants gets a fresh set of colors.
		rowColors = make(map[int]color.RGBA)
		for xQ := 0; xQ < side; xQ++ {
			x := xQ * quadrantSize
			if _, picked := rowColors[xQ]; !picked {
				rowColors[xQ] = draw.RandomColorFromArrayWithFreq(colors, prob)
			}
			canvas.Rect(x, y, quadrantSize, quadrantSize, draw.FillFromRGBA(rowColors[xQ]))
		}
	}
	canvas.End()
}

// RandomGradientGrid builds a grid image with colors selected at random for
// each quadrant, fading from brighter to darker along the X axis.
func RandomGradientGrid(m *image.RGBA, colors []color.RGBA, xSquares int) {
	bounds := m.Bounds().Size()
	quadrant := bounds.X / xSquares
	columnColors := make(map[int]color.RGBA)
	currentColumn := 0
	for x := 0; x < bounds.X; x++ {
		if column := x / quadrant; column != currentColumn {
			columnColors = make(map[int]color.RGBA)
			currentColumn = column
		}
		// Brightness percentage decreases as x moves right across the image.
		percentage := 100 - int(float64(x)/float64(bounds.X)*100)
		for y := 0; y < bounds.Y; y++ {
			row := y / quadrant
			if _, picked := columnColors[row]; !picked {
				columnColors[row] = draw.ColorByPercentage(colors, percentage)
			}
			m.Set(x, y, columnColors[row])
		}
	}
}

// RandomGradientGridSVG writes an SVG grid with colors selected at random for
// each quadrant, fading from brighter to darker along the X axis.
func RandomGradientGridSVG(w io.Writer, colors []color.RGBA, width, height, xSquares int) {
	canvas := svg.New(w)
	canvas.Start(width, height)
	side := xSquares
	quadrantSize := width / side
	rowColors := make(map[int]color.RGBA)
	for yQ := 0; yQ < side; yQ++ {
		y := yQ * quadrantSize
		rowColors = make(map[int]color.RGBA)
		for xQ := 0; xQ < side; xQ++ {
			x := xQ * quadrantSize
			if _, picked := rowColors[xQ]; !picked {
				// Brightness percentage decreases from left to right.
				percentage := 100 - int(float64(xQ)/float64(side)*100)
				rowColors[xQ] = draw.ColorByPercentage(colors, percentage)
			}
			canvas.Rect(x, y, quadrantSize, quadrantSize, draw.FillFromRGBA(rowColors[xQ]))
		}
	}
	canvas.End()
}
draw/squares/random.go
0.676192
0.507263
random.go
starcoder
package kronasje

import (
	"fmt"
	"regexp"
)

// This spec tries to adhere to the 4th Berkeley Distribution of the crontab
// manual (man 5 crontab) dated 19 April 2010.

// Regular expression strings used to recognise the individual forms a cron
// field may take (wildcard, step, range, list, alias, named schedule).
const (
	startExp               = `^`
	endExp                 = `$`
	everyExp               = `\*`
	singleOrDoubleDigitExp = `([\d]{1,2})`
	aliasExp               = `([[:alpha:]]{3})`
	stepExp                = `/` + singleOrDoubleDigitExp
	numberRangeExp         = singleOrDoubleDigitExp + `-` + singleOrDoubleDigitExp
	listExp                = singleOrDoubleDigitExp + `(?:,\s*` + singleOrDoubleDigitExp + `)*`
	nameExp                = `@[[:alpha:]]+`
)

var (
	every               = regexp.MustCompile(startExp + everyExp + endExp)
	step                = regexp.MustCompile(startExp + stepExp + endExp)
	everyStep           = regexp.MustCompile(startExp + everyExp + stepExp + endExp)
	singleOrDoubleDigit = regexp.MustCompile(startExp + singleOrDoubleDigitExp + endExp)
	alias               = regexp.MustCompile(startExp + aliasExp + endExp)
	numberRange         = regexp.MustCompile(startExp + numberRangeExp + endExp)
	list                = regexp.MustCompile(startExp + listExp + endExp)
	rangeStep           = regexp.MustCompile(startExp + numberRangeExp + stepExp + endExp)
	name                = regexp.MustCompile(startExp + nameExp + endExp)
)

// Days and months can be specified with named aliases such as "mon", "jan", etc.
// aliases maps each named value to its numeric equivalent.
type aliases map[string]uint8

// Every field has a minimum and maximum value (inclusive) and possibly aliases.
type fieldSpec struct {
	Min     uint8
	Max     uint8
	Aliases aliases
}

// Dealias returns the value aliased by the given alias. An error is returned
// if the field has no such alias or no aliases at all.
func (f *fieldSpec) Dealias(alias string) (uint8, error) {
	if f.Aliases == nil {
		return 0, fmt.Errorf("field has no aliases")
	}
	number, ok := f.Aliases[alias]
	if !ok {
		return 0, fmt.Errorf(`"%v" is not a valid alias`, alias)
	}
	return number, nil
}

// InRange returns a boolean indicating if the given number lies in the
// inclusive [Min, Max] range of the field spec.
func (f *fieldSpec) InRange(number uint8) bool {
	return number >= f.Min && number <= f.Max
}

func (f *fieldSpec) String() string {
	return fmt.Sprintf("min %v, max %v, aliases %+v", f.Min, f.Max, f.Aliases)
}

// fields groups the specs for the five crontab fields.
type fields struct {
	minute *fieldSpec
	hour   *fieldSpec
	dom    *fieldSpec
	month  *fieldSpec
	dow    *fieldSpec
}

var spec = &fields{
	minute: &fieldSpec{0, 59, nil},
	hour:   &fieldSpec{0, 23, nil},
	dom:    &fieldSpec{1, 31, nil},
	month: &fieldSpec{1, 12, aliases{
		"jan": 1,
		"feb": 2,
		"mar": 3,
		"apr": 4,
		"may": 5,
		"jun": 6,
		"jul": 7,
		"aug": 8,
		"sep": 9,
		// crontab(5) month names are English three-letter abbreviations;
		// "okt"/"des" (Norwegian) are kept for backward compatibility.
		"oct": 10,
		"okt": 10,
		"nov": 11,
		"dec": 12,
		"des": 12,
	},
	},
	dow: &fieldSpec{0, 7, aliases{
		"sun": 0,
		"mon": 1,
		"tue": 2,
		"wed": 3,
		"thu": 4,
		"fri": 5,
		"sat": 6,
		// "sun": 7,
	},
	},
}

// Common cron expressions can be specified using names
var names = map[string]string{
	"@yearly":   "0 0 1 1 *", // 1st day in the 1st month at midnight
	"@annually": "@yearly",
	"@monthly":  "0 0 1 * *", // 1st day of every month at midnight
	"@weekly":   "0 0 * * 0", // Every sunday at midnight
	"@daily":    "0 0 * * *", // Every day at midnight
	"@midnight": "@daily",
	"@hourly":   "0 * * * *", // Every hour
}
spec.go
0.681727
0.432243
spec.go
starcoder
package geodesic

import (
	"math"
)

// NaiveFind is the naive algorithm for determining the face containing a point.
// Searches every face on the sphere, so it's incredibly inefficient for large
// numbers of faces.
func NaiveFind(g *Geodesic, v Vector) int {
	start := 0
	minDistSq := math.MaxFloat64
	for i, center := range g.Centers {
		iDistSq := DistSq(center, v)
		if iDistSq < minDistSq {
			minDistSq = iDistSq
			start = i
		}
	}
	return start
}

// Find returns the *Node closest to v in the last Geodesic in gs.
// gs is a precomputed sequence of geodesics, each homomorphic to the previous one.
// Further, if face F is in gs[i] and gs[k], then gs[i].Center[F] = gs[j].Center[F].
func Find(gs []*Geodesic, v Vector) int {
	if len(gs) == 1 {
		return NaiveFind(gs[0], v)
	}
	gs0 := gs[0]
	start := 0
	minDistSq := math.MaxFloat64
	if v.Z > 0 {
		// We are in the northern hemisphere, so we can disregard all southern
		// faces.
		// Start from the north pole at index 0.
		minDistSq = DistSq(gs0.Centers[start], v)
		for i := 1; i <= 5; i++ {
			iDistSq := DistSq(gs0.Centers[i], v)
			if iDistSq < minDistSq {
				minDistSq = iDistSq
				start = i
			}
		}
	} else {
		// We are in the southern hemisphere, so we can disregard all northern
		// faces.
		// Start from the south pole at index 11.
		start = 11
		minDistSq = DistSq(gs0.Centers[start], v)
		for i := 6; i <= 10; i++ {
			iDistSq := DistSq(gs0.Centers[i], v)
			if iDistSq < minDistSq {
				minDistSq = iDistSq
				start = i
			}
		}
	}
	// NOTE(review): this check is unreachable — len(gs) == 1 already
	// returned via NaiveFind at the top of the function.
	if len(gs) == 1 {
		return start
	}
	return find(gs[1:], v, minDistSq, start)
}

// DistSq returns the squared Euclidean distance between v1 and v2.
// (Squared distance preserves ordering, so the sqrt can be skipped.)
func DistSq(v1, v2 Vector) float64 {
	return (v1.X-v2.X)*(v1.X-v2.X) + (v1.Y-v2.Y)*(v1.Y-v2.Y) + (v1.Z-v2.Z)*(v1.Z-v2.Z)
}

// find returns the *Node closest to v, starting from the node at index start.
// start must be the face index closest to v in the previous geodesic.
func find(gs []*Geodesic, v Vector, minDistSq float64, start int) int {
	gs0 := gs[0]
	nextStart := start
	// Greedily move to whichever neighbor of the current face is closer to v.
	neighbors := gs0.Faces[start].Neighbors
	for _, n := range neighbors {
		iDistSq := DistSq(gs0.Centers[n], v)
		if iDistSq < minDistSq {
			minDistSq = iDistSq
			nextStart = n
		}
	}
	if len(gs) == 1 {
		// There's a bug related to correctly classifying the neighbor of a
		// pentagon face, so this corrects for that.
		// On the last geodesic, keep hill-climbing on the SAME geodesic
		// until no neighbor improves the distance (fixpoint), instead of
		// descending to a finer level.
		if nextStart == start {
			return nextStart
		}
		return find(gs, v, minDistSq, nextStart)
	}
	return find(gs[1:], v, minDistSq, nextStart)
}
pkg/geodesic/find.go
0.772616
0.436802
find.go
starcoder
package atomic

import (
	"unsafe"
)

// A Value provides an atomic load and store of a consistently typed value.
// Values can be created as part of other data structures.
// The zero value for a Value returns nil from Load.
// Once Store has been called, a Value must not be copied.
type Value struct {
	v interface{}
}

// ifaceWords is interface{} internal representation.
// It mirrors the runtime's two-word layout (type pointer, data pointer)
// so the two halves can be loaded and stored atomically.
type ifaceWords struct {
	typ  unsafe.Pointer
	data unsafe.Pointer
}

// Load returns the value set by the most recent Store.
// It returns nil if there has been no call to Store for this Value.
func (v *Value) Load() (x interface{}) {
	vp := (*ifaceWords)(unsafe.Pointer(v))
	typ := LoadPointer(&vp.typ)
	// ^uintptr(0) is the sentinel written by Store while the first
	// store is still in progress.
	if typ == nil || uintptr(typ) == ^uintptr(0) {
		// First store not yet completed.
		return nil
	}
	data := LoadPointer(&vp.data)
	xp := (*ifaceWords)(unsafe.Pointer(&x))
	xp.typ = typ
	xp.data = data
	return
}

// Store sets the value of the Value to x.
// All calls to Store for a given Value must use values of the same concrete type.
// Store of an inconsistent type panics, as does Store(nil).
func (v *Value) Store(x interface{}) {
	if x == nil {
		panic("sync/atomic: store of nil value into Value")
	}
	vp := (*ifaceWords)(unsafe.Pointer(v))
	xp := (*ifaceWords)(unsafe.Pointer(&x))
	for {
		typ := LoadPointer(&vp.typ)
		if typ == nil {
			// Attempt to start first store.
			// Disable preemption so that other goroutines can use
			// active spin wait to wait for completion; and so that
			// GC does not see the fake type accidentally.
			runtime_procPin()
			if !CompareAndSwapPointer(&vp.typ, nil, unsafe.Pointer(^uintptr(0))) {
				runtime_procUnpin()
				continue
			}
			// Complete first store.
			// Data is published before the real type pointer, so a
			// concurrent Load that observes the type also sees the data.
			StorePointer(&vp.data, xp.data)
			StorePointer(&vp.typ, xp.typ)
			runtime_procUnpin()
			return
		}
		if uintptr(typ) == ^uintptr(0) {
			// First store in progress. Wait.
			// Since we disable preemption around the first store,
			// we can wait with active spinning.
			continue
		}
		// First store completed. Check type and overwrite data.
		if typ != xp.typ {
			panic("sync/atomic: store of inconsistently typed value into Value")
		}
		StorePointer(&vp.data, xp.data)
		return
	}
}

// Disable/enable preemption, implemented in runtime.
func runtime_procPin()
func runtime_procUnpin()
go1.5/src/sync/atomic/value.go
0.695958
0.426799
value.go
starcoder
package interval

import (
	"sort"
	"time"

	"github.com/grokify/gocharts/data/timeseries"
	"github.com/grokify/simplego/math/mathutil"
	"github.com/grokify/simplego/time/month"
	"github.com/grokify/simplego/time/timeutil"
	"github.com/pkg/errors"
)

// XoXGrowth holds period-over-period growth points keyed by RFC 3339 date.
type XoXGrowth struct {
	DateMap map[string]XoxPoint
	YTD     int64
	QTD     int64
}

// NewXoXTimeSeries computes month-over-month, quarter-over-quarter and
// year-over-year growth points for every item in the time series.
// Map keys of ds.ItemMap are expected to be RFC 3339 timestamps.
func NewXoXTimeSeries(ds timeseries.TimeSeries) (XoXGrowth, error) {
	xox := XoXGrowth{DateMap: map[string]XoxPoint{}}
	for dateNowRfc, itemNow := range ds.ItemMap {
		dateNow, err := time.Parse(time.RFC3339, dateNowRfc)
		if err != nil {
			return xox, errors.Wrap(err, "timeseries.NewXoXTimeSeries")
		}
		xoxPoint := XoxPoint{Time: dateNow, Value: itemNow.Int64()}
		quarterAgo := month.MonthBegin(dateNow, -3)
		yearAgo := month.MonthBegin(dateNow, -12)
		xoxPoint.TimeQuarterAgo = quarterAgo
		xoxPoint.TimeYearAgo = yearAgo
		// Month-over-month is only computed for monthly-interval series.
		if ds.Interval == timeutil.Month {
			monthAgo := month.MonthBegin(dateNow, -1)
			xoxPoint.TimeMonthAgo = monthAgo
			if itemMonthAgo, ok := ds.ItemMap[monthAgo.Format(time.RFC3339)]; ok {
				xoxPoint.MMAgoValue = itemMonthAgo.Int64()
				xoxPoint.MNowValue = itemNow.Int64()
				xoxPoint.MOldValue = itemMonthAgo.Int64()
				xoxPoint.MoM = mathutil.PercentChangeToXoX(itemNow.Float64() / itemMonthAgo.Float64())
				xoxPoint.MoMAggregate = mathutil.PercentChangeToXoX(itemNow.Float64() / itemMonthAgo.Float64())
			}
		}
		// Quarter-over-quarter: point-in-time ratio plus 3-month aggregates.
		if itemMonthQuarterAgo, ok := ds.ItemMap[quarterAgo.Format(time.RFC3339)]; ok {
			xoxPoint.MQAgoValue = itemMonthQuarterAgo.Int64()
			xoxPoint.QNowValue = AggregatePriorMonths(ds, dateNow, 3)
			xoxPoint.QOldValue = AggregatePriorMonths(ds, month.MonthBegin(dateNow, -3), 3)
			xoxPoint.QoQ = mathutil.PercentChangeToXoX(itemNow.Float64() / itemMonthQuarterAgo.Float64())
			xoxPoint.QoQAggregate = mathutil.PercentChangeToXoX(
				float64(xoxPoint.QNowValue) / float64(xoxPoint.QOldValue))
		}
		// Year-over-year: point-in-time ratio plus 12-month aggregates.
		if itemMonthYearAgo, ok := ds.ItemMap[yearAgo.Format(time.RFC3339)]; ok {
			xoxPoint.MYAgoValue = itemMonthYearAgo.Int64()
			xoxPoint.YNowValue = AggregatePriorMonths(ds, dateNow, 12)
			xoxPoint.YOldValue = AggregatePriorMonths(ds, month.MonthBegin(dateNow, -12), 12)
			xoxPoint.YoY = mathutil.PercentChangeToXoX(itemNow.Float64() / itemMonthYearAgo.Float64())
			xoxPoint.YoYAggregate = mathutil.PercentChangeToXoX(
				float64(xoxPoint.YNowValue) / float64(xoxPoint.YOldValue))
			/*
				xoxPoint.YAgoValue = itemYear.Value
				xoxPoint.YoY = mathutil.PercentChangeToXoX(float64(itemNow.Value) / float64(itemYear.Value))
			*/
		}
		xox.DateMap[dateNowRfc] = xoxPoint
	}
	return xox, nil
}

// AggregatePriorMonths sums the item values for 'months' consecutive months
// ending at (and including) the month containing 'start'. Months missing
// from the series contribute zero.
// NOTE(review): this sums item.Value directly while NewXoXTimeSeries reads
// item.Int64() — confirm both accessors agree for this item type.
func AggregatePriorMonths(ds timeseries.TimeSeries, start time.Time, months uint) int64 {
	aggregateValue := int64(0)
	monthBegin := month.MonthBegin(start, 0)
	for i := uint(1); i <= months; i++ {
		subtractMonths := i - 1
		thisMonth := monthBegin
		if subtractMonths > 0 {
			thisMonth = month.MonthBegin(monthBegin, -1*int(subtractMonths))
		}
		key := thisMonth.Format(time.RFC3339)
		if item, ok := ds.ItemMap[key]; ok {
			aggregateValue += item.Value
		}
	}
	return aggregateValue
}

// Last returns the growth point with the lexicographically greatest date key
// (RFC 3339 keys sort chronologically). Returns the zero XoxPoint when the
// map is empty.
func (xg *XoXGrowth) Last() XoxPoint {
	dates := []string{}
	for date := range xg.DateMap {
		dates = append(dates, date)
	}
	if len(dates) == 0 {
		return XoxPoint{}
	}
	sort.Strings(dates)
	lastDate := dates[len(dates)-1]
	if lastItem, ok := xg.DateMap[lastDate]; ok {
		return lastItem
	}
	return XoxPoint{}
}
data/timeseries/interval/xox_month.go
0.537041
0.523786
xox_month.go
starcoder
package big

import (
	"fmt"
	"math"
	"math/big"
	"math/rand"
)

// A wrapper around math/big.Int which makes operations easier to express.
// The big difference is that this big.Int is immutable. Operations on
// these big.Ints are easier to write code with, but require more
// allocations under the hood. Totally worth it.
type Int struct {
	v *big.Int
}

// Constructors

// Int64 returns an Int with the value of x.
func Int64(x int64) Int { return Int{big.NewInt(x)} }

// ParseInt parses a base-10 string. On failure ok is false and the
// returned Int wraps a nil *big.Int.
func ParseInt(s string) (Int, bool) {
	y, ok := new(big.Int).SetString(s, 10)
	return Int{y}, ok
}

// Arithmetic

func (x Int) Add(y Int) Int { return Int{new(big.Int).Add(x.v, y.v)} }
func (x Int) Sub(y Int) Int { return Int{new(big.Int).Sub(x.v, y.v)} }
func (x Int) Mul(y Int) Int { return Int{new(big.Int).Mul(x.v, y.v)} }
// Div and Mod use Euclidean semantics (math/big Div/Mod): the remainder
// is always non-negative for a positive modulus.
func (x Int) Div(y Int) Int { return Int{new(big.Int).Div(x.v, y.v)} }
func (x Int) Mod(y Int) Int { return Int{new(big.Int).Mod(x.v, y.v)} }

// The *64 variants reuse the temporary made for y as the result,
// saving one allocation over the Int-argument forms.
func (x Int) Add64(y int64) Int {
	z := big.NewInt(y)
	return Int{z.Add(x.v, z)}
}
func (x Int) Sub64(y int64) Int {
	z := big.NewInt(y)
	return Int{z.Sub(x.v, z)}
}
func (x Int) Mul64(y int64) Int {
	z := big.NewInt(y)
	return Int{z.Mul(x.v, z)}
}
func (x Int) Div64(y int64) Int {
	z := big.NewInt(y)
	return Int{z.Div(x.v, z)}
}
func (x Int) Mod64(y int64) int64 {
	z := big.NewInt(y)
	return z.Mod(x.v, z).Int64()
}
func (x Int) Neg() Int { return Int{new(big.Int).Neg(x.v)} }
func (x Int) Abs() Int {
	if x.v.Sign() >= 0 {
		// Already non-negative; immutability makes sharing safe.
		return x
	}
	return Int{new(big.Int).Neg(x.v)}
}
func (x Int) Lsh(n uint) Int { return Int{new(big.Int).Lsh(x.v, n)} }
func (x Int) Rsh(n uint) Int { return Int{new(big.Int).Rsh(x.v, n)} }
func (x Int) Min(y Int) Int {
	if x.Cmp(y) < 0 {
		return x
	}
	return y
}
func (x Int) Max(y Int) Int {
	if x.Cmp(y) > 0 {
		return x
	}
	return y
}

// Info extraction

func (x Int) Int64() int64  { return x.v.Int64() }
func (x Int) IsZero() bool  { return x.v.Sign() == 0 }
func (x Int) Sign() int     { return x.v.Sign() }
func (x Int) Cmp(y Int) int { return x.v.Cmp(y.v) }
func (x Int) Equals(y Int) bool {
	return x.v.Cmp(y.v) == 0
}

// Cmp64 compares x to an int64 without allocating. Values that do not
// fit in 64 bits are classified by sign; the one edge case is
// x == math.MinInt64, whose magnitude uses exactly 64 bits.
func (x Int) Cmp64(y int64) int {
	if x.BitLen() >= 64 {
		if x.Sign() > 0 {
			return 1
		}
		if x.Cmp(minInt64) == 0 && y == math.MinInt64 {
			return 0
		}
		return -1
	}
	z := x.Int64()
	if z > y {
		return 1
	}
	if z < y {
		return -1
	}
	return 0
}
func (x Int) BitLen() int                { return x.v.BitLen() }
func (x Int) Bit(i int) uint             { return x.v.Bit(i) }
func (x Int) ProbablyPrime(n int) bool   { return x.v.ProbablyPrime(n) }
// Log returns the natural logarithm of x via a float64 conversion.
// NOTE(review): the conversion loses precision (and overflows to +Inf)
// for very large |x| — acceptable only for approximate use.
func (x Int) Log() float64 {
	f, _ := new(big.Rat).SetInt(x.v).Float64()
	return math.Log(f)
}

// Other math

func (x Int) Square() Int { return x.Mul(x) }
func (x Int) Cube() Int {
	y := new(big.Int)
	y.Mul(x.v, x.v)
	y.Mul(y, x.v)
	return Int{y}
}

// Exp returns x**k. The nil modulus selects plain (non-modular)
// exponentiation per math/big; per its docs a negative exponent with a
// nil modulus yields 1.
func (x Int) Exp(k int64) Int {
	b := big.NewInt(k)
	return Int{b.Exp(x.v, b, nil)}
}

// SqrtFloor returns floor(sqrt(x)) by bisection.
// Assumes x >= 0 — TODO confirm callers never pass negatives.
func (x Int) SqrtFloor() Int {
	if x.IsZero() {
		return x
	}
	b := uint(x.BitLen())
	// invariant lo <= sqrt(x) < hi
	lo := One.Lsh((b - 1) / 2)
	hi := lo.Lsh(1)
	for {
		m := lo.Add(hi).Rsh(1)
		if m.Cmp(lo) == 0 {
			return lo
		}
		if m.Square().Cmp(x) <= 0 {
			lo = m
		} else {
			hi = m
		}
	}
}

// SqrtCeil returns ceil(sqrt(x)).
func (x Int) SqrtCeil() Int {
	y := x.SqrtFloor()
	if y.Square().Cmp(x) != 0 {
		y = y.Add(One)
	}
	return y
}

// Discrete math stuff

// ExpMod returns x**k mod m.
func (x Int) ExpMod(k, m Int) Int {
	return Int{new(big.Int).Exp(x.v, k.v, m.v)}
}

// ModInv returns the multiplicative inverse of x modulo n.
func (x Int) ModInv(n Int) Int {
	// TODO: check gcd(x,n)==1?
	return Int{new(big.Int).ModInverse(x.v, n.v)}
}
func (x Int) GCD(y Int) Int {
	return Int{new(big.Int).GCD(nil, nil, x.v, y.v)}
}

// For printing

func (x Int) Format(s fmt.State, ch rune) {
	x.v.Format(s, ch)
}

// Rand returns a random number in [0,x)
func (x Int) Rand(rnd *rand.Rand) Int {
	return Int{new(big.Int).Rand(rnd, x.v)}
}

// Optimized routines

// Scratch space for use by Mod64s.
type Scratch [3]big.Int

// Mod64s is the same as Mod64 except it uses
// the scratch space provided to avoid allocation.
func (x Int) Mod64s(y int64, s *Scratch) int64 {
	// Note: use DivMod here instead of Mod so we can reuse
	// storage for the dividend. Mod allocates storage for
	// the (thrown away) dividend on each call.
	s[0].DivMod(x.v, s[1].SetInt64(y), &s[2])
	return s[2].Int64()
}

// helpful constants
var Zero = Int64(0)
var One = Int64(1)
var Two = Int64(2)
var Three = Int64(3)
var Ten = Int64(10)
var MinusOne = Int64(-1)
var minInt64 = Int64(math.MinInt64)
big/int.go
0.653348
0.46952
int.go
starcoder
package header

// A Proxy-Authenticate header field value contains an authentication
// challenge. When a UAC sends a request to a proxy server, the proxy server
// MAY authenticate the originator before the request is processed. If no
// credentials (in the Proxy-Authorization header field) are provided in the
// request, the proxy can challenge the originator to provide credentials by
// rejecting the request with a 407 (Proxy Authentication Required) status
// code. The proxy MUST populate the 407 (Proxy Authentication Required)
// message with a Proxy-Authenticate header field value applicable to the
// proxy for the requested resource. The field value consists of a challenge
// that indicates the authentication and parameters applicable to the proxy
// for this RequestURI.
//
// Note - Unlike its usage within HTTP, the ProxyAuthenticateHeader must be
// passed upstream in the Response to the UAC. In SIP, only UAC's can
// authenticate themselves to proxies.
//
// Proxies MUST NOT add values to the Proxy-Authorization header field. All
// 407 (Proxy Authentication Required) responses MUST be forwarded upstream
// toward the UAC following the procedures for any other response. It is the
// UAC's responsibility to add the Proxy-Authorization header field value
// containing credentials for the realm of the proxy that has asked for
// authentication.
//
// When the originating UAC receives the 407 (Proxy Authentication Required)
// it SHOULD, if it is able, re-originate the request with the proper
// credentials. It should follow the same procedures for the display of the
// "realm" parameter that are given above for responding to 401. If no
// credentials for a realm can be located, UACs MAY attempt to retry the
// request with a username of "anonymous" and no password (a password of "").
// The UAC SHOULD also cache the credentials used in the re-originated request.
//
// For Example:
//
//	Proxy-Authenticate: Digest realm="jcp.org",
//	    domain="sip:ss1.duke.com", qop="auth",
//	    nonce="f84f1cec41e6cbe5aea9c8e88d359", opaque="", stale=FALSE,
//	    algorithm=MD5
//
// See also Parameters and ProxyAuthorizationHeader.
//
// ProxyAuthenticateHeader carries the same challenge structure as
// WWW-Authenticate, so it simply embeds WWWAuthenticateHeader.
type ProxyAuthenticateHeader interface {
	WWWAuthenticateHeader
}
sip/header/ProxyAuthenticateHeader.go
0.851181
0.412175
ProxyAuthenticateHeader.go
starcoder
package engine

import (
	"github.com/ivan1993spb/snake-bot/internal/types"
)

// Sight is a square window of the (toroidal) play area centered on a
// position. Coordinates wrap around the area's edges, so topLeft may lie
// "past" the bottom-right of the board.
type Sight struct {
	area              Area
	topLeft           types.Dot
	zeroedBottomRight types.Dot
	width             uint8
	height            uint8
}

// sightDivisor defines how many intervals of a given length
// we need to be able to fit within an area. In the 2D space
// the divisor has to be 2:
//  ___
//   ^
//   |
//   |
// distance
//   |
//   |
//   v
//  |<---distance--->GAP<---distance--->|
//   ^
//   |
//   |
// distance
//   |
//   |
//   v
//  ---
const sightDivisor = 2

const sightGap = 1

// NewSight builds the sight window of the given half-width 'distance'
// around pos, clamping distance so the window fits within area.
// All +/- arithmetic on coordinates below relies on uint8 wraparound,
// corrected afterwards by adding the area width/height.
// NOTE(review): the width/height fields are never assigned here (see the
// TODO below) and remain zero.
func NewSight(a Area, pos types.Dot, distance uint8) Sight {
	distance = a.FitDistance(distance, sightDivisor, sightGap)

	topLeft := types.Dot{
		X: pos.X - distance,
		Y: pos.Y - distance,
	}
	if pos.X < distance {
		topLeft.X += a.Width
	}
	if pos.Y < distance {
		topLeft.Y += a.Height
	}

	bottomRight := types.Dot{
		X: (pos.X + distance) % a.Width,
		Y: (pos.Y + distance) % a.Height,
	}

	// zeroedBottomRight is bottomRight translated so topLeft is (0,0);
	// it therefore holds the window extent minus one in each dimension.
	zeroedBottomRight := types.Dot{
		X: bottomRight.X - topLeft.X,
		Y: bottomRight.Y - topLeft.Y,
	}
	if bottomRight.X < topLeft.X {
		zeroedBottomRight.X += a.Width
	}
	if bottomRight.Y < topLeft.Y {
		zeroedBottomRight.Y += a.Height
	}

	return Sight{
		area:              a,
		topLeft:           topLeft,
		zeroedBottomRight: zeroedBottomRight,
		// TODO: Check on overflow.
		//width: zeroedBottomRight.X + 1,
		//height: zeroedBottomRight.Y + 1,
	}
}

// Absolute converts window-relative coordinates to an absolute dot on the
// area, wrapping around the torus edges. The uint16 widening avoids uint8
// overflow before the modulo.
func (s Sight) Absolute(relX, relY uint8) types.Dot {
	x := uint16(s.topLeft.X) + uint16(relX)
	y := uint16(s.topLeft.Y) + uint16(relY)
	return types.Dot{
		X: uint8(x % uint16(s.area.Width)),
		Y: uint8(y % uint16(s.area.Height)),
	}
}

// Relative converts an absolute dot to coordinates relative to the
// window's top-left corner, wrapping around the torus edges.
func (s Sight) Relative(dot types.Dot) (uint8, uint8) {
	x := dot.X - s.topLeft.X
	if s.topLeft.X > dot.X {
		x += s.area.Width
	}
	y := dot.Y - s.topLeft.Y
	if s.topLeft.Y > dot.Y {
		y += s.area.Height
	}
	return x, y
}

// Seen reports whether the given absolute dot lies within the window.
func (s Sight) Seen(dot types.Dot) bool {
	if zeroedX := dot.X - s.topLeft.X; dot.X >= s.topLeft.X {
		if zeroedX > s.zeroedBottomRight.X {
			return false
		}
	} else if zeroedX+s.area.Width > s.zeroedBottomRight.X {
		return false
	}
	if zeroedY := dot.Y - s.topLeft.Y; dot.Y >= s.topLeft.Y {
		if zeroedY > s.zeroedBottomRight.Y {
			return false
		}
	} else if zeroedY+s.area.Height > s.zeroedBottomRight.Y {
		return false
	}
	return true
}

// Dots returns every absolute dot covered by the window, column-major
// (all y for x=0, then x=1, ...).
func (s Sight) Dots() []types.Dot {
	w := (int(s.zeroedBottomRight.X) + 1)
	h := (int(s.zeroedBottomRight.Y) + 1)
	dots := make([]types.Dot, 0, w*h)
	for x := uint8(0); x <= s.zeroedBottomRight.X; x++ {
		for y := uint8(0); y <= s.zeroedBottomRight.Y; y++ {
			dots = append(dots, s.Absolute(x, y))
		}
	}
	return dots
}
internal/bot/engine/sight.go
0.576304
0.423935
sight.go
starcoder
package predicate

import (
	"fmt"

	"github.com/influxdata/influxdb/v2"
	"github.com/influxdata/influxdb/v2/kit/platform/errors"
	"github.com/influxdata/influxdb/v2/models"
	"github.com/influxdata/influxdb/v2/storage/reads/datatypes"
)

// TagRuleNode is a node type of a single tag rule.
type TagRuleNode influxdb.TagRule

// specialKey maps user-facing pseudo tag keys onto the internal tag keys
// used by the storage layer.
var specialKey = map[string]string{
	"_measurement": models.MeasurementTagKey,
	"_field":       models.FieldKeyTagKey,
}

// NodeTypeLiteral convert a TagRuleNode to a nodeTypeLiteral.
func NodeTypeLiteral(tr TagRuleNode) *datatypes.Node {
	node := &datatypes.Node{NodeType: datatypes.NodeTypeLiteral}
	// Regex operators carry the rule value as a regex literal; every other
	// operator encodes it as a plain string literal.
	if tr.Operator == influxdb.RegexEqual || tr.Operator == influxdb.NotRegexEqual {
		node.Value = &datatypes.Node_RegexValue{
			RegexValue: tr.Value,
		}
	} else {
		node.Value = &datatypes.Node_StringValue{
			StringValue: tr.Value,
		}
	}
	return node
}

// NodeComparison convert influxdb.Operator to Node_Comparison.
func NodeComparison(op influxdb.Operator) (datatypes.Node_Comparison, error) {
	switch op {
	case influxdb.Equal:
		return datatypes.ComparisonEqual, nil
	case influxdb.NotEqual:
		return datatypes.ComparisonNotEqual, nil
	case influxdb.RegexEqual, influxdb.NotRegexEqual:
		// Regex matching is recognized but not yet implemented for
		// delete predicates.
		return 0, &errors.Error{
			Code: errors.EInvalid,
			Msg:  fmt.Sprintf("Operator %s is not supported for delete predicate yet", op),
		}
	default:
		return 0, &errors.Error{
			Code: errors.EInvalid,
			Msg:  fmt.Sprintf("Unsupported operator: %s", op),
		}
	}
}

// ToDataType convert a TagRuleNode to datatypes.Node.
func (n TagRuleNode) ToDataType() (*datatypes.Node, error) {
	compare, err := NodeComparison(n.Operator)
	if err != nil {
		return nil, err
	}

	// Rewrite pseudo keys (_measurement/_field) to their storage-layer
	// equivalents before emitting the tag reference.
	key := n.Key
	if mapped, ok := specialKey[key]; ok {
		key = mapped
	}

	tagRef := &datatypes.Node{
		NodeType: datatypes.NodeTypeTagRef,
		Value:    &datatypes.Node_TagRefValue{TagRefValue: key},
	}

	// The comparison expression is: <tag ref> <compare> <literal>.
	return &datatypes.Node{
		NodeType: datatypes.NodeTypeComparisonExpression,
		Value:    &datatypes.Node_Comparison_{Comparison: compare},
		Children: []*datatypes.Node{tagRef, NodeTypeLiteral(n)},
	}, nil
}
predicate/tag_rule.go
0.577853
0.431584
tag_rule.go
starcoder
package strftime

import "time"

/*
Strftime implements the POSIX strftime(3) function.

The underlying implementation uses the native C library function on Linux and
Darwin, and a pure Go replacement otherwise. There are some functional
differences between the implementations; see also the documentation of
StrftimePure.

The following documentation is adapted from the linux man-pages project.

The Strftime function formats the time `t` according to the format
specification `format` and places the result in the string s. The format
specification is a string and may contain special character sequences called
conversion specifications, each of which is introduced by a '%' character and
terminated by some other character known as a conversion specifier character.
All other character sequences are ordinary character sequences.

The characters of ordinary character sequences are copied verbatim from
format to s. However, the characters of conversion specifications are
replaced as follows:

	%a  The abbreviated weekday name according to the current locale.
	%A  The full weekday name according to the current locale.
	%b  The abbreviated month name according to the current locale.
	%B  The full month name according to the current locale.
	%c  The preferred date and time representation for the current locale.
	%C  The century number (year/100) as a 2-digit integer. (SU)
	%d  The day of the month as a decimal number (range 01 to 31).
	%D  Equivalent to %m/%d/%y. (Yecch—for Americans only. Americans should
	    note that in other countries %d/%m/%y is rather common. This means
	    that in international context this format is ambiguous and should
	    not be used.) (SU)
	%e  Like %d, the day of the month as a decimal number, but a leading
	    zero is replaced by a space. (SU)
	%E  Modifier: use alternative format, see below. (SU)
	%F  Equivalent to %Y-%m-%d (the ISO 8601 date format). (C99)
	%G  The ISO 8601 week-based year (see NOTES) with century as a decimal
	    number. The 4-digit year corresponding to the ISO week number (see
	    %V). This has the same format and value as %Y, except that if the
	    ISO week number belongs to the previous or next year, that year is
	    used instead. (TZ)
	%g  Like %G, but without century, that is, with a 2-digit year (00-99).
	    (TZ)
	%h  Equivalent to %b. (SU)
	%H  The hour as a decimal number using a 24-hour clock (range 00 to 23).
	%I  The hour as a decimal number using a 12-hour clock (range 01 to 12).
	%j  The day of the year as a decimal number (range 001 to 366).
	%k  The hour (24-hour clock) as a decimal number (range 0 to 23);
	    single digits are preceded by a blank. (See also %H.) (TZ)
	%l  The hour (12-hour clock) as a decimal number (range 1 to 12);
	    single digits are preceded by a blank. (See also %I.) (TZ)
	%m  The month as a decimal number (range 01 to 12).
	%M  The minute as a decimal number (range 00 to 59).
	%n  A newline character. (SU)
	%O  Modifier: use alternative format, see below. (SU)
	%p  Either "AM" or "PM" according to the given time value, or the
	    corresponding strings for the current locale. Noon is treated as
	    "PM" and midnight as "AM".
	%P  Like %p but in lowercase: "am" or "pm" or a corresponding string
	    for the current locale. (GNU)
	%r  The time in a.m. or p.m. notation. In the POSIX locale this is
	    equivalent to %I:%M:%S %p. (SU)
	%R  The time in 24-hour notation (%H:%M). (SU) For a version including
	    the seconds, see %T below.
	%s  The number of seconds since the Epoch, 1970-01-01 00:00:00 +0000
	    (UTC). (TZ)
	%S  The second as a decimal number (range 00 to 60). (The range is up
	    to 60 to allow for occasional leap seconds.)
	%t  A tab character. (SU)
	%T  The time in 24-hour notation (%H:%M:%S). (SU)
	%u  The day of the week as a decimal, range 1 to 7, Monday being 1.
	    See also %w. (SU)
	%U  The week number of the current year as a decimal number, range 00
	    to 53, starting with the first Sunday as the first day of week 01.
	    See also %V and %W.
	%V  The ISO 8601 week number (see NOTES) of the current year as a
	    decimal number, range 01 to 53, where week 1 is the first week
	    that has at least 4 days in the new year. See also %U and %W. (SU)
	%w  The day of the week as a decimal, range 0 to 6, Sunday being 0.
	    See also %u.
	%W  The week number of the current year as a decimal number, range 00
	    to 53, starting with the first Monday as the first day of week 01.
	%x  The preferred date representation for the current locale without
	    the time.
	%X  The preferred time representation for the current locale without
	    the date.
	%y  The year as a decimal number without a century (range 00 to 99).
	%Y  The year as a decimal number including the century.
	%z  The +hhmm or -hhmm numeric timezone (that is, the hour and minute
	    offset from UTC). (SU)
	%Z  The timezone or name or abbreviation.
	%+  The date and time in date(1) format. (TZ) (Not supported in
	    glibc2.)
	%%  A literal '%' character.

Some conversion specifications can be modified by preceding the conversion
specifier character by the E or O modifier to indicate that an alternative
format should be used. If the alternative format or specification does not
exist for the current locale, the behavior will be as if the unmodified
conversion specification were used. (SU) The Single UNIX Specification
mentions %Ec, %EC, %Ex, %EX, %Ey, %EY, %Od, %Oe, %OH, %OI, %Om, %OM, %OS,
%Ou, %OU, %OV, %Ow, %OW, %Oy, where the effect of the O modifier is to use
alternative numeric symbols (say, roman numerals), and that of the E
modifier is to use a locale-dependent alternative representation.
*/
// Strftime delegates to the platform-specific strftime implementation.
// The named result parameter of the original declaration was removed: it was
// never referenced (no naked return), so a plain result type is clearer.
func Strftime(format string, t time.Time) string {
	return strftime(format, t)
}
0.732209
0.677169
strftime.go
starcoder
package equality import ( operatorv1alpha1 "github.com/projectcontour/contour-operator/api/v1alpha1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" ) // DaemonsetConfigChanged checks if current and expected DaemonSet match, // and if not, returns the updated DaemonSet resource. func DaemonsetConfigChanged(current, expected *appsv1.DaemonSet) (*appsv1.DaemonSet, bool) { changed := false updated := current.DeepCopy() if !apiequality.Semantic.DeepEqual(current.Labels, expected.Labels) { changed = true updated.Labels = expected.Labels } if !apiequality.Semantic.DeepEqual(current.Spec, expected.Spec) { changed = true updated.Spec = expected.Spec } if !changed { return nil, false } return updated, true } // DaemonSetSelectorsDiffer checks if the current and expected DaemonSet selectors differ. func DaemonSetSelectorsDiffer(current, expected *appsv1.DaemonSet) bool { return !apiequality.Semantic.DeepEqual(current.Spec.Selector, expected.Spec.Selector) } // JobConfigChanged checks if the current and expected Job match and if not, // returns true and the expected job. func JobConfigChanged(current, expected *batchv1.Job) (*batchv1.Job, bool) { changed := false updated := current.DeepCopy() if !apiequality.Semantic.DeepEqual(current.Labels, expected.Labels) { updated = expected changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.Parallelism, expected.Spec.Parallelism) { updated = expected changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.BackoffLimit, expected.Spec.BackoffLimit) { updated = expected changed = true } // The completions field is immutable, so no need to compare. Ignore job-generated // labels and only check the presence of the contour owning labels. 
if current.Spec.Template.Labels != nil { _, nameFound := current.Spec.Template.Labels[operatorv1alpha1.OwningContourNameLabel] _, nsFound := current.Spec.Template.Labels[operatorv1alpha1.OwningContourNsLabel] if !nameFound || !nsFound { updated = expected changed = true } } if !apiequality.Semantic.DeepEqual(current.Spec.Template.Spec, expected.Spec.Template.Spec) { updated = expected changed = true } if !changed { return nil, false } return updated, true } // DeploymentConfigChanged checks if the current and expected Deployment match // and if not, returns true and the expected Deployment. func DeploymentConfigChanged(current, expected *appsv1.Deployment) (*appsv1.Deployment, bool) { changed := false updated := current.DeepCopy() if !apiequality.Semantic.DeepEqual(current.Labels, expected.Labels) { updated = expected changed = true } if !apiequality.Semantic.DeepEqual(current.Spec, expected.Spec) { updated = expected changed = true } if !changed { return nil, false } return updated, true } // DeploymentSelectorsDiffer checks if the current and expected Deployment selectors differ. func DeploymentSelectorsDiffer(current, expected *appsv1.Deployment) bool { return !apiequality.Semantic.DeepEqual(current.Spec.Selector, expected.Spec.Selector) } // ClusterIPServiceChanged checks if the spec of current and expected match and if not, // returns true and the expected Service resource. The cluster IP is not compared // as it's assumed to be dynamically assigned. func ClusterIPServiceChanged(current, expected *corev1.Service) (*corev1.Service, bool) { changed := false updated := current.DeepCopy() // Spec can't simply be matched since clusterIP is being dynamically assigned. 
if len(current.Spec.Ports) != len(expected.Spec.Ports) { updated.Spec.Ports = expected.Spec.Ports changed = true } else { if !apiequality.Semantic.DeepEqual(current.Spec.Ports, expected.Spec.Ports) { updated.Spec.Ports = expected.Spec.Ports changed = true } } if !apiequality.Semantic.DeepEqual(current.Spec.Selector, expected.Spec.Selector) { updated.Spec.Selector = expected.Spec.Selector changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.SessionAffinity, expected.Spec.SessionAffinity) { updated.Spec.SessionAffinity = expected.Spec.SessionAffinity changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.Type, expected.Spec.Type) { updated.Spec.Type = expected.Spec.Type changed = true } if !changed { return nil, false } return updated, true } // LoadBalancerServiceChanged checks if current and expected match and if not, returns // true and the expected Service resource. The healthCheckNodePort and a port's nodePort // are not compared since they are dynamically assigned. func LoadBalancerServiceChanged(current, expected *corev1.Service) (*corev1.Service, bool) { changed := false updated := current.DeepCopy() // Ports can't simply be matched since some fields are being dynamically assigned. 
if len(current.Spec.Ports) != len(expected.Spec.Ports) { updated.Spec.Ports = expected.Spec.Ports changed = true } else { for i, p := range current.Spec.Ports { if !apiequality.Semantic.DeepEqual(p.Name, expected.Spec.Ports[i].Name) { updated.Spec.Ports[i].Name = expected.Spec.Ports[i].Name changed = true } if !apiequality.Semantic.DeepEqual(p.Protocol, expected.Spec.Ports[i].Protocol) { updated.Spec.Ports[i].Protocol = expected.Spec.Ports[i].Protocol changed = true } if !apiequality.Semantic.DeepEqual(p.Port, expected.Spec.Ports[i].Port) { updated.Spec.Ports[i].Port = expected.Spec.Ports[i].Port changed = true } if !apiequality.Semantic.DeepEqual(p.TargetPort, expected.Spec.Ports[i].TargetPort) { updated.Spec.Ports[i].TargetPort = expected.Spec.Ports[i].TargetPort changed = true } } } if !apiequality.Semantic.DeepEqual(current.Spec.Selector, expected.Spec.Selector) { updated.Spec.Selector = expected.Spec.Selector changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.ExternalTrafficPolicy, expected.Spec.ExternalTrafficPolicy) { updated.Spec.ExternalTrafficPolicy = expected.Spec.ExternalTrafficPolicy changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.SessionAffinity, expected.Spec.SessionAffinity) { updated.Spec.SessionAffinity = expected.Spec.SessionAffinity changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.Type, expected.Spec.Type) { updated.Spec.Type = expected.Spec.Type changed = true } if !apiequality.Semantic.DeepEqual(current.Annotations, expected.Annotations) { updated.Annotations = expected.Annotations changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.LoadBalancerIP, expected.Spec.LoadBalancerIP) { updated.Spec.LoadBalancerIP = expected.Spec.LoadBalancerIP changed = true } if !changed { return nil, false } return updated, true } // NodePortServiceChanged checks if current and expected match and if not, returns // true and the expected Service resource. 
The healthCheckNodePort is not compared // since it's dynamically assigned. func NodePortServiceChanged(current, expected *corev1.Service) (*corev1.Service, bool) { changed := false updated := current.DeepCopy() if len(current.Spec.Ports) != len(expected.Spec.Ports) { updated.Spec.Ports = expected.Spec.Ports changed = true } for i, p := range current.Spec.Ports { if !apiequality.Semantic.DeepEqual(p, expected.Spec.Ports[i]) { updated.Spec.Ports = expected.Spec.Ports changed = true } } if !apiequality.Semantic.DeepEqual(current.Spec.Selector, expected.Spec.Selector) { updated.Spec.Selector = expected.Spec.Selector changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.ExternalTrafficPolicy, expected.Spec.ExternalTrafficPolicy) { updated.Spec.ExternalTrafficPolicy = expected.Spec.ExternalTrafficPolicy changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.SessionAffinity, expected.Spec.SessionAffinity) { updated.Spec.SessionAffinity = expected.Spec.SessionAffinity changed = true } if !apiequality.Semantic.DeepEqual(current.Spec.Type, expected.Spec.Type) { updated.Spec.Type = expected.Spec.Type changed = true } if !apiequality.Semantic.DeepEqual(current.Annotations, expected.Annotations) { updated.Annotations = expected.Annotations changed = true } if !changed { return nil, false } return updated, true } // ContourStatusChanged checks if current and expected match and if not, // returns true. func ContourStatusChanged(current, expected operatorv1alpha1.ContourStatus) bool { if current.AvailableContours != expected.AvailableContours { return true } if current.AvailableEnvoys != expected.AvailableEnvoys { return true } if !apiequality.Semantic.DeepEqual(current.Conditions, expected.Conditions) { return true } return false } // NamespaceConfigChanged checks if the current and expected Namespace match // and if not, returns true and the expected Namespace. 
func NamespaceConfigChanged(current, expected *corev1.Namespace) (*corev1.Namespace, bool) { changed := false updated := current.DeepCopy() if !apiequality.Semantic.DeepEqual(current.Labels, expected.Labels) { updated = expected changed = true } if !changed { return nil, false } return updated, true } // ServiceAccountConfigChanged checks if the current and expected ServiceAccount // match and if not, returns true and the expected ServiceAccount. func ServiceAccountConfigChanged(current, expected *corev1.ServiceAccount) (*corev1.ServiceAccount, bool) { changed := false updated := current.DeepCopy() if !apiequality.Semantic.DeepEqual(current.Labels, expected.Labels) { updated = expected changed = true } if !changed { return nil, false } return updated, true } // ClusterRoleConfigChanged checks if the current and expected ClusterRole // match and if not, returns true and the expected ClusterRole. func ClusterRoleConfigChanged(current, expected *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool) { changed := false updated := current.DeepCopy() if !apiequality.Semantic.DeepEqual(current.Labels, expected.Labels) { changed = true updated.Labels = expected.Labels } if !apiequality.Semantic.DeepEqual(current.Rules, expected.Rules) { changed = true updated.Rules = expected.Rules } if !changed { return nil, false } return updated, true } // ClusterRoleBindingConfigChanged checks if the current and expected ClusterRoleBinding // match and if not, returns true and the expected ClusterRoleBinding. 
func ClusterRoleBindingConfigChanged(current, expected *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool) { changed := false updated := current.DeepCopy() if !apiequality.Semantic.DeepEqual(current.Labels, expected.Labels) { changed = true updated.Labels = expected.Labels } if !apiequality.Semantic.DeepEqual(current.Subjects, expected.Subjects) { changed = true updated.Subjects = expected.Subjects } if !apiequality.Semantic.DeepEqual(current.RoleRef, expected.RoleRef) { changed = true updated.RoleRef = expected.RoleRef } if !changed { return nil, false } return updated, true } // RoleConfigChanged checks if the current and expected Role match // and if not, returns true and the expected Role. func RoleConfigChanged(current, expected *rbacv1.Role) (*rbacv1.Role, bool) { changed := false updated := current.DeepCopy() if !apiequality.Semantic.DeepEqual(current.Labels, expected.Labels) { changed = true updated.Labels = expected.Labels } if !apiequality.Semantic.DeepEqual(current.Rules, expected.Rules) { changed = true updated.Rules = expected.Rules } if !changed { return nil, false } return updated, true } // RoleBindingConfigChanged checks if the current and expected RoleBinding // match and if not, returns true and the expected RoleBinding. func RoleBindingConfigChanged(current, expected *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool) { changed := false updated := current.DeepCopy() if !apiequality.Semantic.DeepEqual(current.Labels, expected.Labels) { changed = true updated.Labels = expected.Labels } if !apiequality.Semantic.DeepEqual(current.Subjects, expected.Subjects) { changed = true updated.Subjects = expected.Subjects } if !apiequality.Semantic.DeepEqual(current.RoleRef, expected.RoleRef) { changed = true updated.RoleRef = expected.RoleRef } if !changed { return nil, false } return updated, true }
internal/equality/equality.go
0.740831
0.423339
equality.go
starcoder
package movers

import (
	"github.com/wieku/danser-go/app/beatmap/difficulty"
	"github.com/wieku/danser-go/app/beatmap/objects"
	"github.com/wieku/danser-go/app/settings"
	"github.com/wieku/danser-go/framework/math/curves"
	"github.com/wieku/danser-go/framework/math/math32"
	"github.com/wieku/danser-go/framework/math/mutils"
	"github.com/wieku/danser-go/framework/math/vector"
	"math"
)

// AngleOffsetMover moves the cursor between two hit objects along a cubic
// Bezier curve whose control points are offset by configurable angles
// (configured under CursorDance.MoverSettings.Flower).
type AngleOffsetMover struct {
	*basicMover

	// curve is the Bezier path computed by SetObjects for the current pair.
	curve *curves.Bezier

	// lastAngle is the control-point angle used for the previous segment;
	// it is carried over so consecutive segments flow into each other.
	lastAngle float32
	// lastPoint is the previous segment's start position, used by the
	// stream-angle check in SetObjects.
	lastPoint vector.Vector2f
	// invert flips between +1 and -1 to alternate the offset direction.
	invert float32
}

// NewAngleOffsetMover returns a fresh mover with zeroed state.
func NewAngleOffsetMover() MultiPointMover {
	return &AngleOffsetMover{basicMover: &basicMover{}}
}

// Reset reinitializes the mover state for a new play.
func (mover *AngleOffsetMover) Reset(diff *difficulty.Difficulty, id int) {
	mover.basicMover.Reset(diff, id)

	mover.lastAngle = 0
	mover.invert = 1
	mover.lastPoint = vector.NewVec2f(0, 0)
}

// SetObjects computes the Bezier curve from objs[0] to objs[1] and returns 2
// (the number of objects consumed). Control points are placed along angles
// derived from slider end/start angles when the objects are sliders
// (objects.ILongObject), or from the configured angle offsets otherwise.
func (mover *AngleOffsetMover) SetObjects(objs []objects.IHitObject) int {
	// Per-mover config; the modulo lets multiple cursors cycle through
	// the configured Flower settings.
	config := settings.CursorDance.MoverSettings.Flower[mover.id%len(settings.CursorDance.MoverSettings.Flower)]

	start, end := objs[0], objs[1]

	mover.startTime = start.GetEndTime()
	mover.endTime = end.GetStartTime()

	timeDelta := mover.endTime - mover.startTime

	startPos := start.GetStackedEndPositionMod(mover.diff.Mods)
	endPos := end.GetStackedStartPositionMod(mover.diff.Mods)

	distance := startPos.Dst(endPos)

	// ok1/ok2: whether start/end are long objects (sliders/spinners),
	// which expose end/start angles for the control points.
	s1, ok1 := start.(objects.ILongObject)
	s2, ok2 := end.(objects.ILongObject)

	var points []vector.Vector2f

	scaledDistance := distance * float32(config.DistanceMult)
	newAngle := float32(config.AngleOffset) * math32.Pi / 180.0

	// Long time gaps scale the control-point distance by time instead of
	// spatial distance (LongJump threshold is in the same unit as timeDelta).
	if start.GetStartTime() > 0 && config.LongJump >= 0 && timeDelta > float64(config.LongJump) {
		scaledDistance = float32(timeDelta) * float32(config.LongJumpMult)
	}

	if startPos == endPos {
		// Objects stacked on the same position: either loop out and back
		// (LongJumpOnEqualPos) or degenerate to a straight segment.
		if config.LongJumpOnEqualPos {
			scaledDistance = float32(timeDelta) * float32(config.LongJumpMult)

			mover.lastAngle += math.Pi

			pt1 := vector.NewVec2fRad(mover.lastAngle, scaledDistance).Add(startPos)

			if ok1 {
				pt1 = vector.NewVec2fRad(s1.GetEndAngleMod(mover.diff.Mods), scaledDistance).Add(startPos)
			}

			if !ok2 {
				angle := mover.lastAngle - newAngle*mover.invert

				pt2 := vector.NewVec2fRad(angle, scaledDistance).Add(endPos)

				mover.lastAngle = angle

				points = []vector.Vector2f{startPos, pt1, pt2, endPos}
			} else {
				pt2 := vector.NewVec2fRad(s2.GetStartAngleMod(mover.diff.Mods), scaledDistance).Add(endPos)

				points = []vector.Vector2f{startPos, pt1, pt2, endPos}
			}
		} else {
			points = []vector.Vector2f{startPos, endPos}
		}
	} else if ok1 && ok2 {
		// Slider -> slider: both control points follow the slider angles.
		mover.invert = -1 * mover.invert

		pt1 := vector.NewVec2fRad(s1.GetEndAngleMod(mover.diff.Mods), scaledDistance).Add(startPos)
		pt2 := vector.NewVec2fRad(s2.GetStartAngleMod(mover.diff.Mods), scaledDistance).Add(endPos)

		points = []vector.Vector2f{startPos, pt1, pt2, endPos}
	} else if ok1 {
		// Slider -> circle: first control point from the slider end angle,
		// second from the configured offset relative to the segment direction.
		mover.invert = -1 * mover.invert
		mover.lastAngle = startPos.AngleRV(endPos) - newAngle*mover.invert

		pt1 := vector.NewVec2fRad(s1.GetEndAngleMod(mover.diff.Mods), scaledDistance).Add(startPos)
		pt2 := vector.NewVec2fRad(mover.lastAngle, scaledDistance).Add(endPos)

		points = []vector.Vector2f{startPos, pt1, pt2, endPos}
	} else if ok2 {
		// Circle -> slider: mirror the previous angle for the first control
		// point, slider start angle for the second.
		mover.lastAngle += math.Pi

		pt1 := vector.NewVec2fRad(mover.lastAngle, scaledDistance).Add(startPos)
		pt2 := vector.NewVec2fRad(s2.GetStartAngleMod(mover.diff.Mods), scaledDistance).Add(endPos)

		points = []vector.Vector2f{startPos, pt1, pt2, endPos}
	} else {
		// Circle -> circle: alternate the offset side when the bend between
		// the previous point, start, and end exceeds the configured angle
		// (stream handling).
		if vector.AngleBetween32(startPos, mover.lastPoint, endPos) >= float32(config.AngleOffset)*math32.Pi/180.0 {
			mover.invert = -1 * mover.invert
			newAngle = float32(config.StreamAngleOffset) * math32.Pi / 180.0
		}

		angle := startPos.AngleRV(endPos) - newAngle*mover.invert

		pt1 := vector.NewVec2fRad(mover.lastAngle+math.Pi, scaledDistance).Add(startPos)
		pt2 := vector.NewVec2fRad(angle, scaledDistance).Add(endPos)

		mover.lastAngle = angle

		points = []vector.Vector2f{startPos, pt1, pt2, endPos}
	}

	mover.curve = curves.NewBezierNA(points)
	mover.lastPoint = startPos

	return 2
}

// Update returns the cursor position at the given time, clamping the curve
// parameter to [0, 1] outside the segment's time window.
func (mover *AngleOffsetMover) Update(time float64) vector.Vector2f {
	t := mutils.ClampF64((time-mover.startTime)/(mover.endTime-mover.startTime), 0, 1)
	return mover.curve.PointAt(float32(t))
}
app/dance/movers/angleoffset.go
0.679179
0.407717
angleoffset.go
starcoder
package metrics

// MonotonicCount tracks a raw counter, based on increasing counter values.
// Samples that have a lower value than the previous sample are ignored (since it usually
// means that the underlying raw counter has been reset).
// Example:
// submitting samples 2, 3, 6, 7 returns 5 (i.e. 7-2) on flush ;
// then submitting samples 10, 11 on the same MonotonicCount returns 4 (i.e. 11-7) on flush
type MonotonicCount struct {
	// previousSample/currentSample are the last two raw counter readings;
	// after a flush, previousSample carries the last pre-flush reading so
	// the first post-flush delta is computed against it.
	previousSample        float64
	currentSample         float64
	sampledSinceLastFlush bool
	hasPreviousSample     bool
	// value accumulates the positive deltas to be emitted at flush time.
	value float64

	// With flushFirstValue enabled (passed in MetricSample), these 2 differences apply:
	// 1. the sampled value will be flushed as-is if it's the first value sampled (and no other
	//    values are flushed until the flush). The assumption is that the underlying raw counter
	//    started from 0 and that any earlier value of the raw counter would'be been sampled
	//    earlier, so it's safe to flush the raw value as-is.
	// 2. a sample that has a lower value than the previous sample is not ignored, instead its
	//    value is used as the value to flush. The assumption is that the underlying raw counter was
	//    reset from 0.
	// This flag is used (for example) by the openmetrics check after its first run, to better
	// support openmetrics monotonic counters.
	flushFirstValue bool
}

// addSample folds a new raw counter reading into the pending delta.
// The timestamp parameter is unused here; it exists to satisfy the common
// metric-sampling signature.
func (mc *MonotonicCount) addSample(sample *MetricSample, timestamp float64) {
	if !mc.sampledSinceLastFlush {
		// First sample since the last flush: previousSample is left as-is
		// (it still holds the last pre-flush reading, if any).
		mc.currentSample = sample.Value
		mc.sampledSinceLastFlush = true
	} else {
		mc.previousSample, mc.currentSample = mc.currentSample, sample.Value
		mc.hasPreviousSample = true
	}
	mc.flushFirstValue = sample.FlushFirstValue

	// To handle cases where the samples are not monotonically increasing, we always add the difference
	// between 2 consecutive samples to the value that'll be flushed (if the difference is >0).
	diff := mc.currentSample - mc.previousSample
	if (mc.hasPreviousSample || mc.flushFirstValue) && diff >= 0. {
		mc.value += diff
	} else if mc.flushFirstValue {
		// Negative delta with flushFirstValue: treat the raw counter as
		// reset from 0 and flush the new reading as-is.
		mc.value = mc.currentSample
	}
}

// flush emits the accumulated delta as a single count point and resets the
// per-interval state. It returns NoSerieError when nothing flushable was
// sampled since the last flush.
func (mc *MonotonicCount) flush(timestamp float64) ([]*Serie, error) {
	if !mc.sampledSinceLastFlush || !(mc.hasPreviousSample || mc.flushFirstValue) {
		return []*Serie{}, NoSerieError{}
	}

	value := mc.value

	// reset struct fields; previousSample keeps the last reading so the
	// next interval's first delta is computed against it.
	mc.previousSample, mc.currentSample, mc.value = mc.currentSample, 0., 0.
	mc.hasPreviousSample = true
	mc.sampledSinceLastFlush = false
	mc.flushFirstValue = false

	return []*Serie{
		{
			// we use the timestamp passed to the flush
			Points: []Point{{Ts: timestamp, Value: value}},
			MType:  APICountType,
		},
	}, nil
}

// isStateful reports that this metric type carries state across flushes.
func (mc *MonotonicCount) isStateful() bool {
	return true
}
pkg/metrics/monotonic_count.go
0.888396
0.601857
monotonic_count.go
starcoder
package kml import ( "github.com/twpayne/go-kml" "github.com/twpayne/go-geom" ) // Encode encodes an arbitrary geometry. func Encode(g geom.T) (kml.Element, error) { switch g := g.(type) { case *geom.Point: return EncodePoint(g), nil case *geom.LineString: return EncodeLineString(g), nil case *geom.LinearRing: return EncodeLinearRing(g), nil case *geom.MultiLineString: return EncodeMultiLineString(g), nil case *geom.MultiPoint: return EncodeMultiPoint(g), nil case *geom.MultiPolygon: return EncodeMultiPolygon(g), nil case *geom.Polygon: return EncodePolygon(g), nil case *geom.GeometryCollection: return EncodeGeometryCollection(g) default: return nil, geom.ErrUnsupportedType{Value: g} } } // EncodeLineString encodes a LineString. func EncodeLineString(ls *geom.LineString) kml.Element { flatCoords := ls.FlatCoords() return kml.LineString(kml.CoordinatesFlat(flatCoords, 0, len(flatCoords), ls.Stride(), dim(ls.Layout()))) } // EncodeLinearRing encodes a LinearRing. func EncodeLinearRing(lr *geom.LinearRing) kml.Element { flatCoords := lr.FlatCoords() return kml.LinearRing(kml.CoordinatesFlat(flatCoords, 0, len(flatCoords), lr.Stride(), dim(lr.Layout()))) } // EncodeMultiLineString encodes a MultiLineString. func EncodeMultiLineString(mls *geom.MultiLineString) kml.Element { lineStrings := make([]kml.Element, mls.NumLineStrings()) flatCoords := mls.FlatCoords() ends := mls.Ends() stride := mls.Stride() d := dim(mls.Layout()) offset := 0 for i, end := range ends { lineStrings[i] = kml.LineString(kml.CoordinatesFlat(flatCoords, offset, end, stride, d)) offset = end } return kml.MultiGeometry(lineStrings...) } // EncodeMultiPoint encodes a MultiPoint. 
func EncodeMultiPoint(mp *geom.MultiPoint) kml.Element { points := make([]kml.Element, mp.NumPoints()) flatCoords := mp.FlatCoords() stride := mp.Stride() d := dim(mp.Layout()) for i, offset, end := 0, 0, len(flatCoords); offset < end; i++ { points[i] = kml.Point(kml.CoordinatesFlat(flatCoords, offset, offset+stride, stride, d)) offset += stride } return kml.MultiGeometry(points...) } // EncodeMultiPolygon encodes a MultiPolygon. func EncodeMultiPolygon(mp *geom.MultiPolygon) kml.Element { polygons := make([]kml.Element, mp.NumPolygons()) flatCoords := mp.FlatCoords() endss := mp.Endss() stride := mp.Stride() d := dim(mp.Layout()) offset := 0 for i, ends := range endss { boundaries := make([]kml.Element, len(ends)) for j, end := range ends { linearRing := kml.LinearRing(kml.CoordinatesFlat(flatCoords, offset, end, stride, d)) if j == 0 { boundaries[j] = kml.OuterBoundaryIs(linearRing) } else { boundaries[j] = kml.InnerBoundaryIs(linearRing) } offset = end } polygons[i] = kml.Polygon(boundaries...) } return kml.MultiGeometry(polygons...) } // EncodePoint encodes a Point. func EncodePoint(p *geom.Point) kml.Element { flatCoords := p.FlatCoords() return kml.Point(kml.CoordinatesFlat(flatCoords, 0, len(flatCoords), p.Stride(), dim(p.Layout()))) } // EncodePolygon encodes a Polygon. func EncodePolygon(p *geom.Polygon) kml.Element { boundaries := make([]kml.Element, p.NumLinearRings()) stride := p.Stride() flatCoords := p.FlatCoords() d := dim(p.Layout()) offset := 0 for i, end := range p.Ends() { linearRing := kml.LinearRing(kml.CoordinatesFlat(flatCoords, offset, end, stride, d)) if i == 0 { boundaries[i] = kml.OuterBoundaryIs(linearRing) } else { boundaries[i] = kml.InnerBoundaryIs(linearRing) } offset = end } return kml.Polygon(boundaries...) } // EncodeGeometryCollection encodes a GeometryCollection. 
func EncodeGeometryCollection(g *geom.GeometryCollection) (kml.Element, error) { geometries := make([]kml.Element, g.NumGeoms()) for i, g := range g.Geoms() { var err error geometries[i], err = Encode(g) if err != nil { return nil, err } } return kml.MultiGeometry(geometries...), nil } func dim(l geom.Layout) int { switch l { case geom.XY, geom.XYM: return 2 default: return 3 } }
encoding/kml/kml.go
0.778733
0.573917
kml.go
starcoder
package tfutils func (s SimpleSchema) Required(status bool) SimpleSchema { s.s.Required = status return s } func (s SimpleSchema) Optional(status bool) SimpleSchema { s.s.Optional = status return s } func (s SimpleSchema) Computed(status bool) SimpleSchema { s.s.Computed = status return s } func (s SimpleSchema) Default(d interface{}) SimpleSchema { s.s.Default = d return s.Optional(true) } func (s SimpleSchema) ConflictsWith(t ...string) SimpleSchema { s.s.ConflictsWith = t return s } func (s SimpleSchema) ExactlyOneOf(t ...string) SimpleSchema { s.s.ExactlyOneOf = t return s } func (s SimpleSchema) AtLeastOneOf(t ...string) SimpleSchema { s.s.AtLeastOneOf = t return s } func (s SimpleSchema) RequiredWith(t ...string) SimpleSchema { s.s.RequiredWith = t return s } func (s SimpleSchema) Sensitive(status bool) SimpleSchema { s.s.Sensitive = status return s } func (s ListSchema) Required(status bool) ListSchema { s.s.Required = status return s } func (s ListSchema) Optional(status bool) ListSchema { s.s.Optional = status return s } func (s ListSchema) Computed(status bool) ListSchema { s.s.Computed = status return s } func (s ListSchema) Default(d interface{}) ListSchema { s.s.Default = d return s.Optional(true) } func (s ListSchema) ConflictsWith(t ...string) ListSchema { s.s.ConflictsWith = t return s } func (s ListSchema) ExactlyOneOf(t ...string) ListSchema { s.s.ExactlyOneOf = t return s } func (s ListSchema) AtLeastOneOf(t ...string) ListSchema { s.s.AtLeastOneOf = t return s } func (s ListSchema) RequiredWith(t ...string) ListSchema { s.s.RequiredWith = t return s } func (s ListSchema) Sensitive(status bool) ListSchema { s.s.Sensitive = status return s } func (s SetSchema) Required(status bool) SetSchema { s.s.Required = status return s } func (s SetSchema) Optional(status bool) SetSchema { s.s.Optional = status return s } func (s SetSchema) Computed(status bool) SetSchema { s.s.Computed = status return s } func (s SetSchema) Default(d interface{}) SetSchema { 
s.s.Default = d return s.Optional(true) } func (s SetSchema) ConflictsWith(t ...string) SetSchema { s.s.ConflictsWith = t return s } func (s SetSchema) ExactlyOneOf(t ...string) SetSchema { s.s.ExactlyOneOf = t return s } func (s SetSchema) AtLeastOneOf(t ...string) SetSchema { s.s.AtLeastOneOf = t return s } func (s SetSchema) RequiredWith(t ...string) SetSchema { s.s.RequiredWith = t return s } func (s SetSchema) Sensitive(status bool) SetSchema { s.s.Sensitive = status return s } func (s MapSchema) Required(status bool) MapSchema { s.s.Required = status return s } func (s MapSchema) Optional(status bool) MapSchema { s.s.Optional = status return s } func (s MapSchema) Computed(status bool) MapSchema { s.s.Computed = status return s } func (s MapSchema) Default(d interface{}) MapSchema { s.s.Default = d return s.Optional(true) } func (s MapSchema) ConflictsWith(t ...string) MapSchema { s.s.ConflictsWith = t return s } func (s MapSchema) ExactlyOneOf(t ...string) MapSchema { s.s.ExactlyOneOf = t return s } func (s MapSchema) AtLeastOneOf(t ...string) MapSchema { s.s.AtLeastOneOf = t return s } func (s MapSchema) RequiredWith(t ...string) MapSchema { s.s.RequiredWith = t return s } func (s MapSchema) Sensitive(status bool) MapSchema { s.s.Sensitive = status return s }
schema.go
0.850375
0.608769
schema.go
starcoder
package tedi import ( "reflect" "testing" "unsafe" "github.com/stretchr/testify/require" "go.uber.org/dig" ) // Test registers a function as a test. func (t *Tedi) Test(name string, fn interface{}, labels ...string) { testsLabel := newStringSet(labels...) matchedLabels := testsLabel.Intersect(t.labels) matchedLabels = matchedLabels.Intersect(t.runLabels) if len(matchedLabels) > 0 { // Ignore test if the groupset does not overlap with the running set. testFn := t.wrapTest(name, fn, matchedLabels.List()...) t.addTest(name, testFn) } } // BeforeTest registers a function as a beforeTest hook. func (t *Tedi) BeforeTest(fn interface{}) { t.beforeTests = append(t.beforeTests, fn) } // AfterTest registers a function as a afterTest hook. func (t *Tedi) AfterTest(fn interface{}) { t.afterTests = append(t.afterTests, fn) } type testFunc func(t *testing.T) func (t *Tedi) wrapTest(name string, fn interface{}, labels ...string) testFunc { return func(test *testing.T) { c, t, err := t.createContainer(test, name, labels...) require.NoError(test, err, "Failed to build container for test: %s", name) require.NoError(test, t.onStart(), "Failed to run onStart for test: %s", name) t.running = true defer func() { require.NoError(test, t.onEnd(), "Failed to run onEnd for test: %s", name) }() require.NoError(t, c.Invoke(fn), "Failed to Invoke test: %s", name) } } func (t *Tedi) addTest(name string, fn testFunc) { tests := reflect.ValueOf(t.m).Elem().FieldByName("tests") // tests is a private field on the tesing.M struct so we need to do this trick in order to add new tests. 
tests = reflect.NewAt(tests.Type(), unsafe.Pointer(tests.UnsafeAddr())) internalTestType := tests.Type().Elem().Elem() newTest := reflect.New(internalTestType) newTest.Elem().FieldByName("Name").Set(reflect.ValueOf(name)) newTest.Elem().FieldByName("F").Set(reflect.ValueOf(fn)) res := reflect.Append(tests.Elem(), newTest.Elem()) tests.Elem().Set(res) } func (t *Tedi) createT(test *testing.T, container *dig.Container, testName string, testLabels ...string) *T { res := &T{ T: test, tedi: t, container: container, running: false, testName: testName, testLabels: testLabels, beforeTests: t.beforeTests[:], afterTests: t.afterTests[:], } return res } // T extends testing.T struct with hooks to be called before and // after the test has been executed and a Run method that also // works with dependency injection. type T struct { *testing.T tedi *Tedi container *dig.Container running bool testName string testLabels []string beforeTests []interface{} afterTests []interface{} } func (t *T) onStart() error { for _, fn := range t.beforeTests { if err := t.container.Invoke(fn); err != nil { return err } } return nil } func (t *T) onEnd() error { for i := range t.afterTests { if err := t.container.Invoke(t.afterTests[len(t.afterTests)-i-1]); err != nil { return err } } return nil } // BeforeTest register a function to be called before a test will run. func (t *T) BeforeTest(fn interface{}) { if t.running { require.NoError(t, t.container.Invoke(fn), "Failed to run BeforeTest for test: %s", t.testName) return } t.beforeTests = append(t.beforeTests, fn) } // AfterTest register a function to be called once the test was executed. func (t *T) AfterTest(fn interface{}) { t.afterTests = append(t.afterTests, fn) } // Run fn as a subtest of t similar to how testing.T.Run would work. 
func (t *T) Run(name string, fn interface{}) bool { return t.T.Run(name, t.tedi.wrapTest(name, fn, t.testLabels...)) } func (t *T) Labels() []string { return t.testLabels } func (t *T) HasLabel(label string) bool { for _, l := range t.testLabels { if l == label { return true } } return false }
test.go
0.586049
0.512632
test.go
starcoder
Package workflow implements a workflow manager to be used for implementing composable kubeadm workflows. Composable kubeadm workflows are built by an ordered sequence of phases; each phase can have it's own, nested, ordered sequence of sub phases. For instance preflight Run master pre-flight checks certs Generates all PKI assets necessary to establish the control plane /ca Generates a self-signed Kubernetes CA to provision identities for Kubernetes components /apiserver Generates an API server serving certificate and key ... kubeconfig Generates all kubeconfig files necessary to establish the control plane /admin Generates a kubeconfig file for the admin to use and for kubeadm itself /kubelet Generates a kubeconfig file for the kubelet to use. ... ... Phases are designed to be reusable across different kubeadm workflows thus allowing e.g. reuse of phase certs in both kubeadm init and kubeadm join --control-plane workflows. Each workflow can be defined and managed using a Runner, that will run all the phases according to the given order; nested phases will be executed immediately after their parent phase. The phase runner can be bound to a cobra command; this operation sets the command description giving evidence of the list of phases, and automatically creates sub commands for invoking phases atomically. Autogenerated sub commands get flags according to the following rule: - global flags will be always inherited by autogenerated commands (this is managed by cobra) - local flags defined in the parent command might be inherited by autogenerated commands, but this requires explicit opt-in so each phase can select the subset of relevant flags - it is possible to define additional flags that might be inherited by autogenerated commands via explicit opt-in, but are not applied to the parent command In order to keep flags definition under control, please refer to the "k8s.io/kubernetes/cmd/kubeadm/app/cmd/options" package. */ package workflow
cmd/kubeadm/app/cmd/phases/workflow/doc.go
0.726911
0.654895
doc.go
starcoder
package redshift import ( "fmt" "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) func dataSourceRedshiftSchema() *schema.Resource { return &schema.Resource{ Description: ` A database contains one or more named schemas. Each schema in a database contains tables and other kinds of named objects. By default, a database has a single schema, which is named PUBLIC. You can use schemas to group database objects under a common name. Schemas are similar to file system directories, except that schemas cannot be nested. `, Read: RedshiftResourceFunc(dataSourceRedshiftSchemaRead), Schema: map[string]*schema.Schema{ schemaNameAttr: { Type: schema.TypeString, Required: true, Description: "Name of the schema.", StateFunc: func(val interface{}) string { return strings.ToLower(val.(string)) }, }, schemaOwnerAttr: { Type: schema.TypeString, Computed: true, Description: "Name of the schema owner.", }, schemaQuotaAttr: { Type: schema.TypeInt, Computed: true, Description: "The maximum amount of disk space that the specified schema can use. GB is the default unit of measurement.", }, schemaExternalSchemaAttr: { Type: schema.TypeList, Optional: true, Computed: true, Description: "Configures the schema as an external schema. See https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_SCHEMA.html", MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "database_name": { Type: schema.TypeString, Computed: true, Description: "The database where the external schema can be found", }, "data_catalog_source": { Type: schema.TypeList, Description: "Configures the external schema from the AWS Glue Data Catalog", Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "region": { Type: schema.TypeString, Optional: true, Computed: true, Description: "If the external database is defined in an Athena data catalog or the AWS Glue Data Catalog, the AWS Region in which the database is located. 
This parameter is required if the database is defined in an external Data Catalog.", }, "iam_role_arns": { Type: schema.TypeList, Computed: true, Description: `The Amazon Resource Name (ARN) for the IAM roles that your cluster uses for authentication and authorization. As a minimum, the IAM roles must have permission to perform a LIST operation on the Amazon S3 bucket to be accessed and a GET operation on the Amazon S3 objects the bucket contains. If the external database is defined in an Amazon Athena data catalog or the AWS Glue Data Catalog, the IAM role must have permission to access Athena unless catalog_role is specified. For more information, see https://docs.aws.amazon.com/redshift/latest/dg/c-spectrum-iam-policies.html. When you attach a role to your cluster, your cluster can assume that role to access Amazon S3, Athena, and AWS Glue on your behalf. If a role attached to your cluster doesn't have access to the necessary resources, you can chain another role, possibly belonging to another account. Your cluster then temporarily assumes the chained role to access the data. You can also grant cross-account access by chaining roles. You can chain a maximum of 10 roles. Each role in the chain assumes the next role in the chain, until the cluster assumes the role at the end of chain. To chain roles, you establish a trust relationship between the roles. A role that assumes another role must have a permissions policy that allows it to assume the specified role. In turn, the role that passes permissions must have a trust policy that allows it to pass its permissions to another role. 
For more information, see https://docs.aws.amazon.com/redshift/latest/mgmt/authorizing-redshift-service.html#authorizing-redshift-service-chaining-roles`, Elem: &schema.Schema{ Type: schema.TypeString, }, }, "catalog_role_arns": { Type: schema.TypeList, Optional: true, Computed: true, Description: `The Amazon Resource Name (ARN) for the IAM roles that your cluster uses for authentication and authorization for the data catalog. If this is not specified, Amazon Redshift uses the specified iam_role_arns. The catalog role must have permission to access the Data Catalog in AWS Glue or Athena. For more information, see https://docs.aws.amazon.com/redshift/latest/dg/c-spectrum-iam-policies.html. To chain roles, you establish a trust relationship between the roles. A role that assumes another role must have a permissions policy that allows it to assume the specified role. In turn, the role that passes permissions must have a trust policy that allows it to pass its permissions to another role. For more information, see https://docs.aws.amazon.com/redshift/latest/mgmt/authorizing-redshift-service.html#authorizing-redshift-service-chaining-roles`, Elem: &schema.Schema{ Type: schema.TypeString, }, }, }, }, }, "hive_metastore_source": { Type: schema.TypeList, Description: "Configures the external schema from a Hive Metastore.", Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "hostname": { Type: schema.TypeString, Description: "The hostname of the hive metastore database.", Computed: true, }, "port": { Type: schema.TypeInt, Description: "The port number of the hive metastore. The default port number is 9083.", Optional: true, Computed: true, }, "iam_role_arns": { Type: schema.TypeList, Computed: true, Description: `The Amazon Resource Name (ARN) for the IAM roles that your cluster uses for authentication and authorization. 
As a minimum, the IAM roles must have permission to perform a LIST operation on the Amazon S3 bucket to be accessed and a GET operation on the Amazon S3 objects the bucket contains. If the external database is defined in an Amazon Athena data catalog or the AWS Glue Data Catalog, the IAM role must have permission to access Athena unless catalog_role is specified. For more information, see https://docs.aws.amazon.com/redshift/latest/dg/c-spectrum-iam-policies.html. When you attach a role to your cluster, your cluster can assume that role to access Amazon S3, Athena, and AWS Glue on your behalf. If a role attached to your cluster doesn't have access to the necessary resources, you can chain another role, possibly belonging to another account. Your cluster then temporarily assumes the chained role to access the data. You can also grant cross-account access by chaining roles. You can chain a maximum of 10 roles. Each role in the chain assumes the next role in the chain, until the cluster assumes the role at the end of chain. To chain roles, you establish a trust relationship between the roles. A role that assumes another role must have a permissions policy that allows it to assume the specified role. In turn, the role that passes permissions must have a trust policy that allows it to pass its permissions to another role. 
For more information, see https://docs.aws.amazon.com/redshift/latest/mgmt/authorizing-redshift-service.html#authorizing-redshift-service-chaining-roles`, Elem: &schema.Schema{ Type: schema.TypeString, }, }, }, }, }, "rds_postgres_source": { Type: schema.TypeList, Description: "Configures the external schema to reference data using a federated query to RDS POSTGRES or Aurora PostgreSQL.", Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "hostname": { Type: schema.TypeString, Description: "The hostname of the head node of the PostgreSQL database replica set.", Computed: true, }, "port": { Type: schema.TypeInt, Description: "The port number of the PostgreSQL database. The default port number is 5432.", Optional: true, Computed: true, }, "schema": { Type: schema.TypeString, Description: "The name of the PostgreSQL schema. The default schema is 'public'", Optional: true, Computed: true, }, "iam_role_arns": { Type: schema.TypeList, Computed: true, Description: `The Amazon Resource Name (ARN) for the IAM roles that your cluster uses for authentication and authorization. As a minimum, the IAM roles must have permission to perform a LIST operation on the Amazon S3 bucket to be accessed and a GET operation on the Amazon S3 objects the bucket contains. If the external database is defined in an Amazon Athena data catalog or the AWS Glue Data Catalog, the IAM role must have permission to access Athena unless catalog_role is specified. For more information, see https://docs.aws.amazon.com/redshift/latest/dg/c-spectrum-iam-policies.html. When you attach a role to your cluster, your cluster can assume that role to access Amazon S3, Athena, and AWS Glue on your behalf. If a role attached to your cluster doesn't have access to the necessary resources, you can chain another role, possibly belonging to another account. Your cluster then temporarily assumes the chained role to access the data. 
You can also grant cross-account access by chaining roles. You can chain a maximum of 10 roles. Each role in the chain assumes the next role in the chain, until the cluster assumes the role at the end of chain. To chain roles, you establish a trust relationship between the roles. A role that assumes another role must have a permissions policy that allows it to assume the specified role. In turn, the role that passes permissions must have a trust policy that allows it to pass its permissions to another role. For more information, see https://docs.aws.amazon.com/redshift/latest/mgmt/authorizing-redshift-service.html#authorizing-redshift-service-chaining-roles`, Elem: &schema.Schema{ Type: schema.TypeString, }, }, "secret_arn": { Type: schema.TypeString, Description: `The Amazon Resource Name (ARN) of a supported PostgreSQL database engine secret created using AWS Secrets Manager. For information about how to create and retrieve an ARN for a secret, see https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_create-basic-secret.html and https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_retrieve-secret.html in the AWS Secrets Manager User Guide.`, Computed: true, }, }, }, }, "rds_mysql_source": { Type: schema.TypeList, Description: "Configures the external schema to reference data using a federated query to RDS MYSQL or Aurora MySQL.", Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "hostname": { Type: schema.TypeString, Description: "The hostname of the head node of the MySQL database replica set.", Computed: true, }, "port": { Type: schema.TypeInt, Description: "The port number of the MySQL database. The default port number is 3306.", Optional: true, Computed: true, }, "iam_role_arns": { Type: schema.TypeList, Computed: true, Description: `The Amazon Resource Name (ARN) for the IAM roles that your cluster uses for authentication and authorization. 
As a minimum, the IAM roles must have permission to perform a LIST operation on the Amazon S3 bucket to be accessed and a GET operation on the Amazon S3 objects the bucket contains. If the external database is defined in an Amazon Athena data catalog or the AWS Glue Data Catalog, the IAM role must have permission to access Athena unless catalog_role is specified. For more information, see https://docs.aws.amazon.com/redshift/latest/dg/c-spectrum-iam-policies.html. When you attach a role to your cluster, your cluster can assume that role to access Amazon S3, Athena, and AWS Glue on your behalf. If a role attached to your cluster doesn't have access to the necessary resources, you can chain another role, possibly belonging to another account. Your cluster then temporarily assumes the chained role to access the data. You can also grant cross-account access by chaining roles. You can chain a maximum of 10 roles. Each role in the chain assumes the next role in the chain, until the cluster assumes the role at the end of chain. To chain roles, you establish a trust relationship between the roles. A role that assumes another role must have a permissions policy that allows it to assume the specified role. In turn, the role that passes permissions must have a trust policy that allows it to pass its permissions to another role. For more information, see https://docs.aws.amazon.com/redshift/latest/mgmt/authorizing-redshift-service.html#authorizing-redshift-service-chaining-roles`, Elem: &schema.Schema{ Type: schema.TypeString, }, }, "secret_arn": { Type: schema.TypeString, Description: `The Amazon Resource Name (ARN) of a supported MySQL database engine secret created using AWS Secrets Manager. 
For information about how to create and retrieve an ARN for a secret, see https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_create-basic-secret.html and https://docs.aws.amazon.com/secretsmanager/latest/userguide/manage_retrieve-secret.html in the AWS Secrets Manager User Guide.`, Computed: true, }, }, }, }, "redshift_source": { Type: schema.TypeList, Description: "Configures the external schema to reference datashare database.", Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "schema": { Type: schema.TypeString, Description: "The name of the datashare schema. The default schema is 'public'.", Optional: true, Computed: true, }, }, }, }, }, }, }, }, } } func dataSourceRedshiftSchemaRead(db *DBConnection, d *schema.ResourceData) error { var schemaOwner, schemaId, schemaType string // Step 1: get basic schema info err := db.QueryRow(` SELECT pg_namespace.oid, trim(pg_user_info.usename), trim(svv_all_schemas.schema_type) FROM svv_all_schemas INNER JOIN pg_namespace ON (svv_all_schemas.database_name = $1 and svv_all_schemas.schema_name = pg_namespace.nspname) LEFT JOIN pg_user_info ON (svv_all_schemas.database_name = $1 and pg_user_info.usesysid = svv_all_schemas.schema_owner) where svv_all_schemas.database_name = $1 AND svv_all_schemas.schema_name = $2`, db.client.databaseName, d.Get(schemaNameAttr).(string)).Scan(&schemaId, &schemaOwner, &schemaType) if err != nil { return err } d.SetId(schemaId) d.Set(schemaOwnerAttr, schemaOwner) switch { case schemaType == "local": return resourceRedshiftSchemaReadLocal(db, d) case schemaType == "external": return resourceRedshiftSchemaReadExternal(db, d) default: return fmt.Errorf(`Unsupported schema type "%s". Supported types are "local" and "external".`, schemaType) } }
redshift/data_source_redshift_schema.go
0.770206
0.449816
data_source_redshift_schema.go
starcoder
package stringunescape import ( "fmt" "strings" "github.com/relex/slog-agent/util" ) // Unescaper is used to search and unescape characters like '\n', '\t' etc // Unescaper instances contain no buffer and may be copied or concurrently used. type Unescaper struct { escapeChar byte escapableCharMap []byte } // NewUnescaper creates a StringEscaper to be used with map[string]string sources func NewUnescaper(escapeChar byte, mapping map[byte]byte) Unescaper { cmap := make([]byte, 256) for key, val := range mapping { cmap[key] = val } cmap[escapeChar] = escapeChar return Unescaper{escapeChar, cmap} } // FindFirst finds the index of the first escape char, or -1 func (e Unescaper) FindFirst(str string) int { return strings.IndexByte(str, e.escapeChar) } // FindFirstUnescaped finds the index of the first unescaped target, or -1 // This can be used to find, e.g. the second asterisk in `\*name: Foo*` func (e Unescaper) FindFirstUnescaped(str string, target byte) int { if e.escapableCharMap[target] == 0 { panic(fmt.Sprintf("target '%c' is not escapable", target)) } pos := 0 for pos < len(str) { c := str[pos] switch c { case e.escapeChar: pos += 2 case target: return pos default: pos++ } } return -1 } // Run unescapes the given string func (e Unescaper) Run(src string) string { first := strings.IndexByte(src, e.escapeChar) if first == -1 { return src } return e.RunFromFirst(src, first) } // RunFromFirst unescapes the given string, starting from the position of first escape char func (e Unescaper) RunFromFirst(src string, first int) string { dst := make([]byte, len(src)) dend := e.RunToBuffer(src, first, dst) return util.StringFromBytes(dst[:dend]) } // RunToBuffer unescapes the given string to destination buffer, starting from the position of first escape char // Returns the end / length in the destination buffer func (e Unescaper) RunToBuffer(src string, first int, dst []byte) int { si := first di := copy(dst, src[:si]) slimit := len(src) - 1 for si < slimit { // lookup escape 
char val := src[si+1] if c := e.escapableCharMap[val]; c != 0 { dst[di] = c di++ } else { dst[di] = e.escapeChar dst[di+1] = val di += 2 } si += 2 // find next escape char n := strings.IndexByte(src[si:], e.escapeChar) if n == -1 { n = len(src) - si } // copy all chars before next escape char di += copy(dst[di:], src[si:si+n]) si += n } if si < len(src) { di += copy(dst[di:], src[si:]) } return di }
util/stringunescape/unescape.go
0.580709
0.473231
unescape.go
starcoder
package quantum import ( "fmt" "math" "math/cmplx" "math/rand" "time" ) func init() { rand.Seed(time.Now().UnixNano()) } // Represents a quantum register type QReg struct { // The width (number of qubits) of this quantum register. width int // The complex amplitudes for each of the standard basis states. // There are math.Pow(2,width) of these. amplitudes []complex128 } // Constructor for a QReg func NewQReg(width int, values ...int) *QReg { qreg := &QReg{width, nil} qreg.Set(values...) return qreg } // Accessor for the width of a QReg func (qreg *QReg) Width() int { return qreg.width } // Copy a quantum register func (qreg *QReg) Copy() *QReg { new_qreg := &QReg{qreg.width, make([]complex128, len(qreg.amplitudes))} copy(new_qreg.amplitudes, qreg.amplitudes) return new_qreg } // Get the probability of observing a state func (qreg *QReg) StateProb(state int) float64 { return cmplx.Abs(qreg.amplitudes[state] * qreg.amplitudes[state]) } // Get the probability of observing a state for a specific bit func (qreg *QReg) BProb(index int, value int) float64 { prob := float64(0.0) bit := 1 << uint(index) bitnot := (1 - value) << uint(index) // Iterate through all the amplitudes where this bit is 1 for state := 0 | bit; state < len(qreg.amplitudes); state = (state + 1) | bit { prob += qreg.StateProb(state - bitnot) } return prob } // Set the QReg to a state in the standard basis. If no value is given, default // to the all zero state. If one value is given, interpret it as the integer // representation of a basis state. If a series of binary values are given, // interpret them as the binary representation of a basis state. func (qreg *QReg) Set(values ...int) { // The Hilbert space has dimension math.Pow(2,width). hilbert_space_dim := 1<<uint(qreg.width) qreg.amplitudes = make([]complex128, hilbert_space_dim) if len(values) == 0 { // Set to |0...0>. qreg.amplitudes[0] = 1 } else if len(values) == 1 { // Given an integer d, set to basis state |d>. 
if values[0] < 0 || values[0] >= hilbert_space_dim { err_str := fmt.Sprintf("Value of %d is too large for " + "QReg of width %d.", values[0], qreg.width) panic(err_str) } qreg.amplitudes[values[0]] = 1 } else if len(values) == qreg.width { // Given binary b_1, b_2, ..., b_k, set to basis state // |b_1 b_2 ... b_k>. basis_state_index := 0 for _, value := range values { basis_state_index <<= 1 if value < 0 || value > 1 { panic("Expected 0 or 1 when setting value of " + "quantum register.") } basis_state_index += value } qreg.amplitudes[basis_state_index] = 1 } else { panic("Bad values for quantum register.") } } // Set a particular bit in a QReg func (qreg *QReg) BSet(index int, value int) { if value > 1 { err_str := fmt.Sprintf("Value %d should be either 0 or 1", value) panic(err_str) } bit := 1 << uint(index) bitval := value << uint(index) bitnot := (1 - value) << uint(index) bprob := qreg.BProb(index, value) if bprob > 0 { amp_factor := complex(1.0/math.Sqrt(bprob), 0) // Alter every state. If it's the right qubit value, fix the // amplitude; otherwise, set the amplitude to 0. 
for state, amp := range qreg.amplitudes { if int(state)&bit == bitval { qreg.amplitudes[state] = amp * amp_factor } else { qreg.amplitudes[state] = complex(0, 0) } } } else { // Iterate through all the amplitudes where this bit is 1 for state := int(0) | bit; state < int(len(qreg.amplitudes)); state = (state + 1) | bit { // Add the amplitude of the old state to the new state old_state := state - bitval new_state := state - bitnot qreg.amplitudes[new_state] += qreg.amplitudes[old_state] qreg.amplitudes[old_state] = complex(0, 0) } } } // Measure a bit without collapsing its quantum state func (qreg *QReg) BMeasurePreserve(index int) int { if rand.Float64() < qreg.BProb(index, 0) { return 0 } return 1 } // Measure a bit (the quantum state of this qubit will collapse) func (qreg *QReg) BMeasure(index int) int { b := qreg.BMeasurePreserve(index) qreg.BSet(index, b) return b } // Measure a register without collapsing its quantum state func (qreg *QReg) MeasurePreserve() int { r := rand.Float64() sum := float64(0.0) for i, _ := range qreg.amplitudes { sum += qreg.StateProb(i) if r < sum { return i } } return len(qreg.amplitudes) - 1 } // Measure a register func (qreg *QReg) Measure() int { value := qreg.MeasurePreserve() var amp complex128 if real(qreg.amplitudes[value]) > 0 { amp = complex(1, 0) } else { amp = complex(-1, 0) } for i, _ := range qreg.amplitudes { qreg.amplitudes[i] = complex(0, 0) } qreg.amplitudes[value] = amp return value } func (qreg *QReg) PrintState(index int) { prob := qreg.StateProb(index) largest := (1 << uint(qreg.width)) - 1 padding := int(math.Floor(math.Log10(float64(largest)))) + 1 format := fmt.Sprintf("%%+f%%f|(%%%dd)%%0%db>", padding, qreg.width) fmt.Printf(format, qreg.amplitudes[index], prob, index, index) } func (qreg *QReg) PrintStateln(index int) { qreg.PrintState(index) fmt.Println() } func (qreg *QReg) Print() { for i, _ := range qreg.amplitudes { qreg.PrintStateln(i) } } func (qreg *QReg) PrintNonZero() { for i, state := range 
qreg.amplitudes { if state != 0 { qreg.PrintStateln(i) } } }
src/quantum/qreg.go
0.78403
0.636127
qreg.go
starcoder
package hll import ( "fmt" "sort" ) // Bitstrings are uint64. Rho values (bucket counts in M) are uint64. Indices and p values are uint. // rho results (position of first 1 in a bitstring) are uint8 because only 6 bits are required to // encode the position of a bit in a 64-bit sequence (log2(64)==6). // Return the position of the first set bit, starting with 1. This is the same as the number of // leading zeros + 1. Returns 63 if no bits were set. Since the result is in [1,63] it can be // encoded in 6 bits. func rho(x uint64) uint8 { var i uint8 for i = 0; i < 62 && x&1 == 0; i++ { x >>= 1 } return i + 1 } // x is a hash code. func encodeHash(x uint64, p, pPrime uint) (hashCode uint64) { if x&onesFromTo(64-pPrime, 63-p) == 0 { r := rho(extractShift(x, 0, 63-pPrime)) return concat([]concatInput{ {x, 64 - pPrime, 63}, {uint64(r), 0, 5}, {1, 0, 0}, // this just adds a 1 bit at the end }) } else { return concat([]concatInput{ {x, 64 - pPrime, 63}, {0, 0, 0}, // this just adds a 0 bit at the end }) } } // k is an encoded hash. // In the version of the paper that we're using, there are two off-by-one errors in GetIndex() in // Figure 7. We pointed these out to the authors, and they updated the appendix at // http://goo.gl/iU8Ig with a corrected algorithm. We're using the updated version. func getIndex(k uint64, p, pPrime uint) uint64 { if k&1 == 1 { index := extractShift(k, 7, p+6) // erratum from paper, start index is 7, not 6 return index } else { index := extractShift(k, 1, p) // erratum from paper, end index is p, not p+1 return index } } // k is an encoded hash. 
func decodeHash(k uint64, p, pPrime uint) (idx uint64, rhoW uint8) { var r uint8 if k&1 == 1 { r = uint8(extractShift(k, 1, 6) + uint64(pPrime-p)) } else { r = rho(extractShift(k, 1, pPrime-p-1)) } return getIndex(k, p, pPrime), r } type mergeElem struct { index uint64 rho uint8 encoded uint64 } type u64It func() (uint64, bool) type mergeElemIt func() (mergeElem, bool) func makeMergeElemIter(p, pPrime uint, input u64It) mergeElemIt { firstElem := true var lastIndex uint64 return func() (mergeElem, bool) { for { hashCode, ok := input() if !ok { return mergeElem{}, false } idx, r := decodeHash(hashCode, p, pPrime) if !firstElem && idx == lastIndex { // In the case where the tmp_set is being merged with the sparse_list, the tmp_set // may contain elements that have the same index value. In this case, they will // have been sorted so the one with the max rho value will come first. We should // discard all dupes after the first. continue } firstElem = false lastIndex = idx return mergeElem{idx, r, hashCode}, true } } } // The input should be sorted by hashcode. func makeU64SliceIt(in []uint64) u64It { idx := 0 return func() (uint64, bool) { if idx == len(in) { return 0, false } result := in[idx] idx++ return result, true } } // Both input iterators must be sorted by hashcode. func merge(p, pPrime uint, sizeEst uint64, it1, it2 u64It) *sparse { leftIt := makeMergeElemIter(p, pPrime, it1) rightIt := makeMergeElemIter(p, pPrime, it2) left, haveLeft := leftIt() right, haveRight := rightIt() output := newSparse(sizeEst) for haveLeft && haveRight { var toAppend uint64 if left.index < right.index { toAppend = left.encoded left, haveLeft = leftIt() } else if right.index < left.index { toAppend = right.encoded right, haveRight = rightIt() } else { // The indexes are equal. Keep the one with the highest rho value. 
if left.rho > right.rho { toAppend = left.encoded } else { toAppend = right.encoded } left, haveLeft = leftIt() right, haveRight = rightIt() } output.Add(toAppend) } for haveRight { output.Add(right.encoded) right, haveRight = rightIt() } for haveLeft { output.Add(left.encoded) left, haveLeft = leftIt() } return output } func toNormal(s *sparse, p, pPrime uint) normal { m := 1 << p M := newNormal(uint64(m)) it := s.GetIterator() for { k, ok := it() if !ok { break } idx, r := decodeHash(k, p, pPrime) val := maxU8(M.Get(uint64(idx)), r) M.Set(uint64(idx), val) } return M } func maxU8(x, y uint8) uint8 { if x >= y { return x } return y } func maxU64(x, y uint64) uint64 { if x >= y { return x } return y } // For debugging purposes, return the input as "binary/hex/decimal" func binU(x uint) string { return bin(uint64(x)) } // For debugging purposes, return the input as "binary/hex/decimal" func bin(x uint64) string { s := fmt.Sprintf("/%016x/%d", x, x) for i := 0; i < 64; i++ { thisBit := "0" if x&1 == 1 { thisBit = "1" } s = thisBit + s x >>= 1 } return s } func sortHashcodesByIndex(xs []uint64, p, pPrime uint) { sort.Sort(uint64Sorter{xs, p, pPrime}) } type uint64Sorter struct { xs []uint64 p, pPrime uint } func (u uint64Sorter) Len() int { return len(u.xs) } func (u uint64Sorter) Less(i, j int) bool { iIndex := getIndex(u.xs[i], u.p, u.pPrime) jIndex := getIndex(u.xs[j], u.p, u.pPrime) if iIndex != jIndex { return iIndex < jIndex } // When two elements have the same index, sort in descending order of rho. This means that the // highest rho value will be seen first, and subsequent elements can be discarded whem merging. _, iRho := decodeHash(u.xs[i], u.p, u.pPrime) _, jRho := decodeHash(u.xs[j], u.p, u.pPrime) return iRho > jRho } func (u uint64Sorter) Swap(i, j int) { tmp := u.xs[i] u.xs[i] = u.xs[j] u.xs[j] = tmp }
sparseutil.go
0.698021
0.666999
sparseutil.go
starcoder
package str import ( "kidy/utils" "math" "math/rand" "strings" "time" ) // Return the remainder of a string after a given value. func After(subject, search string) string { if search == "" { return subject } values := strings.Split(subject, search)[1:] if len(values) > 0 { return strings.Join(values, search) } return subject } // Get the portion of a string before a given value. func Before(subject, search string) string { if search == "" { return subject } return strings.Split(subject, search)[0] } // Return the length of the given string. func Length(value string) int { return len([]rune(value)) } // Convert a value to camel case. func Camel(value string) string { return Studly(value) } // Convert a value to studly caps case. func Studly(value string) (cstr string) { value = strings.Replace(value, "-", " ", -1) value = strings.Replace(value, "_", " ", -1) value = UcWords(value) return strings.Replace(value, " ", "", -1) } // Convert a string to snake case. func Snake(value, delimiter string) string { dlt := []byte(delimiter) bts := []byte(strings.Replace(UcWords(value), " ", "", -1)) var ss []byte for i, b := range bts { if 65 <= b && 90 >= b { b += 32 if i != 0 { ss = append(ss, dlt...) } } ss = append(ss, b) } return string(ss) } // Make a string's first character uppercase. func UcFirst(value string) string { str := []byte(value) if 97 <= str[0] && 122 >= str[0] { str[0] -= 32 } return string(str) } // Uppercase the first character of each word in a string // The optional separators contains the word separator characters func UcWords(value string, separators ...rune) string { if len(separators) == 0 { separators = []rune(" ") } values := Split(value, separators...) 
for i, str := range values { values[i] = UcFirst(str) } return strings.Join(values, " ") } // Generates cryptographically secure pseudo-random bytes // The length of the random string that should be returned in bytes func RandomBytes(n int) []byte { const letterBytes = "qwertyuiopasdfghjklzxcvbnm-QWERTYUIOPASDFGHJKLZXCVBNM_1234567890" bts := make([]byte, n) for i := range bts { rand.Seed(time.Now().UnixNano() + int64(n-i)) bts[i] = letterBytes[rand.Intn(len(letterBytes))] } return bts } // Split a string by a string // ...separators The boundary string func Split(str string, separators ...rune) []string { return strings.FieldsFunc(str, func(r rune) (flag bool) { for _, s := range separators { if s == r { flag = true } } return }) } const ( KIDY_STR_PAD_LEFT = 0 KIDY_STR_PAD_RIGHT = 1 KIDY_STR_PAD_BOTH = 2 ) // Padding a string to a certain length with another string // The `padStr` may be truncated if the required number of padding characters can't be evenly divided by the pad_string's length. // If the value of `length` is negative, less than, or equal to the length of the input string, no padding takes place, and string will be returned. // Optional argument `padType` can be // KDIY_STR_PAD_RIGHT, KDIY_STR_PAD_LEFT, or KIDY_STR_PAD_BOTH. // If pad_type is not specified it is assumed to be STR_PAD_RIGHT func Padding(str, padStr string, length, padType int) string { LEN := Length(str) if LEN >= length { return str } offset := length - LEN runes, rpad := []rune(str), []rune(padStr) var padding []rune for i := 0; i < offset/len(rpad); i++ { padding = append(padding, rpad...) } switch padType { case KIDY_STR_PAD_LEFT: runes = append(padding, runes...) case KIDY_STR_PAD_RIGHT: runes = append(runes, padding...) case KIDY_STR_PAD_BOTH: f := float64(offset) l, r := int(math.Floor(f/2)), int(math.Ceil(f/2)) runes = append(runes, padding[:r]...) runes = append(padding[:l], runes...) 
} return string(runes) } // Return part of a string // If `length` is given and is negative, then that many characters will be omitted from the end of string (after the start position has been calculated when a offset is negative). If offset denotes the position of this truncation or beyond, false will be returned. // If `length` is given and is positive, the string returned will contain at most length characters beginning from offset (depending on the length of string). func SubString(str string, offset, length int) string { runes := []rune(str) if offset >= len(runes) { return "" } if -1 == length { length = len(runes) - offset } l := offset + length l = utils.Ternary(l > len(runes), len(runes), l).(int) return string(runes[offset:l]) }
str/str.go
0.77223
0.438966
str.go
starcoder
package flake import ( "encoding/binary" "fmt" ) // Nil is the zero flake id. const Nil = ID(0) // Size returns the size (in bytes) of a flake id. const Size = 8 // These constants define the distribution of bits in a flake id. const ( // BucketBits is the number of bits dedicated to the bucket of the id BucketBits = 10 // TimestampBits is the number of bits dedicated to the timestamp (in milliseconds) of the id. TimestampBits = 42 // SequenceBits is the number of bits dedicated to the sequence number of the id. SequenceBits = 12 ) // These constants define the upper limits of all the components in a flake id. const ( // BucketLimit is the maximum bucket id allowed. BucketLimit = 1<<BucketBits - 1 // TimestampLimit is the maximum timestamp allowed. TimestampLimit = 1<<TimestampBits - 1 // SequenceLimit is the maximum sequence number allowed. SequenceLimit = 1<<SequenceBits - 1 ) // ID is a unique 64-bit unsigned integer generated based on time. type ID uint64 // FromBytes returns the id represented by the 8-byte byte array. func FromBytes(bytes []byte) (ID, error) { if len(bytes) != Size { return Nil, fmt.Errorf("unexpected number of bytes for flake id: %d", len(bytes)) } return ID(binary.BigEndian.Uint64(bytes)), nil } // Bucket returns the bucket component of the ID. func (f ID) Bucket() uint64 { return f.Uint64() >> (TimestampBits + SequenceBits) } // Time returns the time component of the ID. func (f ID) Time() Time { return Time(f.Uint64() << (BucketBits) >> (BucketBits + SequenceBits)) } // Sequence returns the sequence component of the ID. func (f ID) Sequence() uint64 { return f.Uint64() << (TimestampBits + BucketBits) >> (TimestampBits + BucketBits) } // Before returns true if this id comes before the provided id. Ordering is defined as first ordering // by the timestamp, then by the bucket, and lastly by the sequence. 
While there is a defined global // ordering of ids, the primary requirement for ordering is that there is ordering of time and then // sequence for a bucket. Bucket ordering may not be entirely accurate due to clock skew between // generators. func (f ID) Before(o ID) bool { fTime, oTime := f.Time(), o.Time() if fTime.Equal(oTime) { fBucket, oBucket := f.Bucket(), o.Bucket() if fBucket == oBucket { return f.Sequence() < o.Sequence() } return fBucket < oBucket } return fTime.Before(oTime) } // After returns true if this id comes after the provided id. See `Before` for a full description of // id ordering. func (f ID) After(o ID) bool { return !f.Equal(o) && !f.Before(o) } // Equal returns true if the two ids are equal and false otherwise. func (f ID) Equal(o ID) bool { return f == o } // Uint64 returns the uint64 representation of the ID. func (f ID) Uint64() uint64 { return uint64(f) } // Bytes returns a big-endian encoded byte array of the ID. func (f ID) Bytes() []byte { buf := make([]byte, Size) f.Put(buf) return buf } // Put places the id into the specified byte buffer. The buffer must be at least the size of // a uint64. The buffer is big-endian encoded. func (f ID) Put(buf []byte) { binary.BigEndian.PutUint64(buf, f.Uint64()) } // Binary returns the base-2 representation of the ID. func (f ID) Binary() string { return fmt.Sprintf("%064b", f.Uint64()) } // Octal returns the base-8 representation of the ID. func (f ID) Octal() string { return fmt.Sprintf("%022o", f.Uint64()) } // Hex returns the base-16 representation of the ID. func (f ID) Hex() string { return fmt.Sprintf("%016x", f.Uint64()) } func (f ID) String() string { return fmt.Sprintf("{Bucket: %d, Time: %s, Sequence: %d}", f.Bucket(), f.Time().StandardTime(), f.Sequence()) }
pkg/flake/id.go
0.840193
0.435241
id.go
starcoder
package colorpicker import ( "gioui.org/f32" "gioui.org/layout" "gioui.org/op" "gioui.org/op/clip" "gioui.org/op/paint" "gioui.org/unit" "image" "image/color" "math" ) const c = 0.55228475 // 4*(sqrt(2)-1)/3 func drawControl(p f32.Point, radius, width float32, gtx layout.Context) { width = float32(gtx.Px(unit.Dp(width))) radius = float32(gtx.Px(unit.Dp(radius))) - width p.X -= radius - width*2 p.Y -= radius - width*4 drawCircle(p, radius, width, color.NRGBA{A: 0xff}, gtx) p.X += width p.Y += width drawCircle(p, radius, width, color.NRGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff}, gtx) } func drawCircle(p f32.Point, r, width float32, col color.NRGBA, gtx layout.Context) { w := r * 2 defer op.Save(gtx.Ops).Load() var path clip.Path path.Begin(gtx.Ops) path.Move(f32.Point{X: p.X, Y: p.Y}) path.Move(f32.Point{X: w / 4 * 3, Y: r / 2}) path.Cube(f32.Point{X: 0, Y: r * c}, f32.Point{X: -r + r*c, Y: r}, f32.Point{X: -r, Y: r}) // SE path.Cube(f32.Point{X: -r * c, Y: 0}, f32.Point{X: -r, Y: -r + r*c}, f32.Point{X: -r, Y: -r}) // SW path.Cube(f32.Point{X: 0, Y: -r * c}, f32.Point{X: r - r*c, Y: -r}, f32.Point{X: r, Y: -r}) // NW path.Cube(f32.Point{X: r * c, Y: 0}, f32.Point{X: r, Y: r - r*c}, f32.Point{X: r, Y: r}) // NE clip.Stroke{Path: path.End(), Style: clip.StrokeStyle{Width: width}}.Op().Add(gtx.Ops) cons := gtx.Constraints dr := image.Rectangle{Min: image.Point{X: 0, Y: 0}, Max: image.Point{X: cons.Max.X, Y: cons.Max.Y}} clip.Rect(dr).Add(gtx.Ops) paint.ColorOp{Color: col}.Add(gtx.Ops) paint.PaintOp{}.Add(gtx.Ops) } func drawCheckerboard(gtx layout.Context) { w := gtx.Constraints.Max.X h := gtx.Constraints.Max.Y paint.FillShape(gtx.Ops, white, clip.Rect{Max: gtx.Constraints.Max}.Op()) size := h / 2 defer op.Save(gtx.Ops).Load() var path clip.Path path.Begin(gtx.Ops) count := int(math.Ceil(float64(w / size))) for i := 0; i < count; i++ { offset := 0 if math.Mod(float64(i), 2) == 0 { offset += size } path.MoveTo(f32.Point{X: float32(i * size), Y: float32(offset)}) 
path.Line(f32.Point{X: float32(size)}) path.Line(f32.Point{Y: float32(size)}) path.Line(f32.Point{X: float32(-size)}) path.Line(f32.Point{Y: float32(-size)}) } clip.Outline{Path: path.End()}.Op().Add(gtx.Ops) paint.ColorOp{Color: lightgrey}.Add(gtx.Ops) paint.PaintOp{}.Add(gtx.Ops) } var ( red = color.NRGBA{R: 255, A: 255} yellow = color.NRGBA{R: 255, G: 255, A: 255} green = color.NRGBA{G: 255, A: 255} cyan = color.NRGBA{G: 255, B: 255, A: 255} blue = color.NRGBA{B: 255, A: 255} magenta = color.NRGBA{R: 255, B: 255, A: 255} white = color.NRGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff} lightgrey = color.NRGBA{R: 0xef, G: 0xef, B: 0xef, A: 0xff} ) var rainbowColors = []color.NRGBA{red, yellow, green, cyan, blue, magenta, red} func drawRainbow(gtx layout.Context) layout.Dimensions { w := gtx.Constraints.Max.X h := gtx.Px(unit.Dp(20)) stepCount := len(rainbowColors) stepWidth := float32(w / (stepCount - 1)) offsetX := float32(0) color1 := rainbowColors[0] for _, color2 := range rainbowColors[1:] { stack := op.Save(gtx.Ops) paint.LinearGradientOp{ Stop1: f32.Point{offsetX, 0}, Stop2: f32.Point{offsetX + stepWidth, 0}, Color1: color1, Color2: color2, }.Add(gtx.Ops) dr := image.Rectangle{Min: image.Point{X: int(offsetX), Y: 0}, Max: image.Point{X: int(offsetX + stepWidth), Y: h}} clip.Rect(dr).Add(gtx.Ops) paint.PaintOp{}.Add(gtx.Ops) stack.Load() color1 = color2 offsetX += stepWidth } return layout.Dimensions{Size: image.Point{X: w, Y: h}} }
colorpicker/draw.go
0.639736
0.465934
draw.go
starcoder
package paint import ( "image/color" "math" "github.com/tomowarkar/biome" ) // Canvas : type Canvas interface { Fill(x, y int, obj int) Line(x1, y1, x2, y2 int, px float64, obj int) Square(x, y int, px, deg float64, obj int) Triangle(x, y int, px, deg float64, obj int) Dot(x, y int, px float64, obj int) Data() []int ToPng(filename string, palette []color.Color) } type canvas struct { b biome.Biome } type point struct { x, y, r float64 } type line struct { begin, end point } // NewCanv : func NewCanv(width, height int) Canvas { return &canvas{ biome.Biome{ H: height, W: width, Data: make([]int, height*width), }, } } func (c *canvas) Dot(x, y int, px float64, obj int) { dot := point{float64(x), float64(y), px} c.dot(dot, obj) } func (c *canvas) Triangle(x, y int, px, deg float64, obj int) { dot1 := point{ float64(x) + px*math.Cos((90+deg)/180*math.Pi), float64(y) - px*math.Sin((90+deg)/180*math.Pi), 1, } dot2 := point{ float64(x) + px*math.Cos((210+deg)/180*math.Pi), float64(y) - px*math.Sin((210+deg)/180*math.Pi), 1, } dot3 := point{ float64(x) + px*math.Cos((330+deg)/180*math.Pi), float64(y) - px*math.Sin((330+deg)/180*math.Pi), 1, } c.line(line{dot1, dot2}, obj) c.line(line{dot2, dot3}, obj) c.line(line{dot3, dot1}, obj) } func (c *canvas) Square(x, y int, px, deg float64, obj int) { arm := px * math.Sqrt2 dot1 := point{ float64(x) + arm*math.Cos((45+deg)/180*math.Pi), float64(y) - arm*math.Sin((45+deg)/180*math.Pi), 1, } dot2 := point{ float64(x) + arm*math.Cos((135+deg)/180*math.Pi), float64(y) - arm*math.Sin((135+deg)/180*math.Pi), 1, } dot3 := point{ float64(x) + arm*math.Cos((225+deg)/180*math.Pi), float64(y) - arm*math.Sin((225+deg)/180*math.Pi), 1, } dot4 := point{ float64(x) + arm*math.Cos((315+deg)/180*math.Pi), float64(y) - arm*math.Sin((315+deg)/180*math.Pi), 1, } c.line(line{dot1, dot2}, obj) c.line(line{dot2, dot3}, obj) c.line(line{dot3, dot4}, obj) c.line(line{dot4, dot1}, obj) } func (c *canvas) Data() []int { var data []int for i := range c.b.Data 
{ data = append(data, c.b.Data[i]) } return data } func (c *canvas) Line(x1, y1, x2, y2 int, px float64, obj int) { line := line{ point{float64(x1), float64(y1), px}, point{float64(x2), float64(y2), px}, } c.line(line, obj) } func (c *canvas) Fill(x, y int, obj int) { c.fill(x, y, obj) } func (c *canvas) ToPng(filename string, palette []color.Color) { img := biome.Slice2Image(c.b, 1, palette) biome.ToPng(filename, img) } func (c canvas) arc(x, y, r, sAngle, eAngle float64) { // TODO 曲線 } func (c canvas) fill(x, y, obj int) { target := c.b.Data[y*c.b.W+x] dx := [4]int{1, 0, -1, 0} dy := [4]int{0, 1, 0, -1} var now [2]int queue := [][2]int{[2]int{x, y}} for len(queue) > 0 { now, queue = queue[0], queue[1:] for i := 0; i < 4; i++ { xx, yy := now[0]+dx[i], now[1]+dy[i] if xx < 0 || yy < 0 || xx > c.b.W-1 || yy > c.b.H-1 { continue } if c.b.Data[yy*c.b.W+xx] == target { queue = append(queue, [2]int{xx, yy}) c.b.Data[yy*c.b.W+xx] = obj } } } } func distL(x, y int, l line) bool { var xx, yy = float64(x), float64(y) var mx, my = (l.begin.x + l.end.x) / 2, (l.begin.y + l.end.y) / 2 var dx, dy = l.end.x - l.begin.x, l.end.y - l.begin.y var b1, b2 = -dy / dx, dx / dy var c1, c2 = -(b1*mx + my), -(b2*mx + my) r1 := biome.MaxFloat(l.begin.r, l.end.r) r2 := math.Sqrt(dx*dx+dy*dy) / 2 d1 := math.Abs(yy+b1*xx+c1) / math.Sqrt(1+b1*b1) d2 := math.Abs(yy+b2*xx+c2) / math.Sqrt(1+b2*b2) if dx == 0 && dy == 0 { return distP(x, y, l.begin) } else if dx == 0 { d1 = math.Abs(xx - mx) d2 = math.Abs(yy - my) } else if dy == 0 { d1 = math.Abs(yy - my) d2 = math.Abs(xx - mx) } if d1 < r1 && d2 < r2 { return true } return false } // TODO 全面参照ではなく、範囲を絞って描画 func (c canvas) line(l line, obj int) { for y := 0; y < c.b.H; y++ { for x := 0; x < c.b.W; x++ { if distL(x, y, l) { c.b.Data[y*c.b.W+x] = obj } } } } func distP(x, y int, p point) bool { var dx, dy = p.x - float64(x), p.y - float64(y) d := math.Sqrt(dx*dx+dy*dy) / p.r if d < 1 { return true } return false } // TODO 全面参照ではなく、範囲を絞って描画 func (c 
canvas) dot(p point, obj int) { for y := 0; y < c.b.H; y++ { for x := 0; x < c.b.W; x++ { if distP(x, y, p) { c.b.Data[y*c.b.W+x] = obj } } } }
paint/paint.go
0.542742
0.468365
paint.go
starcoder
package heisenberg import ( "fmt" "math" "math/cmplx" ) // Sparse64 is an algebriac matrix type Sparse64 struct { R, C int Matrix []map[int]complex64 } func (a Sparse64) String() string { output := "" for i := 0; i < a.R; i++ { for j := 0; j < a.C; j++ { out := a.Matrix[i] var value complex64 if out != nil { value = out[j] } output += fmt.Sprintf("%f ", value) } output += fmt.Sprintf("\n") } return output } // Vector64 is a 64 bit vector type Vector64 []complex64 func (a Vector64) String() string { output := "" for _, value := range a { output += fmt.Sprintf("%f ", value) } return output } // MachineSparse64 is a 64 bit sparse matrix machine type MachineSparse64 struct { Vector64 Qubits int } // Zero adds a zero to the matrix func (a *MachineSparse64) Zero() Qubit { qubit := Qubit(a.Qubits) a.Qubits++ zero := Vector64{1, 0} if qubit == 0 { a.Vector64 = zero return qubit } a.Vector64 = a.Tensor(zero) return qubit } // One adds a one to the matrix func (a *MachineSparse64) One() Qubit { qubit := Qubit(a.Qubits) a.Qubits++ one := Vector64{0, 1} if qubit == 0 { a.Vector64 = one return qubit } a.Vector64 = a.Tensor(one) return qubit } // Tensor product is the tensor product func (a *Sparse64) Tensor(b *Sparse64) *Sparse64 { output := make([]map[int]complex64, a.R*b.R) for x, xx := range a.Matrix { for y, yy := range b.Matrix { for i, ii := range xx { for j, jj := range yy { values := output[x*b.R+y] if values == nil { values = make(map[int]complex64) } value := ii * jj if value != 0 { values[i*b.C+j] = value } if len(values) > 0 { output[x*b.R+y] = values } } } } } return &Sparse64{ R: a.R * b.R, C: a.C * b.C, Matrix: output, } } // Multiply multiplies to matricies func (a *Sparse64) Multiply(b *Sparse64) *Sparse64 { if a.C != b.R { panic("invalid dimensions") } output := make([]map[int]complex64, a.R) for j := 0; j < b.C; j++ { for x, xx := range a.Matrix { var sum complex64 for y, value := range xx { yy := b.Matrix[y] var jj complex64 if yy != nil { jj = yy[j] } sum 
+= jj * value } values := output[x] if values == nil { values = make(map[int]complex64) } if sum != 0 { values[j] = sum } if len(values) > 0 { output[x] = values } } } return &Sparse64{ R: a.R, C: b.C, Matrix: output, } } // Transpose transposes a matrix func (a *Sparse64) Transpose() { for i := 0; i < a.R; i++ { for j := 0; j < a.C; j++ { ii := a.Matrix[i] var value complex64 if ii != nil { value = ii[j] } a.Matrix[j][i] = value } } a.R, a.C = a.C, a.R } // Copy copies a matrix` func (a *Sparse64) Copy() *Sparse64 { cp := &Sparse64{ R: a.R, C: a.C, Matrix: make([]map[int]complex64, a.R), } for a, aa := range a.Matrix { value := cp.Matrix[a] if value == nil { value = make(map[int]complex64) } for b, bb := range aa { value[b] = bb } cp.Matrix[a] = value } return cp } // Tensor product is the tensor product func (a Vector64) Tensor(b Vector64) Vector64 { output := make(Vector64, 0, len(a)*len(b)) for _, ii := range a { for _, jj := range b { output = append(output, ii*jj) } } return output } // MultiplyVector multiplies a matrix by a vector func (a *Sparse64) MultiplyVector(b Vector64) Vector64 { if a.C != len(b) { panic(fmt.Sprintf("invalid dimensions %d %d", a.C, len(b))) } output := make(Vector64, 0, a.R) for _, xx := range a.Matrix { var sum complex64 for y, value := range xx { sum += b[y] * value } output = append(output, sum) } return output } // ControlledNot controlled not gate func (a *MachineSparse64) ControlledNot(c []Qubit, t Qubit) *Sparse64 { n := a.Qubits p := &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 0: 1, }, map[int]complex64{ 1: 1, }, }, } q := p for i := 0; i < n-1; i++ { q = p.Tensor(q) } d := q.R index := make([]int64, 0) for i := 0; i < d; i++ { bits := int64(i) // Apply X apply := true for _, j := range c { if (bits>>(Qubit(n-1)-j))&1 == 0 { apply = false break } } if apply { if (bits>>(Qubit(n-1)-t))&1 == 0 { bits |= 1 << (Qubit(n-1) - t) } else { bits &= ^(1 << (Qubit(n-1) - t)) } } index = append(index, bits) } g 
:= Sparse64{ R: q.R, C: q.C, Matrix: make([]map[int]complex64, q.R), } for i, ii := range index { g.Matrix[i] = q.Matrix[int(ii)] } a.Vector64 = g.MultiplyVector(a.Vector64) return &g } // Multiply multiplies the machine by a matrix func (a *MachineSparse64) Multiply(b *Sparse64, qubits ...Qubit) { indexes := make(map[int]bool) for _, value := range qubits { indexes[int(value)] = true } identity := ISparse64() d := ISparse64() if indexes[0] { d = b.Copy() } for i := 1; i < a.Qubits; i++ { if indexes[i] { d = d.Tensor(b) continue } d = d.Tensor(identity) } a.Vector64 = d.MultiplyVector(a.Vector64) } // ISparse64 identity matrix func ISparse64() *Sparse64 { return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 0: 1, }, map[int]complex64{ 1: 1, }, }, } } // I multiply by identity func (a *MachineSparse64) I(qubits ...Qubit) *MachineSparse64 { a.Multiply(ISparse64(), qubits...) return a } // HSparse64 Hadamard matrix func HSparse64() *Sparse64 { v := complex(1/math.Sqrt2, 0) return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 0: complex64(v), 1: complex64(v), }, map[int]complex64{ 0: complex64(v), 1: complex64(-v), }, }, } } // H multiply by Hadamard gate func (a *MachineSparse64) H(qubits ...Qubit) *MachineSparse64 { a.Multiply(HSparse64(), qubits...) return a } // XSparse64 Pauli X matrix func XSparse64() *Sparse64 { return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 1: 1, }, map[int]complex64{ 0: 1, }, }, } } // X multiply by Pauli X matrix func (a *MachineSparse64) X(qubits ...Qubit) *MachineSparse64 { a.Multiply(XSparse64(), qubits...) return a } // YSparse64 Pauli Y matrix func YSparse64() *Sparse64 { return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 1: -1i, }, map[int]complex64{ 0: 1i, }, }, } } // Y multiply by Pauli Y matrix func (a *MachineSparse64) Y(qubits ...Qubit) *MachineSparse64 { a.Multiply(YSparse64(), qubits...) 
return a } // ZSparse64 Pauli Z matrix func ZSparse64() *Sparse64 { return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 0: 1, }, map[int]complex64{ 1: -1, }, }, } } // Z multiply by Pauli Z matrix func (a *MachineSparse64) Z(qubits ...Qubit) *MachineSparse64 { a.Multiply(ZSparse64(), qubits...) return a } // SSparse64 phase gate func SSparse64() *Sparse64 { return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 0: 1, }, map[int]complex64{ 1: 1i, }, }, } } // S multiply by phase matrix func (a *MachineSparse64) S(qubits ...Qubit) *MachineSparse64 { a.Multiply(SSparse64(), qubits...) return a } // TSparse64 T gate func TSparse64() *Sparse64 { return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 0: 1, }, map[int]complex64{ 1: complex64(cmplx.Exp(1i * math.Pi / 4)), }, }, } } // T multiply by T matrix func (a *MachineSparse64) T(qubits ...Qubit) *MachineSparse64 { a.Multiply(TSparse64(), qubits...) return a } // USparse64 U gate func USparse64(theta, phi, lambda float64) *Sparse64 { v := complex(theta/2, 0) return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 0: complex64(cmplx.Cos(v)), 1: complex64(-1 * cmplx.Exp(complex(0, lambda)) * cmplx.Sin(v)), }, map[int]complex64{ 0: complex64(cmplx.Exp(complex(0, phi)) * cmplx.Sin(v)), 1: complex64(cmplx.Exp(complex(0, (phi+lambda))) * cmplx.Cos(v)), }, }, } } // U multiply by U matrix func (a *MachineSparse64) U(theta, phi, lambda float64, qubits ...Qubit) *MachineSparse64 { a.Multiply(USparse64(theta, phi, lambda), qubits...) 
return a } // RXSparse64 x rotation matrix func RXSparse64(theta complex128) *Sparse64 { return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 0: complex64(cmplx.Cos(complex128(theta))), 1: -1i * complex64(cmplx.Sin(complex128(theta))), }, map[int]complex64{ 0: -1i * complex64(cmplx.Sin(complex128(theta))), 1: complex64(cmplx.Cos(complex128(theta))), }, }, } } // RX rotate X gate func (a *MachineSparse64) RX(theta float64, qubits ...Qubit) *MachineSparse64 { a.Multiply(RXSparse64(complex(theta/2, 0)), qubits...) return a } // RYSparse64 y rotation matrix func RYSparse64(theta complex128) *Sparse64 { return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 0: complex64(cmplx.Cos(complex128(theta))), 1: -1 * complex64(cmplx.Sin(complex128(theta))), }, map[int]complex64{ 0: complex64(cmplx.Sin(complex128(theta))), 1: complex64(cmplx.Cos(complex128(theta))), }, }, } } // RY rotate Y gate func (a *MachineSparse64) RY(theta float64, qubits ...Qubit) *MachineSparse64 { a.Multiply(RYSparse64(complex(theta/2, 0)), qubits...) return a } // RZSparse64 z rotation matrix func RZSparse64(theta complex128) *Sparse64 { return &Sparse64{ R: 2, C: 2, Matrix: []map[int]complex64{ map[int]complex64{ 0: complex64(cmplx.Exp(-1 * complex128(theta))), }, map[int]complex64{ 1: complex64(cmplx.Exp(complex128(theta))), }, }, } } // RZ rotate Z gate func (a *MachineSparse64) RZ(theta float64, qubits ...Qubit) *MachineSparse64 { a.Multiply(RZSparse64(complex(theta/2, 0)), qubits...) 
return a } // Swap swaps qubits` func (a *MachineSparse64) Swap(qubits ...Qubit) *MachineSparse64 { length := len(qubits) for i := 0; i < length/2; i++ { c, t := qubits[i], qubits[(length-1)-i] a.ControlledNot([]Qubit{c}, t) a.ControlledNot([]Qubit{t}, c) a.ControlledNot([]Qubit{c}, t) } return a } // Sparse128 is an algebriac matrix type Sparse128 struct { R, C int Matrix []map[int]complex128 } func (a Sparse128) String() string { output := "" for i := 0; i < a.R; i++ { for j := 0; j < a.C; j++ { out := a.Matrix[i] var value complex128 if out != nil { value = out[j] } output += fmt.Sprintf("%f ", value) } output += fmt.Sprintf("\n") } return output } // MachineSparse128 is a 128 bit sparse matrix machine type MachineSparse128 struct { Vector128 Qubits int } // Zero adds a zero to the matrix func (a *MachineSparse128) Zero() Qubit { qubit := Qubit(a.Qubits) a.Qubits++ zero := Vector128{1, 0} if qubit == 0 { a.Vector128 = zero return qubit } a.Vector128 = a.Tensor(zero) return qubit } // One adds a one to the matrix func (a *MachineSparse128) One() Qubit { qubit := Qubit(a.Qubits) a.Qubits++ one := Vector128{0, 1} if qubit == 0 { a.Vector128 = one return qubit } a.Vector128 = a.Tensor(one) return qubit } // Tensor product is the tensor product func (a *Sparse128) Tensor(b *Sparse128) *Sparse128 { output := make([]map[int]complex128, a.R*b.R) for x, xx := range a.Matrix { for y, yy := range b.Matrix { for i, ii := range xx { for j, jj := range yy { values := output[x*b.R+y] if values == nil { values = make(map[int]complex128) } value := ii * jj if value != 0 { values[i*b.C+j] = value } if len(values) > 0 { output[x*b.R+y] = values } } } } } return &Sparse128{ R: a.R * b.R, C: a.C * b.C, Matrix: output, } } // Multiply multiplies to matricies func (a *Sparse128) Multiply(b *Sparse128) *Sparse128 { if a.C != b.R { panic("invalid dimensions") } output := make([]map[int]complex128, a.R) for j := 0; j < b.C; j++ { for x, xx := range a.Matrix { var sum complex128 for y, 
value := range xx { yy := b.Matrix[y] var jj complex128 if yy != nil { jj = yy[j] } sum += jj * value } values := output[x] if values == nil { values = make(map[int]complex128) } if sum != 0 { values[j] = sum } if len(values) > 0 { output[x] = values } } } return &Sparse128{ R: a.R, C: b.C, Matrix: output, } } // Transpose transposes a matrix func (a *Sparse128) Transpose() { for i := 0; i < a.R; i++ { for j := 0; j < a.C; j++ { ii := a.Matrix[i] var value complex128 if ii != nil { value = ii[j] } a.Matrix[j][i] = value } } a.R, a.C = a.C, a.R } // Copy copies a matrix` func (a *Sparse128) Copy() *Sparse128 { cp := &Sparse128{ R: a.R, C: a.C, Matrix: make([]map[int]complex128, a.R), } for a, aa := range a.Matrix { value := cp.Matrix[a] if value == nil { value = make(map[int]complex128) } for b, bb := range aa { value[b] = bb } cp.Matrix[a] = value } return cp } // MultiplyVector multiplies a matrix by a vector func (a *Sparse128) MultiplyVector(b Vector128) Vector128 { if a.C != len(b) { panic(fmt.Sprintf("invalid dimensions %d %d", a.C, len(b))) } output := make(Vector128, 0, a.R) for _, xx := range a.Matrix { var sum complex128 for y, value := range xx { sum += b[y] * value } output = append(output, sum) } return output } // ControlledNot controlled not gate func (a *MachineSparse128) ControlledNot(c []Qubit, t Qubit) *Sparse128 { n := a.Qubits p := &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 0: 1, }, map[int]complex128{ 1: 1, }, }, } q := p for i := 0; i < n-1; i++ { q = p.Tensor(q) } d := q.R index := make([]int64, 0) for i := 0; i < d; i++ { bits := int64(i) // Apply X apply := true for _, j := range c { if (bits>>(Qubit(n-1)-j))&1 == 0 { apply = false break } } if apply { if (bits>>(Qubit(n-1)-t))&1 == 0 { bits |= 1 << (Qubit(n-1) - t) } else { bits &= ^(1 << (Qubit(n-1) - t)) } } index = append(index, bits) } g := Sparse128{ R: q.R, C: q.C, Matrix: make([]map[int]complex128, q.R), } for i, ii := range index { g.Matrix[i] = 
q.Matrix[int(ii)] } a.Vector128 = g.MultiplyVector(a.Vector128) return &g } // Multiply multiplies the machine by a matrix func (a *MachineSparse128) Multiply(b *Sparse128, qubits ...Qubit) { indexes := make(map[int]bool) for _, value := range qubits { indexes[int(value)] = true } identity := ISparse128() d := ISparse128() if indexes[0] { d = b.Copy() } for i := 1; i < a.Qubits; i++ { if indexes[i] { d = d.Tensor(b) continue } d = d.Tensor(identity) } a.Vector128 = d.MultiplyVector(a.Vector128) } // ISparse128 identity matrix func ISparse128() *Sparse128 { return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 0: 1, }, map[int]complex128{ 1: 1, }, }, } } // I multiply by identity func (a *MachineSparse128) I(qubits ...Qubit) *MachineSparse128 { a.Multiply(ISparse128(), qubits...) return a } // HSparse128 Hadamard matrix func HSparse128() *Sparse128 { v := complex(1/math.Sqrt2, 0) return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 0: v, 1: v, }, map[int]complex128{ 0: v, 1: -v, }, }, } } // H multiply by Hadamard gate func (a *MachineSparse128) H(qubits ...Qubit) *MachineSparse128 { a.Multiply(HSparse128(), qubits...) return a } // XSparse128 Pauli X matrix func XSparse128() *Sparse128 { return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 1: 1, }, map[int]complex128{ 0: 1, }, }, } } // X multiply by Pauli X matrix func (a *MachineSparse128) X(qubits ...Qubit) *MachineSparse128 { a.Multiply(XSparse128(), qubits...) return a } // YSparse128 Pauli Y matrix func YSparse128() *Sparse128 { return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 1: -1i, }, map[int]complex128{ 0: 1i, }, }, } } // Y multiply by Pauli Y matrix func (a *MachineSparse128) Y(qubits ...Qubit) *MachineSparse128 { a.Multiply(YSparse128(), qubits...) 
return a } // ZSparse128 Pauli Z matrix func ZSparse128() *Sparse128 { return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 0: 1, }, map[int]complex128{ 1: -1, }, }, } } // Z multiply by Pauli Z matrix func (a *MachineSparse128) Z(qubits ...Qubit) *MachineSparse128 { a.Multiply(ZSparse128(), qubits...) return a } // SSparse128 phase gate func SSparse128() *Sparse128 { return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 0: 1, }, map[int]complex128{ 1: 1i, }, }, } } // S multiply by phase matrix func (a *MachineSparse128) S(qubits ...Qubit) *MachineSparse128 { a.Multiply(SSparse128(), qubits...) return a } // TSparse128 T gate func TSparse128() *Sparse128 { return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 0: 1, }, map[int]complex128{ 1: cmplx.Exp(1i * math.Pi / 4), }, }, } } // T multiply by T matrix func (a *MachineSparse128) T(qubits ...Qubit) *MachineSparse128 { a.Multiply(TSparse128(), qubits...) return a } // USparse128 U gate func USparse128(theta, phi, lambda float64) *Sparse128 { v := complex(theta/2, 0) return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 0: cmplx.Cos(v), 1: -1 * cmplx.Exp(complex(0, lambda)) * cmplx.Sin(v), }, map[int]complex128{ 0: cmplx.Exp(complex(0, phi)) * cmplx.Sin(v), 1: cmplx.Exp(complex(0, (phi+lambda))) * cmplx.Cos(v), }, }, } } // U multiply by U matrix func (a *MachineSparse128) U(theta, phi, lambda float64, qubits ...Qubit) *MachineSparse128 { a.Multiply(USparse128(theta, phi, lambda), qubits...) 
return a } // RXSparse128 x rotation matrix func RXSparse128(theta complex128) *Sparse128 { return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 0: cmplx.Cos(theta), 1: -1i * cmplx.Sin(theta), }, map[int]complex128{ 0: -1i * cmplx.Sin(theta), 1: cmplx.Cos(theta), }, }, } } // RX rotate X gate func (a *MachineSparse128) RX(theta float64, qubits ...Qubit) *MachineSparse128 { a.Multiply(RXSparse128(complex(theta/2, 0)), qubits...) return a } // RYSparse128 y rotation matrix func RYSparse128(theta complex128) *Sparse128 { return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 0: cmplx.Cos(theta), 1: -1 * cmplx.Sin(theta), }, map[int]complex128{ 0: cmplx.Sin(theta), 1: cmplx.Cos(theta), }, }, } } // RY rotate Y gate func (a *MachineSparse128) RY(theta float64, qubits ...Qubit) *MachineSparse128 { a.Multiply(RYSparse128(complex(theta/2, 0)), qubits...) return a } // RZSparse128 z rotation matrix func RZSparse128(theta complex128) *Sparse128 { return &Sparse128{ R: 2, C: 2, Matrix: []map[int]complex128{ map[int]complex128{ 0: cmplx.Exp(-1 * theta), }, map[int]complex128{ 1: cmplx.Exp(theta), }, }, } } // RZ rotate Z gate func (a *MachineSparse128) RZ(theta float64, qubits ...Qubit) *MachineSparse128 { a.Multiply(RZSparse128(complex(theta/2, 0)), qubits...) return a } // Swap swaps qubits` func (a *MachineSparse128) Swap(qubits ...Qubit) *MachineSparse128 { length := len(qubits) for i := 0; i < length/2; i++ { c, t := qubits[i], qubits[(length-1)-i] a.ControlledNot([]Qubit{c}, t) a.ControlledNot([]Qubit{t}, c) a.ControlledNot([]Qubit{c}, t) } return a }
sparse.go
0.693992
0.536374
sparse.go
starcoder
// Package tracelog : logcalls.go provides formatting functions. package tracelog import ( "fmt" ) //** STARTED AND COMPLETED // Started uses the Serialize destination and adds a Started tag to the log line func Started(title string, functionName string) { logger.Trace.Output(2, fmt.Sprintf("%s : %s : Started\n", title, functionName)) } // Startedf uses the Serialize destination and writes a Started tag to the log line func Startedf(title string, functionName string, format string, a ...interface{}) { logger.Trace.Output(2, fmt.Sprintf("%s : %s : Started : %s\n", title, functionName, fmt.Sprintf(format, a...))) } // Completed uses the Serialize destination and writes a Completed tag to the log line func Completed(title string, functionName string) { logger.Trace.Output(2, fmt.Sprintf("%s : %s : Completed\n", title, functionName)) } // Completedf uses the Serialize destination and writes a Completed tag to the log line func Completedf(title string, functionName string, format string, a ...interface{}) { logger.Trace.Output(2, fmt.Sprintf("%s : %s : Completed : %s\n", title, functionName, fmt.Sprintf(format, a...))) } // CompletedError uses the Error destination and writes a Completed tag to the log line func CompletedError(err error, title string, functionName string) { logger.Error.Output(2, fmt.Sprintf("%s : %s : Completed : ERROR : %s\n", title, functionName, err)) } // CompletedErrorf uses the Error destination and writes a Completed tag to the log line func CompletedErrorf(err error, title string, functionName string, format string, a ...interface{}) { logger.Error.Output(2, fmt.Sprintf("%s : %s : Completed : ERROR : %s : %s\n", title, functionName, fmt.Sprintf(format, a...), err)) } //** TRACE // Trace writes to the Trace destination func Trace(title string, functionName string, format string, a ...interface{}) { logger.Trace.Output(2, fmt.Sprintf("%s : %s : Info : %s\n", title, functionName, fmt.Sprintf(format, a...))) } //** INFO // Info writes to the Info 
destination func Info(title string, functionName string, format string, a ...interface{}) { logger.Info.Output(2, fmt.Sprintf("%s : %s : Info : %s\n", title, functionName, fmt.Sprintf(format, a...))) } //** WARNING // Warning writes to the Warning destination func Warning(title string, functionName string, format string, a ...interface{}) { logger.Warning.Output(2, fmt.Sprintf("%s : %s : Info : %s\n", title, functionName, fmt.Sprintf(format, a...))) } //** ERROR // Error writes to the Error destination and accepts an err func Error(err error, title string, functionName string) { logger.Error.Output(2, fmt.Sprintf("%s : %s : ERROR : %s\n", title, functionName, err)) } // Errorf writes to the Error destination and accepts an err func Errorf(err error, title string, functionName string, format string, a ...interface{}) { logger.Error.Output(2, fmt.Sprintf("%s : %s : ERROR : %s : %s\n", title, functionName, fmt.Sprintf(format, a...), err)) } //** ALERT // Alert write to the Error destination and sends email alert func Alert(subject string, title string, functionName string, format string, a ...interface{}) { message := fmt.Sprintf("%s : %s : ALERT : %s\n", title, functionName, fmt.Sprintf(format, a...)) logger.Error.Output(2, message) SendEmailException(subject, message) } // CompletedAlert write to the Error destination, writes a Completed tag to the log line and sends email alert func CompletedAlert(subject string, title string, functionName string, format string, a ...interface{}) { message := fmt.Sprintf("%s : %s : Completed : ALERT : %s\n", title, functionName, fmt.Sprintf(format, a...)) logger.Error.Output(2, message) SendEmailException(subject, message) }
vendor/github.com/goinggo/tracelog/logcalls.go
0.529993
0.467332
logcalls.go
starcoder
package main import ( "fmt" "io" "text/template" ) const checkNativeSelectable = `func checkNativeSelectable(t *Dense, axis int, dt Dtype) error { if !t.IsNativelyAccessible() { return errors.New("Cannot select on non-natively accessible data") } if axis >= t.Shape().Dims() && !(t.IsScalar() && axis == 0) { return errors.Errorf("Cannot select on axis %d. Shape is %v", axis, t.Shape()) } if t.F() || t.RequiresIterator() { return errors.Errorf("Not yet implemented: native select for colmajor or unpacked matrices") } if t.Dtype() != dt { return errors.Errorf("Native selection only works on %v. Got %v", dt, t.Dtype()) } return nil } ` const nativeSelectRaw = `// Select{{short .}} creates a slice of flat data types. See Example of NativeSelectF64. func Select{{short .}}(t *Dense, axis int) (retVal [][]{{asType .}}, err error) { if err := checkNativeSelectable(t, axis, {{reflectKind .}}); err != nil { return nil, err } switch t.Shape().Dims() { case 0, 1: retVal = make([][]{{asType .}}, 1) retVal[0] = t.{{sliceOf .}} case 2: if axis == 0 { return Matrix{{short .}}(t) } fallthrough default: // size := t.Shape()[axis] data := t.{{sliceOf .}} stride := t.Strides()[axis] upper := ProdInts(t.Shape()[:axis+1]) retVal = make([][]{{asType .}}, 0, upper) for i, r := 0, 0; r < upper; i += stride { s := make([]{{asType .}}, 0) hdr := (*reflect.SliceHeader)(unsafe.Pointer(&s)) hdr.Data = uintptr(unsafe.Pointer(&data[i])) hdr.Len = stride hdr.Cap = stride retVal = append(retVal, s) r++ } return retVal, nil } return } ` const nativeSelectTestRaw = `func TestSelect{{short .}}(t *testing.T) { assert := assert.New(t) var T *Dense var err error var x [][]{{asType .}} T = New(Of({{reflectKind .}}), WithShape(2, 3, 4, 5), ) if x, err = Select{{short .}}(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(20, len(x[0])) T = New(Of({{reflectKind .}}), WithShape(2, 3, 4, 5), ) if x, err = Select{{short .}}(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) 
assert.Equal(60, len(x[0])) T = New(Of({{reflectKind .}}), WithShape(2, 3, 4, 5), ) if x, err = Select{{short .}}(T, 3); err != nil { t.Fatal(err) } assert.Equal(120, len(x)) assert.Equal(1, len(x[0])) T = New(Of({{reflectKind .}}), WithShape(2, 3), ) if x, err = Select{{short .}}(T, 0); err != nil { t.Fatal(err) } assert.Equal(2, len(x)) assert.Equal(3, len(x[0])) T = New(Of({{reflectKind .}}), WithShape(2, 3), ) if x, err = Select{{short .}}(T, 1); err != nil { t.Fatal(err) } assert.Equal(6, len(x)) assert.Equal(1, len(x[0])) T = New(FromScalar({{if eq .String "bool" -}}false{{else if eq .String "string" -}}""{{else -}}{{asType .}}(0) {{end -}} )) if x, err = Select{{short .}}(T, 0); err != nil { t.Fatal(err) } assert.Equal(1, len(x)) assert.Equal(1, len(x[0])) if _, err = Select{{short .}}(T, 10); err == nil{ t.Fatal("Expected errors") } } ` var ( NativeSelect *template.Template NativeSelectTest *template.Template ) func init() { NativeSelect = template.Must(template.New("NativeSelect").Funcs(funcs).Parse(nativeSelectRaw)) NativeSelectTest = template.Must(template.New("NativeSelectTest").Funcs(funcs).Parse(nativeSelectTestRaw)) } func generateNativeSelect(f io.Writer, ak Kinds) { fmt.Fprintf(f, importUnqualifiedTensor) fmt.Fprintf(f, "%v\n", checkNativeSelectable) ks := filter(ak.Kinds, isSpecialized) for _, k := range ks { fmt.Fprintf(f, "/* Native Select for %v */\n\n", k) NativeSelect.Execute(f, k) fmt.Fprint(f, "\n\n") } } func generateNativeSelectTests(f io.Writer, ak Kinds) { fmt.Fprintf(f, importUnqualifiedTensor) ks := filter(ak.Kinds, isSpecialized) for _, k := range ks { NativeSelectTest.Execute(f, k) fmt.Fprint(f, "\n\n") } }
genlib2/native_select.go
0.697403
0.523786
native_select.go
starcoder
package golarm type period int type metric int type state int type procType int var ( states = map[string]float64{ "S": 1.0, "R": 2.0, "T": 3.0, "Z": 4.0, "D": 5.0, } ) const ( freeMetric metric = iota + 1 usedMetric timeMetric statusMetric ) // Linux process states to be used with status alarms const ( Sleeping state = iota + 1 Running Stopped Zombie Idle Unknown ) type load struct { period period } type proc struct { state state pid uint } type stats struct { period period proc proc metric metric } // Load average can be calculated for the last one minute, five minutes and fifteen minutes respectively. Load average is an indication of whether the system resources (mainly the CPU) are adequately available for the processes (system load) that are running, runnable or in uninterruptible sleep states during the previous n minutes. const ( OneMinPeriod period = iota + 1 FiveMinPeriod FifteenMinPeriod ) func getLoadAverage(p period, manager sigarMetrics, percentage bool) float64 { average, err := manager.GetLoadAverage() value := 0.0 if err != nil { return value } switch p { case OneMinPeriod: value = float64(average.One) case FiveMinPeriod: value = float64(average.Five) case FifteenMinPeriod: value = float64(average.Fifteen) } if percentage { value *= 10 } return value } func getPidState(pid uint, manager sigarMetrics) float64 { value, err := manager.getProcState(int(pid)) if err != nil { return 6.0 } return states[string(value.State)] } func getPidMemory(pid uint, manager sigarMetrics, percentage bool) float64 { memory, err := manager.getProcMem(int(pid)) if err != nil { return 0.0 } value := float64(memory.Resident / 1048576) if percentage { return 100.0 * (value / (getTotalMemory(manager) / 1048576)) } return value } // get running time for PID in minutes func getPidTime(pid uint, manager sigarMetrics) float64 { value, err := manager.getProcTime(int(pid)) if err != nil { return 0.0 } return float64(value.Total / 1000) } func getTotalMemory(manager sigarMetrics) 
float64 { mem, err := manager.GetMem() if err != nil { return 0.0 } return float64(mem.Total) } func getTotalSwap(manager sigarMetrics) float64 { swap, err := manager.GetSwap() if err != nil { return 0.0 } return float64(swap.Total) } func getUsedSwap(manager sigarMetrics) float64 { swap, err := manager.GetSwap() if err != nil { return 0.0 } return float64(swap.Used) } func getFreeSwap(manager sigarMetrics) float64 { swap, err := manager.GetSwap() if err != nil { return 0.0 } return float64(swap.Free) } func getActualUsedMemory(manager sigarMetrics, percentage bool) float64 { mem, err := manager.GetMem() if err != nil { return 0.0 } value := float64(mem.ActualUsed) / 1048576 if percentage { return 100.0 * (value / (getTotalMemory(manager) / 1048576)) } return value } func getActualFreeMemory(manager sigarMetrics, percentage bool) float64 { mem, err := manager.GetMem() if err != nil { return 0.0 } value := float64(mem.ActualFree) / 1048576 if percentage { return 100.0 * (value / (getTotalMemory(manager) / 1048576)) } return value } func getActualFreeSwap(manager sigarMetrics, percentage bool) float64 { value := float64(getFreeSwap(manager)) / 1048576 if percentage { return 100.0 * (value / getTotalSwap(manager) / 1048576) } return value } func getActualUsedSwap(manager sigarMetrics, percentage bool) float64 { value := float64(getUsedSwap(manager)) / 1048576 if percentage { return 100.0 * (value / (getTotalSwap(manager) / 1048576)) } return value } func getUptime(manager sigarMetrics) float64 { value, err := manager.getUpTime() if err != nil { return 0.0 } return value.Length }
metrics.go
0.779154
0.465145
metrics.go
starcoder
package meta import ( "fmt" "math" "sort" "strconv" "strings" ) const ( RadiansToDegrees = 57.2957795 DegreesToKm = math.Pi * 6371.0 / 180.0 RadiansToKm = RadiansToDegrees * DegreesToKm ) const ( placenameName = iota placenameLatitude placenameLongitude placenameLevel placenameLast ) // Placename is used to describe distances and azimuths to known places. type Placename struct { Name string Latitude float64 Longitude float64 Level int latitude string longitude string } // Distance returns the distance in kilometres from the given latitude and longitude to the Placename. func (p Placename) Distance(lat, lon float64) float64 { if (p.Latitude == lat) && (p.Longitude == lon) { return 0.0 } esq := (1.0 - 1.0/298.25) * (1.0 - 1.0/298.25) alat3 := math.Atan(math.Tan(p.Latitude/RadiansToDegrees)*esq) * RadiansToDegrees alat4 := math.Atan(math.Tan(lat/RadiansToDegrees)*esq) * RadiansToDegrees rlat1 := alat3 / RadiansToDegrees rlat2 := alat4 / RadiansToDegrees rdlon := (lon - p.Longitude) / RadiansToDegrees clat1 := math.Cos(rlat1) clat2 := math.Cos(rlat2) slat1 := math.Sin(rlat1) slat2 := math.Sin(rlat2) cdlon := math.Cos(rdlon) cdel := slat1*slat2 + clat1*clat2*cdlon switch { case cdel > 1.0: cdel = 1.0 case cdel < -1.0: cdel = -1.0 } return RadiansToKm * math.Acos(cdel) } // Azimuth returns the azimuth in degrees from the given latitude and longitude to the Placename. 
func (p Placename) Azimuth(lat, lon float64) float64 { if (p.Latitude == lat) && (p.Longitude == lon) { return 0.0 } esq := (1.0 - 1.0/298.25) * (1.0 - 1.0/298.25) alat3 := math.Atan(math.Tan(p.Latitude/RadiansToDegrees)*esq) * RadiansToDegrees alat4 := math.Atan(math.Tan(lat/RadiansToDegrees)*esq) * RadiansToDegrees rlat1 := alat3 / RadiansToDegrees rlat2 := alat4 / RadiansToDegrees rdlon := (lon - p.Longitude) / RadiansToDegrees clat1 := math.Cos(rlat1) clat2 := math.Cos(rlat2) slat1 := math.Sin(rlat1) slat2 := math.Sin(rlat2) cdlon := math.Cos(rdlon) sdlon := math.Sin(rdlon) yazi := sdlon * clat2 xazi := clat1*slat2 - slat1*clat2*cdlon azi := RadiansToDegrees * math.Atan2(yazi, xazi) if azi < 0.0 { azi += 360.0 } return azi } // BackAzimuth returns the back-azimuth in degrees from the given latitude and longitude to the Placename. func (p Placename) BackAzimuth(lat, lon float64) float64 { if (p.Latitude == lat) && (p.Longitude == lon) { return 0.0 } esq := (1.0 - 1.0/298.25) * (1.0 - 1.0/298.25) alat3 := math.Atan(math.Tan(p.Latitude/RadiansToDegrees)*esq) * RadiansToDegrees alat4 := math.Atan(math.Tan(lat/RadiansToDegrees)*esq) * RadiansToDegrees rlat1 := alat3 / RadiansToDegrees rlat2 := alat4 / RadiansToDegrees rdlon := (lon - p.Longitude) / RadiansToDegrees clat1 := math.Cos(rlat1) clat2 := math.Cos(rlat2) slat1 := math.Sin(rlat1) slat2 := math.Sin(rlat2) cdlon := math.Cos(rdlon) sdlon := math.Sin(rdlon) ybaz := -sdlon * clat1 xbaz := clat2*slat1 - slat2*clat1*cdlon baz := RadiansToDegrees * math.Atan2(ybaz, xbaz) if baz < 0.0 { baz += 360.0 } return baz } // Compass returns a text representation of the azimuth from the given latitude and longitude to the Placename. 
func (p Placename) Compass(lat, lon float64) string {
	// Offset by half a sector (22.5 degrees) so each 45 degree bucket is
	// centred on its compass point, then normalise into [0, 360).
	azimuth := p.Azimuth(lat, lon) + 22.5
	for azimuth < 0.0 {
		azimuth += 360.0
	}
	for azimuth >= 360.0 {
		azimuth -= 360.0
	}

	switch int(math.Floor(azimuth / 45.0)) {
	case 0:
		return "north"
	case 1:
		return "north-east"
	case 2:
		return "east"
	case 3:
		return "south-east"
	case 4:
		return "south"
	case 5:
		return "south-west"
	case 6:
		return "west"
	default:
		return "north-west"
	}
}

// PlacenameList is a slice of Placename values, sortable by
// case-insensitive name.
type PlacenameList []Placename

func (p PlacenameList) Len() int      { return len(p) }
func (p PlacenameList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p PlacenameList) Less(i, j int) bool {
	return strings.ToLower(p[i].Name) < strings.ToLower(p[j].Name)
}

// encode renders the list as rows of strings, header first. The cached
// string forms of the coordinates are written so numbers round-trip
// unchanged.
func (p PlacenameList) encode() [][]string {
	data := [][]string{{
		"Name",
		"Latitude",
		"Longitude",
		"Level",
	}}

	for _, v := range p {
		data = append(data, []string{
			strings.TrimSpace(v.Name),
			strings.TrimSpace(v.latitude),
			strings.TrimSpace(v.longitude),
			strconv.Itoa(v.Level),
		})
	}

	return data
}

// decode parses rows of strings (header first) into the receiver. A
// header-only or empty input leaves the receiver unchanged.
func (p *PlacenameList) decode(data [][]string) error {
	var placenames []Placename

	if len(data) > 1 {
		for _, d := range data[1:] {
			if len(d) != placenameLast {
				return fmt.Errorf("incorrect number of placename fields")
			}

			latitude, err := strconv.ParseFloat(d[placenameLatitude], 64)
			if err != nil {
				return err
			}
			longitude, err := strconv.ParseFloat(d[placenameLongitude], 64)
			if err != nil {
				return err
			}
			level, err := strconv.Atoi(d[placenameLevel])
			if err != nil {
				return err
			}

			placenames = append(placenames, Placename{
				Name:      strings.TrimSpace(d[placenameName]),
				Latitude:  latitude,
				Longitude: longitude,
				Level:     level,

				// Keep the raw strings so encode can reproduce them.
				latitude:  strings.TrimSpace(d[placenameLatitude]),
				longitude: strings.TrimSpace(d[placenameLongitude]),
			})
		}

		*p = PlacenameList(placenames)
	}

	return nil
}

// Closest returns the Placename which is the closest to the given latitude and longitude taking into
// account the Placename level. The level is used to avoid small places taking precedence over larger
// places at longer distances.
// Currently level three addresses will be used for distances within 20 km,
// level two within 100 km, level one within 500km, and level zero has no distance threshold.
func (p PlacenameList) Closest(lat, lon float64) (Placename, bool) {
	var res Placename

	// Sorting by name first makes the result deterministic when two
	// candidates are equidistant (the strict < below keeps the first in
	// name order). Note this reorders the receiver in place.
	sort.Sort(p)

	var found bool
	var distance float64
	for _, placename := range p {
		dist := placename.Distance(lat, lon)
		// Skip places whose level is too fine-grained for the distance:
		// level 3 within 20 km, level 2 within 100 km, level 1 within
		// 500 km; level 0 is always eligible.
		if dist > 20.0 && placename.Level > 2 {
			continue
		}
		if dist > 100.0 && placename.Level > 1 {
			continue
		}
		if dist > 500.0 && placename.Level > 0 {
			continue
		}
		if !found || dist < distance {
			distance, res, found = dist, placename, true
		}
	}

	return res, found
}

// LoadPlacenames reads a list of placenames from the file at path and
// returns it sorted by name.
func LoadPlacenames(path string) ([]Placename, error) {
	var s []Placename

	if err := LoadList(path, (*PlacenameList)(&s)); err != nil {
		return nil, err
	}

	sort.Sort(PlacenameList(s))

	return s, nil
}
meta/placenames.go
0.802672
0.596874
placenames.go
starcoder
package processor import ( "fmt" "sync/atomic" "time" "github.com/Jeffail/benthos/lib/log" "github.com/Jeffail/benthos/lib/message/tracing" "github.com/Jeffail/benthos/lib/metrics" "github.com/Jeffail/benthos/lib/processor/condition" "github.com/Jeffail/benthos/lib/response" "github.com/Jeffail/benthos/lib/types" ) //------------------------------------------------------------------------------ func init() { Constructors[TypeWhile] = TypeSpec{ constructor: NewWhile, description: ` While is a processor that has a condition and a list of child processors. The child processors are executed continously on a message batch for as long as the child condition resolves to true. The field ` + "`at_least_once`" + `, if true, ensures that the child processors are always executed at least one time (like a do .. while loop.) The field ` + "`max_loops`" + `, if greater than zero, caps the number of loops for a message batch to this value. If following a loop execution the number of messages in a batch is reduced to zero the loop is exited regardless of the condition result. If following a loop execution there are more than 1 message batches the condition is checked against the first batch only. You can find a [full list of conditions here](../conditions).`, sanitiseConfigFunc: func(conf Config) (interface{}, error) { condSanit, err := condition.SanitiseConfig(conf.While.Condition) if err != nil { return nil, err } procConfs := make([]interface{}, len(conf.While.Processors)) for i, pConf := range conf.While.Processors { if procConfs[i], err = SanitiseConfig(pConf); err != nil { return nil, err } } return map[string]interface{}{ "at_least_once": conf.While.AtLeastOnce, "max_loops": conf.While.MaxLoops, "condition": condSanit, "processors": procConfs, }, nil }, } } //------------------------------------------------------------------------------ // WhileConfig is a config struct containing fields for the While // processor. 
type WhileConfig struct {
	AtLeastOnce bool             `json:"at_least_once" yaml:"at_least_once"`
	MaxLoops    int              `json:"max_loops" yaml:"max_loops"`
	Condition   condition.Config `json:"condition" yaml:"condition"`
	Processors  []Config         `json:"processors" yaml:"processors"`
}

// NewWhileConfig returns a default WhileConfig.
func NewWhileConfig() WhileConfig {
	return WhileConfig{
		AtLeastOnce: false,
		MaxLoops:    0,
		Condition:   condition.NewConfig(),
		Processors:  []Config{},
	}
}

//------------------------------------------------------------------------------

// While is a processor that applies child processors for as long as a child
// condition resolves to true.
type While struct {
	// running is 1 while the processor is live; CloseAsync sets it to 0
	// (read atomically from ProcessMessage).
	running     int32
	maxLoops    int
	atLeastOnce bool
	cond        condition.Type
	children    []types.Processor

	log log.Modular

	mCount      metrics.StatCounter
	mLoop       metrics.StatCounter
	mCondFailed metrics.StatCounter
	mSent       metrics.StatCounter
	mBatchSent  metrics.StatCounter
}

// NewWhile returns a While processor.
func NewWhile(
	conf Config, mgr types.Manager, log log.Modular, stats metrics.Type,
) (Type, error) {
	cond, err := condition.New(conf.While.Condition, mgr, log.NewModule(".condition"), metrics.Namespaced(stats, "condition"))
	if err != nil {
		return nil, err
	}

	var children []types.Processor
	for i, pconf := range conf.While.Processors {
		// Namespace each child's metrics and logs as "while.<index>".
		ns := fmt.Sprintf("while.%v", i)
		nsStats := metrics.Namespaced(stats, ns)
		nsLog := log.NewModule("." + ns)
		var proc Type
		if proc, err = New(pconf, mgr, nsLog, nsStats); err != nil {
			return nil, err
		}
		children = append(children, proc)
	}

	return &While{
		running:     1,
		maxLoops:    conf.While.MaxLoops,
		atLeastOnce: conf.While.AtLeastOnce,
		cond:        cond,
		children:    children,

		log: log,

		mCount:      stats.GetCounter("count"),
		mLoop:       stats.GetCounter("loop"),
		mCondFailed: stats.GetCounter("failed"),
		mSent:       stats.GetCounter("sent"),
		mBatchSent:  stats.GetCounter("batch.sent"),
	}, nil
}

//------------------------------------------------------------------------------

// ProcessMessage applies the processor to a message, either creating >0
// resulting messages or a response to be sent back to the message source.
func (w *While) ProcessMessage(msg types.Message) (msgs []types.Message, res types.Response) {
	w.mCount.Incr(1)

	spans := tracing.CreateChildSpans(TypeWhile, msg)
	msgs = []types.Message{msg}

	loops := 0
	// at_least_once skips the first condition check so the children run
	// at least once.
	condResult := w.atLeastOnce || w.cond.Check(msg)
	for condResult {
		// Abort promptly if the processor has been closed.
		if atomic.LoadInt32(&w.running) != 1 {
			return nil, response.NewError(types.ErrTypeClosed)
		}
		if w.maxLoops > 0 && loops >= w.maxLoops {
			w.log.Traceln("Reached max loops count")
			break
		}

		w.mLoop.Incr(1)
		w.log.Traceln("Looped")
		for _, s := range spans {
			s.LogEvent("loop")
		}

		msgs, res = ExecuteAll(w.children, msgs...)
		// An empty batch exits the loop regardless of the condition.
		if len(msgs) == 0 {
			return
		}
		// When multiple batches result, only the first is checked.
		condResult = w.cond.Check(msgs[0])
		loops++
	}

	for _, s := range spans {
		s.SetTag("result", condResult)
		s.Finish()
	}

	w.mBatchSent.Incr(int64(len(msgs)))
	totalParts := 0
	for _, msg := range msgs {
		totalParts += msg.Len()
	}
	w.mSent.Incr(int64(totalParts))
	return
}

// CloseAsync shuts down the processor and stops processing requests.
func (w *While) CloseAsync() {
	atomic.StoreInt32(&w.running, 0)
	for _, p := range w.children {
		p.CloseAsync()
	}
}

// WaitForClose blocks until the processor has closed down.
func (w *While) WaitForClose(timeout time.Duration) error { stopBy := time.Now().Add(timeout) for _, p := range w.children { if err := p.WaitForClose(time.Until(stopBy)); err != nil { return err } } return nil } //------------------------------------------------------------------------------
lib/processor/while.go
0.691393
0.410284
while.go
starcoder
package planet

import (
	"github.com/willbeason/worldproc/pkg/geodesic"
	"github.com/willbeason/worldproc/pkg/noise"
	"github.com/willbeason/worldproc/pkg/render"
	"github.com/willbeason/worldproc/pkg/sun"
	"image"
	"math"
)

// AddTerrain fills p.Heights with one fractal-noise sample per cell centre
// of the given geodesic sphere.
func AddTerrain(p *Planet, sphere *geodesic.Geodesic, perlinNoise *noise.PerlinFractal) {
	p.Heights = make([]float64, len(sphere.Centers))
	for cell, pos := range sphere.Centers {
		p.Heights[cell] = perlinNoise.ValueAt(pos)
	}
}

// RenderTerrain rasterises the planet's land and water heights plus lighting
// into an image using the given projection. The finest sphere in spheres is
// used for per-pixel cell lookups.
func RenderTerrain(p *Planet, projection render.Projection, spheres []*geodesic.Geodesic, light sun.Light) *image.RGBA {
	screen := projection.Screen
	img := image.NewRGBA(image.Rect(0, 0, screen.Width, screen.Height))

	// One value per pixel, indexed as y*Width + x.
	pxWaterHeights := make([]float64, screen.Width*screen.Height)
	pxLandHeights := make([]float64, screen.Width*screen.Height)
	pxLights := make([]float64, screen.Width*screen.Height)
	pxSunlight := make([]geodesic.Angle, screen.Width*screen.Height)

	heights := p.Heights
	waters := p.Waters
	flow := p.Flows

	// The last sphere is assumed to be the highest-resolution subdivision —
	// TODO confirm against how spheres is built by callers.
	sphere := spheres[len(spheres)-1]

	for x := 0; x < screen.Width; x++ {
		for y := 0; y < screen.Height; y++ {
			pidx := y*screen.Width + x
			angle := projection.Pixels[pidx]
			v := angle.Vector()

			idx := geodesic.Find(spheres, v)
			dist := math.Sqrt(geodesic.DistSq(v, sphere.Centers[idx]))
			// flow is scaled down by 2000 before being added to water
			// height; presumably an empirical visual tuning factor —
			// verify before changing.
			pxW1 := waters[idx] + flow[idx]/2000.0
			pxH1 := heights[idx]

			// Linearly interpolate the cell's stats with the second-closest cell.
			idx2 := 0
			distSq2 := math.MaxFloat64
			for _, n := range sphere.Faces[idx].Neighbors {
				nDistSq2 := geodesic.DistSq(v, sphere.Centers[n])
				if nDistSq2 < distSq2 {
					idx2 = n
					distSq2 = nDistSq2
				}
			}
			dist2 := math.Sqrt(distSq2)
			pxW2 := waters[idx2] + flow[idx2]/2000.0
			pxH2 := heights[idx2]

			// Weight by relative distance to the two nearest cell centres.
			pxWaterHeights[pidx] = render.Lerp(pxW1, pxW2, dist/(dist+dist2))
			pxLandHeights[pidx] = render.Lerp(pxH1, pxH2, dist/(dist+dist2))
			pxLights[pidx] = light.VisualIntensity(v)
			pxSunlight[pidx] = light.AltitudeAzimuth(angle)
		}
	}

	screen.PaintLandWater(pxLandHeights, pxWaterHeights, pxLights, pxSunlight, img)

	return img
}
pkg/planet/terrain.go
0.626696
0.45181
terrain.go
starcoder
package dtables

import (
	"errors"
	"io"

	"github.com/dolthub/dolt/go/libraries/doltcore/diff"
	"github.com/dolthub/dolt/go/libraries/doltcore/doltdb"
	"github.com/dolthub/dolt/go/libraries/doltcore/env/actions"
	"github.com/dolthub/dolt/go/libraries/doltcore/sqle/index"

	"github.com/dolthub/go-mysql-server/sql"
)

// UnscopedDiffTable is a sql.Table implementation of a system table that shows which tables have
// changed in each commit, across all branches.
type UnscopedDiffTable struct {
	ddb  *doltdb.DoltDB
	head *doltdb.Commit
}

// tableChange is an internal data structure used to hold the results of processing
// a diff.TableDelta structure into the output data for this system table.
type tableChange struct {
	tableName    string
	dataChange   bool
	schemaChange bool
}

// NewUnscopedDiffTable creates an UnscopedDiffTable
func NewUnscopedDiffTable(_ *sql.Context, ddb *doltdb.DoltDB, head *doltdb.Commit) sql.Table {
	return &UnscopedDiffTable{ddb: ddb, head: head}
}

// Name is a sql.Table interface function which returns the name of the table which is defined by the constant
// DiffTableName
func (dt *UnscopedDiffTable) Name() string {
	return doltdb.DiffTableName
}

// String is a sql.Table interface function which returns the name of the table which is defined by the constant
// DiffTableName
func (dt *UnscopedDiffTable) String() string {
	return doltdb.DiffTableName
}

// Schema is a sql.Table interface function that returns the sql.Schema for this system table.
func (dt *UnscopedDiffTable) Schema() sql.Schema {
	return []*sql.Column{
		{Name: "commit_hash", Type: sql.Text, Source: doltdb.DiffTableName, PrimaryKey: true},
		{Name: "table_name", Type: sql.Text, Source: doltdb.DiffTableName, PrimaryKey: true},
		{Name: "committer", Type: sql.Text, Source: doltdb.DiffTableName, PrimaryKey: false},
		{Name: "email", Type: sql.Text, Source: doltdb.DiffTableName, PrimaryKey: false},
		{Name: "date", Type: sql.Datetime, Source: doltdb.DiffTableName, PrimaryKey: false},
		{Name: "message", Type: sql.Text, Source: doltdb.DiffTableName, PrimaryKey: false},
		{Name: "data_change", Type: sql.Boolean, Source: doltdb.DiffTableName, PrimaryKey: false},
		{Name: "schema_change", Type: sql.Boolean, Source: doltdb.DiffTableName, PrimaryKey: false},
	}
}

// Partitions is a sql.Table interface function that returns a partition of the data. Currently data is unpartitioned.
func (dt *UnscopedDiffTable) Partitions(*sql.Context) (sql.PartitionIter, error) {
	return index.SinglePartitionIterFromNomsMap(nil), nil
}

// PartitionRows is a sql.Table interface function that gets a row iterator for a partition.
func (dt *UnscopedDiffTable) PartitionRows(ctx *sql.Context, _ sql.Partition) (sql.RowIter, error) {
	return NewUnscopedDiffTableItr(ctx, dt.ddb, dt.head)
}

// UnscopedDiffTableItr is a sql.RowItr implementation which iterates over each commit as if it's a row in the table.
type UnscopedDiffTableItr struct {
	ctx     *sql.Context
	ddb     *doltdb.DoltDB
	commits []*doltdb.Commit
	// commitIdx is the index of the commit currently being processed.
	commitIdx int
	// tableChanges holds the changes for the current commit; nil until
	// lazily loaded by Next.
	tableChanges []tableChange
	// tableChangesIdx is -1 whenever tableChanges is nil.
	tableChangesIdx int
}

// NewUnscopedDiffTableItr creates a UnscopedDiffTableItr from the current environment.
func NewUnscopedDiffTableItr(ctx *sql.Context, ddb *doltdb.DoltDB, head *doltdb.Commit) (*UnscopedDiffTableItr, error) {
	// -1 requests all commits reachable from head, newest first.
	// TODO confirm against actions.TimeSortedCommits.
	commits, err := actions.TimeSortedCommits(ctx, ddb, head, -1)

	if err != nil {
		return nil, err
	}

	return &UnscopedDiffTableItr{ctx, ddb, commits, 0, nil, -1}, nil
}

// HasNext returns true if this UnscopedDiffItr has more elements left.
func (itr *UnscopedDiffTableItr) HasNext() bool {
	// There are more diff records to iterate over if:
	//  1) there is more than one commit left to process, or
	//  2) the tableChanges array isn't nilled out and has data left to process
	return itr.commitIdx+1 < len(itr.commits) || itr.tableChanges != nil
}

// incrementIndexes increments the table changes index, and if it's the end of the table changes array, moves
// to the next commit, and resets the table changes index so that it can be populated when Next() is called.
func (itr *UnscopedDiffTableItr) incrementIndexes() {
	itr.tableChangesIdx++
	if itr.tableChangesIdx >= len(itr.tableChanges) {
		itr.tableChangesIdx = -1
		itr.tableChanges = nil
		itr.commitIdx++
	}
}

// Next retrieves the next row. It will return io.EOF if it's the last row.
// After retrieving the last row, Close will be automatically called.
func (itr *UnscopedDiffTableItr) Next(*sql.Context) (sql.Row, error) {
	if !itr.HasNext() {
		return nil, io.EOF
	}
	defer itr.incrementIndexes()

	// Load table changes if we don't have them for this commit yet.
	// loadTableChanges advances past commits with no changes, so loop
	// until changes are available (it returns io.EOF if none remain).
	for itr.tableChanges == nil {
		err := itr.loadTableChanges(itr.commits[itr.commitIdx])
		if err != nil {
			return nil, err
		}
	}

	commit := itr.commits[itr.commitIdx]
	hash, err := commit.HashOf()
	if err != nil {
		return nil, err
	}

	meta, err := commit.GetCommitMeta()
	if err != nil {
		return nil, err
	}

	tableChange := itr.tableChanges[itr.tableChangesIdx]

	return sql.NewRow(hash.String(), tableChange.tableName, meta.Name, meta.Email,
		meta.Time(), meta.Description, tableChange.dataChange, tableChange.schemaChange), nil
}

// loadTableChanges loads the set of table changes for the current commit into this iterator, taking
// care of advancing the iterator if that commit didn't mutate any tables and checking for EOF condition.
func (itr *UnscopedDiffTableItr) loadTableChanges(commit *doltdb.Commit) error {
	tableChanges, err := itr.calculateTableChanges(commit)
	if err != nil {
		return err
	}

	// If there are no table deltas for this commit (e.g. a "dolt doc" commit),
	// advance to the next commit, checking for EOF condition.
	if len(tableChanges) == 0 {
		itr.commitIdx++
		if !itr.HasNext() {
			return io.EOF
		}
	} else {
		itr.tableChanges = tableChanges
		itr.tableChangesIdx = 0
	}

	return nil
}

// calculateTableChanges calculates the tables that changed in the specified commit, by comparing that
// commit with its immediate ancestor commit.
func (itr *UnscopedDiffTableItr) calculateTableChanges(commit *doltdb.Commit) ([]tableChange, error) {
	toRootValue, err := commit.GetRootValue()
	if err != nil {
		return nil, err
	}

	// Diff against the commit's first parent.
	parent, err := itr.ddb.ResolveParent(itr.ctx, commit, 0)
	if err != nil {
		return nil, err
	}

	fromRootValue, err := parent.GetRootValue()
	if err != nil {
		return nil, err
	}

	deltas, err := diff.GetTableDeltas(itr.ctx, fromRootValue, toRootValue)
	if err != nil {
		return nil, err
	}

	tableChanges := make([]tableChange, len(deltas))
	for i := 0; i < len(deltas); i++ {
		change, err := itr.processTableDelta(deltas[i])
		if err != nil {
			return nil, err
		}
		tableChanges[i] = *change
	}

	// Not all commits mutate tables (e.g. empty commits)
	if len(tableChanges) == 0 {
		return nil, nil
	}

	return tableChanges, nil
}

// processTableDelta processes the specified TableDelta to determine what kind of change it was (i.e. table drop,
// table rename, table create, or data update) and returns a tableChange struct representing the change.
func (itr *UnscopedDiffTableItr) processTableDelta(delta diff.TableDelta) (*tableChange, error) {
	// Dropping a table is always a schema change, and also a data change if the table contained data
	if itr.isTableDropChange(delta) {
		isEmpty, err := itr.isTableDataEmpty(delta.FromTable)
		if err != nil {
			return nil, err
		}

		return &tableChange{
			tableName:    delta.FromName,
			dataChange:   !isEmpty,
			schemaChange: true,
		}, nil
	}

	// Renaming a table is always a schema change, and also a data change if the table data differs
	if itr.isRenameChange(delta) {
		dataChanged, err := itr.isTableDataDifferent(delta)
		if err != nil {
			return nil, err
		}

		return &tableChange{
			tableName:    delta.ToName,
			dataChange:   dataChanged,
			schemaChange: true,
		}, nil
	}

	// Creating a table is always a schema change, and also a data change if data was inserted
	if itr.isTableCreateChange(delta) {
		isEmpty, err := itr.isTableDataEmpty(delta.ToTable)
		if err != nil {
			return nil, err
		}

		return &tableChange{
			tableName:    delta.ToName,
			dataChange:   !isEmpty,
			schemaChange: true,
		}, nil
	}

	// Otherwise it's an in-place update: compare data and schema hashes.
	dataChanged, err := itr.isTableDataDifferent(delta)
	if err != nil {
		return nil, err
	}

	schemaChanged, err := itr.isTableSchemaDifferent(delta)
	if err != nil {
		return nil, err
	}

	return &tableChange{
		tableName:    delta.ToName,
		dataChange:   dataChanged,
		schemaChange: schemaChanged,
	}, nil
}

// Close closes the iterator.
func (itr *UnscopedDiffTableItr) Close(*sql.Context) error {
	return nil
}

// isTableDataEmpty return true if the table does not contain any data
func (itr *UnscopedDiffTableItr) isTableDataEmpty(table *doltdb.Table) (bool, error) {
	rowData, err := table.GetRowData(itr.ctx)
	if err != nil {
		return false, err
	}

	return rowData.Empty(), nil
}

// isRenameChange returns true if the specified TableDelta represents a table rename change.
func (itr *UnscopedDiffTableItr) isRenameChange(delta diff.TableDelta) bool {
	// A rename has both sides present, under different names.
	if delta.FromTable == nil || delta.ToTable == nil {
		return false
	}
	return delta.FromName != delta.ToName
}

// isTableDropChange return true if the specified TableDelta represents a table drop change.
func (itr *UnscopedDiffTableItr) isTableDropChange(delta diff.TableDelta) bool {
	// A drop has a from-name but no to-name.
	return delta.FromName != "" && delta.ToName == ""
}

// isTableCreateChange returns true if the specified TableDelta represents a table create change.
func (itr *UnscopedDiffTableItr) isTableCreateChange(delta diff.TableDelta) bool {
	// A create has a to-table but no from-table.
	return delta.ToTable != nil && delta.FromTable == nil
}

// isTableDataDifferent returns true if the data in the from and to tables is different. This method
// should only be called when both from and to tables are not nil.
func (itr *UnscopedDiffTableItr) isTableDataDifferent(delta diff.TableDelta) (bool, error) {
	if delta.FromTable == nil || delta.ToTable == nil {
		return false, errors.New("specified FromTable and ToTable should never be nil")
	}

	fromHash, err := delta.FromTable.HashOf()
	if err != nil {
		return false, err
	}
	toHash, err := delta.ToTable.HashOf()
	if err != nil {
		return false, err
	}

	return fromHash != toHash, nil
}

// isTableSchemaDifferent returns true if the schema in the from and to tables is different. This method
// should only be called when both from and to tables are not nil.
func (itr *UnscopedDiffTableItr) isTableSchemaDifferent(delta diff.TableDelta) (bool, error) {
	if delta.FromTable == nil || delta.ToTable == nil {
		return false, errors.New("specified FromTable and ToTable should never be nil")
	}

	fromSchema, err := delta.FromTable.GetSchemaHash(itr.ctx)
	if err != nil {
		return false, err
	}
	toSchema, err := delta.ToTable.GetSchemaHash(itr.ctx)
	if err != nil {
		return false, err
	}

	return fromSchema != toSchema, nil
}
go/libraries/doltcore/sqle/dtables/unscoped_diff_table.go
0.658747
0.448728
unscoped_diff_table.go
starcoder
package query

import "fmt"

type (
	// reducer is a reducer expression rendered in its string form,
	// optionally prefixed with a parameter value.
	reducer string
)

var (
	// REDUCERS

	// ReducerArgMax outputs for each tick, the tick and the concatenation separated by ‘,’ of the values of the labels for which the value is the maximum of Geo Time SeriesTM which are in the same equivalence class
	ReducerArgMax = func(i float64) reducer { return reducer(fmt.Sprintf("%f reducer.argmax", i)) }
	// ReducerArgMin outputs for each tick, the tick and the concatenation separated by ‘,’ with the values of the labels for which the value is the minimum of Geo Time SeriesTM which are in the same equivalence class
	ReducerArgMin = func(i float64) reducer { return reducer(fmt.Sprintf("%f reducer.argmin", i)) }
	// ReducerCount computes for each tick the number of measures of Geo Time SeriesTM which are in the same equivalence class
	ReducerCount reducer = "reducer.count"
	// ReducerCountExcludeNulls computes at each tick the number of measures of Geo Time SeriesTM which are in the same equivalence class
	ReducerCountExcludeNulls reducer = "reducer.count.exclude-nulls"
	// ReducerCountIncludeNulls computes at each tick the number of measures of Geo Time SeriesTM which are in the same equivalence class
	ReducerCountIncludeNulls reducer = "reducer.count.include-nulls"
	// ReducerJoin outputs for each tick of Geo Time SeriesTM which are in the same equivalence class, the concatenation of the string representation of values separated by the join string
	ReducerJoin = func(s string) reducer { return reducer(fmt.Sprintf("%s reducer.join", s)) }
	// ReducerJoinForbidNulls outputs for each tick of Geo Time SeriesTM which are in the same equivalence class, the concatenation of the string representation of values separated by the join string
	ReducerJoinForbidNulls = func(s string) reducer { return reducer(fmt.Sprintf("%s reducer.join.forbid-nulls", s)) }
	// ReducerMax outputs for each tick the maximum value of Geo Time SeriesTM which are in the same equivalence class. It operates on any type
	ReducerMax reducer = "reducer.max"
	// ReducerMaxForbidNulls outputs for each tick the maximum value of Geo Time SeriesTM which are in the same equivalence class
	ReducerMaxForbidNulls reducer = "reducer.max.forbid-nulls"
	// ReducerMean outputs for each tick the mean of the values of Geo Time SeriesTM which are in the same equivalence class
	ReducerMean reducer = "reducer.mean"
	// ReducerMeanExcludeNulls outputs for each tick the mean of the values of Geo Time SeriesTM which are in the same equivalence class, excluding nulls from the computation
	ReducerMeanExcludeNulls reducer = "reducer.mean.exclude-nulls"
	// ReducerMeanCircular computes the circular mean of the values of Geo Time SeriesTM which are in the same equivalence class. The associated location is the centroid of all the encountered locations. The associated elevation is the mean of the encountered elevations
	ReducerMeanCircular reducer = "reducer.mean.circular"
	// ReducerMeanCircularExcludeNulls computes the circular mean of the values of Geo Time SeriesTM which are in the same equivalence class, excluding nulls. The associated location is the centroid of all the encountered locations. The associated elevation is the mean of the encountered elevations
	ReducerMeanCircularExcludeNulls reducer = "reducer.mean.circular.exclude-nulls"
	// ReducerMedian outputs for each tick the median of the values of Geo Time SeriesTM which are in the same equivalence class
	ReducerMedian reducer = "reducer.median"
	// ReducerMin outputs for each tick the minimum value of Geo Time SeriesTM which are in the same equivalence class. It operates on any type
	ReducerMin reducer = "reducer.min"
	// ReducerMinForbidNulls outputs for each tick the minimum value of Geo Time SeriesTM which are in the same equivalence class
	ReducerMinForbidNulls reducer = "reducer.min.forbid-nulls"
	// ReducerAnd outputs the result of the logical operator AND for each tick value of Geo Time SeriesTM which are in the same equivalence class
	ReducerAnd reducer = "reducer.and"
	// ReducerAndExcludeNulls outputs the result of the logical operator AND for each tick value of Geo Time SeriesTM which are in the same equivalence class, excluding nulls from the computation
	ReducerAndExcludeNulls reducer = "reducer.and.exclude-nulls"
	// ReducerOr outputs the result of the logical operator OR for each tick value of Geo Time SeriesTM which are in the same equivalence class
	ReducerOr reducer = "reducer.or"
	// ReducerOrExcludeNulls outputs the result of the logical operator OR for each tick value of Geo Time SeriesTM which are in the same equivalence class, excluding nulls from the computation
	ReducerOrExcludeNulls reducer = "reducer.or.exclude-nulls"
	// ReducerSd outputs for each tick the standard deviation of the values of Geo Time SeriesTM which are in the same equivalence class
	ReducerSd = func(b bool) reducer { return reducer(fmt.Sprintf("%t reducer.sd", b)) }
	// ReducerShannonentropy0 computes the Shannon entropy of the values it receives from the framework REDUCE at each tick
	ReducerShannonentropy0 reducer = "reducer.shannonentropy.0"
	// ReducerShannonentropy1 computes the Shannon entropy of the values it receives from the framework REDUCE at each tick
	ReducerShannonentropy1 reducer = "reducer.shannonentropy.1"
	// ReducerSum computes at each tick the sum of the values of Geo Time SeriesTM which are in the same equivalence class
	ReducerSum reducer = "reducer.sum"
	// ReducerSumForbidNulls computes at each tick the sum of the values of Geo Time SeriesTM which are in the same equivalence class
	ReducerSumForbidNulls reducer = "reducer.sum.forbid-nulls"
	// ReducerVar outputs for each tick the variance of the values of Geo Time SeriesTM which are in the same equivalence class
	ReducerVar = func(b bool) reducer { return reducer(fmt.Sprintf("%t reducer.var", b)) }
)
query/reducers.go
0.817319
0.63768
reducers.go
starcoder
package entities

import (
	"errors"
	"math/big"

	"github.com/daoleno/uniswap-sdk-core/entities"
	"github.com/daoleno/uniswapv3-sdk/constants"
	"github.com/daoleno/uniswapv3-sdk/utils"
)

var (
	ErrTickOrder = errors.New("tick order error")
	ErrTickLower = errors.New("tick lower error")
	ErrTickUpper = errors.New("tick upper error")
)

// Position represents a position on a Uniswap V3 Pool.
type Position struct {
	Pool      *Pool
	TickLower int
	TickUpper int
	Liquidity *big.Int

	// cached results for the getters
	token0Amount *entities.CurrencyAmount
	token1Amount *entities.CurrencyAmount
	mintAmounts  []*big.Int
}

// NewPosition constructs a position for a given pool with the given liquidity.
//   pool:      the pool for which the liquidity is assigned
//   liquidity: the amount of liquidity that is in the position
//   tickLower: the lower tick of the position
//   tickUpper: the upper tick of the position
// Both ticks must be multiples of the pool's tick spacing and within the
// global [utils.MinTick, utils.MaxTick] range.
func NewPosition(pool *Pool, liquidity *big.Int, tickLower int, tickUpper int) (*Position, error) {
	if tickLower >= tickUpper {
		return nil, ErrTickOrder
	}
	if tickLower < utils.MinTick || tickLower%pool.tickSpacing() != 0 {
		return nil, ErrTickLower
	}
	if tickUpper > utils.MaxTick || tickUpper%pool.tickSpacing() != 0 {
		return nil, ErrTickUpper
	}
	return &Position{
		Pool:      pool,
		Liquidity: liquidity,
		TickLower: tickLower,
		TickUpper: tickUpper,
	}, nil
}

// Token0PriceLower returns the price of token0 at the lower tick.
func (p *Position) Token0PriceLower() (*entities.Price, error) {
	return utils.TickToPrice(p.Pool.Token0, p.Pool.Token1, p.TickLower)
}

// Token0PriceUpper returns the price of token0 at the upper tick.
func (p *Position) Token0PriceUpper() (*entities.Price, error) {
	return utils.TickToPrice(p.Pool.Token0, p.Pool.Token1, p.TickUpper)
}

// Amount0 returns the amount of token0 that this position's liquidity could be
// burned for at the current pool price. The result is cached after the first call.
func (p *Position) Amount0() (*entities.CurrencyAmount, error) {
	if p.token0Amount == nil {
		if p.Pool.TickCurrent < p.TickLower {
			// entirely below the range: position is 100% token0
			sqrtTickLower, err := utils.GetSqrtRatioAtTick(p.TickLower)
			if err != nil {
				return nil, err
			}
			sqrtTickUpper, err := utils.GetSqrtRatioAtTick(p.TickUpper)
			if err != nil {
				return nil, err
			}
			p.token0Amount = entities.FromRawAmount(p.Pool.Token0.Currency,
				utils.GetAmount0Delta(sqrtTickLower, sqrtTickUpper, p.Liquidity, false))
		} else if p.Pool.TickCurrent < p.TickUpper {
			// in range: only the portion above the current price is token0
			// NOTE(review): roundUp is true here but false in the branch above — confirm
			// this asymmetry against the reference SDK.
			sqrtTickUpper, err := utils.GetSqrtRatioAtTick(p.TickUpper)
			if err != nil {
				return nil, err
			}
			p.token0Amount = entities.FromRawAmount(p.Pool.Token0.Currency,
				utils.GetAmount0Delta(p.Pool.SqrtRatioX96, sqrtTickUpper, p.Liquidity, true))
		} else {
			// entirely above the range: no token0
			p.token0Amount = entities.FromRawAmount(p.Pool.Token0.Currency, constants.Zero)
		}
	}
	return p.token0Amount, nil
}

// Amount1 returns the amount of token1 that this position's liquidity could be
// burned for at the current pool price. The result is cached after the first call.
func (p *Position) Amount1() (*entities.CurrencyAmount, error) {
	if p.token1Amount == nil {
		if p.Pool.TickCurrent < p.TickLower {
			// entirely below the range: no token1
			p.token1Amount = entities.FromRawAmount(p.Pool.Token1.Currency, constants.Zero)
		} else if p.Pool.TickCurrent < p.TickUpper {
			// in range: only the portion below the current price is token1
			sqrtTickLower, err := utils.GetSqrtRatioAtTick(p.TickLower)
			if err != nil {
				return nil, err
			}
			p.token1Amount = entities.FromRawAmount(p.Pool.Token1.Currency,
				utils.GetAmount1Delta(sqrtTickLower, p.Pool.SqrtRatioX96, p.Liquidity, false))
		} else {
			// entirely above the range: position is 100% token1
			sqrtTickLower, err := utils.GetSqrtRatioAtTick(p.TickLower)
			if err != nil {
				return nil, err
			}
			sqrtTickUpper, err := utils.GetSqrtRatioAtTick(p.TickUpper)
			if err != nil {
				return nil, err
			}
			p.token1Amount = entities.FromRawAmount(p.Pool.Token1.Currency,
				utils.GetAmount1Delta(sqrtTickLower, sqrtTickUpper, p.Liquidity, false))
		}
	}
	return p.token1Amount, nil
}

// ratiosAfterSlippage returns the lower and upper sqrt ratios if the price
// 'slips' up to the given slippage tolerance percentage. The results are
// clamped just inside (MinSqrtRatio, MaxSqrtRatio).
func (p *Position) ratiosAfterSlippage(slippageTolerance *entities.Percent) (sqrtRatioX96Lower *big.Int, sqrtRatioX96Upper *big.Int) {
	priceLower := p.Pool.Token0Price().Fraction.Multiply(entities.NewPercent(big.NewInt(1), big.NewInt(1)).Subtract(slippageTolerance).Fraction)
	priceUpper := p.Pool.Token0Price().Fraction.Multiply(entities.NewPercent(big.NewInt(1), big.NewInt(1)).Add(slippageTolerance).Fraction)
	sqrtRatioX96Lower = utils.EncodeSqrtRatioX96(priceLower.Numerator, priceLower.Denominator)
	if sqrtRatioX96Lower.Cmp(utils.MinSqrtRatio) <= 0 {
		sqrtRatioX96Lower = new(big.Int).Add(utils.MinSqrtRatio, big.NewInt(1))
	}
	sqrtRatioX96Upper = utils.EncodeSqrtRatioX96(priceUpper.Numerator, priceUpper.Denominator)
	if sqrtRatioX96Upper.Cmp(utils.MaxSqrtRatio) >= 0 {
		sqrtRatioX96Upper = new(big.Int).Sub(utils.MaxSqrtRatio, big.NewInt(1))
	}
	return sqrtRatioX96Lower, sqrtRatioX96Upper
}

// MintAmountsWithSlippage returns the minimum amounts that must be sent in order
// to safely mint the amount of liquidity held by the position with the given
// slippage tolerance.
//
// (The previous implementation destructured ratiosAfterSlippage into swapped
// names AND swapped the pUpper/pLower usage below; the two mistakes cancelled,
// so this straightened-out version is behaviorally identical but readable.)
func (p *Position) MintAmountsWithSlippage(slippageTolerance *entities.Percent) (amount0, amount1 *big.Int, err error) {
	// get lower/upper prices
	sqrtRatioX96Lower, sqrtRatioX96Upper := p.ratiosAfterSlippage(slippageTolerance)

	// construct counterfactual pools at the slipped prices
	tickLower, err := utils.GetTickAtSqrtRatio(sqrtRatioX96Lower)
	if err != nil {
		return nil, nil, err
	}
	poolLower, err := NewPool(p.Pool.Token0, p.Pool.Token1, p.Pool.Fee, sqrtRatioX96Lower, big.NewInt(0) /* liquidity doesn't matter */, tickLower, nil)
	if err != nil {
		return nil, nil, err
	}
	tickUpper, err := utils.GetTickAtSqrtRatio(sqrtRatioX96Upper)
	if err != nil {
		return nil, nil, err
	}
	poolUpper, err := NewPool(p.Pool.Token0, p.Pool.Token1, p.Pool.Fee, sqrtRatioX96Upper, big.NewInt(0) /* liquidity doesn't matter */, tickUpper, nil)
	if err != nil {
		return nil, nil, err
	}

	// because the router is imprecise, we need to calculate the position that will
	// be created (assuming no slippage); the mint amounts are what will be passed
	// as calldata
	a0, a1, err := p.MintAmounts()
	if err != nil {
		return nil, nil, err
	}
	positionThatWillBeCreated, err := FromAmounts(p.Pool, p.TickLower, p.TickUpper, a0, a1, false)
	if err != nil {
		return nil, nil, err
	}

	// we want the smaller amounts...
	// ...which occurs at the upper price for amount0...
	pUpper, err := NewPosition(poolUpper, positionThatWillBeCreated.Liquidity, p.TickLower, p.TickUpper)
	if err != nil {
		return nil, nil, err
	}
	// ...and the lower for amount1
	pLower, err := NewPosition(poolLower, positionThatWillBeCreated.Liquidity, p.TickLower, p.TickUpper)
	if err != nil {
		return nil, nil, err
	}
	amount0, _, err = pUpper.MintAmounts()
	if err != nil {
		return nil, nil, err
	}
	_, amount1, err = pLower.MintAmounts()
	if err != nil {
		return nil, nil, err
	}
	return amount0, amount1, nil
}

// BurnAmountsWithSlippage returns the minimum amounts that should be requested
// in order to safely burn the amount of liquidity held by the position with the
// given slippage tolerance.
func (p *Position) BurnAmountsWithSlippage(slippageTolerance *entities.Percent) (amount0, amount1 *big.Int, err error) {
	// get lower/upper prices
	sqrtRatioX96Lower, sqrtRatioX96Upper := p.ratiosAfterSlippage(slippageTolerance)

	// construct counterfactual pools at the slipped prices
	tickLower, err := utils.GetTickAtSqrtRatio(sqrtRatioX96Lower)
	if err != nil {
		return nil, nil, err
	}
	poolLower, err := NewPool(p.Pool.Token0, p.Pool.Token1, p.Pool.Fee, sqrtRatioX96Lower, big.NewInt(0) /* liquidity doesn't matter */, tickLower, nil)
	if err != nil {
		return nil, nil, err
	}
	tickUpper, err := utils.GetTickAtSqrtRatio(sqrtRatioX96Upper)
	if err != nil {
		return nil, nil, err
	}
	poolUpper, err := NewPool(p.Pool.Token0, p.Pool.Token1, p.Pool.Fee, sqrtRatioX96Upper, big.NewInt(0) /* liquidity doesn't matter */, tickUpper, nil)
	if err != nil {
		return nil, nil, err
	}

	// we want the smaller amounts...
	// ...which occurs at the upper price for amount0...
	pUpper, err := NewPosition(poolUpper, p.Liquidity, p.TickLower, p.TickUpper)
	if err != nil {
		return nil, nil, err
	}
	// ...and the lower for amount1
	pLower, err := NewPosition(poolLower, p.Liquidity, p.TickLower, p.TickUpper)
	if err != nil {
		return nil, nil, err
	}
	a0, err := pUpper.Amount0()
	if err != nil {
		return nil, nil, err
	}
	a1, err := pLower.Amount1()
	if err != nil {
		return nil, nil, err
	}
	return a0.Quotient(), a1.Quotient(), nil
}

// MintAmounts returns the minimum amounts that must be sent in order to mint
// the amount of liquidity held by the position at the current price for the pool.
// The result is cached in p.mintAmounts (the original computed the amounts but
// never stored them, so the cache check was dead code).
func (p *Position) MintAmounts() (amount0, amount1 *big.Int, err error) {
	if p.mintAmounts == nil {
		rLower, err := utils.GetSqrtRatioAtTick(p.TickLower)
		if err != nil {
			return nil, nil, err
		}
		rUpper, err := utils.GetSqrtRatioAtTick(p.TickUpper)
		if err != nil {
			return nil, nil, err
		}
		var a0, a1 *big.Int
		switch {
		case p.Pool.TickCurrent < p.TickLower:
			// below range: mint requires only token0
			a0 = utils.GetAmount0Delta(rLower, rUpper, p.Liquidity, true)
			a1 = constants.Zero
		case p.Pool.TickCurrent < p.TickUpper:
			// in range: both tokens, split at the current price
			a0 = utils.GetAmount0Delta(p.Pool.SqrtRatioX96, rUpper, p.Liquidity, true)
			a1 = utils.GetAmount1Delta(rLower, p.Pool.SqrtRatioX96, p.Liquidity, true)
		default:
			// above range: mint requires only token1
			a0 = constants.Zero
			a1 = utils.GetAmount1Delta(rLower, rUpper, p.Liquidity, true)
		}
		p.mintAmounts = []*big.Int{a0, a1}
	}
	return p.mintAmounts[0], p.mintAmounts[1], nil
}

// FromAmounts computes the maximum amount of liquidity received for a given
// amount of token0, token1, and the prices at the tick boundaries.
//   pool:             the pool for which the position should be created
//   tickLower:        the lower tick of the position
//   tickUpper:        the upper tick of the position
//   amount0, amount1: token amounts
//   useFullPrecision: if false, liquidity will be maximized according to what
//                     the router can calculate, not what core can theoretically support
func FromAmounts(pool *Pool, tickLower, tickUpper int, amount0, amount1 *big.Int, useFullPrecision bool) (*Position, error) {
	sqrtRatioAX96, err := utils.GetSqrtRatioAtTick(tickLower)
	if err != nil {
		return nil, err
	}
	sqrtRatioBX96, err := utils.GetSqrtRatioAtTick(tickUpper)
	if err != nil {
		return nil, err
	}
	return NewPosition(pool, utils.MaxLiquidityForAmounts(pool.SqrtRatioX96, sqrtRatioAX96, sqrtRatioBX96, amount0, amount1, useFullPrecision), tickLower, tickUpper)
}

// FromAmount0 computes a position with the maximum amount of liquidity received
// for a given amount of token0, assuming an unlimited amount of token1.
// useFullPrecision: if false, liquidity will be maximized according to what the
// router can calculate, not what core can theoretically support (doc brought in
// line with FromAmounts; the old comment said the opposite).
func FromAmount0(pool *Pool, tickLower, tickUpper int, amount0 *big.Int, useFullPrecision bool) (*Position, error) {
	return FromAmounts(pool, tickLower, tickUpper, amount0, entities.MaxUint256, useFullPrecision)
}

// FromAmount1 computes a position with the maximum amount of liquidity received
// for a given amount of token1, assuming an unlimited amount of token0.
// This function always uses full precision.
func FromAmount1(pool *Pool, tickLower, tickUpper int, amount1 *big.Int) (*Position, error) {
	return FromAmounts(pool, tickLower, tickUpper, entities.MaxUint256, amount1, true)
}
entities/position.go
0.758421
0.439447
position.go
starcoder
package ionoscloud

import (
	"encoding/json"
	"strings"
	"time"
)

// PtrBool - returns a pointer to given boolean value.
func PtrBool(v bool) *bool { return &v }

// PtrInt - returns a pointer to given integer value.
func PtrInt(v int) *int { return &v }

// PtrInt32 - returns a pointer to given integer value.
func PtrInt32(v int32) *int32 { return &v }

// PtrInt64 - returns a pointer to given integer value.
func PtrInt64(v int64) *int64 { return &v }

// PtrFloat32 - returns a pointer to given float value.
func PtrFloat32(v float32) *float32 { return &v }

// PtrFloat64 - returns a pointer to given float value.
func PtrFloat64(v float64) *float64 { return &v }

// PtrString - returns a pointer to given string value.
func PtrString(v string) *string { return &v }

// PtrTime - returns a pointer to given Time value.
func PtrTime(v time.Time) *time.Time { return &v }

// ToBool - returns the value of the bool pointer passed in (panics on nil; use ToBoolDefault for a safe variant)
func ToBool(ptr *bool) bool { return *ptr }

// ToBoolDefault - returns the value of the bool pointer passed in, or false if the pointer is nil
func ToBoolDefault(ptr *bool) bool {
	if ptr == nil {
		return false
	}
	return *ptr
}

// ToBoolSlice - returns a bool slice of the pointer passed in
func ToBoolSlice(ptrSlice *[]bool) []bool {
	valSlice := make([]bool, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToByte - returns the value of the byte pointer passed in
func ToByte(ptr *byte) byte { return *ptr }

// ToByteDefault - returns the value of the byte pointer passed in, or 0 if the pointer is nil
func ToByteDefault(ptr *byte) byte {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToByteSlice - returns a byte slice of the pointer passed in
func ToByteSlice(ptrSlice *[]byte) []byte {
	valSlice := make([]byte, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToString - returns the value of the string pointer passed in
func ToString(ptr *string) string { return *ptr }

// ToStringDefault - returns the value of the string pointer passed in, or "" if the pointer is nil
func ToStringDefault(ptr *string) string {
	if ptr == nil {
		return ""
	}
	return *ptr
}

// ToStringSlice - returns a string slice of the pointer passed in
func ToStringSlice(ptrSlice *[]string) []string {
	valSlice := make([]string, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToInt - returns the value of the int pointer passed in
func ToInt(ptr *int) int { return *ptr }

// ToIntDefault - returns the value of the int pointer passed in, or 0 if the pointer is nil
func ToIntDefault(ptr *int) int {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToIntSlice - returns a int slice of the pointer passed in
func ToIntSlice(ptrSlice *[]int) []int {
	valSlice := make([]int, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToInt8 - returns the value of the int8 pointer passed in
func ToInt8(ptr *int8) int8 { return *ptr }

// ToInt8Default - returns the value of the int8 pointer passed in, or 0 if the pointer is nil
func ToInt8Default(ptr *int8) int8 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToInt8Slice - returns a int8 slice of the pointer passed in
func ToInt8Slice(ptrSlice *[]int8) []int8 {
	valSlice := make([]int8, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToInt16 - returns the value of the int16 pointer passed in
func ToInt16(ptr *int16) int16 { return *ptr }

// ToInt16Default - returns the value of the int16 pointer passed in, or 0 if the pointer is nil
func ToInt16Default(ptr *int16) int16 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToInt16Slice - returns a int16 slice of the pointer passed in
func ToInt16Slice(ptrSlice *[]int16) []int16 {
	valSlice := make([]int16, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToInt32 - returns the value of the int32 pointer passed in
func ToInt32(ptr *int32) int32 { return *ptr }

// ToInt32Default - returns the value of the int32 pointer passed in, or 0 if the pointer is nil
func ToInt32Default(ptr *int32) int32 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToInt32Slice - returns a int32 slice of the pointer passed in
func ToInt32Slice(ptrSlice *[]int32) []int32 {
	valSlice := make([]int32, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToInt64 - returns the value of the int64 pointer passed in
func ToInt64(ptr *int64) int64 { return *ptr }

// ToInt64Default - returns the value of the int64 pointer passed in, or 0 if the pointer is nil
func ToInt64Default(ptr *int64) int64 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToInt64Slice - returns a int64 slice of the pointer passed in
func ToInt64Slice(ptrSlice *[]int64) []int64 {
	valSlice := make([]int64, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToUint - returns the value of the uint pointer passed in
func ToUint(ptr *uint) uint { return *ptr }

// ToUintDefault - returns the value of the uint pointer passed in, or 0 if the pointer is nil
func ToUintDefault(ptr *uint) uint {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToUintSlice - returns a uint slice of the pointer passed in
func ToUintSlice(ptrSlice *[]uint) []uint {
	valSlice := make([]uint, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToUint8 - returns the value of the uint8 pointer passed in
func ToUint8(ptr *uint8) uint8 { return *ptr }

// ToUint8Default - returns the value of the uint8 pointer passed in, or 0 if the pointer is nil
func ToUint8Default(ptr *uint8) uint8 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToUint8Slice - returns a uint8 slice of the pointer passed in
func ToUint8Slice(ptrSlice *[]uint8) []uint8 {
	valSlice := make([]uint8, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToUint16 - returns the value of the uint16 pointer passed in
func ToUint16(ptr *uint16) uint16 { return *ptr }

// ToUint16Default - returns the value of the uint16 pointer passed in, or 0 if the pointer is nil
func ToUint16Default(ptr *uint16) uint16 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToUint16Slice - returns a uint16 slice of the pointer passed in
func ToUint16Slice(ptrSlice *[]uint16) []uint16 {
	valSlice := make([]uint16, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToUint32 - returns the value of the uint32 pointer passed in
func ToUint32(ptr *uint32) uint32 { return *ptr }

// ToUint32Default - returns the value of the uint32 pointer passed in, or 0 if the pointer is nil
func ToUint32Default(ptr *uint32) uint32 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToUint32Slice - returns a uint32 slice of the pointer passed in
func ToUint32Slice(ptrSlice *[]uint32) []uint32 {
	valSlice := make([]uint32, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToUint64 - returns the value of the uint64 pointer passed in
func ToUint64(ptr *uint64) uint64 { return *ptr }

// ToUint64Default - returns the value of the uint64 pointer passed in, or 0 if the pointer is nil
func ToUint64Default(ptr *uint64) uint64 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToUint64Slice - returns a uint64 slice of the pointer passed in (comment fixed: previously said "uint63")
func ToUint64Slice(ptrSlice *[]uint64) []uint64 {
	valSlice := make([]uint64, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToFloat32 - returns the value of the float32 pointer passed in
func ToFloat32(ptr *float32) float32 { return *ptr }

// ToFloat32Default - returns the value of the float32 pointer passed in, or 0 if the pointer is nil
func ToFloat32Default(ptr *float32) float32 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToFloat32Slice - returns a float32 slice of the pointer passed in
func ToFloat32Slice(ptrSlice *[]float32) []float32 {
	valSlice := make([]float32, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToFloat64 - returns the value of the float64 pointer passed in
func ToFloat64(ptr *float64) float64 { return *ptr }

// ToFloat64Default - returns the value of the float64 pointer passed in, or 0 if the pointer is nil
func ToFloat64Default(ptr *float64) float64 {
	if ptr == nil {
		return 0
	}
	return *ptr
}

// ToFloat64Slice - returns a float64 slice of the pointer passed in
func ToFloat64Slice(ptrSlice *[]float64) []float64 {
	valSlice := make([]float64, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// ToTime - returns the value of the Time pointer passed in
func ToTime(ptr *time.Time) time.Time { return *ptr }

// ToTimeDefault - returns the value of the Time pointer passed in, or 0001-01-01 00:00:00 +0000 UTC if the pointer is nil
func ToTimeDefault(ptr *time.Time) time.Time {
	if ptr == nil {
		return time.Time{}
	}
	return *ptr
}

// ToTimeSlice - returns a Time slice of the pointer passed in
func ToTimeSlice(ptrSlice *[]time.Time) []time.Time {
	valSlice := make([]time.Time, len(*ptrSlice))
	copy(valSlice, *ptrSlice)
	return valSlice
}

// NullableBool wraps a *bool and tracks whether it was explicitly set,
// distinguishing "absent" from "present but null" in JSON payloads.
type NullableBool struct {
	value *bool
	isSet bool
}

func (v NullableBool) Get() *bool     { return v.value }
func (v *NullableBool) Set(val *bool) { v.value = val; v.isSet = true }
func (v NullableBool) IsSet() bool    { return v.isSet }
func (v *NullableBool) Unset()        { v.value = nil; v.isSet = false }

// NewNullableBool returns a NullableBool already marked as set.
func NewNullableBool(val *bool) *NullableBool { return &NullableBool{value: val, isSet: true} }

func (v NullableBool) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) }

func (v *NullableBool) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}

// NullableInt wraps a *int and tracks whether it was explicitly set.
type NullableInt struct {
	value *int
	isSet bool
}

func (v NullableInt) Get() *int     { return v.value }
func (v *NullableInt) Set(val *int) { v.value = val; v.isSet = true }
func (v NullableInt) IsSet() bool   { return v.isSet }
func (v *NullableInt) Unset()       { v.value = nil; v.isSet = false }

// NewNullableInt returns a NullableInt already marked as set.
func NewNullableInt(val *int) *NullableInt { return &NullableInt{value: val, isSet: true} }

func (v NullableInt) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) }

func (v *NullableInt) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}

// NullableInt32 wraps a *int32 and tracks whether it was explicitly set.
type NullableInt32 struct {
	value *int32
	isSet bool
}

func (v NullableInt32) Get() *int32     { return v.value }
func (v *NullableInt32) Set(val *int32) { v.value = val; v.isSet = true }
func (v NullableInt32) IsSet() bool     { return v.isSet }
func (v *NullableInt32) Unset()         { v.value = nil; v.isSet = false }

// NewNullableInt32 returns a NullableInt32 already marked as set.
func NewNullableInt32(val *int32) *NullableInt32 { return &NullableInt32{value: val, isSet: true} }

func (v NullableInt32) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) }

func (v *NullableInt32) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}

// NullableInt64 wraps a *int64 and tracks whether it was explicitly set.
type NullableInt64 struct {
	value *int64
	isSet bool
}

func (v NullableInt64) Get() *int64     { return v.value }
func (v *NullableInt64) Set(val *int64) { v.value = val; v.isSet = true }
func (v NullableInt64) IsSet() bool     { return v.isSet }
func (v *NullableInt64) Unset()         { v.value = nil; v.isSet = false }

// NewNullableInt64 returns a NullableInt64 already marked as set.
func NewNullableInt64(val *int64) *NullableInt64 { return &NullableInt64{value: val, isSet: true} }

func (v NullableInt64) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) }

func (v *NullableInt64) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}

// NullableFloat32 wraps a *float32 and tracks whether it was explicitly set.
type NullableFloat32 struct {
	value *float32
	isSet bool
}

func (v NullableFloat32) Get() *float32     { return v.value }
func (v *NullableFloat32) Set(val *float32) { v.value = val; v.isSet = true }
func (v NullableFloat32) IsSet() bool       { return v.isSet }
func (v *NullableFloat32) Unset()           { v.value = nil; v.isSet = false }

// NewNullableFloat32 returns a NullableFloat32 already marked as set.
func NewNullableFloat32(val *float32) *NullableFloat32 {
	return &NullableFloat32{value: val, isSet: true}
}

func (v NullableFloat32) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) }

func (v *NullableFloat32) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}

// NullableFloat64 wraps a *float64 and tracks whether it was explicitly set.
type NullableFloat64 struct {
	value *float64
	isSet bool
}

func (v NullableFloat64) Get() *float64     { return v.value }
func (v *NullableFloat64) Set(val *float64) { v.value = val; v.isSet = true }
func (v NullableFloat64) IsSet() bool       { return v.isSet }
func (v *NullableFloat64) Unset()           { v.value = nil; v.isSet = false }

// NewNullableFloat64 returns a NullableFloat64 already marked as set.
func NewNullableFloat64(val *float64) *NullableFloat64 {
	return &NullableFloat64{value: val, isSet: true}
}

func (v NullableFloat64) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) }

func (v *NullableFloat64) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}

// NullableString wraps a *string and tracks whether it was explicitly set.
type NullableString struct {
	value *string
	isSet bool
}

func (v NullableString) Get() *string     { return v.value }
func (v *NullableString) Set(val *string) { v.value = val; v.isSet = true }
func (v NullableString) IsSet() bool      { return v.isSet }
func (v *NullableString) Unset()          { v.value = nil; v.isSet = false }

// NewNullableString returns a NullableString already marked as set.
func NewNullableString(val *string) *NullableString {
	return &NullableString{value: val, isSet: true}
}

func (v NullableString) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) }

func (v *NullableString) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}

// NullableTime wraps a *time.Time and tracks whether it was explicitly set.
type NullableTime struct {
	value *time.Time
	isSet bool
}

func (v NullableTime) Get() *time.Time     { return v.value }
func (v *NullableTime) Set(val *time.Time) { v.value = val; v.isSet = true }
func (v NullableTime) IsSet() bool         { return v.isSet }
func (v *NullableTime) Unset()             { v.value = nil; v.isSet = false }

// NewNullableTime returns a NullableTime already marked as set.
func NewNullableTime(val *time.Time) *NullableTime { return &NullableTime{value: val, isSet: true} }

// MarshalJSON delegates to time.Time's RFC 3339 marshaller.
// NOTE(review): unlike the other Nullable types this panics when value is nil — confirm intended.
func (v NullableTime) MarshalJSON() ([]byte, error) { return v.value.MarshalJSON() }

func (v *NullableTime) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}

// IonosTime embeds time.Time and accepts RFC 3339 timestamps with or without
// an explicit timezone suffix.
type IonosTime struct {
	time.Time
}

// UnmarshalJSON parses an (optionally quoted) RFC 3339 timestamp, appending a
// "Z" suffix when no timezone marker is present.
// Fixes vs. the original: `strlen(str)` (undefined in the standard library;
// `len` is equivalent for the == 0 check) and the no-op `t = nil` assignment
// to the local receiver variable, which could never affect the caller.
func (t *IonosTime) UnmarshalJSON(data []byte) error {
	str := string(data)
	if len(str) == 0 {
		// nothing to parse; leave the receiver untouched
		return nil
	}
	if str[0] == '"' {
		str = str[1:]
	}
	if str[len(str)-1] == '"' {
		str = str[:len(str)-1]
	}
	if !strings.Contains(str, "Z") {
		/* forcefully adding timezone suffix to be able to parse the
		 * string using RFC3339 */
		str += "Z"
	}
	tt, err := time.Parse(time.RFC3339, str)
	if err != nil {
		return err
	}
	*t = IonosTime{tt}
	return nil
}
utils.go
0.788909
0.434461
utils.go
starcoder
package blurhash

import (
	"fmt"
	"image"
	"math"
	"strings"

	"github.com/buckket/go-blurhash/base83"
)

func init() {
	initLinearTable(channelToLinear[:])
}

// channelToLinear caches the sRGB→linear transfer function for every possible
// 8-bit channel value, so the per-pixel encode loop avoids recomputing it.
var channelToLinear [256]float64

// initLinearTable fills table with sRGBToLinear of each index.
// Fixed: the original ignored its parameter and wrote the global
// channelToLinear directly; since the only caller passes channelToLinear[:],
// writing through the parameter is behavior-identical and makes the function
// do what its signature advertises.
func initLinearTable(table []float64) {
	for i := range table {
		table[i] = sRGBToLinear(i)
	}
}

// An InvalidParameterError occurs when an invalid argument is passed to either the Decode or Encode function.
type InvalidParameterError struct {
	Value     int
	Parameter string
}

func (e InvalidParameterError) Error() string {
	return fmt.Sprintf("blurhash: %sComponents (%d) must be element of [1-9]", e.Parameter, e.Value)
}

// An EncodingError represents an error that occurred during the encoding of the given value.
// This most likely means that your input image is invalid and can not be processed.
type EncodingError string

func (e EncodingError) Error() string {
	return fmt.Sprintf("blurhash: %s", string(e))
}

// Encode calculates the Blurhash for an image using the given x and y component counts.
// The x and y components have to be between 1 and 9 respectively.
// The image must be of image.Image type.
func Encode(xComponents int, yComponents int, rgba image.Image) (string, error) {
	// Validate component counts; the size flag below only encodes values 1..9.
	if xComponents < 1 || xComponents > 9 {
		return "", InvalidParameterError{xComponents, "x"}
	}
	if yComponents < 1 || yComponents > 9 {
		return "", InvalidParameterError{yComponents, "y"}
	}
	var blurhash strings.Builder
	// Hash layout: 1 char size flag + 1 char max AC + 4 chars DC + 2 chars per AC component.
	blurhash.Grow(4 + 2*xComponents*yComponents)
	// Size Flag
	str, err := base83.Encode((xComponents-1)+(yComponents-1)*9, 1)
	if err != nil {
		return "", EncodingError("could not encode size flag")
	}
	blurhash.WriteString(str)
	// factors holds one (r, g, b) triple per component; factors[0..2] is the DC term.
	factors := make([]float64, yComponents*xComponents*3)
	multiplyBasisFunction(rgba, factors, xComponents, yComponents)
	var maximumValue float64
	var quantisedMaximumValue int
	var acCount = xComponents*yComponents - 1
	if acCount > 0 {
		// Find the largest absolute AC value (skipping the DC triple at factors[0..2])
		// and quantise it into [0, 82] so it fits in a single base83 character.
		var actualMaximumValue float64
		for i := 0; i < acCount*3; i++ {
			actualMaximumValue = math.Max(math.Abs(factors[i+3]), actualMaximumValue)
		}
		quantisedMaximumValue = int(math.Max(0, math.Min(82, math.Floor(actualMaximumValue*166-0.5))))
		maximumValue = (float64(quantisedMaximumValue) + 1) / 166
	} else {
		maximumValue = 1
	}
	// Quantised max AC component
	str, err = base83.Encode(quantisedMaximumValue, 1)
	if err != nil {
		return "", EncodingError("could not encode quantised max AC component")
	}
	blurhash.WriteString(str)
	// DC value
	str, err = base83.Encode(encodeDC(factors[0], factors[1], factors[2]), 4)
	if err != nil {
		return "", EncodingError("could not encode DC value")
	}
	blurhash.WriteString(str)
	// AC values
	for i := 0; i < acCount; i++ {
		str, err = base83.Encode(encodeAC(factors[3+(i*3+0)], factors[3+(i*3+1)], factors[3+(i*3+2)], maximumValue), 2)
		if err != nil {
			return "", EncodingError("could not encode AC value")
		}
		blurhash.WriteString(str)
	}
	// Sanity check against the size computed for Grow above.
	if blurhash.Len() != 4+2*xComponents*yComponents {
		return "", EncodingError("hash does not match expected size")
	}
	return blurhash.String(), nil
}

// multiplyBasisFunction accumulates the DCT-like cosine basis projections of the
// image into factors (3 floats per component, laid out [r g b] per (xc, yc)).
// NOTE(review): width/height are taken from Bounds().Max, which assumes the
// image's bounds start at the origin — confirm for cropped images.
func multiplyBasisFunction(rgba image.Image, factors []float64, xComponents int, yComponents int) {
	height := rgba.Bounds().Max.Y
	width := rgba.Bounds().Max.X
	// Precompute the per-axis cosine tables so the pixel loop does no trig.
	xvalues := make([][]float64, xComponents)
	for xComponent := 0; xComponent < xComponents; xComponent++ {
		xvalues[xComponent] = make([]float64, width)
		for x := 0; x < width; x++ {
			xvalues[xComponent][x] = math.Cos(math.Pi * float64(xComponent) * float64(x) / float64(width))
		}
	}
	yvalues := make([][]float64, yComponents)
	for yComponent := 0; yComponent < yComponents; yComponent++ {
		yvalues[yComponent] = make([]float64, height)
		for y := 0; y < height; y++ {
			yvalues[yComponent][y] = math.Cos(math.Pi * float64(yComponent) * float64(y) / float64(height))
		}
	}
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			// RGBA() returns 16-bit channels; >>8 reduces to 8 bits for the lookup table.
			rt, gt, bt, _ := rgba.At(x, y).RGBA()
			lr := channelToLinear[rt>>8]
			lg := channelToLinear[gt>>8]
			lb := channelToLinear[bt>>8]
			for yc := 0; yc < yComponents; yc++ {
				for xc := 0; xc < xComponents; xc++ {
					// DC term is a plain average; AC terms are scaled by 2.
					scale := 1 / float64(width*height)
					if xc != 0 || yc != 0 {
						scale = 2 / float64(width*height)
					}
					basis := xvalues[xc][x] * yvalues[yc][y]
					factors[0+xc*3+yc*3*xComponents] += lr * basis * scale
					factors[1+xc*3+yc*3*xComponents] += lg * basis * scale
					factors[2+xc*3+yc*3*xComponents] += lb * basis * scale
				}
			}
		}
	}
}

// encodeDC packs the average color into a single 24-bit sRGB integer (0xRRGGBB).
func encodeDC(r, g, b float64) int {
	return (linearTosRGB(r) << 16) + (linearTosRGB(g) << 8) + linearTosRGB(b)
}

// encodeAC quantises each channel of an AC component into [0, 18] (signed
// square-root scaling relative to maximumValue) and packs the triple base-19.
func encodeAC(r, g, b, maximumValue float64) int {
	quant := func(f float64) int {
		return int(math.Max(0, math.Min(18, math.Floor(signPow(f/maximumValue, 0.5)*9+9.5))))
	}
	return quant(r)*19*19 + quant(g)*19 + quant(b)
}
vendor/github.com/buckket/go-blurhash/encode.go
0.759493
0.452899
encode.go
starcoder
package byteslice // RUnset apply AND operation on a byte slice with an "unset" byte slice using little endian order. func RUnset(data, unsetData []byte) []byte { var dataLength = len(data) if dataLength < 1 { return data } unsetDataLength := len(unsetData) operationLength := dataLength operationCut := 0 if unsetDataLength > dataLength { operationLength = unsetDataLength operationCut = operationLength - dataLength } result, _ := Unset(LPad(data, operationLength, 0xFF), LPad(unsetData, operationLength, 0xFF)) return result[operationCut:] } // RSet apply OR operation on a byte slice with an "set" byte slice using little endian order. func RSet(data, setData []byte) []byte { dataLength := len(data) setDataLength := len(setData) operationLength := dataLength if setDataLength > dataLength { operationLength = setDataLength } result, _ := Set(LPad(data, operationLength, 0x00), LPad(setData, operationLength, 0x00)) return result } // RToggle apply XOR operation on a byte slice with an "toggle" byte slice using little endian order. func RToggle(data, toggleData []byte) []byte { dataLength := len(data) toggleDataLength := len(toggleData) operationLength := dataLength if toggleDataLength > dataLength { operationLength = toggleDataLength } result, _ := Toggle(LPad(data, operationLength, 0x00), LPad(toggleData, operationLength, 0x00)) return result } // RSubset get the byte slice of a subset of the little endian ordered data byte defined // by the least significant bit and the most significant bit. 
func RSubset(data []byte, leastSignificantBit, mostSignificantBit uint64) []byte {
	// Highest addressable bit index for this slice.
	// NOTE(review): maxBitsLength, LShift, RShift and computeSize are defined
	// elsewhere in this package.
	var maxDataMostSignificantBit = uint64(maxBitsLength*len(data) - 1)
	// Reject empty/inverted ranges and ranges that start past the data.
	if mostSignificantBit <= leastSignificantBit || leastSignificantBit > maxDataMostSignificantBit {
		return make([]byte, 0)
	}
	// Clamp the upper bound to the bits actually available.
	if mostSignificantBit > maxDataMostSignificantBit {
		mostSignificantBit = mostSignificantBit // placeholder comment marker (see next line)
	}
	// ... the clamp above is written out in full below; tokens unchanged:
	var result = LShift(data, maxDataMostSignificantBit-mostSignificantBit)
	// After the left shift, bits above the requested MSB have been discarded;
	// the right shift then drops bits below the requested LSB, leaving the
	// selected range right-aligned.
	var correctiveShift = maxDataMostSignificantBit - mostSignificantBit + leastSignificantBit
	result = RShift(result, correctiveShift)
	// Trim leading bytes so only the bytes needed to hold the range remain.
	var size = computeSize(leastSignificantBit, mostSignificantBit)
	return result[uint64(len(result))-size:]
}
byteslice_littleendian.go
0.788868
0.598165
byteslice_littleendian.go
starcoder
package graph import ( i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization" ) // WorkbookTableRow type WorkbookTableRow struct { Entity // Returns the index number of the row within the rows collection of the table. Zero-indexed. Read-only. index *int32; // Represents the raw values of the specified range. The data returned could be of type string, number, or a boolean. Cell that contain an error will return the error string. values *Json; } // NewWorkbookTableRow instantiates a new workbookTableRow and sets the default values. func NewWorkbookTableRow()(*WorkbookTableRow) { m := &WorkbookTableRow{ Entity: *NewEntity(), } return m } // GetIndex gets the index property value. Returns the index number of the row within the rows collection of the table. Zero-indexed. Read-only. func (m *WorkbookTableRow) GetIndex()(*int32) { if m == nil { return nil } else { return m.index } } // GetValues gets the values property value. Represents the raw values of the specified range. The data returned could be of type string, number, or a boolean. Cell that contain an error will return the error string. 
func (m *WorkbookTableRow) GetValues()(*Json) { if m == nil { return nil } else { return m.values } } // GetFieldDeserializers the deserialization information for the current model func (m *WorkbookTableRow) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) { res := m.Entity.GetFieldDeserializers() res["index"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error { val, err := n.GetInt32Value() if err != nil { return err } if val != nil { m.SetIndex(val) } return nil } res["values"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error { val, err := n.GetObjectValue(func () i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable { return NewJson() }) if err != nil { return err } if val != nil { m.SetValues(val.(*Json)) } return nil } return res } func (m *WorkbookTableRow) IsNil()(bool) { return m == nil } // Serialize serializes information the current object func (m *WorkbookTableRow) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) { err := m.Entity.Serialize(writer) if err != nil { return err } { err = writer.WriteInt32Value("index", m.GetIndex()) if err != nil { return err } } { err = writer.WriteObjectValue("values", m.GetValues()) if err != nil { return err } } return nil } // SetIndex sets the index property value. Returns the index number of the row within the rows collection of the table. Zero-indexed. Read-only. func (m *WorkbookTableRow) SetIndex(value *int32)() { m.index = value } // SetValues sets the values property value. Represents the raw values of the specified range. The data returned could be of type string, number, or a boolean. Cell that contain an error will return the error string. func (m *WorkbookTableRow) SetValues(value *Json)() { m.values = value }
models/microsoft/graph/workbook_table_row.go
0.717309
0.454896
workbook_table_row.go
starcoder
package coordtransform

import (
	"math"
)

const (
	// offset is the first eccentricity squared of the reference ellipsoid
	// used by the GCJ-02 obfuscation (presumably Krasovsky 1940 — confirm).
	offset = 0.00669342162296594323
	// axis is the semi-major axis of that ellipsoid, in meters.
	axis = 6378245.0
)

// IsOutOFChina range check: reports whether the point lies outside a rough
// bounding box of China. The GCJ-02 obfuscation only applies inside it.
func IsOutOFChina(p Point) bool {
	lon, lat := p.Lon, p.Lat
	return !(lon > 72.004 && lon < 135.05 && lat > 3.86 && lat < 53.55)
}

// delta returns p shifted by the GCJ-02 obfuscation offset.
func delta(p Point) Point {
	lon, lat := p.Lon, p.Lat
	// transform computes the raw perturbation. Note the field semantics:
	// tp.Lon carries the latitude delta and tp.Lat the longitude delta —
	// transform's x uses the latitude polynomial and is stored first.
	tp := transform(Point{lon - 105.0, lat - 35.0})
	dlat, dlon := tp.Lon, tp.Lat
	radlat := lat / 180.0 * math.Pi
	magic := math.Sin(radlat)
	magic = 1 - offset*magic*magic
	sqrtmagic := math.Sqrt(magic)
	// Convert the perturbation from meters to degrees using the local
	// meridional (lat) and transverse (lon) radii of curvature.
	dlat = (dlat * 180.0) / ((axis * (1 - offset)) / (magic * sqrtmagic) * math.Pi)
	dlon = (dlon * 180.0) / (axis / sqrtmagic * math.Cos(radlat) * math.Pi)
	return Point{
		Lon: lon + dlon,
		Lat: lat + dlat,
	}
}

// transform location transform: the GCJ-02 perturbation polynomials.
// The returned Point{x, y} holds x = latitude perturbation and
// y = longitude perturbation (see the note in delta).
func transform(p Point) Point {
	lon, lat := p.Lon, p.Lat
	var lonlat = lon * lat
	var absX = math.Sqrt(math.Abs(lon))
	var lonPi, latPi = lon * math.Pi, lat * math.Pi
	var d = 20.0*math.Sin(6.0*lonPi) + 20.0*math.Sin(2.0*lonPi)
	x, y := d, d
	x += 20.0*math.Sin(latPi) + 40.0*math.Sin(latPi/3.0)
	y += 20.0*math.Sin(lonPi) + 40.0*math.Sin(lonPi/3.0)
	x += 160.0*math.Sin(latPi/12.0) + 320*math.Sin(latPi/30.0)
	y += 150.0*math.Sin(lonPi/12.0) + 300.0*math.Sin(lonPi/30.0)
	x *= 2.0 / 3.0
	y *= 2.0 / 3.0
	x += -100.0 + 2.0*lon + 3.0*lat + 0.2*lat*lat + 0.1*lonlat + 0.2*absX
	y += 300.0 + lon + 2.0*lat + 0.1*lon*lon + 0.1*lonlat + 0.1*absX
	return Point{x, y}
}

// WGS84toGCJ02 converts WGS-84 coordinates to GCJ-02 ("Mars") coordinates.
// NOTE(review): points outside China return the zero Point rather than the
// input unchanged — most implementations pass the input through; confirm
// callers expect this.
func WGS84toGCJ02(p Point) Point {
	if IsOutOFChina(p) {
		return Point{}
	}
	return delta(p)
}

// GCJ02toWGS84 converts GCJ-02 ("Mars") coordinates back to WGS-84.
// This is the standard single-step approximate inverse: apply the forward
// offset once and subtract it twice. Same zero-Point caveat as WGS84toGCJ02.
func GCJ02toWGS84(p Point) Point {
	if IsOutOFChina(p) {
		return Point{}
	}
	m := delta(p)
	return Point{
		Lon: p.Lon*2 - m.Lon,
		Lat: p.Lat*2 - m.Lat,
	}
}

// BD09toWGS84 converts Baidu BD-09 coordinates to WGS-84 via GCJ-02.
// NOTE(review): BD09toGCJ02 is defined elsewhere in this package.
func BD09toWGS84(p Point) Point {
	return GCJ02toWGS84(BD09toGCJ02(p))
}

// WGS84toBD09 converts WGS-84 coordinates to Baidu BD-09 via GCJ-02.
// NOTE(review): GCJ02toBD09 is defined elsewhere in this package.
func WGS84toBD09(p Point) Point {
	return GCJ02toBD09(WGS84toGCJ02(p))
}

// ToMercator projects lon/lat degrees to spherical pseudo-Mercator meters
// (EPSG:3857-style; 20037508.34 is half the projected circumference).
func ToMercator(a Point) Point {
	x, y := a.Lon, a.Lat
	x = x * 20037508.34 / 180
	y = math.Log(math.Tan((90+y)*math.Pi/360)) / (math.Pi / 180)
	y = y * 20037508.34 / 180
	return Point{x, y}
}

// FromMercator is the inverse of ToMercator: pseudo-Mercator meters back to
// lon/lat degrees.
func FromMercator(a Point) Point {
	x, y := a.Lon, a.Lat
	x = x / 20037508.34 * 180
	y = y / 20037508.34 * 180
	y = 180 / math.Pi * (2*math.Atan(math.Exp(y*math.Pi/180)) - math.Pi/2)
	return Point{x, y}
}
coordtransform.go
0.543106
0.424412
coordtransform.go
starcoder
package p174 import "fmt" /** The demons had captured the princess (P) and imprisoned her in the bottom-right corner of a dungeon. The dungeon consists of M x N rooms laid out in a 2D grid. Our valiant knight (K) was initially positioned in the top-left room and must fight his way through the dungeon to rescue the princess. The knight has an initial health point represented by a positive integer. If at any point his health point drops to 0 or below, he dies immediately. Some of the rooms are guarded by demons, so the knight loses health (negative integers) upon entering these rooms; other rooms are either empty (0's) or contain magic orbs that increase the knight's health (positive integers). In order to reach the princess as quickly as possible, the knight decides to move only rightward or downward in each step. Write a function to determine the knight's minimum initial health so that he is able to rescue the princess. For example, given the dungeon below, the initial health of the knight must be at least 7 if he follows the optimal path RIGHT-> RIGHT -> DOWN -> DOWN. -2 (K) -3 3 -5 -10 1 10 30 -5 (P) Notes: The knight's health has no upper bound. Any room can contain threats or power-ups, even the first room the knight enters and the bottom-right room where the princess is imprisoned. */ func max(a, b int) int { if a > b { return a } return b } func calculateMinimumHP(dungeon [][]int) int { rows := len(dungeon) if rows == 0 { return 0 } cols := len(dungeon[0]) preDp := make([]int, cols) dp := make([]int, cols) for i := rows - 1; i >= 0; i-- { dp, preDp = preDp, dp for j := cols - 1; j >= 0; j-- { if i == rows-1 && j == cols-1 { dp[j] = dungeon[i][j] } else if i == rows-1 { dp[j] = dungeon[i][j] + dp[j+1] } else if j == cols-1 { dp[j] = dungeon[i][j] + preDp[j] } else { dp[j] = max(dungeon[i][j]+preDp[j], dungeon[i][j]+dp[j+1]) } if dp[j] >= 0 { dp[j] = 0 } } fmt.Println(dp) } return 1 - dp[0] }
algorithms/p174/174.go
0.505859
0.707481
174.go
starcoder
package xyml import ( "fmt" "gopkg.in/yaml.v3" ) const ( errNoPos = `expected a node of type %s, instead got type %s` errWithPos = errNoPos + ` @ %d:%d` errKind = `expected a node of kind %s, instead got %s` errKindPos = errKind + ` @ %d:%d` ) // RequireBinary returns an error if the given node is not of type binary. func RequireBinary(y *yaml.Node) error { if !IsBinary(y) { if hasPos(y) { return fmt.Errorf(errWithPos, TagBinary, y.Tag, y.Line, y.Column) } return fmt.Errorf(errNoPos, TagBinary, y.Tag) } return nil } // RequireBool returns an error if the given node is not of type bool. func RequireBool(y *yaml.Node) error { if !IsBool(y) { if hasPos(y) { return fmt.Errorf(errWithPos, TagBool, y.Tag, y.Line, y.Column) } return fmt.Errorf(errNoPos, TagBool, y.Tag) } return nil } // RequireFloat returns an error if the given node is not of type float. func RequireFloat(y *yaml.Node) error { if !IsFloat(y) { if hasPos(y) { return fmt.Errorf(errWithPos, TagFloat, y.Tag, y.Line, y.Column) } return fmt.Errorf(errNoPos, TagFloat, y.Tag) } return nil } // RequireInt returns an error if the given node is not of type int. func RequireInt(y *yaml.Node) error { if !IsInt(y) { if hasPos(y) { return fmt.Errorf(errWithPos, TagInt, y.Tag, y.Line, y.Column) } return fmt.Errorf(errNoPos, TagInt, y.Tag) } return nil } // RequireMap returns an error if the given node is not a mapping node. func RequireMap(y *yaml.Node) error { if !IsMap(y) { if hasPos(y) { return fmt.Errorf(errKindPos, kindToString(yaml.MappingNode), kindToString(y.Kind), y.Line, y.Column) } return fmt.Errorf(errKind, kindToString(yaml.MappingNode), kindToString(y.Kind)) } return nil } // RequireNilType returns an error if the given node is not of type null. 
func RequireNilType(y *yaml.Node) error { if !IsNilType(y) { if hasPos(y) { return fmt.Errorf(errWithPos, TagNil, y.Tag, y.Line, y.Column) } return fmt.Errorf(errNoPos, TagNil, y.Tag) } return nil } // RequireOrderedMap returns an error if the given node is not of type omap. func RequireOrderedMap(y *yaml.Node) error { if !IsOrderedMap(y) { if hasPos(y) { return fmt.Errorf(errWithPos, TagOrderedMap, y.Tag, y.Line, y.Column) } return fmt.Errorf(errNoPos, TagOrderedMap, y.Tag) } return nil } // RequirePairs returns an error if the given node is not of type pairs. func RequirePairs(y *yaml.Node) error { if !IsPairs(y) { if hasPos(y) { return fmt.Errorf(errWithPos, TagPairs, y.Tag, y.Line, y.Column) } return fmt.Errorf(errNoPos, TagPairs, y.Tag) } return nil } // RequireScalar returns an error if the given node is not a scalar node. func RequireScalar(y *yaml.Node) error { if !IsScalar(y) { if hasPos(y) { return fmt.Errorf(errKindPos, kindToString(yaml.ScalarNode), kindToString(y.Kind), y.Line, y.Column) } return fmt.Errorf(errKind, kindToString(yaml.ScalarNode), kindToString(y.Kind)) } return nil } // RequireSequence returns an error if the given node is not a sequence node. func RequireSequence(y *yaml.Node) error { if !IsSequence(y) { if hasPos(y) { return fmt.Errorf(errKindPos, kindToString(yaml.SequenceNode), kindToString(y.Kind), y.Line, y.Column) } return fmt.Errorf(errKind, kindToString(yaml.SequenceNode), kindToString(y.Kind)) } return nil } // RequireTimestamp returns an error if the given node is not of type timestamp. func RequireTimestamp(y *yaml.Node) error { if !IsTimestamp(y) { if hasPos(y) { return fmt.Errorf(errWithPos, TagTimestamp, y.Tag, y.Line, y.Column) } return fmt.Errorf(errNoPos, TagTimestamp, y.Tag) } return nil } // RequireString returns an error if the given node is not of type string. 
func RequireString(y *yaml.Node) error { if !IsString(y) { if hasPos(y) { return fmt.Errorf(errWithPos, TagString, y.Tag, y.Line, y.Column) } return fmt.Errorf(errNoPos, TagString, y.Tag) } return nil } func hasPos(y *yaml.Node) bool { return y.Line > 0 } func kindToString(kind yaml.Kind) string { switch kind { case yaml.DocumentNode: return "document" case yaml.SequenceNode: return "sequence" case yaml.MappingNode: return "mapping" case yaml.ScalarNode: return "scalar" case yaml.AliasNode: return "alias" } return "unknown" }
v1/pkg/xyml/require.go
0.771672
0.561636
require.go
starcoder
package ecs // TraversalMode represents a graph traversal mode. type TraversalMode uint8 const ( // TraverseDFS is Depth First Search traversal, starting from all matching // roots. TraverseDFS TraversalMode = 1 << iota traverseCo // TraverseCoDFS is Reversed Depth First Search traversal, starting from // all matching leaves. TraverseCoDFS = traverseCo | TraverseDFS // TODO: other modes like BFS ) // Traverse returns a new graph travers for the given type clause and mode. func (G *Graph) Traverse(tcl TypeClause, mode TraversalMode) GraphTraverser { switch mode { case TraverseDFS, TraverseCoDFS: return &dfsTraverser{ g: G, tcl: tcl, mode: mode, } default: panic("invalid graph traversal mode") } } // GraphTraverser traverses a graph in some order. type GraphTraverser interface { Init(seed ...EntityID) Traverse() bool G() *Graph Edge() Entity Node() Entity } type dfsTraverser struct { g *Graph tcl TypeClause mode TraversalMode seen map[EntityID]struct{} edge EntityID node EntityID curs []Cursor q []EntityID } func (gt *dfsTraverser) G() *Graph { return gt.g } func (gt *dfsTraverser) Edge() Entity { return gt.g.Ref(gt.edge) } func (gt *dfsTraverser) Node() Entity { return gt.g.aCore.Ref(gt.node) } func (gt *dfsTraverser) Traverse() bool { for gt.traverse() { if _, seen := gt.seen[gt.node]; !seen { gt.seen[gt.node] = struct{}{} return true } } return false } func (gt *dfsTraverser) traverse() bool { if gt.node != 0 { var cur Cursor if gt.mode&traverseCo == 0 { cur = gt.g.Select(gt.tcl, InA(gt.node)) } else { cur = gt.g.Select(gt.tcl, InB(gt.node)) } if cur.Scan() { gt.curs = append(gt.curs, cur) gt.setState(cur) return true } for i := len(gt.curs) - 1; i >= 0; i-- { if cur := gt.curs[i]; cur.Scan() { gt.setState(cur) return true } gt.curs = gt.curs[:i] } gt.edge = 0 gt.node = 0 } if i := len(gt.q) - 1; i >= 0 { gt.node = gt.q[i] gt.q = gt.q[:i] return true } return false } func (gt *dfsTraverser) setState(cur Cursor) { gt.edge = cur.R().id if gt.mode&traverseCo == 0 { 
gt.node = cur.B().id } else { gt.node = cur.A().id } } func (gt *dfsTraverser) Init(seed ...EntityID) { if len(gt.seen) > 0 { for id := range gt.seen { delete(gt.seen, id) } } else { // TODO: shave down this estimate? gt.seen = make(map[EntityID]struct{}, gt.g.Len()) } gt.edge = 0 gt.node = 0 gt.q = gt.q[:0] if len(seed) > 0 { gt.q = append(gt.q, seed...) } else { var ( triset map[EntityID]bool n int ) if gt.mode&traverseCo == 0 { triset, n = gt.g.roots(gt.tcl, nil) } else { triset, n = gt.g.leaves(gt.tcl, nil) } if n <= 0 { return } if cap(gt.q) < n { gt.q = make([]EntityID, 0, n) } for id, in := range triset { if in { gt.q = append(gt.q, id) } } } }
internal/ecs/graph_traversal.go
0.511961
0.446796
graph_traversal.go
starcoder
package stat /** * Statistics for identifier resource. */ type Streamidentifierstats struct { /** * Name of the stream identifier. */ Name string `json:"name,omitempty"` /** * Values on which grouping is performed are displayed in the output as row titles. If grouping is performed on two or more fields, their values are separated by a question mark in the row title. For example, consider a selector that contains the expressions HTTP.REQ.URL and CLIENT.IP.SRC (in that order), on an appliance that has accumulated records of a number of requests for two URLs, example.com/page1.html and example.com/page2.html, from two client IP addresses, 192.0.2.10 and 192.0.2.11. With a pattern of ? ?, the appliance performs grouping on both fields and displays statistics for the following: * Requests for example.com/abc.html from 192.0.2.10, with a row title of example.com/abc.html?192.0.2.10. * Requests for example.com/abc.html from 192.0.2.11, with a row title of example.com/abc.html?192.0.2.11. * Requests for example.com/def.html from 192.0.2.10, with a row title of example.com/def.html?192.0.2.10. * Requests for example.com/def.html from 192.0.2.11, with a row title of example.com/def.html?192.0.2.11. With a pattern of * ?, the appliance performs grouping on only the client IP address values and displays statistics for the following requests: * All requests from 192.0.2.10, with the IP address as the row title. * All requests from 192.0.2.11, with the IP address as the row title. With a pattern of ? *, the appliance performs grouping on only the URL values and displays statistics for the following requests: * All requests for example.com/abc.html, with the URL as the row title. * All requests for example.com/def.html, with the URL as the row title. With a pattern of * *, the appliance displays one set of collective statistics for all the requests received, with no row title. 
With a pattern of example.com/abc.html ?, the appliance displays statistics for requests for example.com/abc.html from each unique client IP address. With a pattern of * 192.0.2.11, the appliance displays statistics for all requests from 192.0.2.11. */ Pattern []string `json:"pattern,omitempty"` /** * Clear the statsistics / counters */ Clearstats string `json:"clearstats,omitempty"` /** * use this argument to sort by specific key */ Sortby string `json:"sortby,omitempty"` /** * use this argument to specify sort order */ Sortorder string `json:"sortorder,omitempty"` Streamobjreq int `json:"streamobjreq,omitempty"` Streamobjbandw int `json:"streamobjbandw,omitempty"` Streamobjresptime int `json:"streamobjresptime,omitempty"` Streamobjconn int `json:"streamobjconn,omitempty"` Streamobjbreachcnt int `json:"streamobjbreachcnt,omitempty"` Streamobjpktcredits int `json:"streamobjpktcredits,omitempty"` Streamobjpktspersecond int `json:"streamobjpktspersecond,omitempty"` Streamobjdroppedconns int `json:"streamobjdroppedconns,omitempty"` }
resource/stat/streamidentifier_stats.go
0.760117
0.428771
streamidentifier_stats.go
starcoder
package main import ( "errors" "fmt" ) // piece is a higolot daily calendar puzzle piece. type piece [][]bool // width returns the number of grid columns required to accommodate the piece. func (p *piece) width() int { return len((*p)[0]) } // height returns the number of grid rows required to accommodate the piece. func (p *piece) height() int { return len(*p) } // getEmptyRotatedArray returns an empty rotated copy of the piece's representation. func (p *piece) getEmptyRotatedArray() piece { newArr := make([][]bool, p.width()) for i := range newArr { newArr[i] = make([]bool, p.height()) } return newArr } // transpose transposes the piece. func (p *piece) transpose() { newArr := p.getEmptyRotatedArray() for iy := 0; iy < p.height(); iy++ { for ix := 0; ix < p.width(); ix++ { newArr[ix][iy] = (*p)[iy][ix] } } *p = newArr } // grid is a higolot daily calendar layout. type grid [][]string // width returns the grid width in piece spaces. func (g *grid) width() int { return len((*g)[0]) } // height returns the grid height in piece spaces. func (g *grid) height() int { return len(*g) } // setTarget marks the grid with the given target month, day number and day name. // i.e. the target spots of the grid are blanked out. // An error is returned if it's unable to set all three targets. func (g *grid) setTarget(month, dayNum, dayName string) error { setNum := 0 for iy, r := range *g { for ix, x := range r { if x == month || x == dayNum || x == dayName { (*g)[iy][ix] = "" setNum++ } } } if setNum != 3 { return errors.New("failed to set target") } return nil } // puzzle is a higolot daily calendar. type puzzle struct { grid grid pieces []piece } // solve tries to solve the puzzle for the given month and day number and name. func (p *puzzle) solve(month, dayNum, dayName string) { p.grid.setTarget(month, dayNum, dayName) // TODO.. 
} var p puzzle = puzzle{ grid: grid{ {"Jan", "Feb", "Mar", "Apr", "May", "Jun", ""}, {"Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ""}, {"1", "2", "3", "4", "5", "6", "7"}, {"8", "9", "10", "11", "12", "13", "14"}, {"15", "16", "17", "18", "19", "20", "21"}, {"22", "23", "24", "25", "26", "27", "28"}, {"29", "30", "31", "Sun", "Mon", "Tues", "Wed"}, {"", "", "", "", "Thur", "Fri", "Sat"}, }, pieces: []piece{ { {true, true, true}, {true, false, false}, {true, false, false}, }, { {true, false}, {true, false}, {true, false}, {true, true}, }, { {true, true, false}, {false, true, true}, }, { {true, false, false}, {true, true, true}, {true, false, false}, }, { {false, true, true, true}, {true, true, false, false}, }, { {true, true, true}, {true, true, false}, }, { {true, true, true}, {true, false, true}, }, { {false, true}, {false, true}, {true, true}, }, { {true, false, false}, {true, true, true}, {false, false, true}, }, { {true}, {true}, {true}, {true}, }, }, } func main() { var month, dayNum, day string fmt.Println("Enter the month to solve for e.g. 'Jan' or 'Feb': ") fmt.Scanln(&month) fmt.Println("Enter the day number solve for e.g. '1' or '10': ") fmt.Scanln(&dayNum) fmt.Println("Enter the day to solve for e.g. 'Mon' or 'Tues': ") fmt.Scanln(&day) // TODO: Validation (just handle the error from setting target?)? p.solve(month, dayNum, day) }
solver.go
0.618896
0.468791
solver.go
starcoder
package quantumchess import "fmt" //InvalidMove is an error returned when we try to perform an invalid move on the board. // Returns the square we want to move to. type InvalidMove int //InvalidPiece is an error returned when we expect a piece to be on the board, but it isn't // Returns the position where we expected a piece. type InvalidPiece int //InvalidPieceAccess is an error returned when we get a piece ID from the board but it does not exist in the struct Pieces.List. // Returns the piece ID of the piece that does not exist. type InvalidPieceAccess int //InvalidAction is an error returned when we parse a string and it is not one of the defined quantum actions. // Returns the unexpected string. type InvalidAction string //InvalidEntanglementDelete is an error returned when we try to delete a piece from entanglements that is still entangled. // Returns the piece id of such a piece. type InvalidEntanglementDelete int //InvalidMissingState is an error returned when a quantum piece has all zero states. // Returns the piece's state space. 
type InvalidMissingState []string //InvalidDeterminedState is an error returned when a quantum piece is passed in as a mixed state // but is actually in a determined state type InvalidDeterminedState []string //InvalidSetState is an error returned when setting a state has too many values // returns the state space of the piece whose state we tried to set type InvalidSetState []string func (e InvalidMove) Error() string { return fmt.Sprintf("Illegal move to position %d", e) } func (e InvalidPiece) Error() string { return fmt.Sprintf(" Illegal piece on Board at position %d", e) } func (e InvalidPieceAccess) Error() string { return fmt.Sprintf("Illegal piece accessed in pieces struct for id %d", e) } func (e InvalidAction) Error() string { s := e[:] return fmt.Sprintf("Unrecognized action: %v", s) } func (e InvalidEntanglementDelete) Error() string { return fmt.Sprintf("Cannot delete id %d from entanglements: still entangled", e) } func (e InvalidMissingState) Error() string { s := e[:] return fmt.Sprintf("States `%v` are both 0 ", s) } func (e InvalidDeterminedState) Error() string { s := e[:] return fmt.Sprintf("%v state was passed in as a mixed state", s) } func (e InvalidSetState) Error() string { s := e[:] return fmt.Sprintf("Tried to set state of %v but failed", s) }
pkg/quantumchess/error.go
0.839043
0.585842
error.go
starcoder
// Package queueimpl6 implements an unbounded, dynamically growing FIFO queue. // Internally, queue store the values in fixed sized slices that are linked using a singly linked list. // This implementation tests the queue performance when performing lazy creation of the first slice as // well as starting with an slice of size 1 and doubling its size up to 128. package queueimpl6 const ( // maxInternalSliceSize holds the maximum size of each internal slice. maxInternalSliceSize = 128 ) // Queueimpl6 represents an unbounded, dynamically growing FIFO queue. // The zero value for queue is an empty queue ready to use. type Queueimpl6 struct { // Head points to the first node of the linked list. head *Node // Tail points to the last node of the linked list. // In an empty queue, head and tail points to the same node. tail *Node // Hp is the index pointing to the current first element in the queue // (i.e. first element added in the current queue values). hp int // Tp is the index pointing to the current last element in the queue // (i.e. last element added in the current queue values). tp int // Len holds the current queue values length. len int // lastsliceReize holds the size of the last created internal slice. lastsliceSize int // lasthp holds the last position on the current head slice. lasthp int // lasttp holds the last position on the current tail slice. lasttp int } // Node represents a queue node. // Each node holds an slice of user managed values. type Node struct { // v holds the list of user added values in this node. v []interface{} // n points to the next node in the linked list. n *Node } // New returns an initialized queue. func New() *Queueimpl6 { return new(Queueimpl6).Init() } // Init initializes or clears queue q. func (q *Queueimpl6) Init() *Queueimpl6 { q.head = nil q.tail = nil q.hp = 0 q.tp = 0 q.len = 0 q.lasthp = 0 q.lasttp = 0 q.lastsliceSize = 1 return q } // Len returns the number of elements of queue q. // The complexity is O(1). 
func (q *Queueimpl6) Len() int { return q.len } // Front returns the first element of list l or nil if the list is empty. // The second, bool result indicates whether a valid value was returned; // if the queue is empty, false will be returned. // The complexity is O(1). func (q *Queueimpl6) Front() (interface{}, bool) { if q.len == 0 { return nil, false } return q.head.v[q.hp], true } // Push adds a value to the queue. // The complexity is O(1). func (q *Queueimpl6) Push(v interface{}) { if q.head == nil { q.Init() h := q.newNode() q.head = h q.tail = h } if q.tp > q.lasttp { n := q.newNode() q.tail.n = n q.tail = n q.tp = 0 } q.tail.v[q.tp] = v q.tp++ q.len++ } // Pop retrieves and removes the next element from the queue. // The second, bool result indicates whether a valid value was returned; // if the queue is empty, false will be returned. // The complexity is O(1). func (q *Queueimpl6) Pop() (interface{}, bool) { if q.len == 0 { return nil, false } v := q.head.v[q.hp] q.head.v[q.hp] = nil // Avoid memory leaks q.len-- q.hp++ if q.hp > q.lasthp { n := q.head.n q.head.n = nil // Avoid memory leaks q.head = n q.hp = 0 if n != nil { q.lasthp = len(n.v) - 1 } } return v, true } // newNode returns an initialized node. func (q *Queueimpl6) newNode() *Node { if q.lastsliceSize <= maxInternalSliceSize { q.lasttp = q.lastsliceSize - 1 n := &Node{ v: make([]interface{}, q.lastsliceSize), } q.lastsliceSize *= 2 return n } return &Node{ v: make([]interface{}, maxInternalSliceSize), } }
queueimpl6/queueimpl6.go
0.84412
0.58886
queueimpl6.go
starcoder
package timedata import ( "math" "sort" "time" ) // ResampleTimeSeriesData resamples the given [timestamp,value] data to numsteps between start-end (returns numSteps+1 points). // If the data does not extend past start/end then there will likely be NaN in the output data. func ResampleTimeSeriesData(data [][]float64, start float64, end float64, numSteps int) [][]float64 { var newData [][]float64 l := len(data) step := (end - start) / float64(numSteps) for pos := start; pos <= end; pos += step { idx := sort.Search(l, func(i int) bool { return data[i][0] >= pos }) var val float64 if idx == 0 { val = math.NaN() // off the left } else if idx == l { val = math.NaN() // off the right } else { // between two points - linear interpolation left := data[idx-1] right := data[idx] dvdt := (right[1] - left[1]) / (right[0] - left[0]) val = left[1] + (pos-left[0])*dvdt } newData = append(newData, []float64{pos, val}) } return newData } // CalculateTimeQuantum determines the given [timestamp,value] data func CalculateTimeQuantum(data [][]float64) time.Duration { if len(data) > 1 { minTime := time.UnixMilli(int64(data[0][0])) maxTime := time.UnixMilli(int64(data[len(data)-1][0])) return time.Duration(int64(maxTime.Sub(minTime)) / int64(len(data)-1)) } return 0 } // BuildTimeSeriesLabels returns a list of short labels representing time values from the given [timestamp,value] data func BuildTimeSeriesLabels(data [][]float64) []string { minTime := time.UnixMilli(int64(data[0][0])) maxTime := time.UnixMilli(int64(data[len(data)-1][0])) timeRange := maxTime.Sub(minTime) var timeFormat string if timeRange.Hours() < 24 { timeFormat = "15:04" } else if timeRange.Hours() < 24*7 { timeFormat = "Mon 15:04" } else if timeRange.Hours() < 24*365 { timeFormat = "02-Jan" } else { timeFormat = "Jan 2006" } var labels []string for i := range data { labels = append(labels, time.UnixMilli(int64(data[i][0])).Format(timeFormat)) } return labels }
pkg/timedata/timedata.go
0.728555
0.633481
timedata.go
starcoder
package kernelsupport type kernelFeatureVersion struct { version KernelVersion features KernelFeatures } // a list of eBPF kernel features which are available from a given kernel version forward. // largely based on https://github.com/iovisor/bcc/blob/master/docs/kernel-versions.md var featureMinVersion = []kernelFeatureVersion{ { version: KernelVersion{Major: 3, Minor: 16}, features: KernelFeatures{ Arch: KFeatArchx86_64, }, }, { version: KernelVersion{Major: 3, Minor: 18}, features: KernelFeatures{ Arch: KFeatArchARM64, API: KFeatAPIBasic, }, }, { version: KernelVersion{Major: 3, Minor: 19}, features: KernelFeatures{ Map: KFeatMapHash | KFeatMapArray, Program: KFeatProgSocketFilter, Attach: KFeatAttachINetIngressEgress, }, }, { version: KernelVersion{Major: 4, Minor: 1}, features: KernelFeatures{ Arch: KFeatArchs390, Program: KFeatProgKProbe | KFeatProgSchedCLS | KFeatProgSchedACT, }, }, { version: KernelVersion{Major: 4, Minor: 2}, features: KernelFeatures{ Map: KFeatMapTailCall, }, }, { version: KernelVersion{Major: 4, Minor: 3}, features: KernelFeatures{ Map: KFeatMapPerfEvent, }, }, { version: KernelVersion{Major: 4, Minor: 4}, features: KernelFeatures{ API: KFeatAPIObjPinGet, }, }, { version: KernelVersion{Major: 4, Minor: 6}, features: KernelFeatures{ Map: KFeatMapPerCPUHash | KFeatMapPerCPUArray | KFeatMapStackTrace, }, }, { version: KernelVersion{Major: 4, Minor: 7}, features: KernelFeatures{ Program: KFeatProgTracepoint, }, }, { version: KernelVersion{Major: 4, Minor: 8}, features: KernelFeatures{ Arch: KFeatArchPP64, Map: KFeatMapCGroupArray, Program: KFeatProgXDP, }, }, { version: KernelVersion{Major: 4, Minor: 9}, features: KernelFeatures{ Program: KFeatProgPerfEvent, }, }, { version: KernelVersion{Major: 4, Minor: 10}, features: KernelFeatures{ Map: KFeatMapLRUHash | KFeatMapLRUPerCPUHash, Program: KFeatProgCGroupSKB | KFeatProgCGroupSocket | KFeatProgLWTIn | KFeatProgLWTOut | KFeatProgLWTXmit, Attach: KFeatAttachInetSocketCreate, API: 
KFeatAPIProgramAttachDetach, }, }, { version: KernelVersion{Major: 4, Minor: 11}, features: KernelFeatures{ Map: KFeatMapLPMTrie, }, }, { version: KernelVersion{Major: 4, Minor: 12}, features: KernelFeatures{ Arch: KFeatArchSparc64, Map: KFeatMapArrayOfMaps | KFeatMapHashOfMaps, API: KFeatAPIMapGetNextNull | KFeatAPIProgramTestRun, }, }, { version: KernelVersion{Major: 4, Minor: 13}, features: KernelFeatures{ Arch: KFeatArchMIPS, Program: KFeatProgSocketOps, Attach: KFeatAttachSocketOps, API: KFeatAPIProgramGetNextID | KFeatAPIMapGetNextID | KFeatAPIProgramGetFDByID | KFeatAPIMapGetFDByID | KFeatAPIObjectGetInfoByFD, }, }, { version: KernelVersion{Major: 4, Minor: 14}, features: KernelFeatures{ Arch: KFeatArchARM32, Map: KFeatMapNetdevArray | KFeatMapSocketArray, API: KFeatAPIMapNumaCreate, Program: KFeatProgSKSKB, Attach: KFeatAttachStreamParserVerdict, }, }, { version: KernelVersion{Major: 4, Minor: 15}, features: KernelFeatures{ Map: KFeatMapCPU, API: KFeatAPIMapSyscallRW | KFeatAPIMapName | KFeatAPIProgramQuery, Program: KFeatProgCGroupDevice, Attach: KFeatAttachCGroupDevice, }, }, { version: KernelVersion{Major: 4, Minor: 16}, features: KernelFeatures{ Map: KFeatMapLPMTrieNextKey, }, }, { version: KernelVersion{Major: 4, Minor: 17}, features: KernelFeatures{ Program: KFeatProgSKMsg | KFeatProgRawTracepoint | KFeatProgCGroupSocketAddr, Attach: KFeatAttachSKMsgVerdict | KFeatAttachCGroupInetBind | KFeatAttachCGroupInetConnect | KFeatAttachCGroupInetPostBind, API: KFeatAPIRawTracepointOpen, }, }, { version: KernelVersion{Major: 4, Minor: 18}, features: KernelFeatures{ Arch: KFeatArchx86, Map: KFeatMapAFXDP | KFeatMapSocketHash, Program: KFeatProgLWTSeg6Local | KFeatProgLIRCMode2, Attach: KFeatAttachCGroupUDPSendMsg | KFeatAttachLIRCMode2, API: KFeatAPIBTFLoad | KFeatAPIBTFGetFDByID | KFeatAPITaskFDQuery, }, }, { version: KernelVersion{Major: 4, Minor: 19}, features: KernelFeatures{ Map: KFeatMapCGroupStorage | KFeatMapReuseportSocketArray, Program: 
KFeatProgSKReusePort, }, }, { version: KernelVersion{Major: 4, Minor: 20}, features: KernelFeatures{ Map: KFeatMapPerCPUCGroupStorage | KFeatMapQueue | KFeatMapStack, API: KFeatAPIMapLookupAndDelete, Program: KFeatProgFlowDissector, Attach: KFeatAttachFlowDissector, }, }, { version: KernelVersion{Major: 5, Minor: 0}, features: KernelFeatures{ API: KFeatAPIMapZeroSeed, }, }, { version: KernelVersion{Major: 5, Minor: 1}, features: KernelFeatures{ Arch: KFeatArchRiscVRV64G, API: KFeatAPIMapLock, }, }, { version: KernelVersion{Major: 5, Minor: 2}, features: KernelFeatures{ Map: KFeatMapSocketLocalStorage, API: KFeatAPIMapBPFRW | KFeatAPIMapFreeze, Program: KFeatProgCGroupSysctl | KFeatProgRawTracepointWritable, Attach: KFeatAttachCGroupSysctl | KFeatAttachCGroupUDPRecvMsg, Misc: KFeatGlobalData, }, }, { version: KernelVersion{Major: 5, Minor: 3}, features: KernelFeatures{ Program: KFeatProgCgroupSocketOpt, Attach: KFeatAttachCGroupGetSetSocket, }, }, { version: KernelVersion{Major: 5, Minor: 4}, features: KernelFeatures{ Map: KFeatMapNetdevHash, API: KFeatAPIBTFGetNextID, Misc: KFeatMiscXSKRingFlags, }, }, { version: KernelVersion{Major: 5, Minor: 5}, features: KernelFeatures{ API: KFeatAPIMapMMap, Program: KFeatProgTracing, Attach: KFeatAttachTraceRawTP | KFeatAttachTraceFentry | KFeatAttachTraceFExit, }, }, { version: KernelVersion{Major: 5, Minor: 6}, features: KernelFeatures{ Map: KFeatMapStructOps, API: KFeatAPIMapBatchOps, Program: KFeatProgStructOps | KFeatProgExt, Misc: KFeatBTFFuncScope, }, }, { version: KernelVersion{Major: 5, Minor: 7}, features: KernelFeatures{ Arch: KFeatArchRiscVRV32G, Program: KFeatProgLSM, Attach: KFeatAttachModifyReturn | KFeatAttachLSMMAC, API: KFeatAPILinkCreate | KFeatAPILinkUpdate, }, }, { version: KernelVersion{Major: 5, Minor: 8}, features: KernelFeatures{ Map: KFeatMapRingBuffer, Attach: KFeatAttachTraceIter | KFeatAttachCGroupINetGetPeerName | KFeatAttachCGroupINetGetSocketName | KFeatAttachXDPDevMap, API: KFeatAPILinkGetFDByID 
| KFeatAPILinkGetNextID | KFeatAPIEnableStats | KFeatAPIIterCreate, }, }, { version: KernelVersion{Major: 5, Minor: 9}, features: KernelFeatures{ Program: KFeatProgSKLookup, Attach: KFeatAttachCGroupInetSocketRelease | KFeatAttachXDPCPUMap | KFeatAttachSKLookup | KFeatAttachXDP, API: KFeatAPILinkDetach, }, }, { version: KernelVersion{Major: 5, Minor: 10}, features: KernelFeatures{ Map: KFeatMapINodeStorage | KFeatMapDynamicInnerMap, API: KFeatAPIProgBindMap, }, }, { version: KernelVersion{Major: 5, Minor: 11}, features: KernelFeatures{ Map: KFeatMapTaskStorage, }, }, { version: KernelVersion{Major: 5, Minor: 13}, features: KernelFeatures{ Map: KFeatMapPerCPUArrayBatchOps | KFeatMapLPMTrieBatchOps, }, }, }
kernelsupport/versions.go
0.759582
0.41182
versions.go
starcoder
package golisp2

import "fmt"

type (
	// ArgMapper is a utility that makes it easier to map lists of values to
	// typed variables, accumulating any type/arity errors along the way.
	ArgMapper struct {
		iter valueIterator
		err  error
	}

	// valueIterator is a generic way to traverse/process a set of value-like
	// objects.
	valueIterator interface {
		// Next returns the next value in the iterator. If none are left, (nil, nil)
		// will be returned.
		Next() (Value, error)
	}

	// valueSet implements valueIterator through simply iterating through the set.
	valueSet struct {
		vals     []Value
		argIndex int
	}

	// exprSet implements valueIterator by evaluating expressions on demand.
	exprSet struct {
		ec       *EvalContext
		exprs    []Expr
		argIndex int
	}
)

// ArgMapperValues creates an argument mapper for the provided values.
func ArgMapperValues(vals ...Value) *ArgMapper {
	return &ArgMapper{
		iter: &valueSet{
			vals:     vals,
			argIndex: 0,
		},
		err: nil,
	}
}

// ArgMapperExprs creates an argument mapper for the provided context/expr set.
func ArgMapperExprs(ec *EvalContext, exprs []Expr) *ArgMapper {
	return &ArgMapper{
		iter: &exprSet{
			ec:       ec,
			exprs:    exprs,
			argIndex: 0,
		},
		err: nil,
	}
}

// ReadString will try to read the next argument as a string value, or report an
// error.
func (am *ArgMapper) ReadString(v **StringValue) *ArgMapper {
	switch tV := am.next().(type) {
	case *StringValue:
		*v = tV
	default:
		am.err = fmt.Errorf("ArgMapper: type error - expected string, got %T", tV)
	}
	return am
}

// ReadBool will try to read the next argument as a bool value, or report an
// error.
func (am *ArgMapper) ReadBool(v **BoolValue) *ArgMapper {
	switch tV := am.next().(type) {
	case *BoolValue:
		*v = tV
	default:
		am.err = fmt.Errorf("ArgMapper: type error - expected bool, got %T", tV)
	}
	return am
}

// ReadFunc will try to read the next argument as a func value, or report an
// error.
func (am *ArgMapper) ReadFunc(v **FuncValue) *ArgMapper {
	switch tV := am.next().(type) {
	case *FuncValue:
		*v = tV
	default:
		am.err = fmt.Errorf("ArgMapper: type error - expected func, got %T", tV)
	}
	return am
}

// ReadNumber will try to read the next argument as a number value, or report an
// error.
func (am *ArgMapper) ReadNumber(v **NumberValue) *ArgMapper {
	switch tV := am.next().(type) {
	case *NumberValue:
		*v = tV
	default:
		am.err = fmt.Errorf("ArgMapper: type error - expected number, got %T", tV)
	}
	return am
}

// ReadCell will try to read the next argument as a cell value, or report an
// error.
func (am *ArgMapper) ReadCell(v **CellValue) *ArgMapper {
	switch tV := am.next().(type) {
	case *CellValue:
		*v = tV
	default:
		am.err = fmt.Errorf("ArgMapper: type error - expected cell, got %T", tV)
	}
	return am
}

// ReadList will try to read the next argument as a list value, or report an
// error.
func (am *ArgMapper) ReadList(v **ListValue) *ArgMapper {
	switch tV := am.next().(type) {
	case *ListValue:
		*v = tV
	default:
		am.err = fmt.Errorf("ArgMapper: type error - expected list, got %T", tV)
	}
	return am
}

// ReadMap will try to read the next argument as a map value, or report an
// error.
func (am *ArgMapper) ReadMap(v **MapValue) *ArgMapper {
	switch tV := am.next().(type) {
	case *MapValue:
		*v = tV
	default:
		am.err = fmt.Errorf("ArgMapper: type error - expected map, got %T", tV)
	}
	return am
}

// ReadValue will try to read the next argument as any value, or report an
// error.
func (am *ArgMapper) ReadValue(v *Value) *ArgMapper {
	if nextV := am.next(); nextV != nil {
		*v = nextV
	}
	return am
}

// MaybeReadValue will try to read the next argument as any value, if one is
// present. No error is reported if the arguments are exhausted.
func (am *ArgMapper) MaybeReadValue(v *Value) *ArgMapper {
	if nextV := am.maybeNext(); nextV != nil {
		*v = nextV
	}
	return am
}

// ReadNumbers will try to read the remaining arguments as number values, or
// report an error.
func (am *ArgMapper) ReadNumbers(v *[]*NumberValue) *ArgMapper {
	nums := []*NumberValue{}
numLoop:
	for {
		// note: renamed from `v` to avoid shadowing the output parameter.
		next := am.maybeNext()
		if next == nil {
			break
		}
		switch tV := next.(type) {
		case *NumberValue:
			nums = append(nums, tV)
		default:
			am.err = fmt.Errorf("ArgMapper: type error - expected number, got %T", tV)
			// A plain `break` here would only exit the switch, not the loop.
			break numLoop
		}
	}
	*v = nums
	return am
}

// ReadStrings will try to read the remaining arguments as string values, or
// report an error.
func (am *ArgMapper) ReadStrings(v *[]*StringValue) *ArgMapper {
	strs := []*StringValue{}
strLoop:
	for {
		next := am.maybeNext()
		if next == nil {
			break
		}
		switch tV := next.(type) {
		case *StringValue:
			strs = append(strs, tV)
		default:
			// fixed copy-paste bug: message previously said "expected number"
			am.err = fmt.Errorf("ArgMapper: type error - expected string, got %T", tV)
			break strLoop
		}
	}
	*v = strs
	return am
}

// ReadBools will try to read the remaining arguments as bool values, or
// report an error.
func (am *ArgMapper) ReadBools(v *[]*BoolValue) *ArgMapper {
	bools := []*BoolValue{}
boolLoop:
	for {
		next := am.maybeNext()
		if next == nil {
			break
		}
		switch tV := next.(type) {
		case *BoolValue:
			bools = append(bools, tV)
		default:
			// fixed copy-paste bug: message previously said "expected number"
			am.err = fmt.Errorf("ArgMapper: type error - expected bool, got %T", tV)
			break boolLoop
		}
	}
	*v = bools
	return am
}

// Complete will return any errors encountered during the mapping; and add a new
// error if there are still unprocessed arguments remaining.
func (am *ArgMapper) Complete() error {
	remaining := am.maybeNext()
	if remaining != nil {
		am.err = fmt.Errorf(
			"ArgMapper: unprocessed arguments remaining at end of mapping")
	}
	return am.err
}

// Err returns any encountered errors during the mapping.
func (am *ArgMapper) Err() error {
	return am.err
}

// next returns the next argument value, recording an error if the arguments
// are exhausted.
func (am *ArgMapper) next() Value {
	nextV := am.maybeNext()
	if nextV == nil {
		// note (bs): this is a little imprecise; may wish to make it possible to
		// better label the source of errors. That's a broader problem than just
		// this; really.
		am.err = fmt.Errorf("ArgMapper: not enough arguments")
	}
	return nextV
}

// maybeNext returns the next argument value, or nil if the arguments are
// exhausted or an error has already been recorded.
func (am *ArgMapper) maybeNext() Value {
	if am.err != nil {
		return nil
	}
	nextV, nextVErr := am.iter.Next()
	if nextVErr != nil {
		am.err = nextVErr
		return nil
	}
	return nextV
}

// Next returns the next stored value, or (nil, nil) when exhausted.
func (vs *valueSet) Next() (Value, error) {
	if vs.argIndex >= len(vs.vals) {
		return nil, nil
	}
	v := vs.vals[vs.argIndex]
	vs.argIndex++
	return v, nil
}

// Next evaluates and returns the next expression, or (nil, nil) when exhausted.
func (es *exprSet) Next() (Value, error) {
	if es.argIndex >= len(es.exprs) {
		return nil, nil
	}
	v, err := es.exprs[es.argIndex].Eval(es.ec)
	es.argIndex++
	return v, err
}
arg_mapper.go
0.587825
0.476641
arg_mapper.go
starcoder
package vector

import (
	"math"
)

// min returns the smaller of two ints.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// Vector represents a mathematical vector.
type Vector []float64

// New returns a zeroed vector of the specified size.
func New(size int) Vector {
	return make(Vector, size)
}

// NewWithValues returns a vector holding a copy of the specified values.
// The size of the new vector equals the length of the input slice.
func NewWithValues(values []float64) Vector {
	result := make(Vector, len(values))
	copy(result, values)
	return result
}

// Clone returns a copy of this vector as a new Vector.
func (v Vector) Clone() Vector {
	return NewWithValues(v)
}

// Set copies the given values into this vector.
func (v Vector) Set(values []float64) {
	copy(v, values)
}

// Scale multiplies every element of this vector by the given scalar, in place.
func (v Vector) Scale(value float64) {
	for i := range v {
		v[i] *= value
	}
}

// Magnitude returns the Euclidean length of this vector.
func (v Vector) Magnitude() float64 {
	sum := 0.0
	for _, e := range v {
		sum += e * e
	}
	return math.Sqrt(sum)
}

// Zero resets every element of this vector to zero.
func (v Vector) Zero() {
	for i := range v {
		v[i] = 0.0
	}
}

// Do applies the specified function to each element, storing the result in
// place.
func (v Vector) Do(applyFn func(float64) float64) {
	for i := range v {
		v[i] = applyFn(v[i])
	}
}

// DoWithIndex applies the specified index-aware function to each element,
// storing the result in place.
func (v Vector) DoWithIndex(applyFn func(int, float64) float64) {
	for i := range v {
		v[i] = applyFn(i, v[i])
	}
}

// Add adds another vector and returns the result as a new vector.
// The other vector must have the same dimensionality.
func (v Vector) Add(other Vector) (Vector, error) {
	if len(v) != len(other) {
		return nil, ErrVectorNotSameSize
	}
	n := min(len(v), len(other))
	sum := make(Vector, n)
	for i := 0; i < n; i++ {
		sum[i] = v[i] + other[i]
	}
	return sum, nil
}

// Sub subtracts another vector and returns the result as a new vector.
// The other vector must have the same dimensionality.
func (v Vector) Sub(other Vector) (Vector, error) {
	if len(v) != len(other) {
		return nil, ErrVectorNotSameSize
	}
	n := min(len(v), len(other))
	diff := make(Vector, n)
	for i := 0; i < n; i++ {
		diff[i] = v[i] - other[i]
	}
	return diff, nil
}

// Dot computes the dot product with another vector.
// The other vector must have the same dimensionality.
func (v Vector) Dot(other Vector) (float64, error) {
	if len(v) != len(other) {
		return 0.0, ErrVectorNotSameSize
	}
	dot := 0.0
	for i := range v {
		dot += v[i] * other[i]
	}
	return dot, nil
}

// Cross computes the cross product with another vector.
// Both vectors must be three-dimensional.
func (v Vector) Cross(other Vector) (Vector, error) {
	// Early error check to prevent redundant cloning
	if len(v) != 3 || len(other) != 3 {
		return nil, ErrVectorInvalidDimension
	}
	return Vector{
		v[1]*other[2] - v[2]*other[1],
		v[2]*other[0] - v[0]*other[2],
		v[0]*other[1] - v[1]*other[0],
	}, nil
}

// Unit computes the unit vector of v and returns it as a new vector.
func Unit(v Vector) Vector {
	unit := v.Clone()
	unit.Scale(1.0 / v.Magnitude())
	return unit
}

// Hadamard computes the Hadamard (element-wise) product with another vector
// and returns the result as a new vector. The other vector must have the same
// dimensionality.
func (v Vector) Hadamard(other Vector) (Vector, error) {
	if len(v) != len(other) {
		return nil, ErrVectorNotSameSize
	}
	prod := make(Vector, len(v))
	for i := range v {
		prod[i] = v[i] * other[i]
	}
	return prod, nil
}
vector.go
0.904879
0.665404
vector.go
starcoder
// +build appengine package simd // This file contains functions which operate on slices of 2- or 4-byte // elements (typically small structs or integers) in ways that differ from the // corresponding operations on single-byte elements. // In this context, there is little point in making the interface based on // []byte, since the caller will need to unsafely cast to it. Instead, most // functions take unsafe.Pointer(s) and a count, and have names ending in // 'Raw'; the caller should write safe wrappers around them when appropriate. // We provide sample wrappers for the int16 and uint16 cases. (Originally did // this for int32/uint32, but turns out the compiler has hardcoded // optimizations for those cases which are currently missing for {u}int16.) // RepeatI16 fills dst[] with the given int16. func RepeatI16(dst []int16, val int16) { for i := range dst { dst[i] = val } } // RepeatU16 fills dst[] with the given uint16. func RepeatU16(dst []uint16, val uint16) { for i := range dst { dst[i] = val } } // ReverseI16Inplace reverses a []int16 in-place. func ReverseI16Inplace(main []int16) { nElem := len(main) nElemDiv2 := nElem >> 1 for i, j := 0, nElem-1; i != nElemDiv2; i, j = i+1, j-1 { main[i], main[j] = main[j], main[i] } } // ReverseU16Inplace reverses a []uint16 in-place. func ReverseU16Inplace(main []uint16) { nElem := len(main) nElemDiv2 := nElem >> 1 for i, j := 0, nElem-1; i != nElemDiv2; i, j = i+1, j-1 { main[i], main[j] = main[j], main[i] } } // ReverseI16 sets dst[len(src) - 1 - pos] := src[pos] for each position in // src. It panics if len(src) != len(dst). func ReverseI16(dst, src []int16) { if len(dst) != len(src) { panic("ReverseI16() requires len(src) == len(dst).") } nElemMinus1 := len(dst) - 1 for i := range dst { dst[i] = src[nElemMinus1-i] } } // ReverseU16 sets dst[len(src) - 1 - pos] := src[pos] for each position in // src. It panics if len(src) != len(dst). 
func ReverseU16(dst, src []uint16) { if len(dst) != len(src) { panic("ReverseU16() requires len(src) == len(dst).") } nElemMinus1 := len(dst) - 1 for i := range dst { dst[i] = src[nElemMinus1-i] } } // Benchmark results suggest that Reverse32Raw is unimportant.
simd/multibyte_appengine.go
0.521715
0.435001
multibyte_appengine.go
starcoder
package cli import ( "context" "encoding/json" "fmt" "log" "net/url" "os" "path" "strconv" "strings" "time" "github.com/Vivino/rankdb/api/client" "github.com/goadesign/goa" goaclient "github.com/goadesign/goa/client" uuid "github.com/goadesign/goa/uuid" "github.com/spf13/cobra" ) type ( // DeleteBackupCommand is the command line data structure for the delete action of backup DeleteBackupCommand struct { BackupID string PrettyPrint bool } // StatusBackupCommand is the command line data structure for the status action of backup StatusBackupCommand struct { BackupID string PrettyPrint bool } // CreateElementsCommand is the command line data structure for the create action of elements CreateElementsCommand struct { Payload string ContentType string // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // Return this number of elements above and below the current element in `neighbors` field. Range int PrettyPrint bool } // DeleteElementsCommand is the command line data structure for the delete action of elements DeleteElementsCommand struct { // ID of element ElementID string // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string PrettyPrint bool } // DeleteMultiElementsCommand is the command line data structure for the delete-multi action of elements DeleteMultiElementsCommand struct { // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // IDs of elements ElementIds []string PrettyPrint bool } // GetElementsCommand is the command line data structure for the get action of elements GetElementsCommand struct { // ID of element ElementID string // The ID of the list to apply the operation on. 
// Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // Return this number of elements above and below the current element in `neighbors` field. Range int PrettyPrint bool } // GetAroundElementsCommand is the command line data structure for the get-around action of elements GetAroundElementsCommand struct { Payload string ContentType string // ID of element ElementID string // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // Return this number of elements above and below the current element in `neighbors` field. Range int PrettyPrint bool } // GetMultiElementsCommand is the command line data structure for the get-multi action of elements GetMultiElementsCommand struct { Payload string ContentType string // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string PrettyPrint bool } // PutElementsCommand is the command line data structure for the put action of elements PutElementsCommand struct { Payload string ContentType string // ID of element ElementID string // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // Return this number of elements above and below the current element in `neighbors` field. Range int PrettyPrint bool } // PutMultiElementsCommand is the command line data structure for the put-multi action of elements PutMultiElementsCommand struct { Payload string ContentType string // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // Return results of the operation. If disabled, operations will be faster and require less memory. 
Results string PrettyPrint bool } // HealthHealthCommand is the command line data structure for the health action of health HealthHealthCommand struct { PrettyPrint bool } // RootHealthCommand is the command line data structure for the root action of health RootHealthCommand struct { PrettyPrint bool } // JWTJWTCommand is the command line data structure for the jwt action of jwt JWTJWTCommand struct { // Expire token in this many minutes. Default is 24 hours. Expire int // Create key with list restrictions. // Use commas to separate multiple elements OnlyElements string // Create key with list restrictions. // Use commas to separate multiple lists OnlyLists string // Create key with scope Scope string PrettyPrint bool } // CloneListsCommand is the command line data structure for the clone action of lists CloneListsCommand struct { Payload string ContentType string // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string PrettyPrint bool } // CreateListsCommand is the command line data structure for the create action of lists CreateListsCommand struct { Payload string ContentType string // Replace list if exists. Replace string PrettyPrint bool } // DeleteListsCommand is the command line data structure for the delete action of lists DeleteListsCommand struct { // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string PrettyPrint bool } // GetListsCommand is the command line data structure for the get action of lists GetListsCommand struct { // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // Include top_element and bottom_element in result. 
TopBottom string PrettyPrint bool } // GetPercentileListsCommand is the command line data structure for the get-percentile action of lists GetPercentileListsCommand struct { // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // Return median percentile element. // If the percentile is between two elements, the element with the highest score is returned. // Value must be parseable as a float point number and must be between 0.0 and 100.0 FromTop string // Return this number of elements above and below the current element in `neighbors` field. Range int PrettyPrint bool } // GetRangeListsCommand is the command line data structure for the get-range action of lists GetRangeListsCommand struct { // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // First result will be at this rank from the bottom of the list. FromBottom int // First result will be at this rank from the top of the list. FromTop int // Number of results to return Limit int PrettyPrint bool } // GetAllListsCommand is the command line data structure for the get_all action of lists GetAllListsCommand struct { // Start with element following this ID. Empty will return from start. AfterID string // Return elements preceding this ID. BeforeID string // Maximum Number of results Limit int PrettyPrint bool } // ReindexListsCommand is the command line data structure for the reindex action of lists ReindexListsCommand struct { // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string PrettyPrint bool } // RepairListsCommand is the command line data structure for the repair action of lists RepairListsCommand struct { // The ID of the list to apply the operation on. 
// Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // Clear list if unable to repair Clear string PrettyPrint bool } // VerifyListsCommand is the command line data structure for the verify action of lists VerifyListsCommand struct { // The ID of the list to apply the operation on. // Can be `a` to `z` (both upper/lower case), `0` to `9` or one of these characters `_-.` ListID string // Clear list if unable to repair Clear string // Attempt to repair list Repair string PrettyPrint bool } // BackupMultilistCommand is the command line data structure for the backup action of multilist BackupMultilistCommand struct { Payload string ContentType string PrettyPrint bool } // CreateMultilistCommand is the command line data structure for the create action of multilist CreateMultilistCommand struct { Payload string ContentType string // Returns errors only. If disabled, operations will be faster and require less memory. ErrorsOnly string // Return results of the operation. If disabled, operations will be faster and require less memory. Results string PrettyPrint bool } // DeleteMultilistCommand is the command line data structure for the delete action of multilist DeleteMultilistCommand struct { // ID of element ElementID string // Include all lists in these sets AllInSets []string // Returns errors only. If disabled, operations will be faster and require less memory. ErrorsOnly string // Include lists that match exact list names Lists []string // Include lists that match all values in metadata. // Payload must be valid json with string->string values. 
// Example: {"country":"dk","game":"match4"} MatchMetadata string PrettyPrint bool } // GetMultilistCommand is the command line data structure for the get action of multilist GetMultilistCommand struct { // ID of element ElementID string // Include all lists in these sets AllInSets []string // Include lists that match exact list names Lists []string // Include lists that match all values in metadata. // Payload must be valid json with string->string values. // Example: {"country":"dk","game":"match4"} MatchMetadata string PrettyPrint bool } // PutMultilistCommand is the command line data structure for the put action of multilist PutMultilistCommand struct { Payload string ContentType string // Returns errors only. If disabled, operations will be faster and require less memory. ErrorsOnly string // Return results of the operation. If disabled, operations will be faster and require less memory. Results string PrettyPrint bool } // ReindexMultilistCommand is the command line data structure for the reindex action of multilist ReindexMultilistCommand struct { Payload string ContentType string // Returns errors only. If disabled, operations will be faster and require less memory. ErrorsOnly string PrettyPrint bool } // RestoreMultilistCommand is the command line data structure for the restore action of multilist RestoreMultilistCommand struct { // Keep existing lists. Only restore missing lists Keep string // Optional alternative list id prefix. // If not provided the original list id/segment ids will be used and any existing list will be overwritten. ListIDPrefix string // Optional alternative list id suffix. // If not provided the original list id/segment ids will be used and any existing list will be overwritten. ListIDSuffix string // The body will not contain any data. Instead load data from provided URL. // The call will return when the backup has finished. // If the source is s3, the source should be defined as s3://bucket/path/file.bin. 
Replace bucket and path+file Src string // The body will not contain any data. Instead load data from file from this path. // The call will return when the backup has finished. SrcFile string PrettyPrint bool } // VerifyMultilistCommand is the command line data structure for the verify action of multilist VerifyMultilistCommand struct { Payload string ContentType string // Clear list if unable to repair Clear string // Verify elements as well Elements string // Returns errors only. If disabled, operations will be faster and require less memory. ErrorsOnly string // Repair lists with problems automatically. Repair string PrettyPrint bool } // DownloadCommand is the command line data structure for the download command. DownloadCommand struct { // OutFile is the path to the download output file. OutFile string } ) // RegisterCommands registers the resource action CLI commands. func RegisterCommands(app *cobra.Command, c *client.Client) { var command, sub *cobra.Command command = &cobra.Command{ Use: "backup", Short: `Backup lists. 
If no lists, nor any search is specified all lists are backed up.A callback is provided to check progress.`, } tmp1 := new(BackupMultilistCommand) sub = &cobra.Command{ Use: `multilist ["/xlist/backup"]`, Short: `Cross-list operations`, Long: `Cross-list operations Payload example: { "destination": { "path": "./backup.bin", "server_list_id_prefix": "Molestiae aspernatur mollitia commodi id deserunt.", "server_list_id_suffix": "Reiciendis voluptate ea id aperiam nulla.", "type": "file" }, "lists": { "all_in_sets": [ "storage-set", "backup-set" ], "lists": [ "highscore-dk-all", "highscore-uk-all" ], "match_metadata": { "country": "dk" } } }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp1.Run(c, args) }, } tmp1.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp1.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "clone", Short: `Creates a clone of the list to a new list with the supplied metadata. 
The URL list is the source and the payload must contain the new list ID.`, } tmp2 := new(CloneListsCommand) sub = &cobra.Command{ Use: `lists ["/lists/LIST_ID/clone"]`, Short: ``, Long: ` Payload example: { "id": "highscore-list", "load_index": false, "merge_size": 500, "metadata": { "country": "dk", "game": "2" }, "populate": [ { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 }, { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 } ], "set": "storage-set", "split_size": 2000 }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp2.Run(c, args) }, } tmp2.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp2.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "create", Short: `create action`, } tmp3 := new(CreateElementsCommand) sub = &cobra.Command{ Use: `elements ["/lists/LIST_ID/elements"]`, Short: ``, Long: ` Payload example: { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp3.Run(c, args) }, } tmp3.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp3.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) tmp4 := new(CreateListsCommand) sub = &cobra.Command{ Use: `lists ["/lists"]`, Short: ``, Long: ` Payload example: { "id": "highscore-list", "load_index": false, "merge_size": 500, "metadata": { "country": "dk", "game": "2" }, "populate": [ { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 }, { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 } ], "set": "storage-set", "split_size": 2000 }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp4.Run(c, args) }, } tmp4.RegisterFlags(sub, c) 
sub.PersistentFlags().BoolVar(&tmp4.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) tmp5 := new(CreateMultilistCommand) sub = &cobra.Command{ Use: `multilist ["/xlist/elements"]`, Short: `Cross-list operations`, Long: `Cross-list operations Payload example: { "all_in_sets": [ "storage-set", "backup-set" ], "lists": [ "highscore-dk-all", "highscore-uk-all" ], "match_metadata": { "country": "dk" }, "payload": [ { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 } ] }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp5.Run(c, args) }, } tmp5.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp5.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "delete", Short: `delete action`, } tmp6 := new(DeleteBackupCommand) sub = &cobra.Command{ Use: `backup ["/backup/BACKUP_ID"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp6.Run(c, args) }, } tmp6.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp6.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) tmp7 := new(DeleteElementsCommand) sub = &cobra.Command{ Use: `elements ["/lists/LIST_ID/elements/ELEMENT_ID"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp7.Run(c, args) }, } tmp7.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp7.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) tmp8 := new(DeleteListsCommand) sub = &cobra.Command{ Use: `lists ["/lists/LIST_ID"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp8.Run(c, args) }, } tmp8.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp8.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) tmp9 := new(DeleteMultilistCommand) sub = &cobra.Command{ Use: `multilist ["/xlist/elements/ELEMENT_ID"]`, Short: `Cross-list 
operations`, RunE: func(cmd *cobra.Command, args []string) error { return tmp9.Run(c, args) }, } tmp9.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp9.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "delete-multi", Short: `Delete Multiple Elements in list.If an element does not exist, success is returned. `, } tmp10 := new(DeleteMultiElementsCommand) sub = &cobra.Command{ Use: `elements ["/lists/LIST_ID/elements"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp10.Run(c, args) }, } tmp10.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp10.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "get", Short: `get action`, } tmp11 := new(GetElementsCommand) sub = &cobra.Command{ Use: `elements ["/lists/LIST_ID/elements/ELEMENT_ID"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp11.Run(c, args) }, } tmp11.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp11.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) tmp12 := new(GetListsCommand) sub = &cobra.Command{ Use: `lists ["/lists/LIST_ID"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp12.Run(c, args) }, } tmp12.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp12.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) tmp13 := new(GetMultilistCommand) sub = &cobra.Command{ Use: `multilist ["/xlist/elements/ELEMENT_ID"]`, Short: `Cross-list operations`, RunE: func(cmd *cobra.Command, args []string) error { return tmp13.Run(c, args) }, } tmp13.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp13.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "get-all", Short: `Get multiple lists. 
Lists are sorted lexicographically. See https://golang.org/pkg/strings/#Compare`, } tmp14 := new(GetAllListsCommand) sub = &cobra.Command{ Use: `lists ["/lists"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp14.Run(c, args) }, } tmp14.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp14.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "get-around", Short: `Get relation of one element to multiple specific elements. The element will have local_from_top and local_from_bottom populated.Elements that are not found are ignored. `, } tmp15 := new(GetAroundElementsCommand) sub = &cobra.Command{ Use: `elements ["/lists/LIST_ID/elements/ELEMENT_ID/around"]`, Short: ``, Long: ` Payload example: { "element_ids": [ 120, 340, 550 ] }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp15.Run(c, args) }, } tmp15.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp15.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "get-multi", Short: `Get Multiple Elements in list. Will return 404 if list cannot be found, OK even if no elements are found.`, } tmp16 := new(GetMultiElementsCommand) sub = &cobra.Command{ Use: `elements ["/lists/LIST_ID/elements/find"]`, Short: ``, Long: ` Payload example: { "element_ids": [ 120, 340, 550 ] }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp16.Run(c, args) }, } tmp16.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp16.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "get-percentile", Short: `Get element at percentile. 
Either ` + "`" + `from_top` + "`" + ` or ` + "`" + `from_bottom` + "`" + ` must be supplied`, } tmp17 := new(GetPercentileListsCommand) sub = &cobra.Command{ Use: `lists ["/lists/LIST_ID/percentile"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp17.Run(c, args) }, } tmp17.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp17.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "get-range", Short: `Get rank range of the list. Either ` + "`" + `from_top` + "`" + ` or ` + "`" + `from_bottom` + "`" + ` must be supplied`, } tmp18 := new(GetRangeListsCommand) sub = &cobra.Command{ Use: `lists ["/lists/LIST_ID/range"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp18.Run(c, args) }, } tmp18.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp18.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "health", Short: `Return system information`, } tmp19 := new(HealthHealthCommand) sub = &cobra.Command{ Use: `health ["/health"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp19.Run(c, args) }, } tmp19.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp19.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "jwt", Short: `JWT key generator. 
If left disabled in config, Unauthorized is returned`, } tmp20 := new(JWTJWTCommand) sub = &cobra.Command{ Use: `jwt ["/jwt"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp20.Run(c, args) }, } tmp20.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp20.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "put", Short: `put action`, } tmp21 := new(PutElementsCommand) sub = &cobra.Command{ Use: `elements ["/lists/LIST_ID/elements/ELEMENT_ID"]`, Short: ``, Long: ` Payload example: { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp21.Run(c, args) }, } tmp21.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp21.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) tmp22 := new(PutMultilistCommand) sub = &cobra.Command{ Use: `multilist ["/xlist/elements"]`, Short: `Cross-list operations`, Long: `Cross-list operations Payload example: { "all_in_sets": [ "storage-set", "backup-set" ], "lists": [ "highscore-dk-all", "highscore-uk-all" ], "match_metadata": { "country": "dk" }, "payload": [ { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 } ] }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp22.Run(c, args) }, } tmp22.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp22.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "put-multi", Short: `Update Multiple Elements in list.If element does not exist, it is created in list. 
The returned "not_found" field will never be preset.`, } tmp23 := new(PutMultiElementsCommand) sub = &cobra.Command{ Use: `elements ["/lists/LIST_ID/elements"]`, Short: ``, Long: ` Payload example: [ { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 }, { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 }, { "id": 100, "payload": { "country": "dk", "name": "<NAME>" }, "score": 100, "tie_breaker": 2000 } ]`, RunE: func(cmd *cobra.Command, args []string) error { return tmp23.Run(c, args) }, } tmp23.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp23.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "reindex", Short: `reindex action`, } tmp24 := new(ReindexListsCommand) sub = &cobra.Command{ Use: `lists ["/lists/LIST_ID/reindex"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp24.Run(c, args) }, } tmp24.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp24.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) tmp25 := new(ReindexMultilistCommand) sub = &cobra.Command{ Use: `multilist ["/xlist/reindex"]`, Short: `Cross-list operations`, Long: `Cross-list operations Payload example: { "all_in_sets": [ "storage-set", "backup-set" ], "lists": [ "highscore-dk-all", "highscore-uk-all" ], "match_metadata": { "country": "dk" } }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp25.Run(c, args) }, } tmp25.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp25.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "repair", Short: `Repairs the list, by recreating all segments and indexes. 
All access to the list is blocked while operation runs.`, } tmp26 := new(RepairListsCommand) sub = &cobra.Command{ Use: `lists ["/lists/LIST_ID/repair"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp26.Run(c, args) }, } tmp26.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp26.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "restore", Short: `Restore lists. Body must contain binary data with backup data, unless 'src' is specified.`, } tmp27 := new(RestoreMultilistCommand) sub = &cobra.Command{ Use: `multilist ["/xlist/restore"]`, Short: `Cross-list operations`, RunE: func(cmd *cobra.Command, args []string) error { return tmp27.Run(c, args) }, } tmp27.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp27.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "root", Short: `Ping server`, } tmp28 := new(RootHealthCommand) sub = &cobra.Command{ Use: `health ["/"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp28.Run(c, args) }, } tmp28.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp28.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "status", Short: `Return backup progress`, } tmp29 := new(StatusBackupCommand) sub = &cobra.Command{ Use: `backup ["/backup/BACKUP_ID"]`, Short: ``, RunE: func(cmd *cobra.Command, args []string) error { return tmp29.Run(c, args) }, } tmp29.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp29.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) command = &cobra.Command{ Use: "verify", Short: `verify action`, } tmp30 := new(VerifyListsCommand) sub = &cobra.Command{ Use: `lists ["/lists/LIST_ID/verify"]`, Short: ``, RunE: func(cmd *cobra.Command, args 
[]string) error { return tmp30.Run(c, args) }, } tmp30.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp30.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) tmp31 := new(VerifyMultilistCommand) sub = &cobra.Command{ Use: `multilist ["/xlist/verify"]`, Short: `Cross-list operations`, Long: `Cross-list operations Payload example: { "all_in_sets": [ "storage-set", "backup-set" ], "lists": [ "highscore-dk-all", "highscore-uk-all" ], "match_metadata": { "country": "dk" } }`, RunE: func(cmd *cobra.Command, args []string) error { return tmp31.Run(c, args) }, } tmp31.RegisterFlags(sub, c) sub.PersistentFlags().BoolVar(&tmp31.PrettyPrint, "pp", false, "Pretty print response body") command.AddCommand(sub) app.AddCommand(command) dl := new(DownloadCommand) dlc := &cobra.Command{ Use: "download [PATH]", Short: "Download file with given path", RunE: func(cmd *cobra.Command, args []string) error { return dl.Run(c, args) }, } dlc.Flags().StringVar(&dl.OutFile, "out", "", "Output file") app.AddCommand(dlc) } func intFlagVal(name string, parsed int) *int { if hasFlag(name) { return &parsed } return nil } func float64FlagVal(name string, parsed float64) *float64 { if hasFlag(name) { return &parsed } return nil } func boolFlagVal(name string, parsed bool) *bool { if hasFlag(name) { return &parsed } return nil } func stringFlagVal(name string, parsed string) *string { if hasFlag(name) { return &parsed } return nil } func hasFlag(name string) bool { for _, arg := range os.Args[1:] { if strings.HasPrefix(arg, "--"+name) { return true } } return false } func jsonVal(val string) (*interface{}, error) { var t interface{} err := json.Unmarshal([]byte(val), &t) if err != nil { return nil, err } return &t, nil } func jsonArray(ins []string) ([]interface{}, error) { if ins == nil { return nil, nil } var vals []interface{} for _, id := range ins { val, err := jsonVal(id) if err != nil { return nil, err } vals = append(vals, val) } return vals, nil } func 
timeVal(val string) (*time.Time, error) { t, err := time.Parse(time.RFC3339, val) if err != nil { return nil, err } return &t, nil } func timeArray(ins []string) ([]time.Time, error) { if ins == nil { return nil, nil } var vals []time.Time for _, id := range ins { val, err := timeVal(id) if err != nil { return nil, err } vals = append(vals, *val) } return vals, nil } func uuidVal(val string) (*uuid.UUID, error) { t, err := uuid.FromString(val) if err != nil { return nil, err } return &t, nil } func uuidArray(ins []string) ([]uuid.UUID, error) { if ins == nil { return nil, nil } var vals []uuid.UUID for _, id := range ins { val, err := uuidVal(id) if err != nil { return nil, err } vals = append(vals, *val) } return vals, nil } func float64Val(val string) (*float64, error) { t, err := strconv.ParseFloat(val, 64) if err != nil { return nil, err } return &t, nil } func float64Array(ins []string) ([]float64, error) { if ins == nil { return nil, nil } var vals []float64 for _, id := range ins { val, err := float64Val(id) if err != nil { return nil, err } vals = append(vals, *val) } return vals, nil } func boolVal(val string) (*bool, error) { t, err := strconv.ParseBool(val) if err != nil { return nil, err } return &t, nil } func boolArray(ins []string) ([]bool, error) { if ins == nil { return nil, nil } var vals []bool for _, id := range ins { val, err := boolVal(id) if err != nil { return nil, err } vals = append(vals, *val) } return vals, nil } // Run downloads files with given paths. 
func (cmd *DownloadCommand) Run(c *client.Client, args []string) error { var ( fnf func(context.Context, string) (int64, error) fnd func(context.Context, string, string) (int64, error) rpath = args[0] outfile = cmd.OutFile logger = goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags)) ctx = goa.WithLogger(context.Background(), logger) err error ) if rpath[0] != '/' { rpath = "/" + rpath } if strings.HasPrefix(rpath, "/doc/") { fnd = c.DownloadDoc rpath = rpath[5:] if outfile == "" { _, outfile = path.Split(rpath) } goto found } if strings.HasPrefix(rpath, "/api/swagger/") { fnd = c.DownloadSwagger rpath = rpath[13:] if outfile == "" { _, outfile = path.Split(rpath) } goto found } return fmt.Errorf("don't know how to download %s", rpath) found: ctx = goa.WithLogContext(ctx, "file", outfile) if fnf != nil { _, err = fnf(ctx, outfile) } else { _, err = fnd(ctx, rpath, outfile) } if err != nil { goa.LogError(ctx, "failed", "err", err) return err } return nil } // Run makes the HTTP request corresponding to the DeleteBackupCommand command. func (cmd *DeleteBackupCommand) Run(c *client.Client, args []string) error { var path string if len(args) > 0 { path = args[0] } else { path = fmt.Sprintf("/backup/%v", url.QueryEscape(cmd.BackupID)) } logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags)) ctx := goa.WithLogger(context.Background(), logger) resp, err := c.DeleteBackup(ctx, path) if err != nil { goa.LogError(ctx, "failed", "err", err) return err } goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint) return nil } // RegisterFlags registers the command flags with the command line. func (cmd *DeleteBackupCommand) RegisterFlags(cc *cobra.Command, c *client.Client) { var backupID string cc.Flags().StringVar(&cmd.BackupID, "backup_id", backupID, ``) } // Run makes the HTTP request corresponding to the StatusBackupCommand command. 
func (cmd *StatusBackupCommand) Run(c *client.Client, args []string) error {
	var path string
	// A positional argument, when given, overrides the generated request path.
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/backup/%v", url.QueryEscape(cmd.BackupID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.StatusBackup(ctx, path)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	// NOTE(review): HandleResponse appears to consume and render the response
	// (pretty-printed when --pp is set); defined in goaclient — confirm there.
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *StatusBackupCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var backupID string
	cc.Flags().StringVar(&cmd.BackupID, "backup_id", backupID, ``)
}

// Run makes the HTTP request corresponding to the CreateElementsCommand command.
func (cmd *CreateElementsCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/elements", url.QueryEscape(cmd.ListID))
	}
	// Decode the --payload JSON into the request body type; an empty flag
	// sends the zero-value payload.
	var payload client.Element
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// intFlagVal yields non-nil only when --range was present on the command line.
	resp, err := c.CreateElements(ctx, path, &payload, intFlagVal("range", cmd.Range), cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *CreateElementsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on.
Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	cc.Flags().IntVar(&cmd.Range, "range", 5, `Return this number of elements above and below the current element in `+"`"+`neighbors`+"`"+` field.`)
}

// Run makes the HTTP request corresponding to the DeleteElementsCommand command.
func (cmd *DeleteElementsCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/elements/%v", url.QueryEscape(cmd.ListID), url.QueryEscape(cmd.ElementID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.DeleteElements(ctx, path)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *DeleteElementsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var elementID string
	cc.Flags().StringVar(&cmd.ElementID, "element_id", elementID, `ID of element`)
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on.
Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
}

// Run makes the HTTP request corresponding to the DeleteMultiElementsCommand command.
func (cmd *DeleteMultiElementsCommand) Run(c *client.Client, args []string) error {
	var path string
	// A positional argument, when given, overrides the generated request path.
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/elements", url.QueryEscape(cmd.ListID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.DeleteMultiElements(ctx, path, cmd.ElementIds)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *DeleteMultiElementsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on.
Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	var elementIds []string
	cc.Flags().StringSliceVar(&cmd.ElementIds, "element_ids", elementIds, `IDs of elements`)
}

// Run makes the HTTP request corresponding to the GetElementsCommand command.
func (cmd *GetElementsCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/elements/%v", url.QueryEscape(cmd.ListID), url.QueryEscape(cmd.ElementID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// intFlagVal yields non-nil only when --range was present on the command line.
	resp, err := c.GetElements(ctx, path, intFlagVal("range", cmd.Range))
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *GetElementsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var elementID string
	cc.Flags().StringVar(&cmd.ElementID, "element_id", elementID, `ID of element`)
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on.
Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	cc.Flags().IntVar(&cmd.Range, "range", 5, `Return this number of elements above and below the current element in `+"`"+`neighbors`+"`"+` field.`)
}

// Run makes the HTTP request corresponding to the GetAroundElementsCommand command.
func (cmd *GetAroundElementsCommand) Run(c *client.Client, args []string) error {
	var path string
	// A positional argument, when given, overrides the generated request path.
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/elements/%v/around", url.QueryEscape(cmd.ListID), url.QueryEscape(cmd.ElementID))
	}
	// Decode the --payload JSON; an empty flag sends the zero-value payload.
	var payload client.MultiElement
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// intFlagVal yields non-nil only when --range was present on the command line.
	resp, err := c.GetAroundElements(ctx, path, &payload, intFlagVal("range", cmd.Range), cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *GetAroundElementsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
	var elementID string
	cc.Flags().StringVar(&cmd.ElementID, "element_id", elementID, `ID of element`)
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on.
Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	cc.Flags().IntVar(&cmd.Range, "range", 5, `Return this number of elements above and below the current element in `+"`"+`neighbors`+"`"+` field.`)
}

// Run makes the HTTP request corresponding to the GetMultiElementsCommand command.
func (cmd *GetMultiElementsCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/elements/find", url.QueryEscape(cmd.ListID))
	}
	var payload client.MultiElement
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.GetMultiElements(ctx, path, &payload, cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *GetMultiElementsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on.
Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
}

// Run makes the HTTP request corresponding to the PutElementsCommand command.
func (cmd *PutElementsCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/elements/%v", url.QueryEscape(cmd.ListID), url.QueryEscape(cmd.ElementID))
	}
	var payload client.Element
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.PutElements(ctx, path, &payload, intFlagVal("range", cmd.Range), cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *PutElementsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
	var elementID string
	cc.Flags().StringVar(&cmd.ElementID, "element_id", elementID, `ID of element`)
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on.
Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	cc.Flags().IntVar(&cmd.Range, "range", 5, `Return this number of elements above and below the current element in `+"`"+`neighbors`+"`"+` field.`)
}

// Run makes the HTTP request corresponding to the PutMultiElementsCommand command.
func (cmd *PutMultiElementsCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the generated default path.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/elements", url.QueryEscape(cmd.ListID))
	}
	// Decode the --payload JSON into the request body type; an empty flag
	// sends the zero-value payload.
	var payload client.PutMultiElementsPayload
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// --results is declared as a string so that an unset flag maps to a nil
	// *bool and the server-side default applies (presumably — confirm against API).
	var tmp32 *bool
	if cmd.Results != "" {
		var err error
		tmp32, err = boolVal(cmd.Results)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--results", "err", err)
			return err
		}
	}
	resp, err := c.PutMultiElements(ctx, path, payload, tmp32, cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *PutMultiElementsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on. Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	var results string
	cc.Flags().StringVar(&cmd.Results, "results", results, `Return results of the operation. If disabled, operations will be faster and require less memory.`)
}

// Run makes the HTTP request corresponding to the HealthHealthCommand command.
func (cmd *HealthHealthCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the fixed /health endpoint.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/health"
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.HealthHealth(ctx, path)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
// The health command takes no flags.
func (cmd *HealthHealthCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
}

// Run makes the HTTP request corresponding to the RootHealthCommand command.
func (cmd *RootHealthCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/"
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.RootHealth(ctx, path)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
// The root health command takes no flags.
func (cmd *RootHealthCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
}

// Run makes the HTTP request corresponding to the JWTJWTCommand command.
func (cmd *JWTJWTCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/jwt"
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// Optional query parameters are wrapped by *FlagVal helpers so unset
	// flags are omitted from the request (presumably — helpers defined elsewhere).
	resp, err := c.JWTJWT(ctx, path, cmd.Scope, intFlagVal("expire", cmd.Expire), stringFlagVal("only_elements", cmd.OnlyElements), stringFlagVal("only_lists", cmd.OnlyLists))
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *JWTJWTCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().IntVar(&cmd.Expire, "expire", 1440, `Expire token in this many minutes. Default is 24 hours.`)
	var onlyElements string
	cc.Flags().StringVar(&cmd.OnlyElements, "only_elements", onlyElements, `Create key with list restrictions. Use commas to separate multiple elements`)
	var onlyLists string
	cc.Flags().StringVar(&cmd.OnlyLists, "only_lists", onlyLists, `Create key with list restrictions. Use commas to separate multiple lists`)
	cc.Flags().StringVar(&cmd.Scope, "scope", "api:read", `Create key with scope`)
}

// Run makes the HTTP request corresponding to the CloneListsCommand command.
func (cmd *CloneListsCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the generated default path.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/clone", url.QueryEscape(cmd.ListID))
	}
	var payload client.RankList
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.CloneLists(ctx, path, &payload, cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *CloneListsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on.
Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
}

// Run makes the HTTP request corresponding to the CreateListsCommand command.
func (cmd *CreateListsCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/lists"
	}
	var payload client.RankList
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// --replace is a tri-state string flag: unset maps to a nil *bool so the
	// server default applies (presumably — confirm against API).
	var tmp33 *bool
	if cmd.Replace != "" {
		var err error
		tmp33, err = boolVal(cmd.Replace)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--replace", "err", err)
			return err
		}
	}
	resp, err := c.CreateLists(ctx, path, &payload, tmp33, cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *CreateListsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
	var replace string
	cc.Flags().StringVar(&cmd.Replace, "replace", replace, `Replace list if exists.`)
}

// Run makes the HTTP request corresponding to the DeleteListsCommand command.
func (cmd *DeleteListsCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the generated default path.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v", url.QueryEscape(cmd.ListID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.DeleteLists(ctx, path)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *DeleteListsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on. Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
}

// Run makes the HTTP request corresponding to the GetListsCommand command.
func (cmd *GetListsCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v", url.QueryEscape(cmd.ListID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// --top_bottom is a tri-state string flag: unset maps to a nil *bool.
	var tmp34 *bool
	if cmd.TopBottom != "" {
		var err error
		tmp34, err = boolVal(cmd.TopBottom)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--top_bottom", "err", err)
			return err
		}
	}
	resp, err := c.GetLists(ctx, path, tmp34)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *GetListsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on.
Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	var topBottom string
	cc.Flags().StringVar(&cmd.TopBottom, "top_bottom", topBottom, `Include top_element and bottom_element in result.`)
}

// Run makes the HTTP request corresponding to the GetPercentileListsCommand command.
func (cmd *GetPercentileListsCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/percentile", url.QueryEscape(cmd.ListID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.GetPercentileLists(ctx, path, stringFlagVal("from_top", cmd.FromTop), intFlagVal("range", cmd.Range))
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *GetPercentileListsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on. Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	cc.Flags().StringVar(&cmd.FromTop, "from_top", "50.0", `Return median percentile element. If the percentile is between two elements, the element with the highest score is returned. Value must be parseable as a float point number and must be between 0.0 and 100.0`)
	cc.Flags().IntVar(&cmd.Range, "range", 5, `Return this number of elements above and below the current element in `+"`"+`neighbors`+"`"+` field.`)
}

// Run makes the HTTP request corresponding to the GetRangeListsCommand command.
func (cmd *GetRangeListsCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the generated default path.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/range", url.QueryEscape(cmd.ListID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// from_bottom and from_top are mutually anchoring offsets; both are sent
	// as optional query parameters via intFlagVal.
	resp, err := c.GetRangeLists(ctx, path, intFlagVal("from_bottom", cmd.FromBottom), intFlagVal("from_top", cmd.FromTop), intFlagVal("limit", cmd.Limit))
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *GetRangeListsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on. Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	var fromBottom int
	cc.Flags().IntVar(&cmd.FromBottom, "from_bottom", fromBottom, `First result will be at this rank from the bottom of the list.`)
	var fromTop int
	cc.Flags().IntVar(&cmd.FromTop, "from_top", fromTop, `First result will be at this rank from the top of the list.`)
	cc.Flags().IntVar(&cmd.Limit, "limit", 25, `Number of results to return`)
}

// Run makes the HTTP request corresponding to the GetAllListsCommand command.
func (cmd *GetAllListsCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the fixed /lists endpoint.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/lists"
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// after_id/before_id implement cursor-style pagination (presumably — confirm against API).
	resp, err := c.GetAllLists(ctx, path, stringFlagVal("after_id", cmd.AfterID), stringFlagVal("before_id", cmd.BeforeID), intFlagVal("limit", cmd.Limit))
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *GetAllListsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var afterID string
	cc.Flags().StringVar(&cmd.AfterID, "after_id", afterID, `Start with element following this ID. Empty will return from start.`)
	var beforeID string
	cc.Flags().StringVar(&cmd.BeforeID, "before_id", beforeID, `Return elements preceding this ID.`)
	cc.Flags().IntVar(&cmd.Limit, "limit", 25, `Maximum Number of results`)
}

// Run makes the HTTP request corresponding to the ReindexListsCommand command.
func (cmd *ReindexListsCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/reindex", url.QueryEscape(cmd.ListID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.ReindexLists(ctx, path)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *ReindexListsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on.
Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
}

// Run makes the HTTP request corresponding to the RepairListsCommand command.
func (cmd *RepairListsCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/repair", url.QueryEscape(cmd.ListID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// --clear is a tri-state string flag: unset maps to a nil *bool.
	var tmp35 *bool
	if cmd.Clear != "" {
		var err error
		tmp35, err = boolVal(cmd.Clear)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--clear", "err", err)
			return err
		}
	}
	resp, err := c.RepairLists(ctx, path, tmp35)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *RepairListsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on. Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	var clear string
	cc.Flags().StringVar(&cmd.Clear, "clear", clear, `Clear list if unable to repair`)
}

// Run makes the HTTP request corresponding to the VerifyListsCommand command.
func (cmd *VerifyListsCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the generated default path.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/lists/%v/verify", url.QueryEscape(cmd.ListID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// --clear and --repair are tri-state string flags: unset maps to nil *bool.
	var tmp36 *bool
	if cmd.Clear != "" {
		var err error
		tmp36, err = boolVal(cmd.Clear)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--clear", "err", err)
			return err
		}
	}
	var tmp37 *bool
	if cmd.Repair != "" {
		var err error
		tmp37, err = boolVal(cmd.Repair)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--repair", "err", err)
			return err
		}
	}
	resp, err := c.VerifyLists(ctx, path, tmp36, tmp37)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *VerifyListsCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var listID string
	cc.Flags().StringVar(&cmd.ListID, "list_id", listID, `The ID of the list to apply the operation on. Can be `+"`"+`a`+"`"+` to `+"`"+`z`+"`"+` (both upper/lower case), `+"`"+`0`+"`"+` to `+"`"+`9`+"`"+` or one of these characters `+"`"+`_-.`+"`"+``)
	var clear string
	cc.Flags().StringVar(&cmd.Clear, "clear", clear, `Clear list if unable to repair`)
	var repair string
	cc.Flags().StringVar(&cmd.Repair, "repair", repair, `Attempt to repair list`)
}

// Run makes the HTTP request corresponding to the BackupMultilistCommand command.
func (cmd *BackupMultilistCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the fixed /xlist/backup endpoint.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/xlist/backup"
	}
	// Decode --payload JSON into the backup request body; empty flag sends
	// the zero-value payload.
	var payload client.MultiListBackup
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.BackupMultilist(ctx, path, &payload, cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *BackupMultilistCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
}

// Run makes the HTTP request corresponding to the CreateMultilistCommand command.
func (cmd *CreateMultilistCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the fixed /xlist/elements endpoint.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/xlist/elements"
	}
	var payload client.ListPayloadQL
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// --errors_only and --results are tri-state string flags: unset maps to
	// nil *bool so the server default applies.
	var tmp38 *bool
	if cmd.ErrorsOnly != "" {
		var err error
		tmp38, err = boolVal(cmd.ErrorsOnly)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--errors_only", "err", err)
			return err
		}
	}
	var tmp39 *bool
	if cmd.Results != "" {
		var err error
		tmp39, err = boolVal(cmd.Results)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--results", "err", err)
			return err
		}
	}
	resp, err := c.CreateMultilist(ctx, path, &payload, tmp38, tmp39, cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *CreateMultilistCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
	var errorsOnly string
	cc.Flags().StringVar(&cmd.ErrorsOnly, "errors_only", errorsOnly, `Returns errors only. If disabled, operations will be faster and require less memory.`)
	var results string
	cc.Flags().StringVar(&cmd.Results, "results", results, `Return results of the operation. If disabled, operations will be faster and require less memory.`)
}

// Run makes the HTTP request corresponding to the DeleteMultilistCommand command.
func (cmd *DeleteMultilistCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the generated default path.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/xlist/elements/%v", url.QueryEscape(cmd.ElementID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// --errors_only is a tri-state string flag: unset maps to a nil *bool.
	var tmp40 *bool
	if cmd.ErrorsOnly != "" {
		var err error
		tmp40, err = boolVal(cmd.ErrorsOnly)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--errors_only", "err", err)
			return err
		}
	}
	resp, err := c.DeleteMultilist(ctx, path, cmd.AllInSets, tmp40, cmd.Lists, stringFlagVal("match_metadata", cmd.MatchMetadata))
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *DeleteMultilistCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var elementID string
	cc.Flags().StringVar(&cmd.ElementID, "element_id", elementID, `ID of element`)
	var allInSets []string
	cc.Flags().StringSliceVar(&cmd.AllInSets, "all_in_sets", allInSets, `Include all lists in these sets`)
	var errorsOnly string
	cc.Flags().StringVar(&cmd.ErrorsOnly, "errors_only", errorsOnly, `Returns errors only. If disabled, operations will be faster and require less memory.`)
	var lists []string
	cc.Flags().StringSliceVar(&cmd.Lists, "lists", lists, `Include lists that match exact list names`)
	var matchMetadata string
	cc.Flags().StringVar(&cmd.MatchMetadata, "match_metadata", matchMetadata, `Include lists that match all values in metadata. Payload must be valid json with string->string values. Example: {"country":"dk","game":"match4"}`)
}

// Run makes the HTTP request corresponding to the GetMultilistCommand command.
func (cmd *GetMultilistCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the generated default path.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = fmt.Sprintf("/xlist/elements/%v", url.QueryEscape(cmd.ElementID))
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	resp, err := c.GetMultilist(ctx, path, cmd.AllInSets, cmd.Lists, stringFlagVal("match_metadata", cmd.MatchMetadata))
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *GetMultilistCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var elementID string
	cc.Flags().StringVar(&cmd.ElementID, "element_id", elementID, `ID of element`)
	var allInSets []string
	cc.Flags().StringSliceVar(&cmd.AllInSets, "all_in_sets", allInSets, `Include all lists in these sets`)
	var lists []string
	cc.Flags().StringSliceVar(&cmd.Lists, "lists", lists, `Include lists that match exact list names`)
	var matchMetadata string
	cc.Flags().StringVar(&cmd.MatchMetadata, "match_metadata", matchMetadata, `Include lists that match all values in metadata. Payload must be valid json with string->string values. Example: {"country":"dk","game":"match4"}`)
}

// Run makes the HTTP request corresponding to the PutMultilistCommand command.
func (cmd *PutMultilistCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the fixed /xlist/elements endpoint.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/xlist/elements"
	}
	var payload client.ListPayloadQL
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// --errors_only and --results are tri-state string flags: unset maps to
	// nil *bool so the server default applies.
	var tmp41 *bool
	if cmd.ErrorsOnly != "" {
		var err error
		tmp41, err = boolVal(cmd.ErrorsOnly)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--errors_only", "err", err)
			return err
		}
	}
	var tmp42 *bool
	if cmd.Results != "" {
		var err error
		tmp42, err = boolVal(cmd.Results)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--results", "err", err)
			return err
		}
	}
	resp, err := c.PutMultilist(ctx, path, &payload, tmp41, tmp42, cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *PutMultilistCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
	var errorsOnly string
	cc.Flags().StringVar(&cmd.ErrorsOnly, "errors_only", errorsOnly, `Returns errors only. If disabled, operations will be faster and require less memory.`)
	var results string
	cc.Flags().StringVar(&cmd.Results, "results", results, `Return results of the operation. If disabled, operations will be faster and require less memory.`)
}

// Run makes the HTTP request corresponding to the ReindexMultilistCommand command.
func (cmd *ReindexMultilistCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the fixed /xlist/reindex endpoint.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/xlist/reindex"
	}
	var payload client.ListQL
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// --errors_only is a tri-state string flag: unset maps to a nil *bool.
	var tmp43 *bool
	if cmd.ErrorsOnly != "" {
		var err error
		tmp43, err = boolVal(cmd.ErrorsOnly)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--errors_only", "err", err)
			return err
		}
	}
	resp, err := c.ReindexMultilist(ctx, path, &payload, tmp43, cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *ReindexMultilistCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON")
	cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'")
	var errorsOnly string
	cc.Flags().StringVar(&cmd.ErrorsOnly, "errors_only", errorsOnly, `Returns errors only. If disabled, operations will be faster and require less memory.`)
}

// Run makes the HTTP request corresponding to the RestoreMultilistCommand command.
func (cmd *RestoreMultilistCommand) Run(c *client.Client, args []string) error {
	// An explicit path argument overrides the fixed /xlist/restore endpoint.
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/xlist/restore"
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// --keep is a tri-state string flag: unset maps to a nil *bool.
	var tmp44 *bool
	if cmd.Keep != "" {
		var err error
		tmp44, err = boolVal(cmd.Keep)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--keep", "err", err)
			return err
		}
	}
	resp, err := c.RestoreMultilist(ctx, path, tmp44, stringFlagVal("list_id_prefix", cmd.ListIDPrefix), stringFlagVal("list_id_suffix", cmd.ListIDSuffix), stringFlagVal("src", cmd.Src), stringFlagVal("src_file", cmd.SrcFile))
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *RestoreMultilistCommand) RegisterFlags(cc *cobra.Command, c *client.Client) {
	var keep string
	cc.Flags().StringVar(&cmd.Keep, "keep", keep, `Keep existing lists. Only restore missing lists`)
	var listIDPrefix string
	cc.Flags().StringVar(&cmd.ListIDPrefix, "list_id_prefix", listIDPrefix, `Optional alternative list id prefix. If not provided the original list id/segment ids will be used and any existing list will be overwritten.`)
	var listIDSuffix string
	cc.Flags().StringVar(&cmd.ListIDSuffix, "list_id_suffix", listIDSuffix, `Optional alternative list id suffix. If not provided the original list id/segment ids will be used and any existing list will be overwritten.`)
	var src string
	cc.Flags().StringVar(&cmd.Src, "src", src, `The body will not contain any data. Instead load data from provided URL. The call will return when the backup has finished. If the source is s3, the source should be defined as s3://bucket/path/file.bin. 
Replace bucket and path+file`)
	var srcFile string
	cc.Flags().StringVar(&cmd.SrcFile, "src_file", srcFile, `The body will not contain any data. Instead load data from file from this path. The call will return when the backup has finished.`)
}

// Run makes the HTTP request corresponding to the VerifyMultilistCommand command.
func (cmd *VerifyMultilistCommand) Run(c *client.Client, args []string) error {
	var path string
	if len(args) > 0 {
		path = args[0]
	} else {
		path = "/xlist/verify"
	}
	var payload client.ListQL
	if cmd.Payload != "" {
		err := json.Unmarshal([]byte(cmd.Payload), &payload)
		if err != nil {
			return fmt.Errorf("failed to deserialize payload: %s", err)
		}
	}
	logger := goa.NewLogger(log.New(os.Stderr, "", log.LstdFlags))
	ctx := goa.WithLogger(context.Background(), logger)
	// All four booleans are tri-state string flags: unset maps to nil *bool
	// so the server default applies.
	var tmp45 *bool
	if cmd.Clear != "" {
		var err error
		tmp45, err = boolVal(cmd.Clear)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--clear", "err", err)
			return err
		}
	}
	var tmp46 *bool
	if cmd.Elements != "" {
		var err error
		tmp46, err = boolVal(cmd.Elements)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--elements", "err", err)
			return err
		}
	}
	var tmp47 *bool
	if cmd.ErrorsOnly != "" {
		var err error
		tmp47, err = boolVal(cmd.ErrorsOnly)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--errors_only", "err", err)
			return err
		}
	}
	var tmp48 *bool
	if cmd.Repair != "" {
		var err error
		tmp48, err = boolVal(cmd.Repair)
		if err != nil {
			goa.LogError(ctx, "failed to parse flag into *bool value", "flag", "--repair", "err", err)
			return err
		}
	}
	resp, err := c.VerifyMultilist(ctx, path, &payload, tmp45, tmp46, tmp47, tmp48, cmd.ContentType)
	if err != nil {
		goa.LogError(ctx, "failed", "err", err)
		return err
	}
	goaclient.HandleResponse(c.Client, resp, cmd.PrettyPrint)
	return nil
}

// RegisterFlags registers the command flags with the command line.
func (cmd *VerifyMultilistCommand) RegisterFlags(cc *cobra.Command, c *client.Client) { cc.Flags().StringVar(&cmd.Payload, "payload", "", "Request body encoded in JSON") cc.Flags().StringVar(&cmd.ContentType, "content", "", "Request content type override, e.g. 'application/x-www-form-urlencoded'") var clear string cc.Flags().StringVar(&cmd.Clear, "clear", clear, `Clear list if unable to repair`) var elements string cc.Flags().StringVar(&cmd.Elements, "elements", elements, `Verify elements as well`) var errorsOnly string cc.Flags().StringVar(&cmd.ErrorsOnly, "errors_only", errorsOnly, `Returns errors only. If disabled, operations will be faster and require less memory.`) var repair string cc.Flags().StringVar(&cmd.Repair, "repair", repair, `Repair lists with problems automatically.`) }
api/tool/cli/commands.go
0.620392
0.452899
commands.go
starcoder
package xlsx import ( "github.com/plandem/xlsx/format" "github.com/plandem/xlsx/types" ) //Range is a object that provides some functionality for cells inside of range. E.g.: A1:D12 type Range struct { //we don't want to pollute Range with bound's public properties bounds types.Bounds sheet Sheet } //newRangeFromRef create and returns Range for requested ref func newRangeFromRef(sheet Sheet, ref types.Ref) *Range { return &Range{ ref.ToBounds(), sheet, } } //newRange create and returns Range for requested 0-based indexes func newRange(sheet Sheet, fromCol, toCol, fromRow, toRow int) *Range { return &Range{ types.BoundsFromIndexes(fromCol, fromRow, toCol, toRow), sheet, } } //Bounds returns bounds of range func (r *Range) Bounds() types.Bounds { return r.bounds } //Reset resets each cell data into zero state func (r *Range) Reset() { r.Walk(func(idx, cIdx, rIdx int, c *Cell) { c.Reset() }) } //Clear clears each cell value in range func (r *Range) Clear() { r.Walk(func(idx, cIdx, rIdx int, c *Cell) { c.Clear() }) } //Cells returns iterator for all cells in range func (r *Range) Cells() RangeIterator { return newRangeIterator(r) } //Values returns values for all cells in range func (r *Range) Values() []string { width, height := r.bounds.Dimension() values := make([]string, 0, width*height) r.Walk(func(idx, cIdx, rIdx int, c *Cell) { values = append(values, c.Value()) }) return values } //Walk calls callback cb for each Cell in range func (r *Range) Walk(cb func(idx, cIdx, rIdx int, c *Cell)) { for idx, cells := 0, r.Cells(); cells.HasNext(); idx++ { iCol, iRow, cell := cells.Next() cb(idx, iCol, iRow, cell) } } //SetFormatting sets style format to all cells in range func (r *Range) SetFormatting(styleID format.StyleID) { r.Walk(func(idx, cIdx, rIdx int, c *Cell) { c.SetFormatting(styleID) }) } //CopyToRef copies range cells into another range starting with ref. 
//N.B.: Merged cells are not supported func (r *Range) CopyToRef(ref types.Ref) { target := ref.ToBounds() r.CopyTo(target.ToCol, target.ToRow) } //CopyTo copies range cells into another range starting indexes cIdx and rIdx //N.B.: Merged cells are not supported func (r *Range) CopyTo(cIdx, rIdx int) { //result is unpredictable in stream mode if mode := r.sheet.mode(); (mode & sheetModeStream) != 0 { panic(errorNotSupportedStream) } //ignore self-copying if cIdx != r.bounds.FromCol || rIdx != r.bounds.FromRow { cOffset, rOffset := cIdx-r.bounds.FromCol, rIdx-r.bounds.FromRow r.Walk(func(idx, cIdxSource, rIdxSource int, source *Cell) { //process only non empty cells if !isCellEmpty(source.ml) { //ignore target cells with negative indexes cIdxTarget, rIdxTarget := cIdxSource+cOffset, rIdxSource+rOffset if cIdxTarget >= 0 && rIdxTarget >= 0 { target := r.sheet.Cell(cIdxTarget, rIdxTarget) //copy data *target.ml = *source.ml //refresh ref target.ml.Ref = types.CellRefFromIndexes(cIdxTarget, rIdxTarget) } } }) } }
range.go
0.666605
0.479626
range.go
starcoder
package dataframe

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/ptiger10/pd/internal/index"
	"github.com/ptiger10/pd/internal/values"
	"github.com/ptiger10/pd/options"
)

// A DataFrame is a 2D collection of one or more Series with a shared index and associated columns.
type DataFrame struct {
	name    string
	vals    []values.Container
	cols    index.Columns
	Columns Columns
	index   index.Index
	Index   Index
	InPlace InPlace
}

// String renders the DataFrame contents, or a placeholder for an empty frame.
func (df *DataFrame) String() string {
	if !Equal(df, newEmptyDataFrame()) {
		return df.print()
	}
	return "{Empty DataFrame}"
}

// Index contains index level data.
type Index struct {
	df *DataFrame
}

// String summarizes the index length and number of levels.
func (idx Index) String() string {
	return fmt.Sprintf("{DataFrame Index | Len: %d, NumLevels: %d}\n",
		idx.Len(), idx.df.IndexLevels())
}

// Columns contains column level data.
type Columns struct {
	df *DataFrame
}

// String summarizes the column count and number of column levels.
func (col Columns) String() string {
	return fmt.Sprintf("{DataFrame Columns | NumCols: %d, NumLevels: %d}\n",
		col.df.NumCols(), col.df.ColLevels())
}

// A Row is a single row in a DataFrame.
type Row struct {
	Values     []interface{}
	Nulls      []bool
	ValueTypes []options.DataType
	Labels     []interface{}
	LabelTypes []options.DataType
}

// String prints each component of the row on its own labeled line.
func (r Row) String() string {
	headings := []string{"Values", "IsNull", "ValueTypes", "Labels", "LabelTypes"}
	contents := []interface{}{r.Values, r.Nulls, r.ValueTypes, r.Labels, r.LabelTypes}
	// LabelTypes is 10 characters wide, so left padding set to 10
	gap := strings.Repeat(" ", values.GetDisplayElementWhitespaceBuffer())

	var sb strings.Builder
	for pos, heading := range headings {
		fmt.Fprintf(&sb, "%10v:%v%v\n", heading, gap, contents[pos])
	}
	return sb.String()
}

// Config customizes the DataFrame constructor.
type Config struct {
	Name            string
	DataType        options.DataType
	Index           interface{}
	IndexName       string
	MultiIndex      []interface{}
	MultiIndexNames []string
	Col             []string
	ColName         string
	MultiCol        [][]string
	MultiColNames   []string
	Manual          bool
}

// A Grouping returns a collection of index labels with mutually exclusive integer positions.
type Grouping struct {
	df     *DataFrame
	groups map[string]*group
	err    bool
}

// String summarizes the number of groups and their names.
func (g Grouping) String() string {
	return fmt.Sprintf("{DataFrame Grouping | NumGroups: %v, Groups: [%v]}\n",
		len(g.groups), strings.Join(g.Groups(), ", "))
}

// InPlace contains methods for modifying a DataFrame in place.
type InPlace struct {
	df *DataFrame
}

// String lists every method available on the in-place handler, one per line.
func (ip InPlace) String() string {
	var sb strings.Builder
	sb.WriteString("{InPlace DataFrame Handler}\n")
	sb.WriteString("Methods:\n")
	handlerType := reflect.TypeOf(InPlace{})
	for m := 0; m < handlerType.NumMethod(); m++ {
		sb.WriteString(handlerType.Method(m).Name)
		sb.WriteByte('\n')
	}
	return sb.String()
}
dataframe/dataframe.go
0.629319
0.400837
dataframe.go
starcoder
package comments

import (
	"fmt"
	"strings"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

// lostComment specifies a mapping between a fieldName (in the old structure), which doesn't exist in the
// new tree, and its related comment. It optionally specifies the line number of the comment, a positive
// line number is used to distinguish inline comments, which require special handling to resolve the
// correct field name, since they are attached to the value and not the key of a YAML key-value pair.
type lostComment struct {
	fieldName string // field name from the old tree (for inline comments: the field's value)
	comment   string // cleaned comment text, "#" prefix and surrounding whitespace stripped
	line      int    // source line of an inline comment; -1 for head/foot comments
}

// Since the YAML walker needs to visit all keys as scalar nodes, we have no way of distinguishing keys from
// values when trying to resolve the field names for inline comments. By tracking the leftmost key (lowest
// column value, be it a key or value) for each row, we can figure out the actual key for inline comments
// and not accidentally use a value as the field name, since keys are guaranteed to come before values.
type trackedKey struct {
	name   string // scalar value of the leftmost node seen on the line so far
	column int    // its column; lower column => more likely the key
}

// trackKey compares the column position of the given node to the stored best (lowest) column position for the
// node's line and replaces the best if the given node is more likely to be a key (has a smaller column value).
func (c *copier) trackKey(node *yaml.Node) {
	// If the given key doesn't have a smaller column value, return.
	if key, ok := c.trackedKeys[node.Line]; ok {
		if key.column < node.Column {
			return
		}
	}
	// Store the new best tracked key for the line.
	c.trackedKeys[node.Line] = trackedKey{
		name:   node.Value,
		column: node.Column,
	}
}

// parseComments parses the line, head and foot comments of the given node in this
// order and cleans them up (removes the potential "#" prefix and trims whitespace).
// The order matters: rememberLostComments relies on index 0 being the inline (line)
// comment. Nonexistent comments yield empty strings, filtered out by the caller.
func parseComments(node *yaml.Node) (comments []string) {
	for _, comment := range []string{node.LineComment, node.HeadComment, node.FootComment} {
		comments = append(comments, strings.TrimSpace(strings.TrimPrefix(comment, "#")))
	}
	return
}

// rememberLostComments goes through the comments attached to the 'from' node and adds
// them to the internal lostComments slice for usage after the tree walk. It also
// stores the line numbers for inline comments for resolving the correct field names.
func (c *copier) rememberLostComments(from *yaml.RNode) {
	// Track the given node as a potential key for inline comments.
	c.trackKey(from.Document())
	// Get the field name, for head/foot comments this is the correct key,
	// but for inline comments this resolves to the value of the field instead.
	fieldName := from.Document().Value
	comments := parseComments(from.Document())
	line := -1 // Don't store the line number of the comment by default, this is reserved for inline comments.
	for i, comment := range comments {
		// If the line number is set (positive), an inline comment
		// has been registered for this node and we can stop parsing.
		// Note: this deliberately skips head/foot comments once an
		// inline comment has been recorded for the node.
		if line >= 0 {
			break
		}
		// Do not store blank comment entries (nonexistent comments).
		if len(comment) == 0 {
			continue
		}
		if i == 0 {
			// If this node has an inline comment, store its line
			// number for resolving the correct field name later.
			// (Index 0 is the inline comment; see parseComments.)
			line = from.Document().Line
		}
		// Append the lost comment to the slice of copier.
		c.lostComments = append(c.lostComments, lostComment{
			fieldName: fieldName,
			comment:   comment,
			line:      line,
		})
	}
}

// restoreLostComments writes the cached lost comments to the top of the to YAML tree.
// If it encounters inline comments, it will check the cached tracked keys for the
// best key for the line on which the comment resided. If no key is found for some
// reason, it will use the stored field name (the field value) as the key.
func (c *copier) restoreLostComments(to *yaml.RNode) {
	for i, lc := range c.lostComments {
		// Emit the banner once, before the first restored comment.
		if i == 0 {
			to.Document().HeadComment += "\nComments lost during file manipulation:"
		}
		fieldName := lc.fieldName
		if lc.line >= 0 {
			// This is an inline comment, resolve the field name from the tracked keys.
			if key, ok := c.trackedKeys[lc.line]; ok {
				fieldName = key.name
			}
		}
		to.Document().HeadComment += fmt.Sprintf("\n# Field %q: %q", fieldName, lc.comment)
	}
	// Drop the leading newline introduced when appending to an empty head comment.
	to.Document().HeadComment = strings.TrimPrefix(to.Document().HeadComment, "\n")
}
pkg/serializer/comments/lost.go
0.617282
0.48688
lost.go
starcoder
package html_builder func Text(text string) Node { return TextNode(text) } func Br() Node { return RawNode("<br/>") } func Hr() Node { return RawNode("<hr/>") } func A(attributes Attrs, children ...Node) Node { return Element{"a", attributes, children} } func Abbr(attributes Attrs, children ...Node) Node { return Element{"abbr", attributes, children} } func Acronym(attributes Attrs, children ...Node) Node { return Element{"acronym", attributes, children} } func Address(attributes Attrs, children ...Node) Node { return Element{"address", attributes, children} } func Applet(attributes Attrs, children ...Node) Node { return Element{"applet", attributes, children} } func Area(attributes Attrs, children ...Node) Node { return Element{"area", attributes, children} } func Article(attributes Attrs, children ...Node) Node { return Element{"article", attributes, children} } func Aside(attributes Attrs, children ...Node) Node { return Element{"aside", attributes, children} } func Audio(attributes Attrs, children ...Node) Node { return Element{"audio", attributes, children} } func B(attributes Attrs, children ...Node) Node { return Element{"b", attributes, children} } func Base(attributes Attrs, children ...Node) Node { return Element{"base", attributes, children} } func Basefont(attributes Attrs, children ...Node) Node { return Element{"basefont", attributes, children} } func Bdi(attributes Attrs, children ...Node) Node { return Element{"bdi", attributes, children} } func Bdo(attributes Attrs, children ...Node) Node { return Element{"bdo", attributes, children} } func Big(attributes Attrs, children ...Node) Node { return Element{"big", attributes, children} } func Blockquote(attributes Attrs, children ...Node) Node { return Element{"blockquote", attributes, children} } func Body(attributes Attrs, children ...Node) Node { return Element{"body", attributes, children} } func Button(attributes Attrs, children ...Node) Node { return Element{"button", attributes, children} } func 
Canvas(attributes Attrs, children ...Node) Node { return Element{"canvas", attributes, children} } func Caption(attributes Attrs, children ...Node) Node { return Element{"caption", attributes, children} } func Center(attributes Attrs, children ...Node) Node { return Element{"center", attributes, children} } func Cite(attributes Attrs, children ...Node) Node { return Element{"cite", attributes, children} } func Code(attributes Attrs, children ...Node) Node { return Element{"code", attributes, children} } func Col(attributes Attrs, children ...Node) Node { return Element{"col", attributes, children} } func Colgroup(attributes Attrs, children ...Node) Node { return Element{"colgroup", attributes, children} } func Data(attributes Attrs, children ...Node) Node { return Element{"data", attributes, children} } func Datalist(attributes Attrs, children ...Node) Node { return Element{"datalist", attributes, children} } func Dd(attributes Attrs, children ...Node) Node { return Element{"dd", attributes, children} } func Del(attributes Attrs, children ...Node) Node { return Element{"del", attributes, children} } func Details(attributes Attrs, children ...Node) Node { return Element{"details", attributes, children} } func Dfn(attributes Attrs, children ...Node) Node { return Element{"dfn", attributes, children} } func Dialog(attributes Attrs, children ...Node) Node { return Element{"dialog", attributes, children} } func Dir(attributes Attrs, children ...Node) Node { return Element{"dir", attributes, children} } func Div(attributes Attrs, children ...Node) Node { return Element{"div", attributes, children} } func Dl(attributes Attrs, children ...Node) Node { return Element{"dl", attributes, children} } func Dt(attributes Attrs, children ...Node) Node { return Element{"dt", attributes, children} } func Em(attributes Attrs, children ...Node) Node { return Element{"em", attributes, children} } func Embed(attributes Attrs, children ...Node) Node { return Element{"embed", attributes, 
children} } func Fieldset(attributes Attrs, children ...Node) Node { return Element{"fieldset", attributes, children} } func Figcaption(attributes Attrs, children ...Node) Node { return Element{"figcaption", attributes, children} } func Figure(attributes Attrs, children ...Node) Node { return Element{"figure", attributes, children} } func Font(attributes Attrs, children ...Node) Node { return Element{"font", attributes, children} } func Footer(attributes Attrs, children ...Node) Node { return Element{"footer", attributes, children} } func Form(attributes Attrs, children ...Node) Node { return Element{"form", attributes, children} } func Frame(attributes Attrs, children ...Node) Node { return Element{"frame", attributes, children} } func Frameset(attributes Attrs, children ...Node) Node { return Element{"frameset", attributes, children} } func H1(attributes Attrs, children ...Node) Node { return Element{"h1", attributes, children} } func H2(attributes Attrs, children ...Node) Node { return Element{"h2", attributes, children} } func H3(attributes Attrs, children ...Node) Node { return Element{"h3", attributes, children} } func H4(attributes Attrs, children ...Node) Node { return Element{"h4", attributes, children} } func H5(attributes Attrs, children ...Node) Node { return Element{"h5", attributes, children} } func H6(attributes Attrs, children ...Node) Node { return Element{"h6", attributes, children} } func Head(attributes Attrs, children ...Node) Node { return Element{"head", attributes, children} } func Header(attributes Attrs, children ...Node) Node { return Element{"header", attributes, children} } func Html(attributes Attrs, children ...Node) Node { return Element{"html", attributes, children} } func I(attributes Attrs, children ...Node) Node { return Element{"i", attributes, children} } func Iframe(attributes Attrs, children ...Node) Node { return Element{"iframe", attributes, children} } func Img(attributes Attrs, children ...Node) Node { return 
Element{"img", attributes, children} } func Input(attributes Attrs, children ...Node) Node { return Element{"input", attributes, children} } func Ins(attributes Attrs, children ...Node) Node { return Element{"ins", attributes, children} } func Kbd(attributes Attrs, children ...Node) Node { return Element{"kbd", attributes, children} } func Label(attributes Attrs, children ...Node) Node { return Element{"label", attributes, children} } func Legend(attributes Attrs, children ...Node) Node { return Element{"legend", attributes, children} } func Li(attributes Attrs, children ...Node) Node { return Element{"li", attributes, children} } func Link(attributes Attrs, children ...Node) Node { return Element{"link", attributes, children} } func Main(attributes Attrs, children ...Node) Node { return Element{"main", attributes, children} } func Map(attributes Attrs, children ...Node) Node { return Element{"map", attributes, children} } func Mark(attributes Attrs, children ...Node) Node { return Element{"mark", attributes, children} } func Meta(attributes Attrs, children ...Node) Node { return Element{"meta", attributes, children} } func Meter(attributes Attrs, children ...Node) Node { return Element{"meter", attributes, children} } func Nav(attributes Attrs, children ...Node) Node { return Element{"nav", attributes, children} } func Noframes(attributes Attrs, children ...Node) Node { return Element{"noframes", attributes, children} } func Noscript(attributes Attrs, children ...Node) Node { return Element{"noscript", attributes, children} } func Object(attributes Attrs, children ...Node) Node { return Element{"object", attributes, children} } func Ol(attributes Attrs, children ...Node) Node { return Element{"ol", attributes, children} } func Optgroup(attributes Attrs, children ...Node) Node { return Element{"optgroup", attributes, children} } func Option(attributes Attrs, children ...Node) Node { return Element{"option", attributes, children} } func Output(attributes Attrs, 
children ...Node) Node { return Element{"output", attributes, children} } func P(attributes Attrs, children ...Node) Node { return Element{"p", attributes, children} } func Param(attributes Attrs, children ...Node) Node { return Element{"param", attributes, children} } func Picture(attributes Attrs, children ...Node) Node { return Element{"picture", attributes, children} } func Pre(attributes Attrs, children ...Node) Node { return Element{"pre", attributes, children} } func Progress(attributes Attrs, children ...Node) Node { return Element{"progress", attributes, children} } func Q(attributes Attrs, children ...Node) Node { return Element{"q", attributes, children} } func Rp(attributes Attrs, children ...Node) Node { return Element{"rp", attributes, children} } func Rt(attributes Attrs, children ...Node) Node { return Element{"rt", attributes, children} } func Ruby(attributes Attrs, children ...Node) Node { return Element{"ruby", attributes, children} } func S(attributes Attrs, children ...Node) Node { return Element{"s", attributes, children} } func Samp(attributes Attrs, children ...Node) Node { return Element{"samp", attributes, children} } func Script(attributes Attrs, children ...Node) Node { return Element{"script", attributes, children} } func Section(attributes Attrs, children ...Node) Node { return Element{"section", attributes, children} } func Select(attributes Attrs, children ...Node) Node { return Element{"select", attributes, children} } func Small(attributes Attrs, children ...Node) Node { return Element{"small", attributes, children} } func Source(attributes Attrs, children ...Node) Node { return Element{"source", attributes, children} } func Span(attributes Attrs, children ...Node) Node { return Element{"span", attributes, children} } func Strike(attributes Attrs, children ...Node) Node { return Element{"strike", attributes, children} } func Strong(attributes Attrs, children ...Node) Node { return Element{"strong", attributes, children} } func 
Style(attributes Attrs, children ...Node) Node { return Element{"style", attributes, children} } func Sub(attributes Attrs, children ...Node) Node { return Element{"sub", attributes, children} } func Summary(attributes Attrs, children ...Node) Node { return Element{"summary", attributes, children} } func Sup(attributes Attrs, children ...Node) Node { return Element{"sup", attributes, children} } func Svg(attributes Attrs, children ...Node) Node { return Element{"svg", attributes, children} } func Table(attributes Attrs, children ...Node) Node { return Element{"table", attributes, children} } func Tbody(attributes Attrs, children ...Node) Node { return Element{"tbody", attributes, children} } func Td(attributes Attrs, children ...Node) Node { return Element{"td", attributes, children} } func Template(attributes Attrs, children ...Node) Node { return Element{"template", attributes, children} } func Textarea(attributes Attrs, children ...Node) Node { return Element{"textarea", attributes, children} } func Tfoot(attributes Attrs, children ...Node) Node { return Element{"tfoot", attributes, children} } func Th(attributes Attrs, children ...Node) Node { return Element{"th", attributes, children} } func Thead(attributes Attrs, children ...Node) Node { return Element{"thead", attributes, children} } func Time(attributes Attrs, children ...Node) Node { return Element{"time", attributes, children} } func Title(attributes Attrs, children ...Node) Node { return Element{"title", attributes, children} } func Tr(attributes Attrs, children ...Node) Node { return Element{"tr", attributes, children} } func Track(attributes Attrs, children ...Node) Node { return Element{"track", attributes, children} } func Tt(attributes Attrs, children ...Node) Node { return Element{"tt", attributes, children} } func U(attributes Attrs, children ...Node) Node { return Element{"u", attributes, children} } func Ul(attributes Attrs, children ...Node) Node { return Element{"ul", attributes, children} } 
func Var(attributes Attrs, children ...Node) Node { return Element{"var", attributes, children} } func Video(attributes Attrs, children ...Node) Node { return Element{"video", attributes, children} } func Wbr(attributes Attrs, children ...Node) Node { return Element{"wbr", attributes, children} }
elements.go
0.850018
0.45532
elements.go
starcoder
package graphlib

import (
	"math"
	"strconv"
)

// Point is an unsigned 2D coordinate on the map.
type Point struct {
	X uint
	Y uint
}

// MapNode is a named, positioned vertex with its outgoing edges.
type MapNode struct {
	point Point
	name  string
	Links []MapEdge
}

// MapEdge is a directed, weighted link between two map nodes.
type MapEdge struct {
	Dist uint
	from *MapNode
	to   *MapNode
}

// MapGraph is a directed graph of named nodes with 2D coordinates.
type MapGraph struct {
	nodes      map[string]*MapNode
	CoorExists map[string]bool // set of occupied coordinates, keyed "x,y"
	NodeExists map[string]bool // set of node names already in use
}

// NewMapGraph returns an empty, ready-to-use graph.
func NewMapGraph() *MapGraph {
	return &MapGraph{nodes: map[string]*MapNode{}, CoorExists: map[string]bool{}, NodeExists: map[string]bool{}}
}

// AddNodes adds the named nodes at the given [x, y] coordinates. It panics
// when a coordinate pair is malformed, reuses an occupied position, or
// reuses an existing name.
func (g *MapGraph) AddNodes(names map[string][]uint) {
	for name, coordinates := range names {
		if len(coordinates) != 2 {
			panic("there can only be 2 coordinates for each node")
		}
		// FIX: join the coordinates with a separator. Plain concatenation
		// made distinct points collide — e.g. (1,23) and (12,3) both
		// produced the key "123" and triggered a false duplicate panic.
		coor := strconv.FormatUint(uint64(coordinates[0]), 10) + "," + strconv.FormatUint(uint64(coordinates[1]), 10)
		if g.CoorExists[coor] {
			panic("same coordinates cannot have different points")
		}
		if g.NodeExists[name] {
			panic("points cannot have the same names")
		}
		g.CoorExists[coor] = true
		g.NodeExists[name] = true
		g.nodes[name] = &MapNode{point: Point{X: coordinates[0], Y: coordinates[1]}, name: name, Links: []MapEdge{}}
	}
}

// CreatePath adds an edge of weight dist between two existing nodes.
// ptype "u" adds a one-way edge from->to; "bi" also adds the reverse edge.
// It panics on an unknown ptype or a missing endpoint.
func (g *MapGraph) CreatePath(from, to, ptype string, dist uint) {
	if ptype != "bi" && ptype != "u" {
		panic("path type can be either \"bi\"(bidirectional) or \"u\"(unidirectional)")
	}
	toNode := g.nodes[to]
	fromNode := g.nodes[from]
	if toNode == nil || fromNode == nil {
		panic("creating edge for node that does not exist!")
	}
	fromNode.Links = append(fromNode.Links, MapEdge{from: fromNode, to: toNode, Dist: dist})
	if ptype == "bi" {
		toNode.Links = append(toNode.Links, MapEdge{to: fromNode, from: toNode, Dist: dist})
	}
}

// AStar runs a best-first search from source and returns, per node name,
// the accumulated edge distance and the predecessor on the found path.
// Unreached nodes keep distance INFINITY and an empty predecessor.
//
// NOTE(review): the "heuristic" term added to hdist is the Euclidean
// distance from the source to the node being expanded — not from a
// neighbor to a goal (the function takes no goal). That is not an
// admissible A* heuristic; confirm whether plain Dijkstra was intended.
func (g *MapGraph) AStar(source string) (map[string]uint, map[string]string) {
	// GetNext picks the unvisited node with the smallest heuristic distance.
	GetNext := func(hdist, visited map[string]uint) string {
		min := INFINITY
		u := ""
		for key, value := range hdist {
			if _, ok := visited[key]; ok || value == INFINITY {
				continue
			} else if min > value {
				min = value
				u = key
			}
		}
		return u
	}
	dist, hdist, prev := map[string]uint{}, map[string]uint{}, map[string]string{}
	if _, ok := g.nodes[source]; !ok {
		panic("the given source node does not exist")
	}
	for _, node := range g.nodes {
		dist[node.name] = INFINITY
		hdist[node.name] = INFINITY
		prev[node.name] = ""
	}
	// CalculateDist is the Euclidean distance between two points.
	CalculateDist := func(p1, p2 Point) float64 {
		// FIX: convert to float64 BEFORE subtracting. The coordinates are
		// uints, so p1.X-p2.X wrapped around to a huge value whenever
		// p2.X > p1.X, corrupting the heuristic.
		dx := float64(p1.X) - float64(p2.X)
		dy := float64(p1.Y) - float64(p2.Y)
		return math.Sqrt(dx*dx + dy*dy)
	}
	dist[source] = 0
	hdist[source] = 0
	visited := map[string]uint{}
	for u := source; u != ""; u = GetNext(hdist, visited) {
		visited[u] = 1
		for _, link := range g.nodes[u].Links {
			if _, ok := visited[link.to.name]; !ok {
				cdist := dist[u]
				alt := cdist + link.Dist
				if alt < dist[link.to.name] {
					dist[link.to.name] = alt
					hdist[link.to.name] = alt + uint(CalculateDist(g.nodes[source].point, g.nodes[u].point))
					prev[link.to.name] = u
				}
			}
		}
	}
	return dist, prev
}
graphmap.go
0.587707
0.426799
graphmap.go
starcoder
package ast

import (
	"fmt"
	"strconv"
	"time"
)

// Compile-time checks that each constant node satisfies its typed-node interface.
var _ BoolNode = (*BoolConstNode)(nil)
var _ DatetimeNode = (*DatetimeConstNode)(nil)
var _ Int64Node = (*Int64ConstNode)(nil)
var _ Float64Node = (*Float64ConstNode)(nil)
var _ StringNode = (*StringConstNode)(nil)

// NewBoolConstNode returns a BoolNode wrapping the given constant value.
func NewBoolConstNode(value bool) BoolNode {
	return &BoolConstNode{value: value}
}

// BoolConstNode wraps a bool constant expression
type BoolConstNode struct {
	value bool
}

// Accept dispatches this node to the visitor.
func (node *BoolConstNode) Accept(visitor Visitor) {
	visitor.VisitBoolConstNode(node)
}

// GetType reports the node's value type.
func (node *BoolConstNode) GetType() NodeType {
	return NodeTypeBool
}

// EvalBool returns the constant; the symbol table is unused.
func (node *BoolConstNode) EvalBool(_ Symbols) bool {
	return node.value
}

func (node *BoolConstNode) String() string {
	return fmt.Sprintf("%v", node.value)
}

// IsConst is always true for constant nodes.
func (node *BoolConstNode) IsConst() bool {
	return true
}

// DatetimeConstNode wraps a datetime constant expression
type DatetimeConstNode struct {
	value time.Time
}

// Accept dispatches this node to the visitor.
func (node *DatetimeConstNode) Accept(visitor Visitor) {
	visitor.VisitDatetimeConstNode(node)
}

// GetType reports the node's value type.
func (node *DatetimeConstNode) GetType() NodeType {
	return NodeTypeDatetime
}

// EvalDatetime returns a pointer to the constant; the symbol table is unused.
func (node *DatetimeConstNode) EvalDatetime(_ Symbols) *time.Time {
	return &node.value
}

func (node *DatetimeConstNode) String() string {
	return fmt.Sprintf("%v", node.value)
}

// IsConst is always true for constant nodes.
func (node *DatetimeConstNode) IsConst() bool {
	return true
}

// Float64ConstNode wraps a float64 constant expression
type Float64ConstNode struct {
	value float64
}

// Accept dispatches this node to the visitor.
func (node *Float64ConstNode) Accept(visitor Visitor) {
	visitor.VisitFloat64ConstNode(node)
}

// GetType reports the node's value type.
func (node *Float64ConstNode) GetType() NodeType {
	return NodeTypeFloat64
}

// EvalFloat64 returns a pointer to the constant; the symbol table is unused.
func (node *Float64ConstNode) EvalFloat64(_ Symbols) *float64 {
	return &node.value
}

// EvalString renders the constant using the shortest decimal form that
// round-trips exactly (FormatFloat precision -1).
func (node *Float64ConstNode) EvalString(_ Symbols) *string {
	result := strconv.FormatFloat(node.value, 'f', -1, 64)
	return &result
}

func (node *Float64ConstNode) String() string {
	return fmt.Sprintf("%v", node.value)
}

// IsConst is always true for constant nodes.
func (node *Float64ConstNode) IsConst() bool {
	return true
}

// Int64ConstNode wraps an int64 constant expression
type Int64ConstNode struct {
	value int64
}

// Accept dispatches this node to the visitor.
func (node *Int64ConstNode) Accept(visitor Visitor) {
	visitor.VisitInt64ConstNode(node)
}

// GetType reports the node's value type.
func (node *Int64ConstNode) GetType() NodeType {
	return NodeTypeInt64
}

// EvalInt64 returns a pointer to the constant; the symbol table is unused.
func (node *Int64ConstNode) EvalInt64(_ Symbols) *int64 {
	return &node.value
}

// EvalString renders the constant in base 10.
func (node *Int64ConstNode) EvalString(_ Symbols) *string {
	result := strconv.FormatInt(node.value, 10)
	return &result
}

// ToFloat64 returns a float64 constant node holding the converted value.
func (node *Int64ConstNode) ToFloat64() Float64Node {
	return &Float64ConstNode{value: float64(node.value)}
}

func (node *Int64ConstNode) String() string {
	return strconv.FormatInt(node.value, 10)
}

// IsConst is always true for constant nodes.
func (node *Int64ConstNode) IsConst() bool {
	return true
}

// StringConstNode wraps a string constant expression
type StringConstNode struct {
	value string
}

// Accept dispatches this node to the visitor.
func (node *StringConstNode) Accept(visitor Visitor) {
	visitor.VisitStringConstNode(node)
}

// GetType reports the node's value type.
func (node *StringConstNode) GetType() NodeType {
	return NodeTypeString
}

// EvalString returns a pointer to the constant; the symbol table is unused.
func (node *StringConstNode) EvalString(_ Symbols) *string {
	return &node.value
}

// String renders the constant wrapped in double quotes.
func (node *StringConstNode) String() string {
	return fmt.Sprintf(`"%v"`, node.value)
}

// IsConst is always true for constant nodes.
func (node *StringConstNode) IsConst() bool {
	return true
}

var _ Node = (*NullConstNode)(nil)

// NullConstNode wraps a null constant expression
type NullConstNode struct{}

// Accept dispatches this node to the visitor.
func (node NullConstNode) Accept(visitor Visitor) {
	visitor.VisitNullConstNode(node)
}

// GetType reports NodeTypeOther, as null has no concrete value type.
func (NullConstNode) GetType() NodeType {
	return NodeTypeOther
}

func (NullConstNode) String() string {
	return "null"
}

// IsConst is always true for constant nodes.
func (node NullConstNode) IsConst() bool {
	return true
}
storage/ast/node_const.go
0.763219
0.408808
node_const.go
starcoder
package window

import (
	"syscall/js"
)

// Selection returns the document's current text selection
// (window.getSelection()).
func (document document) Selection() Selection {
	return Selection(document.Call("getSelection"))
}

type (
	// Selection wraps the browser Selection object.
	Selection js.Value
	// Range wraps the browser Range object.
	Range js.Value
)

// Range returns the range at the given index (Selection.getRangeAt).
func (selection Selection) Range(index int) Range {
	return Range(js.Value(selection).Call("getRangeAt", index))
}

// RangeCount reports the number of ranges in the selection.
func (selection Selection) RangeCount() int {
	return js.Value(selection).Get("rangeCount").Int()
}

// AddRange adds the range to the selection (Selection.addRange).
func (selection Selection) AddRange(ran Range) {
	// BUG FIX: the defined type Range must be converted back to js.Value
	// before being passed as a call argument — js.ValueOf only recognizes
	// js.Value itself and panics on other named types.
	js.Value(selection).Call("addRange", js.Value(ran))
}

// RemoveAllRanges clears every range from the selection.
func (selection Selection) RemoveAllRanges() {
	js.Value(selection).Call("removeAllRanges")
}

// JSValue returns the underlying js.Value.
func (selection Selection) JSValue() js.Value {
	return js.Value(selection)
}

// Get reads a property from the underlying JS object.
func (selection Selection) Get(key string) js.Value {
	return selection.JSValue().Get(key)
}

// Equal reports whether the underlying JS values are the same value.
func (selection Selection) Equal(w js.Value) bool {
	return selection.JSValue().Equal(w)
}

// Set writes a property on the underlying JS object.
func (selection Selection) Set(key string, value interface{}) {
	selection.JSValue().Set(key, value)
}

// Call invokes a method on the underlying JS object.
func (selection Selection) Call(m string, args ...interface{}) js.Value {
	return selection.JSValue().Call(m, args...)
}

// Type reports the JS type of the underlying value.
func (selection Selection) Type() js.Type {
	return selection.JSValue().Type()
}

// Truthy reports JavaScript truthiness of the underlying value.
func (selection Selection) Truthy() bool {
	return selection.JSValue().Truthy()
}

// IsNull reports whether the underlying value is JS null.
func (selection Selection) IsNull() bool {
	return selection.JSValue().IsNull()
}

// IsUndefined reports whether the underlying value is JS undefined.
func (selection Selection) IsUndefined() bool {
	return selection.JSValue().IsUndefined()
}

// InstanceOf reports whether the value is an instance of type t.
func (selection Selection) InstanceOf(t js.Value) bool {
	return selection.JSValue().InstanceOf(t)
}

// String renders the selected text via the JS toString method.
func (selection Selection) String() string {
	return selection.JSValue().Call("toString").String()
}

// NewRange constructs a fresh, empty Range (new Range()).
func (document document) NewRange() Range {
	return Range(win.Get("Range").New())
}

// CommonAncestor returns the deepest node that contains both boundary points
// of the range. The node argument is unused and retained only for backward
// compatibility with existing callers.
func (ran Range) CommonAncestor(node js.Value) Node {
	// BUG FIX: commonAncestorContainer is a read-only property of Range, not
	// a method — invoking it with Call would throw a TypeError in JS.
	return nodeFactory(ran.JSValue().Get("commonAncestorContainer"))
}

// StartContainer returns the node containing the range's start boundary.
func (ran Range) StartContainer() Node {
	return nodeFactory(ran.JSValue().Get("startContainer"))
}

// EndContainer returns the node containing the range's end boundary.
func (ran Range) EndContainer() Node {
	return nodeFactory(ran.JSValue().Get("endContainer"))
}

// StartOffset returns the offset of the start boundary within its container.
func (ran Range) StartOffset() int {
	return ran.JSValue().Get("startOffset").Int()
}

// EndOffset returns the offset of the end boundary within its container.
func (ran Range) EndOffset() int {
	return ran.JSValue().Get("endOffset").Int()
}

// SetStart places the range's start boundary at (node, offset).
func (ran Range) SetStart(node Node, offset int) {
	ran.JSValue().Call("setStart", node.JSValue(), offset)
}

// SetEnd places the range's end boundary at (node, offset).
func (ran Range) SetEnd(node Node, offset int) {
	ran.JSValue().Call("setEnd", node.JSValue(), offset)
}

// SetStartBefore places the start boundary immediately before node.
func (ran Range) SetStartBefore(node js.Value) {
	ran.JSValue().Call("setStartBefore", node)
}

// SetEndBefore places the end boundary immediately before node.
func (ran Range) SetEndBefore(node js.Value) {
	ran.JSValue().Call("setEndBefore", node)
}

// SetStartAfter places the start boundary immediately after node.
func (ran Range) SetStartAfter(node js.Value) {
	ran.JSValue().Call("setStartAfter", node)
}

// SetEndAfter places the end boundary immediately after node.
func (ran Range) SetEndAfter(node js.Value) {
	ran.JSValue().Call("setEndAfter", node)
}

// Select makes the range contain the node and its contents (selectNode).
func (ran Range) Select(node js.Value) {
	ran.JSValue().Call("selectNode", node)
}

// CreateContextualFragment parses content as markup in the range's context
// and returns the resulting document fragment.
func (ran Range) CreateContextualFragment(content string) DocumentFragment {
	return DocumentFragment(ran.JSValue().Call("createContextualFragment", content))
}

// JSValue returns the underlying js.Value.
func (ran Range) JSValue() js.Value {
	return js.Value(ran)
}

// Get reads a property from the underlying JS object.
func (ran Range) Get(key string) js.Value {
	return ran.JSValue().Get(key)
}

// Equal reports whether the underlying JS values are the same value.
func (ran Range) Equal(w js.Value) bool {
	return ran.JSValue().Equal(w)
}

// Set writes a property on the underlying JS object.
func (ran Range) Set(key string, value interface{}) {
	ran.JSValue().Set(key, value)
}

// Call invokes a method on the underlying JS object.
func (ran Range) Call(m string, args ...interface{}) js.Value {
	return ran.JSValue().Call(m, args...)
}

// Type reports the JS type of the underlying value.
func (ran Range) Type() js.Type {
	return ran.JSValue().Type()
}

// Truthy reports JavaScript truthiness of the underlying value.
func (ran Range) Truthy() bool {
	return ran.JSValue().Truthy()
}

// IsNull reports whether the underlying value is JS null.
func (ran Range) IsNull() bool {
	return ran.JSValue().IsNull()
}

// IsUndefined reports whether the underlying value is JS undefined.
func (ran Range) IsUndefined() bool {
	return ran.JSValue().IsUndefined()
}

// InstanceOf reports whether the value is an instance of type t.
func (ran Range) InstanceOf(t js.Value) bool {
	return ran.JSValue().InstanceOf(t)
}
selection.go
0.742702
0.407687
selection.go
starcoder
package mmdbwriter

import (
	"net"

	"github.com/maxmind/mmdbwriter/mmdbtype"
	"github.com/pkg/errors"
)

// recordType distinguishes what a record in the search tree points at.
type recordType byte

const (
	recordTypeEmpty recordType = iota
	recordTypeData
	recordTypeNode
	recordTypeAlias
	recordTypeFixedNode
	recordTypeReserved
)

// record is one branch of a tree node: either empty, a data value (looked up
// via valueKey in the dataMap), or a pointer to a child node.
type record struct {
	node       *node
	valueKey   dataMapKey // key into the dataMap when recordType is recordTypeData
	recordType recordType
}

// each node contains two records.
type node struct {
	children [2]record
	nodeNum  int // assigned during finalize; position in the serialized tree
}

// insertRecord carries all the state needed for one network insertion down
// the tree, so the recursive calls only add the current depth.
type insertRecord struct {
	ip        net.IP
	prefixLen int

	recordType recordType

	// inserter receives the existing value at the target (nil if none) and
	// returns the value to store; returning nil clears the record.
	inserter func(value mmdbtype.DataType) (mmdbtype.DataType, error)

	insertedNode *node

	dataMap *dataMap
}

// insert descends one level toward iRec's network, following the bit of the
// IP at currentDepth; once inside the network it fans out to both children.
func (n *node) insert(iRec insertRecord, currentDepth int) error {
	newDepth := currentDepth + 1
	// Check if we are inside the network already
	if newDepth > iRec.prefixLen {
		// Data already exists for the network so insert into all the children.
		// We will prune duplicate nodes when we finalize.
		err := n.children[0].insert(iRec, newDepth)
		if err != nil {
			return err
		}
		return n.children[1].insert(iRec, newDepth)
	}

	// We haven't reached the network yet.
	pos := bitAt(iRec.ip, currentDepth)
	r := &n.children[pos]
	return r.insert(iRec, newDepth)
}

// insert applies iRec to this record: storing data once the target depth is
// reached, splitting the record into a new node when descending further, and
// enforcing the rules for reserved and aliased networks.
func (r *record) insert(
	iRec insertRecord,
	newDepth int,
) error {
	switch r.recordType {
	case recordTypeNode, recordTypeFixedNode:
		// Already an interior node; just keep descending (below the switch).
	case recordTypeEmpty, recordTypeData:
		// When we add record merging support, it should go here.
		if newDepth >= iRec.prefixLen {
			// We are at (or past) the target depth: write the record here.
			r.node = iRec.insertedNode
			r.recordType = iRec.recordType
			if iRec.recordType == recordTypeData {
				// Pass the existing value (nil for an empty record's zero key,
				// presumably — confirm dataMap.get's nil behavior) to the
				// caller-supplied inserter so it can merge or replace.
				existingValue := iRec.dataMap.get(r.valueKey)
				value, err := iRec.inserter(existingValue)
				if err != nil {
					return err
				}
				if value == nil {
					// Inserter chose to remove the value entirely.
					r.recordType = recordTypeEmpty
				} else {
					key, err := iRec.dataMap.store(value)
					if err != nil {
						return err
					}
					r.valueKey = key
				}
			} else {
				r.valueKey = noDataMapKey
			}
			return nil
		}

		// We are splitting this record so we create two duplicate child
		// records.
		r.node = &node{children: [2]record{*r, *r}}
		r.valueKey = noDataMapKey
		r.recordType = recordTypeNode
	case recordTypeReserved:
		if iRec.prefixLen >= newDepth {
			return errors.Errorf(
				"attempt to insert %s/%d, which is in a reserved network",
				iRec.ip,
				iRec.prefixLen,
			)
		}
		// If we are inserting a network that contains a reserved network,
		// we silently remove the reserved network.
		return nil
	case recordTypeAlias:
		if iRec.prefixLen < newDepth {
			// Do nothing. We are inserting a network that contains an aliased
			// network. We silently ignore.
			return nil
		}
		// attempting to insert _into_ an aliased network
		return errors.Errorf(
			"attempt to insert %s/%d, which is in an aliased network",
			iRec.ip,
			iRec.prefixLen,
		)
	default:
		return errors.Errorf("inserting into record type %d not implemented!", r.recordType)
	}

	// Continue descending into the (possibly just-created) child node.
	return r.node.insert(iRec, newDepth)
}

// get walks the tree along ip's bits starting at depth and returns the depth
// at which the lookup terminated together with the terminal record.
func (n *node) get(
	ip net.IP,
	depth int,
) (int, record) {
	r := n.children[bitAt(ip, depth)]

	depth++

	switch r.recordType {
	case recordTypeNode, recordTypeAlias, recordTypeFixedNode:
		return r.node.get(ip, depth)
	default:
		return depth, r
	}
}

// finalize prunes unnecessary nodes (e.g., where the two records are the same) and
// sets the node number for the node. It returns a record pointer that is nil if
// the node is not mergeable or the value of the merged record if it can be merged.
// The second return value is the current node count, including the subtree.
func (n *node) finalize(currentNum int) (*record, int) {
	n.nodeNum = currentNum
	currentNum++

	for i := 0; i < 2; i++ {
		switch n.children[i].recordType {
		case recordTypeFixedNode:
			// We don't consider merging for fixed nodes
			_, currentNum = n.children[i].node.finalize(currentNum)
		case recordTypeNode:
			record, newCurrentNum := n.children[i].node.finalize(currentNum)
			if record == nil {
				// nothing to merge. Use current number from child.
				currentNum = newCurrentNum
			} else {
				// Child collapsed into a single record: replace the child
				// pointer and keep currentNum (the child node is discarded).
				n.children[i] = *record
			}
		default:
		}
	}

	// This node itself is mergeable when both children are identical leaves:
	// both empty, or both data records pointing at the same stored value.
	if n.children[0].recordType == n.children[1].recordType &&
		(n.children[0].recordType == recordTypeEmpty ||
			(n.children[0].recordType == recordTypeData &&
				n.children[0].valueKey == n.children[1].valueKey)) {
		return &record{
			recordType: n.children[0].recordType,
			valueKey:   n.children[0].valueKey,
		}, currentNum
	}

	return nil, currentNum
}

// bitAt returns bit number depth (0 = most significant bit of ip[0]) of the
// IP address, as 0 or 1.
func bitAt(ip net.IP, depth int) byte {
	return (ip[depth/8] >> uint(7-(depth%8))) & 1
}
node.go
0.565779
0.471102
node.go
starcoder