code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package engine
import (
"fmt"
"math"
)
// Vector is a 3-component float32 vector used for positions,
// directions and Euler angles throughout the engine.
type Vector struct {
	X, Y, Z float32
}

// Common direction and scale constants.
// NOTE(review): the axis convention (Y-up, Z-forward) is implied by the
// names only — confirm against the renderer before relying on it.
var (
	Zero = Vector{0, 0, 0}
	Up = Vector{0, 1, 0}
	Down = Vector{0, -1, 0}
	Left = Vector{-1, 0, 0}
	Right = Vector{1, 0, 0}
	Forward = Vector{0, 0, 1}
	Backward = Vector{0, 0, -1}
	One = Vector{1, 1, 1}
	MinusOne = Vector{-1, -1, -1}
)
// Roundf rounds val to the given number of decimal places using
// round-half-away-from-zero semantics (math.Round).
// It panics if places is negative.
//
// Fix: the previous implementation converted through int, which
// truncates toward zero instead of rounding (e.g. 2.6 became 2).
func Roundf(val float32, places int) float32 {
	if places < 0 {
		panic("places should be >= 0")
	}
	factor := float32(math.Pow10(places))
	return float32(math.Round(float64(val*factor))) / factor
}
// Lerpf linearly interpolates between from and to by factor t
// (t=0 yields from, t=1 yields to; t is not clamped).
func Lerpf(from, to float32, t float32) float32 {
	delta := to - from
	return from + delta*t
}
// LerpAngle linearly interpolates between two angles in degrees,
// first shifting one endpoint by whole turns until their difference
// lies within 180 degrees, so interpolation takes the shorter arc.
// t is not clamped.
func LerpAngle(from, to float32, t float32) float32 {
	for {
		switch {
		case to-from > 180:
			from += 360
		case from-to > 180:
			to += 360
		default:
			return from + (to-from)*t
		}
	}
}
// String renders the vector as "(x,y,z)" using %f formatting.
func (v *Vector) String() string {
	x, y, z := v.X, v.Y, v.Z
	return fmt.Sprintf("(%f,%f,%f)", x, y, z)
}
// NewVector2 builds a Vector from 2D coordinates.
// NOTE(review): Z is set to 1, not 0 — presumably a homogeneous
// coordinate so Transform applies the translation column; confirm
// before changing.
func NewVector2(x, y float32) Vector {
	return Vector{x, y, 1}
}
// NewVector3 builds a Vector from explicit X, Y and Z components.
func NewVector3(x, y, z float32) Vector {
	return Vector{X: x, Y: y, Z: z}
}
// Add returns the component-wise sum of v and vect.
func (v *Vector) Add(vect Vector) Vector {
	return Vector{X: v.X + vect.X, Y: v.Y + vect.Y, Z: v.Z + vect.Z}
}

// Sub returns the component-wise difference v - vect.
func (v *Vector) Sub(vect Vector) Vector {
	return Vector{X: v.X - vect.X, Y: v.Y - vect.Y, Z: v.Z - vect.Z}
}

// Mul returns the component-wise (Hadamard) product of v and vect.
func (v *Vector) Mul(vect Vector) Vector {
	return Vector{X: v.X * vect.X, Y: v.Y * vect.Y, Z: v.Z * vect.Z}
}

// Mul2 returns a copy of v scaled by the scalar vect.
func (v *Vector) Mul2(vect float32) Vector {
	return Vector{X: v.X * vect, Y: v.Y * vect, Z: v.Z * vect}
}
// Distance returns the distance between v and vect.
// NOTE(review): only X and Y are considered — Z is ignored, matching
// Length. This looks intended for 2D use; confirm before relying on it
// for 3D vectors.
func (v *Vector) Distance(vect Vector) float32 {
	x := v.X - vect.X
	y := v.Y - vect.Y
	return float32(math.Sqrt(float64(x*x + y*y)))
}
// Div returns the component-wise quotient v / vect. Division by a
// zero component yields +/-Inf or NaN per IEEE-754 float semantics.
func (v *Vector) Div(vect Vector) Vector {
	return Vector{X: v.X / vect.X, Y: v.Y / vect.Y, Z: v.Z / vect.Z}
}
// Transform applies the 4x4 matrix to v as a point (implicit w = 1, so
// the translation elements [12..14] are added) and returns the result.
// The index pattern reads transform as column-major storage.
// NOTE(review): Matrix is declared elsewhere; the indices assume a flat
// 16-element layout — confirm against the Matrix definition.
func (v *Vector) Transform(transform Matrix) Vector {
	return NewVector3(
		(v.X*transform[0])+(v.Y*transform[4])+(v.Z*transform[8])+transform[12],
		(v.X*transform[1])+(v.Y*transform[5])+(v.Z*transform[9])+transform[13],
		(v.X*transform[2])+(v.Y*transform[6])+(v.Z*transform[10])+transform[14])
}
// fixAngle wraps each Euler component back into the open interval
// (-360, 360) by repeatedly adding or subtracting full turns.
func (v *Vector) fixAngle() {
	wrap := func(a float32) float32 {
		for a >= 360 {
			a -= 360
		}
		for a <= -360 {
			a += 360
		}
		return a
	}
	v.X = wrap(v.X)
	v.Y = wrap(v.Y)
	v.Z = wrap(v.Z)
}
// Length returns the magnitude of the vector.
// NOTE(review): only X and Y contribute — Z is ignored, consistent with
// Distance. Normalize/Normalized divide Z by this 2D length, which looks
// suspicious for true 3D vectors; confirm the intended dimensionality.
func (v *Vector) Length() float32 {
	return float32(math.Sqrt(float64(v.X*v.X + v.Y*v.Y)))
}
// Lerp linearly interpolates the X and Y components between from and to
// by factor t.
// NOTE(review): the result goes through NewVector2, so Z is always 1
// and is never interpolated — confirm this is intended for 3D inputs.
func Lerp(from, to Vector, t float32) Vector {
	return NewVector2(from.X+((to.X-from.X)*t), from.Y+((to.Y-from.Y)*t))
}
// Normalize scales v in place to unit length. A zero-length vector is
// left unchanged.
//
// Fix: guard against a zero length, consistent with Normalized —
// previously a zero vector was divided by 0, filling every component
// with NaN.
func (v *Vector) Normalize() {
	l := v.Length()
	if l == 0 {
		return
	}
	v.X /= l
	v.Y /= l
	v.Z /= l
}
func (v *Vector) Normalized() Vector {
l := v.Length()
if l == 0 {
return NewVector3(0, 0, 0)
}
return NewVector3(v.X/l, v.Y/l, v.Z/l)
} | engine/Vector.go | 0.635449 | 0.538316 | Vector.go | starcoder |
package sql
import (
"math"
"reflect"
"github.com/tobgu/qframe/internal/math/float"
"github.com/tobgu/qframe/qerrors"
)
// Column implements the sql.Scanner interface
// and allows arbitrary data types to be loaded from
// any database/sql/driver into a QFrame.
type Column struct {
	// kind is the reflect.Kind inferred from the first non-NULL value
	// scanned; reflect.Invalid until then.
	kind reflect.Kind
	// nulls counts NULL values seen before the type was inferred, so
	// they can be back-filled once the type is known.
	nulls int
	// pointer to the data slice which
	// contains the inferred data type
	ptr interface{}
	data struct {
		Ints []int
		Floats []float64
		Bools []bool
		Strings []*string
	}
	// coerce, when non-nil, completely overrides Scan.
	coerce func(t interface{}) error
	// precision, when > 0, is applied to floats via float.Fixed —
	// presumably limiting them to that many decimals; confirm in the
	// float package.
	precision int
}
// Null appends a NULL entry to the column, or just counts it when the
// column type has not been inferred yet (i.e. every value so far was
// NULL). It returns an error for kinds that cannot represent NULL.
func (c *Column) Null() error {
	switch c.kind {
	case reflect.Invalid:
		// Type not inferred yet: remember the NULL so it can be
		// back-filled once the first concrete value arrives.
		c.nulls++
	case reflect.Float64:
		c.data.Floats = append(c.data.Floats, math.NaN())
	case reflect.String:
		c.data.Strings = append(c.data.Strings, nil)
	default:
		return qerrors.New("Column Null", "non-nullable type: %s", c.kind)
	}
	return nil
}
// Int adds a new int to the underlying data slice
// NOTE(review): unlike Float/String, NULLs counted in c.nulls before the
// first int are not back-filled here (int has no NULL representation),
// so a leading NULL followed by ints silently misaligns this column.
// Confirm whether that case should be surfaced as an error upstream.
func (c *Column) Int(i int) {
	if c.ptr == nil {
		c.kind = reflect.Int
		c.ptr = &c.data.Ints
	}
	c.data.Ints = append(c.data.Ints, i)
}
// Float adds a new float to the underlying data slice, inferring the
// column type on first use and back-filling any NULLs seen before it.
func (c *Column) Float(f float64) {
	if c.ptr == nil {
		c.kind = reflect.Float64
		c.ptr = &c.data.Floats
		// Back-fill NULL floats scanned before the type was known.
		for ; c.nulls > 0; c.nulls-- {
			c.data.Floats = append(c.data.Floats, math.NaN())
		}
	}
	if c.precision > 0 {
		f = float.Fixed(f, c.precision)
	}
	c.data.Floats = append(c.data.Floats, f)
}
// String adds a new string to the underlying data slice, inferring the
// column type on first use and back-filling any NULLs seen before it.
func (c *Column) String(s string) {
	if c.ptr == nil {
		c.kind = reflect.String
		c.ptr = &c.data.Strings
		// Back-fill NULL strings scanned before the type was known.
		for ; c.nulls > 0; c.nulls-- {
			c.data.Strings = append(c.data.Strings, nil)
		}
	}
	c.data.Strings = append(c.data.Strings, &s)
}
// Bool adds a new bool to the underlying data slice
// NOTE(review): like Int (and unlike Float/String), NULLs counted in
// c.nulls before the first bool are silently dropped here, since bool
// has no NULL representation — the column then misaligns with its
// siblings. Confirm whether this case should error instead.
func (c *Column) Bool(b bool) {
	if c.ptr == nil {
		c.kind = reflect.Bool
		c.ptr = &c.data.Bools
	}
	c.data.Bools = append(c.data.Bools, b)
}
// Scan implements the sql.Scanner interface
// A registered coerce function takes precedence; otherwise the value is
// dispatched on its dynamic type to the matching typed appender.
func (c *Column) Scan(t interface{}) error {
	if c.coerce != nil {
		return c.coerce(t)
	}
	switch v := t.(type) {
	case nil:
		return c.Null()
	case bool:
		c.Bool(v)
	case int64:
		c.Int(int(v))
	case float64:
		c.Float(v)
	case string:
		c.String(v)
	case []uint8:
		// Raw bytes from the driver are stored as their string form.
		c.String(string(v))
	default:
		return qerrors.New(
			"Column Scan", "unsupported scan type: %s", reflect.ValueOf(t).Kind())
	}
	return nil
}
// Data returns the underlying data slice
func (c *Column) Data() interface{} {
if c.ptr == nil {
return nil
}
// *[]<T> -> []<T>
return reflect.ValueOf(c.ptr).Elem().Interface()
} | internal/io/sql/column.go | 0.587233 | 0.46132 | column.go | starcoder |
package sql
import (
"fmt"
"github.com/dolthub/vitess/go/vt/proto/query"
"github.com/dolthub/go-mysql-server/sql/values"
)
// ConvertToValue converts the interface to a sql value.
// Each supported Go type is mapped to its Vitess wire type and encoded
// with the matching fixed-width values.Write* helper; nil becomes
// NULL_TYPE with a nil payload, and unsupported types yield an error.
func ConvertToValue(v interface{}) (Value, error) {
	switch v := v.(type) {
	case nil:
		return Value{
			Typ: query.Type_NULL_TYPE,
			Val: nil,
		}, nil
	// Plain int/uint are widened to their 64-bit wire representations.
	case int:
		return Value{
			Typ: query.Type_INT64,
			Val: values.WriteInt64(make([]byte, values.Int64Size), int64(v)),
		}, nil
	case int8:
		return Value{
			Typ: query.Type_INT8,
			Val: values.WriteInt8(make([]byte, values.Int8Size), v),
		}, nil
	case int16:
		return Value{
			Typ: query.Type_INT16,
			Val: values.WriteInt16(make([]byte, values.Int16Size), v),
		}, nil
	case int32:
		return Value{
			Typ: query.Type_INT32,
			Val: values.WriteInt32(make([]byte, values.Int32Size), v),
		}, nil
	case int64:
		return Value{
			Typ: query.Type_INT64,
			Val: values.WriteInt64(make([]byte, values.Int64Size), v),
		}, nil
	case uint:
		return Value{
			Typ: query.Type_UINT64,
			Val: values.WriteUint64(make([]byte, values.Uint64Size), uint64(v)),
		}, nil
	case uint8:
		return Value{
			Typ: query.Type_UINT8,
			Val: values.WriteUint8(make([]byte, values.Uint8Size), v),
		}, nil
	case uint16:
		return Value{
			Typ: query.Type_UINT16,
			Val: values.WriteUint16(make([]byte, values.Uint16Size), v),
		}, nil
	case uint32:
		return Value{
			Typ: query.Type_UINT32,
			Val: values.WriteUint32(make([]byte, values.Uint32Size), v),
		}, nil
	case uint64:
		return Value{
			Typ: query.Type_UINT64,
			Val: values.WriteUint64(make([]byte, values.Uint64Size), v),
		}, nil
	case float32:
		return Value{
			Typ: query.Type_FLOAT32,
			Val: values.WriteFloat32(make([]byte, values.Float32Size), v),
		}, nil
	case float64:
		return Value{
			Typ: query.Type_FLOAT64,
			Val: values.WriteFloat64(make([]byte, values.Float64Size), v),
		}, nil
	// Variable-length values are encoded with ByteOrderCollation —
	// presumably a byte-order-preserving collation; confirm in the
	// values package before relying on comparison order.
	case string:
		return Value{
			Typ: query.Type_VARCHAR,
			Val: values.WriteString(make([]byte, len(v)), v, values.ByteOrderCollation),
		}, nil
	case []byte:
		return Value{
			Typ: query.Type_BLOB,
			Val: values.WriteBytes(make([]byte, len(v)), v, values.ByteOrderCollation),
		}, nil
	default:
		return Value{}, fmt.Errorf("type %T not implemented", v)
	}
}
package match
import (
"fmt"
"reflect"
"github.com/tidwall/gjson"
)
// JSON will perform some matches on the given JSON body, returning an error on a mis-match.
// It can be assumed that the bytes are valid JSON.
// A nil return value means the body satisfied the matcher.
type JSON func(body []byte) error
// JSONKeyEqual returns a matcher which will check that `wantKey` is present and its value matches `wantValue`.
// `wantKey` can be nested, see https://godoc.org/github.com/tidwall/gjson#Get for details.
// `wantValue` is matched via reflect.DeepEqual and the JSON takes the forms according to https://godoc.org/github.com/tidwall/gjson#Result.Value
func JSONKeyEqual(wantKey string, wantValue interface{}) JSON {
	return func(body []byte) error {
		res := gjson.GetBytes(body, wantKey)
		// Fix: test presence with Exists(), consistent with every other
		// matcher in this file. Index == 0 is not a reliable "missing"
		// signal — gjson only fills Index for results that map directly
		// to a span of the input, so a present key could be misreported
		// as missing.
		if !res.Exists() {
			return fmt.Errorf("key '%s' missing", wantKey)
		}
		gotValue := res.Value()
		if !reflect.DeepEqual(gotValue, wantValue) {
			return fmt.Errorf("key '%s' got '%v' want '%v'", wantKey, gotValue, wantValue)
		}
		return nil
	}
}
// JSONKeyPresent returns a matcher which will check that `wantKey` is present in the JSON object.
// `wantKey` can be nested, see https://godoc.org/github.com/tidwall/gjson#Get for details.
func JSONKeyPresent(wantKey string) JSON {
	return func(body []byte) error {
		if gjson.GetBytes(body, wantKey).Exists() {
			return nil
		}
		return fmt.Errorf("key '%s' missing", wantKey)
	}
}
// JSONKeyTypeEqual returns a matcher which will check that `wantKey` is present and its value is of the type `wantType`.
// `wantKey` can be nested, see https://godoc.org/github.com/tidwall/gjson#Get for details.
func JSONKeyTypeEqual(wantKey string, wantType gjson.Type) JSON {
	return func(body []byte) error {
		res := gjson.GetBytes(body, wantKey)
		switch {
		case !res.Exists():
			return fmt.Errorf("key '%s' missing", wantKey)
		case res.Type != wantType:
			return fmt.Errorf("key '%s' is of the wrong type, got %s want %s", wantKey, res.Type, wantType)
		default:
			return nil
		}
	}
}
// JSONArrayEach returns a matcher which will check that `wantKey` is an array then loops over each
// item calling `fn`. If `fn` returns an error, iterating stops and that error is returned.
func JSONArrayEach(wantKey string, fn func(gjson.Result) error) JSON {
	return func(body []byte) error {
		res := gjson.GetBytes(body, wantKey)
		if !res.Exists() {
			return fmt.Errorf("missing key '%s'", wantKey)
		}
		if !res.IsArray() {
			return fmt.Errorf("key '%s' is not an array", wantKey)
		}
		var err error
		res.ForEach(func(_, item gjson.Result) bool {
			err = fn(item)
			return err == nil // stop iterating on the first failure
		})
		return err
	}
}
// JSONMapEach returns a matcher which will check that `wantKey` is a map then loops over each
// item calling `fn`. If `fn` returns an error, iterating stops and an error is returned.
func JSONMapEach(wantKey string, fn func(k, v gjson.Result) error) JSON {
return func(body []byte) error {
res := gjson.GetBytes(body, wantKey)
if !res.Exists() {
return fmt.Errorf("missing key '%s'", wantKey)
}
if !res.IsObject() {
return fmt.Errorf("key '%s' is not an object", wantKey)
}
var err error
res.ForEach(func(key, val gjson.Result) bool {
err = fn(key, val)
if err == nil {
return true
}
return false
})
return err
}
} | internal/match/json.go | 0.759225 | 0.492371 | json.go | starcoder |
package easycsv
import (
"fmt"
"reflect"
"strconv"
)
// predefinedDecoders maps decoder names usable in struct tags to
// factories producing a string-to-integer conversion function for the
// target field type: "hex", "oct" and "deci" parse base 16, 8 and 10
// respectively. Each factory returns nil for non-integer field types
// (see createIntConverter).
var predefinedDecoders = map[string]func(t reflect.Type) interface{}{
	"hex": func(t reflect.Type) interface{} {
		return createIntConverter(t, 16)
	},
	"oct": func(t reflect.Type) interface{} {
		return createIntConverter(t, 8)
	},
	"deci": func(t reflect.Type) interface{} {
		return createIntConverter(t, 10)
	},
}
func createIntConverter(t reflect.Type, base int) interface{} {
switch t.Kind() {
case reflect.Int:
return func(s string) (int, error) {
i, err := strconv.ParseInt(s, base, 0)
return int(i), err
}
case reflect.Int8:
return func(s string) (int8, error) {
i, err := strconv.ParseInt(s, base, 8)
return int8(i), err
}
case reflect.Int16:
return func(s string) (int16, error) {
i, err := strconv.ParseInt(s, base, 16)
return int16(i), err
}
case reflect.Int32:
return func(s string) (int32, error) {
i, err := strconv.ParseInt(s, base, 32)
return int32(i), err
}
case reflect.Int64:
return func(s string) (int64, error) {
i, err := strconv.ParseInt(s, base, 64)
return int64(i), err
}
case reflect.Uint:
return func(s string) (uint, error) {
i, err := strconv.ParseUint(s, base, 0)
return uint(i), err
}
case reflect.Uint8:
return func(s string) (uint8, error) {
i, err := strconv.ParseUint(s, base, 8)
return uint8(i), err
}
case reflect.Uint16:
return func(s string) (uint16, error) {
i, err := strconv.ParseUint(s, base, 16)
return uint16(i), err
}
case reflect.Uint32:
return func(s string) (uint32, error) {
i, err := strconv.ParseUint(s, base, 32)
return uint32(i), err
}
case reflect.Uint64:
return func(s string) (uint64, error) {
i, err := strconv.ParseUint(s, base, 32)
return uint64(i), err
}
default:
return nil
}
}
// validateTypeDecoder checks that conv is a func(string) (t, error) and
// reports a descriptive error when its shape does not match.
func validateTypeDecoder(t reflect.Type, conv interface{}) error {
	ct := reflect.TypeOf(conv)
	switch {
	case ct.Kind() != reflect.Func:
		return fmt.Errorf("The decoder for %v must be a function but %v", t, ct)
	case ct.NumIn() != 1 || ct.NumOut() != 2:
		return fmt.Errorf("The decoder for %v must receive one arguments and returns two values", t)
	case ct.In(0).Kind() != reflect.String:
		return fmt.Errorf("The decoder for %v must receive a string as the first arg, but receives %v", t, ct.In(0))
	case ct.Out(0) != t || ct.Out(1) != errorType:
		return fmt.Errorf("The decoder for %v must return (%v, error), but returned (%v, %v)",
			t, t, ct.Out(0), ct.Out(1))
	}
	return nil
}
// createConverterFromType returns the user-registered decoder for t
// from opt.TypeDecoders (after validating its signature), falling back
// to the default converter when none is registered.
func createConverterFromType(opt Option, t reflect.Type) (interface{}, error) {
	// Indexing a nil map is safe in Go, so no nil check is needed.
	if conv, ok := opt.TypeDecoders[t]; ok {
		if err := validateTypeDecoder(t, conv); err != nil {
			return nil, err
		}
		return conv, nil
	}
	return createDefaultConverter(t), nil
}
func createDefaultConverter(t reflect.Type) interface{} {
c := createIntConverter(t, 0)
if c != nil {
return c
}
switch t.Kind() {
case reflect.Float32:
return func(s string) (float32, error) {
f, err := strconv.ParseFloat(s, 32)
return float32(f), err
}
case reflect.Float64:
return func(s string) (float64, error) {
f, err := strconv.ParseFloat(s, 64)
return float64(f), err
}
case reflect.Bool:
return strconv.ParseBool
case reflect.String:
return func(s string) (string, error) {
return s, nil
}
default:
return nil
}
} | encode.go | 0.532668 | 0.417628 | encode.go | starcoder |
package iso20022
// Fund Processing Passport (FPP) is a fully harmonised document with all key operational information that fund promoters should provide on their investment funds in order to facilitate their trading.
// Optional repeating elements are built up via the Add* helpers below.
type FundProcessingPassport1 struct {
	// Date of last revision.
	UpdatedDate *UpdatedDate `xml:"UpdtdDt"`
	// Financial instruments representing a sum of rights of the investor vis-a-vis the issuer.
	SecurityIdentification *SecurityIdentification1 `xml:"SctyId"`
	// Principal entity appointed by the fund, to which orders should be submitted. Usually located in the country of domicile.
	MainFundOrderDesk *ContactAttributes1 `xml:"MainFndOrdrDsk"`
	// Company that is responsible for the management and operation of the fund, eg, determines the investment strategy, appoints
	// the service providers, and makes major decisions for the fund. It is usually responsible for the distribution and marketing
	// of the fund. For self-managed funds, this wlll often be a separate promoter or sponsor of the fund.
	FundManagementCompany *ContactAttributes1 `xml:"FndMgmtCpny"`
	// Security that is a sub-set of an investment fund, and is governed by the same investment fund policy, eg, dividend option or valuation currency.
	FundDetails *FinancialInstrument20 `xml:"FndDtls"`
	// Processing characteristics linked to the instrument, ie, not to the market.
	ValuationDealingCharacteristics *ValuationDealingProcessingCharacteristics2 `xml:"ValtnDealgChrtcs"`
	// Processing characteristics linked to the instrument, ie, not to the market.
	InvestmentRestrictions *InvestmentRestrictions2 `xml:"InvstmtRstrctns"`
	// Processing characteristics linked to the instrument, ie, not to the market.
	SubscriptionProcessingCharacteristics *ProcessingCharacteristics2 `xml:"SbcptPrcgChrtcs"`
	// Processing characteristics linked to the instrument, ie, not to the market.
	RedemptionProcessingCharacteristics *ProcessingCharacteristics3 `xml:"RedPrcgChrtcs"`
	// Account to or from which a cash entry is made.
	SettlementDetails []*CashAccount22 `xml:"SttlmDtls"`
	// Context, or geographic environment, in which trading parties may meet in order to negotiate and execute trades among themselves.
	LocalMarketAnnex []*LocalMarketAnnex2 `xml:"LclMktAnx,omitempty"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	Extension []*Extension1 `xml:"Xtnsn,omitempty"`
}
// AddUpdatedDate allocates the UpdatedDate element and returns it for population.
func (f *FundProcessingPassport1) AddUpdatedDate() *UpdatedDate {
	u := new(UpdatedDate)
	f.UpdatedDate = u
	return u
}

// AddSecurityIdentification allocates the SecurityIdentification element and returns it for population.
func (f *FundProcessingPassport1) AddSecurityIdentification() *SecurityIdentification1 {
	s := new(SecurityIdentification1)
	f.SecurityIdentification = s
	return s
}

// AddMainFundOrderDesk allocates the MainFundOrderDesk element and returns it for population.
func (f *FundProcessingPassport1) AddMainFundOrderDesk() *ContactAttributes1 {
	c := new(ContactAttributes1)
	f.MainFundOrderDesk = c
	return c
}

// AddFundManagementCompany allocates the FundManagementCompany element and returns it for population.
func (f *FundProcessingPassport1) AddFundManagementCompany() *ContactAttributes1 {
	c := new(ContactAttributes1)
	f.FundManagementCompany = c
	return c
}

// AddFundDetails allocates the FundDetails element and returns it for population.
func (f *FundProcessingPassport1) AddFundDetails() *FinancialInstrument20 {
	d := new(FinancialInstrument20)
	f.FundDetails = d
	return d
}

// AddValuationDealingCharacteristics allocates the ValuationDealingCharacteristics element and returns it for population.
func (f *FundProcessingPassport1) AddValuationDealingCharacteristics() *ValuationDealingProcessingCharacteristics2 {
	v := new(ValuationDealingProcessingCharacteristics2)
	f.ValuationDealingCharacteristics = v
	return v
}

// AddInvestmentRestrictions allocates the InvestmentRestrictions element and returns it for population.
func (f *FundProcessingPassport1) AddInvestmentRestrictions() *InvestmentRestrictions2 {
	r := new(InvestmentRestrictions2)
	f.InvestmentRestrictions = r
	return r
}

// AddSubscriptionProcessingCharacteristics allocates the SubscriptionProcessingCharacteristics element and returns it for population.
func (f *FundProcessingPassport1) AddSubscriptionProcessingCharacteristics() *ProcessingCharacteristics2 {
	p := new(ProcessingCharacteristics2)
	f.SubscriptionProcessingCharacteristics = p
	return p
}

// AddRedemptionProcessingCharacteristics allocates the RedemptionProcessingCharacteristics element and returns it for population.
func (f *FundProcessingPassport1) AddRedemptionProcessingCharacteristics() *ProcessingCharacteristics3 {
	p := new(ProcessingCharacteristics3)
	f.RedemptionProcessingCharacteristics = p
	return p
}
func (f *FundProcessingPassport1) AddSettlementDetails() *CashAccount22 {
newValue := new (CashAccount22)
f.SettlementDetails = append(f.SettlementDetails, newValue)
return newValue
}
func (f *FundProcessingPassport1) AddLocalMarketAnnex() *LocalMarketAnnex2 {
newValue := new (LocalMarketAnnex2)
f.LocalMarketAnnex = append(f.LocalMarketAnnex, newValue)
return newValue
}
func (f *FundProcessingPassport1) AddExtension() *Extension1 {
newValue := new (Extension1)
f.Extension = append(f.Extension, newValue)
return newValue
} | FundProcessingPassport1.go | 0.648244 | 0.469703 | FundProcessingPassport1.go | starcoder |
package graphdb
// addedge appends id to ids if it is not already present, reporting
// whether the list changed.
//
// Cleanup: the explicit make for a nil input was redundant — append
// handles a nil slice, producing the same single-element result.
func addedge(ids []NodeId, id NodeId) ([]NodeId, bool) {
	for _, edge := range ids {
		if edge == id {
			return ids, false
		}
	}
	return append(ids, id), true
}
// removeedge deletes id from ids using swap-with-last removal (element
// order is not preserved), reporting whether anything was removed.
// A nil input yields an empty, non-nil slice and false.
func removeedge(ids []NodeId, id NodeId) ([]NodeId, bool) {
	if ids == nil {
		return make([]NodeId, 0), false
	}
	n := len(ids)
	for i := 0; i < n; {
		if ids[i] == id {
			// Overwrite the match with the current last element and
			// shrink the logical length; re-check index i next pass.
			n--
			ids[i] = ids[n]
		} else {
			i++
		}
	}
	if n == len(ids) {
		return ids, false
	}
	return ids[:n], true
}
// AddEdge adds a directed edge from node id1 to node id2, persisting
// each endpoint only when its edge list actually changed.
func (db *GraphDb) AddEdge(id1, id2 NodeId) error {
	node1, err := db.Get(id1)
	if err != nil {
		return err
	}
	node2, err := db.Get(id2)
	if err != nil {
		return err
	}
	var changed1, changed2 bool
	node1.OutEdges, changed1 = addedge(node1.OutEdges, id2)
	node2.InEdges, changed2 = addedge(node2.InEdges, id1)
	if changed1 {
		if _, err = db.putNode(node1); err != nil {
			return err
		}
	}
	if changed2 {
		if _, err = db.putNode(node2); err != nil {
			return err
		}
	}
	return nil
}
// RemoveEdge deletes the directed edge from node id1 to node id2,
// persisting each endpoint only when its edge list actually changed.
func (db *GraphDb) RemoveEdge(id1, id2 NodeId) error {
	node1, err := db.Get(id1)
	if err != nil {
		return err
	}
	node2, err := db.Get(id2)
	if err != nil {
		return err
	}
	var changed1, changed2 bool
	node1.OutEdges, changed1 = removeedge(node1.OutEdges, id2)
	node2.InEdges, changed2 = removeedge(node2.InEdges, id1)
	if changed1 {
		if _, err = db.putNode(node1); err != nil {
			return err
		}
	}
	if changed2 {
		if _, err = db.putNode(node2); err != nil {
			return err
		}
	}
	return nil
}
// Connect links two nodes bidirectionally by adding both the outgoing
// and incoming edge on each side; a node is persisted only when at
// least one of its edge lists changed.
func (db *GraphDb) Connect(id1, id2 NodeId) error {
	node1, err := db.Get(id1)
	if err != nil {
		return err
	}
	node2, err := db.Get(id2)
	if err != nil {
		return err
	}
	var c1, c2, c3, c4 bool
	node1.OutEdges, c1 = addedge(node1.OutEdges, id2)
	node1.InEdges, c2 = addedge(node1.InEdges, id2)
	node2.OutEdges, c3 = addedge(node2.OutEdges, id1)
	node2.InEdges, c4 = addedge(node2.InEdges, id1)
	if c1 || c2 {
		if _, err = db.putNode(node1); err != nil {
			return err
		}
	}
	if c3 || c4 {
		if _, err = db.putNode(node2); err != nil {
			return err
		}
	}
	return nil
}
// Connect two nodes by adding edges in both directions.
func (db *GraphDb) Disconnect(id1, id2 NodeId) error {
node1, err := db.Get(id1)
if err != nil {
return err
}
node2, err := db.Get(id2)
if err != nil {
return err
}
var changed1, changed2, changed3, changed4 bool
node1.OutEdges, changed1 = removeedge(node1.OutEdges, id2)
node1.InEdges, changed2 = removeedge(node1.InEdges, id2)
node2.OutEdges, changed3 = removeedge(node2.OutEdges, id1)
node2.InEdges, changed4 = removeedge(node2.InEdges, id1)
if changed1 || changed2 {
_, err = db.putNode(node1)
if err != nil {
return err
}
}
if changed3 || changed4 {
_, err = db.putNode(node2)
if err != nil {
return err
}
}
return nil
} | pkg/sdsai/graphdb/edge.go | 0.541651 | 0.436142 | edge.go | starcoder |
package gokd
import (
"errors"
"math"
"sort"
)
// KDTree represents a static multidimensional binary search tree
type KDTree struct {
	dimensions int   // number of coordinates per node
	root       *Node // nil until Load has been called
	nodes      int64 // NOTE(review): never updated anywhere in this file
}

// Node represents a single leaf or edge node within a KDTree
type Node struct {
	Coordinates []float64
	Left        *Node
	Right       *Node
	Data        interface{} // arbitrary user payload carried with the point
	// depth is set by recursiveBuild; presumably bydimension (declared
	// elsewhere) uses depth % dimensions to pick the split axis — confirm.
	depth    int
	distance float64 // NOTE(review): unused; apparently reserved for Nearest
	tree     *KDTree // owning tree
}

// ErrDimensionMismatch occurs when two or more Node or float arrays
// are expected to have the same number of dimensions but do not
var ErrDimensionMismatch = errors.New("dimensionality mismatch")
// New returns an empty KDTree configured for d-dimensional points.
func New(d int) *KDTree {
	return &KDTree{dimensions: d}
}
// NewNode returns a fresh Node owned by tree t.
func (t *KDTree) NewNode() *Node {
	n := new(Node)
	n.tree = t
	return n
}
// Load does a balanced bulk insertion of nodes into the KDTree; the
// order of elements within nodes may change during sorting. It fails
// with ErrDimensionMismatch if any node's coordinate count differs
// from the tree's dimensionality.
func (t *KDTree) Load(nodes []Node) (err error) {
	for i := range nodes {
		if len(nodes[i].Coordinates) != t.dimensions {
			return ErrDimensionMismatch
		}
		nodes[i].tree = t
	}
	t.root = recursiveBuild(t, 0, nodes)
	return nil
}
// Nearest does a nearest neighbor search on KDTree t, returning a maximum of
// n Nodes
// NOTE(review): the search itself is not implemented — after validating
// the coordinate count this always returns (nil, nil). The unused
// euclideanDistance helper and Node.distance field appear to be
// groundwork for it.
func (t *KDTree) Nearest(coords []float64, n int) (nodes []*Node, err error) {
	if len(coords) != t.dimensions {
		return nil, ErrDimensionMismatch
	}
	return nil, nil
}
// Clear removes all nodes in KDTree t
// Dropping the root makes the whole tree unreachable for the GC.
// NOTE(review): the t.nodes counter is not reset here.
func (t *KDTree) Clear() {
	t.root = nil
}
// recursiveBuild builds a balanced subtree from nodes at the given
// depth: it stamps depth on every node, sorts them (bydimension is
// declared elsewhere; presumably it compares the coordinate selected
// by depth % dimensions — confirm), and recurses on the halves around
// the median.
//
// Fix: guard the empty slice — previously Load with zero nodes reached
// this function and dereferenced a nil median, panicking.
func recursiveBuild(tree *KDTree, depth int, nodes []Node) *Node {
	if len(nodes) == 0 {
		return nil
	}
	for nodeIndex := range nodes {
		nodes[nodeIndex].depth = depth
	}
	sort.Sort(bydimension(nodes))
	left, right, median := getParts(nodes)
	newNode := tree.NewNode()
	newNode.Coordinates = median.Coordinates
	newNode.Data = median.Data
	if len(left) != 0 {
		newNode.Left = recursiveBuild(tree, depth+1, left)
	}
	if len(right) != 0 {
		newNode.Right = recursiveBuild(tree, depth+1, right)
	}
	return newNode
}
// euclideanDistance returns the L2 distance between points p and q, or
// ErrDimensionMismatch when their lengths differ.
func euclideanDistance(p []float64, q []float64) (dist float64, err error) {
	if len(p) != len(q) {
		return 0, ErrDimensionMismatch
	}
	var sum float64
	for i, pv := range p {
		d := pv - q[i]
		sum += d * d
	}
	return math.Sqrt(sum), nil
}
func getParts(nodes []Node) (left []Node, right []Node, median *Node) {
if len(nodes) == 0 {
return nil, nil, nil
}
if len(nodes) == 1 {
return nil, nil, &nodes[0]
}
if len(nodes) == 2 {
return nodes[:1], nil, &nodes[1]
}
medianIndex := 0
if len(nodes)%2 == 0 {
medianIndex = -1
}
medianIndex += len(nodes) / 2
left = nodes[:medianIndex]
median = &nodes[medianIndex]
right = nodes[medianIndex+1:]
return left, right, median
} | main.go | 0.810779 | 0.582877 | main.go | starcoder |
package plaid
import (
"encoding/json"
)
// TransactionBase A representation of a transaction
type TransactionBase struct {
	// Please use the `payment_channel` field, `transaction_type` will be deprecated in the future. `digital:` transactions that took place online. `place:` transactions that were made at a physical location. `special:` transactions that relate to banks, e.g. fees or deposits. `unresolved:` transactions that do not fit into the other three types.
	TransactionType *string `json:"transaction_type,omitempty"`
	// The ID of a posted transaction's associated pending transaction, where applicable.
	PendingTransactionId NullableString `json:"pending_transaction_id,omitempty"`
	// The ID of the category to which this transaction belongs. See [Categories](https://plaid.com/docs/#category-overview). If the `transactions` object was returned by an Assets endpoint such as `/asset_report/get/` or `/asset_report/pdf/get`, this field will only appear in an Asset Report with Insights.
	CategoryId NullableString `json:"category_id,omitempty"`
	// A hierarchical array of the categories to which this transaction belongs. See [Categories](https://plaid.com/docs/#category-overview). If the `transactions` object was returned by an Assets endpoint such as `/asset_report/get/` or `/asset_report/pdf/get`, this field will only appear in an Asset Report with Insights.
	Category []string `json:"category,omitempty"`
	Location *Location `json:"location,omitempty"`
	PaymentMeta *PaymentMeta `json:"payment_meta,omitempty"`
	// The name of the account owner. This field is not typically populated and only relevant when dealing with sub-accounts.
	AccountOwner NullableString `json:"account_owner,omitempty"`
	// The merchant name or transaction description. If the `transactions` object was returned by a Transactions endpoint such as `/transactions/get`, this field will always appear. If the `transactions` object was returned by an Assets endpoint such as `/asset_report/get/` or `/asset_report/pdf/get`, this field will only appear in an Asset Report with Insights.
	Name *string `json:"name,omitempty"`
	// The string returned by the financial institution to describe the transaction. For transactions returned by `/transactions/get`, this field is in beta and will be omitted unless the client is both enrolled in the closed beta program and has set `options.include_original_description` to `true`.
	OriginalDescription NullableString `json:"original_description,omitempty"`
	// The ID of the account in which this transaction occurred.
	AccountId string `json:"account_id"`
	// The settled value of the transaction, denominated in the account's currency, as stated in `iso_currency_code` or `unofficial_currency_code`. Positive values when money moves out of the account; negative values when money moves in. For example, debit card purchases are positive; credit card payments, direct deposits, and refunds are negative.
	Amount float32 `json:"amount"`
	// The ISO-4217 currency code of the transaction. Always `null` if `unofficial_currency_code` is non-null.
	IsoCurrencyCode NullableString `json:"iso_currency_code"`
	// The unofficial currency code associated with the transaction. Always `null` if `iso_currency_code` is non-`null`. Unofficial currency codes are used for currencies that do not have official ISO currency codes, such as cryptocurrencies and the currencies of certain countries. See the [currency code schema](https://plaid.com/docs/api/accounts#currency-code-schema) for a full listing of supported `iso_currency_code`s.
	UnofficialCurrencyCode NullableString `json:"unofficial_currency_code"`
	// For pending transactions, the date that the transaction occurred; for posted transactions, the date that the transaction posted. Both dates are returned in an [ISO 8601](https://wikipedia.org/wiki/ISO_8601) format ( `YYYY-MM-DD` ).
	Date string `json:"date"`
	// When `true`, identifies the transaction as pending or unsettled. Pending transaction details (name, type, amount, category ID) may change before they are settled.
	Pending bool `json:"pending"`
	// The unique ID of the transaction. Like all Plaid identifiers, the `transaction_id` is case sensitive.
	TransactionId string `json:"transaction_id"`
	// AdditionalProperties collects any JSON fields not mapped above.
	AdditionalProperties map[string]interface{}
}

// _TransactionBase mirrors TransactionBase — presumably so custom
// (un)marshalling elsewhere can round-trip the struct without recursing
// into its own Marshal/Unmarshal methods; confirm against those methods.
type _TransactionBase TransactionBase
// NewTransactionBase instantiates a new TransactionBase object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewTransactionBase(accountId string, amount float32, isoCurrencyCode NullableString, unofficialCurrencyCode NullableString, date string, pending bool, transactionId string) *TransactionBase {
	return &TransactionBase{
		AccountId:              accountId,
		Amount:                 amount,
		IsoCurrencyCode:        isoCurrencyCode,
		UnofficialCurrencyCode: unofficialCurrencyCode,
		Date:                   date,
		Pending:                pending,
		TransactionId:          transactionId,
	}
}
// NewTransactionBaseWithDefaults instantiates a new TransactionBase object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewTransactionBaseWithDefaults() *TransactionBase {
	return &TransactionBase{}
}
// GetTransactionType returns the TransactionType field value if set, zero value otherwise.
func (o *TransactionBase) GetTransactionType() string {
	if o != nil && o.TransactionType != nil {
		return *o.TransactionType
	}
	var zero string
	return zero
}

// GetTransactionTypeOk returns a tuple with the TransactionType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TransactionBase) GetTransactionTypeOk() (*string, bool) {
	if o != nil && o.TransactionType != nil {
		return o.TransactionType, true
	}
	return nil, false
}

// HasTransactionType returns a boolean if a field has been set.
func (o *TransactionBase) HasTransactionType() bool {
	return o != nil && o.TransactionType != nil
}

// SetTransactionType gets a reference to the given string and assigns it to the TransactionType field.
func (o *TransactionBase) SetTransactionType(v string) {
	o.TransactionType = &v
}
// GetPendingTransactionId returns the PendingTransactionId field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *TransactionBase) GetPendingTransactionId() string {
	if o != nil {
		if v := o.PendingTransactionId.Get(); v != nil {
			return *v
		}
	}
	var zero string
	return zero
}

// GetPendingTransactionIdOk returns a tuple with the PendingTransactionId field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *TransactionBase) GetPendingTransactionIdOk() (*string, bool) {
	if o != nil {
		return o.PendingTransactionId.Get(), o.PendingTransactionId.IsSet()
	}
	return nil, false
}

// HasPendingTransactionId returns a boolean if a field has been set.
func (o *TransactionBase) HasPendingTransactionId() bool {
	return o != nil && o.PendingTransactionId.IsSet()
}

// SetPendingTransactionId gets a reference to the given NullableString and assigns it to the PendingTransactionId field.
func (o *TransactionBase) SetPendingTransactionId(v string) {
	o.PendingTransactionId.Set(&v)
}

// SetPendingTransactionIdNil sets the value for PendingTransactionId to be an explicit nil
func (o *TransactionBase) SetPendingTransactionIdNil() {
	o.PendingTransactionId.Set(nil)
}

// UnsetPendingTransactionId ensures that no value is present for PendingTransactionId, not even an explicit nil
func (o *TransactionBase) UnsetPendingTransactionId() {
	o.PendingTransactionId.Unset()
}
// GetCategoryId returns the CategoryId field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *TransactionBase) GetCategoryId() string {
if o == nil || o.CategoryId.Get() == nil {
var ret string
return ret
}
return *o.CategoryId.Get()
}
// GetCategoryIdOk returns a tuple with the CategoryId field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *TransactionBase) GetCategoryIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.CategoryId.Get(), o.CategoryId.IsSet()
}
// HasCategoryId returns a boolean if a field has been set.
func (o *TransactionBase) HasCategoryId() bool {
if o != nil && o.CategoryId.IsSet() {
return true
}
return false
}
// SetCategoryId gets a reference to the given NullableString and assigns it to the CategoryId field.
func (o *TransactionBase) SetCategoryId(v string) {
o.CategoryId.Set(&v)
}
// SetCategoryIdNil sets the value for CategoryId to be an explicit nil
func (o *TransactionBase) SetCategoryIdNil() {
o.CategoryId.Set(nil)
}
// UnsetCategoryId ensures that no value is present for CategoryId, not even an explicit nil
func (o *TransactionBase) UnsetCategoryId() {
o.CategoryId.Unset()
}
// GetCategory returns the Category field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *TransactionBase) GetCategory() []string {
if o == nil {
var ret []string
return ret
}
return o.Category
}
// GetCategoryOk returns a tuple with the Category field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *TransactionBase) GetCategoryOk() (*[]string, bool) {
if o == nil || o.Category == nil {
return nil, false
}
return &o.Category, true
}
// HasCategory returns a boolean if a field has been set.
func (o *TransactionBase) HasCategory() bool {
if o != nil && o.Category != nil {
return true
}
return false
}
// SetCategory gets a reference to the given []string and assigns it to the Category field.
func (o *TransactionBase) SetCategory(v []string) {
o.Category = v
}
// GetLocation returns the Location field value if set, zero value otherwise.
func (o *TransactionBase) GetLocation() Location {
if o == nil || o.Location == nil {
var ret Location
return ret
}
return *o.Location
}
// GetLocationOk returns a tuple with the Location field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TransactionBase) GetLocationOk() (*Location, bool) {
if o == nil || o.Location == nil {
return nil, false
}
return o.Location, true
}
// HasLocation returns a boolean if a field has been set.
func (o *TransactionBase) HasLocation() bool {
if o != nil && o.Location != nil {
return true
}
return false
}
// SetLocation gets a reference to the given Location and assigns it to the Location field.
func (o *TransactionBase) SetLocation(v Location) {
o.Location = &v
}
// GetPaymentMeta returns the PaymentMeta field value if set, zero value otherwise.
func (o *TransactionBase) GetPaymentMeta() PaymentMeta {
if o == nil || o.PaymentMeta == nil {
var ret PaymentMeta
return ret
}
return *o.PaymentMeta
}
// GetPaymentMetaOk returns a tuple with the PaymentMeta field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TransactionBase) GetPaymentMetaOk() (*PaymentMeta, bool) {
if o == nil || o.PaymentMeta == nil {
return nil, false
}
return o.PaymentMeta, true
}
// HasPaymentMeta returns a boolean if a field has been set.
func (o *TransactionBase) HasPaymentMeta() bool {
if o != nil && o.PaymentMeta != nil {
return true
}
return false
}
// SetPaymentMeta gets a reference to the given PaymentMeta and assigns it to the PaymentMeta field.
func (o *TransactionBase) SetPaymentMeta(v PaymentMeta) {
o.PaymentMeta = &v
}
// GetAccountOwner returns the AccountOwner field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *TransactionBase) GetAccountOwner() string {
if o == nil || o.AccountOwner.Get() == nil {
var ret string
return ret
}
return *o.AccountOwner.Get()
}
// GetAccountOwnerOk returns a tuple with the AccountOwner field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *TransactionBase) GetAccountOwnerOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.AccountOwner.Get(), o.AccountOwner.IsSet()
}
// HasAccountOwner returns a boolean if a field has been set.
func (o *TransactionBase) HasAccountOwner() bool {
if o != nil && o.AccountOwner.IsSet() {
return true
}
return false
}
// SetAccountOwner gets a reference to the given NullableString and assigns it to the AccountOwner field.
func (o *TransactionBase) SetAccountOwner(v string) {
o.AccountOwner.Set(&v)
}
// SetAccountOwnerNil sets the value for AccountOwner to be an explicit nil
func (o *TransactionBase) SetAccountOwnerNil() {
o.AccountOwner.Set(nil)
}
// UnsetAccountOwner ensures that no value is present for AccountOwner, not even an explicit nil
func (o *TransactionBase) UnsetAccountOwner() {
o.AccountOwner.Unset()
}
// GetName returns the Name field value if set, zero value otherwise.
func (o *TransactionBase) GetName() string {
if o == nil || o.Name == nil {
var ret string
return ret
}
return *o.Name
}
// GetNameOk returns a tuple with the Name field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *TransactionBase) GetNameOk() (*string, bool) {
if o == nil || o.Name == nil {
return nil, false
}
return o.Name, true
}
// HasName returns a boolean if a field has been set.
func (o *TransactionBase) HasName() bool {
if o != nil && o.Name != nil {
return true
}
return false
}
// SetName gets a reference to the given string and assigns it to the Name field.
func (o *TransactionBase) SetName(v string) {
o.Name = &v
}
// GetOriginalDescription returns the OriginalDescription field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *TransactionBase) GetOriginalDescription() string {
if o == nil || o.OriginalDescription.Get() == nil {
var ret string
return ret
}
return *o.OriginalDescription.Get()
}
// GetOriginalDescriptionOk returns a tuple with the OriginalDescription field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *TransactionBase) GetOriginalDescriptionOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.OriginalDescription.Get(), o.OriginalDescription.IsSet()
}
// HasOriginalDescription returns a boolean if a field has been set.
func (o *TransactionBase) HasOriginalDescription() bool {
if o != nil && o.OriginalDescription.IsSet() {
return true
}
return false
}
// SetOriginalDescription gets a reference to the given NullableString and assigns it to the OriginalDescription field.
func (o *TransactionBase) SetOriginalDescription(v string) {
o.OriginalDescription.Set(&v)
}
// SetOriginalDescriptionNil sets the value for OriginalDescription to be an explicit nil
func (o *TransactionBase) SetOriginalDescriptionNil() {
o.OriginalDescription.Set(nil)
}
// UnsetOriginalDescription ensures that no value is present for OriginalDescription, not even an explicit nil
func (o *TransactionBase) UnsetOriginalDescription() {
o.OriginalDescription.Unset()
}
// GetAccountId returns the AccountId field value
func (o *TransactionBase) GetAccountId() string {
if o == nil {
var ret string
return ret
}
return o.AccountId
}
// GetAccountIdOk returns a tuple with the AccountId field value
// and a boolean to check if the value has been set.
func (o *TransactionBase) GetAccountIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.AccountId, true
}
// SetAccountId sets field value
func (o *TransactionBase) SetAccountId(v string) {
o.AccountId = v
}
// GetAmount returns the Amount field value
func (o *TransactionBase) GetAmount() float32 {
if o == nil {
var ret float32
return ret
}
return o.Amount
}
// GetAmountOk returns a tuple with the Amount field value
// and a boolean to check if the value has been set.
func (o *TransactionBase) GetAmountOk() (*float32, bool) {
if o == nil {
return nil, false
}
return &o.Amount, true
}
// SetAmount sets field value
func (o *TransactionBase) SetAmount(v float32) {
o.Amount = v
}
// GetIsoCurrencyCode returns the IsoCurrencyCode field value
// If the value is explicit nil, the zero value for string will be returned
func (o *TransactionBase) GetIsoCurrencyCode() string {
if o == nil || o.IsoCurrencyCode.Get() == nil {
var ret string
return ret
}
return *o.IsoCurrencyCode.Get()
}
// GetIsoCurrencyCodeOk returns a tuple with the IsoCurrencyCode field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *TransactionBase) GetIsoCurrencyCodeOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.IsoCurrencyCode.Get(), o.IsoCurrencyCode.IsSet()
}
// SetIsoCurrencyCode sets field value
func (o *TransactionBase) SetIsoCurrencyCode(v string) {
o.IsoCurrencyCode.Set(&v)
}
// GetUnofficialCurrencyCode returns the UnofficialCurrencyCode field value
// If the value is explicit nil, the zero value for string will be returned
func (o *TransactionBase) GetUnofficialCurrencyCode() string {
if o == nil || o.UnofficialCurrencyCode.Get() == nil {
var ret string
return ret
}
return *o.UnofficialCurrencyCode.Get()
}
// GetUnofficialCurrencyCodeOk returns a tuple with the UnofficialCurrencyCode field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *TransactionBase) GetUnofficialCurrencyCodeOk() (*string, bool) {
if o == nil {
return nil, false
}
return o.UnofficialCurrencyCode.Get(), o.UnofficialCurrencyCode.IsSet()
}
// SetUnofficialCurrencyCode sets field value
func (o *TransactionBase) SetUnofficialCurrencyCode(v string) {
o.UnofficialCurrencyCode.Set(&v)
}
// GetDate returns the Date field value
func (o *TransactionBase) GetDate() string {
if o == nil {
var ret string
return ret
}
return o.Date
}
// GetDateOk returns a tuple with the Date field value
// and a boolean to check if the value has been set.
func (o *TransactionBase) GetDateOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Date, true
}
// SetDate sets field value
func (o *TransactionBase) SetDate(v string) {
o.Date = v
}
// GetPending returns the Pending field value
func (o *TransactionBase) GetPending() bool {
if o == nil {
var ret bool
return ret
}
return o.Pending
}
// GetPendingOk returns a tuple with the Pending field value
// and a boolean to check if the value has been set.
func (o *TransactionBase) GetPendingOk() (*bool, bool) {
if o == nil {
return nil, false
}
return &o.Pending, true
}
// SetPending sets field value
func (o *TransactionBase) SetPending(v bool) {
o.Pending = v
}
// GetTransactionId returns the TransactionId field value
func (o *TransactionBase) GetTransactionId() string {
if o == nil {
var ret string
return ret
}
return o.TransactionId
}
// GetTransactionIdOk returns a tuple with the TransactionId field value
// and a boolean to check if the value has been set.
func (o *TransactionBase) GetTransactionIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.TransactionId, true
}
// SetTransactionId sets field value
func (o *TransactionBase) SetTransactionId(v string) {
o.TransactionId = v
}
func (o TransactionBase) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
if o.TransactionType != nil {
toSerialize["transaction_type"] = o.TransactionType
}
if o.PendingTransactionId.IsSet() {
toSerialize["pending_transaction_id"] = o.PendingTransactionId.Get()
}
if o.CategoryId.IsSet() {
toSerialize["category_id"] = o.CategoryId.Get()
}
if o.Category != nil {
toSerialize["category"] = o.Category
}
if o.Location != nil {
toSerialize["location"] = o.Location
}
if o.PaymentMeta != nil {
toSerialize["payment_meta"] = o.PaymentMeta
}
if o.AccountOwner.IsSet() {
toSerialize["account_owner"] = o.AccountOwner.Get()
}
if o.Name != nil {
toSerialize["name"] = o.Name
}
if o.OriginalDescription.IsSet() {
toSerialize["original_description"] = o.OriginalDescription.Get()
}
if true {
toSerialize["account_id"] = o.AccountId
}
if true {
toSerialize["amount"] = o.Amount
}
if true {
toSerialize["iso_currency_code"] = o.IsoCurrencyCode.Get()
}
if true {
toSerialize["unofficial_currency_code"] = o.UnofficialCurrencyCode.Get()
}
if true {
toSerialize["date"] = o.Date
}
if true {
toSerialize["pending"] = o.Pending
}
if true {
toSerialize["transaction_id"] = o.TransactionId
}
for key, value := range o.AdditionalProperties {
toSerialize[key] = value
}
return json.Marshal(toSerialize)
}
func (o *TransactionBase) UnmarshalJSON(bytes []byte) (err error) {
varTransactionBase := _TransactionBase{}
if err = json.Unmarshal(bytes, &varTransactionBase); err == nil {
*o = TransactionBase(varTransactionBase)
}
additionalProperties := make(map[string]interface{})
if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
delete(additionalProperties, "transaction_type")
delete(additionalProperties, "pending_transaction_id")
delete(additionalProperties, "category_id")
delete(additionalProperties, "category")
delete(additionalProperties, "location")
delete(additionalProperties, "payment_meta")
delete(additionalProperties, "account_owner")
delete(additionalProperties, "name")
delete(additionalProperties, "original_description")
delete(additionalProperties, "account_id")
delete(additionalProperties, "amount")
delete(additionalProperties, "iso_currency_code")
delete(additionalProperties, "unofficial_currency_code")
delete(additionalProperties, "date")
delete(additionalProperties, "pending")
delete(additionalProperties, "transaction_id")
o.AdditionalProperties = additionalProperties
}
return err
}
type NullableTransactionBase struct {
value *TransactionBase
isSet bool
}
func (v NullableTransactionBase) Get() *TransactionBase {
return v.value
}
func (v *NullableTransactionBase) Set(val *TransactionBase) {
v.value = val
v.isSet = true
}
func (v NullableTransactionBase) IsSet() bool {
return v.isSet
}
func (v *NullableTransactionBase) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableTransactionBase(val *TransactionBase) *NullableTransactionBase {
return &NullableTransactionBase{value: val, isSet: true}
}
func (v NullableTransactionBase) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableTransactionBase) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | plaid/model_transaction_base.go | 0.878588 | 0.568296 | model_transaction_base.go | starcoder |
package qspp
import "github.com/privacybydesign/keyproof/common"
import "github.com/privacybydesign/gabi/big"
type AlmostSafePrimeProductProof struct {
Nonce *big.Int
Commitments []*big.Int
Responses []*big.Int
}
type AlmostSafePrimeProductCommit struct {
Nonce *big.Int
Commitments []*big.Int
Logs []*big.Int
}
func AlmostSafePrimeProductBuildCommitments(list []*big.Int, Pprime *big.Int, Qprime *big.Int) ([]*big.Int, AlmostSafePrimeProductCommit) {
// Setup proof structure
var commit AlmostSafePrimeProductCommit
commit.Commitments = []*big.Int{}
commit.Logs = []*big.Int{}
// Calculate N and phiN
N := new(big.Int).Mul(new(big.Int).Add(new(big.Int).Lsh(Pprime, 1), big.NewInt(1)), new(big.Int).Add(new(big.Int).Lsh(Qprime, 1), big.NewInt(1)))
phiN := new(big.Int).Lsh(new(big.Int).Mul(Pprime, Qprime), 2)
// Generate nonce
nonceMax := new(big.Int).Lsh(big.NewInt(1), almostSafePrimeProductNonceSize)
commit.Nonce = common.RandomBigInt(nonceMax)
for i := 0; i < almostSafePrimeProductIters; i++ {
// Calculate base from nonce
curc := common.GetHashNumber(commit.Nonce, nil, i, uint(N.BitLen()))
curc.Mod(curc, N)
if new(big.Int).GCD(nil, nil, curc, N).Cmp(big.NewInt(1)) != 0 {
panic("Generated number not in Z_N")
}
log := common.RandomBigInt(phiN)
com := new(big.Int).Exp(curc, log, N)
list = append(list, com)
commit.Commitments = append(commit.Commitments, com)
commit.Logs = append(commit.Logs, log)
}
return list, commit
}
func AlmostSafePrimeProductBuildProof(Pprime *big.Int, Qprime *big.Int, challenge *big.Int, index *big.Int, commit AlmostSafePrimeProductCommit) AlmostSafePrimeProductProof {
// Setup proof structure
var proof AlmostSafePrimeProductProof
proof.Nonce = commit.Nonce
proof.Commitments = commit.Commitments
proof.Responses = []*big.Int{}
// Calculate useful constants
N := new(big.Int).Mul(new(big.Int).Add(new(big.Int).Lsh(Pprime, 1), big.NewInt(1)), new(big.Int).Add(new(big.Int).Lsh(Qprime, 1), big.NewInt(1)))
phiN := new(big.Int).Lsh(new(big.Int).Mul(Pprime, Qprime), 2)
oddPhiN := new(big.Int).Mul(Pprime, Qprime)
factors := []*big.Int{
Pprime,
Qprime,
}
// Calculate responses
for i := 0; i < almostSafePrimeProductIters; i++ {
// Derive challenge
curc := common.GetHashNumber(challenge, index, i, uint(2*N.BitLen()))
log := new(big.Int).Mod(new(big.Int).Add(commit.Logs[i], curc), phiN)
// Calculate response
x1 := new(big.Int).Mod(log, oddPhiN)
x2 := new(big.Int).Sub(oddPhiN, x1)
x3 := new(big.Int).Mod(new(big.Int).Mul(new(big.Int).ModInverse(big.NewInt(2), oddPhiN), x1), oddPhiN)
x4 := new(big.Int).Sub(oddPhiN, x3)
r1, ok1 := common.ModSqrt(x1, factors)
r2, ok2 := common.ModSqrt(x2, factors)
r3, ok3 := common.ModSqrt(x3, factors)
r4, ok4 := common.ModSqrt(x4, factors)
// And add the useful one
if ok1 {
proof.Responses = append(proof.Responses, r1)
} else if ok2 {
proof.Responses = append(proof.Responses, r2)
} else if ok3 {
proof.Responses = append(proof.Responses, r3)
} else if ok4 {
proof.Responses = append(proof.Responses, r4)
} else {
panic("none of +-x, +-x/2 are square")
}
}
return proof
}
func AlmostSafePrimeProductVerifyStructure(proof AlmostSafePrimeProductProof) bool {
if proof.Nonce == nil {
return false
}
if proof.Commitments == nil || proof.Responses == nil {
return false
}
if len(proof.Commitments) != almostSafePrimeProductIters || len(proof.Responses) != almostSafePrimeProductIters {
return false
}
for _, val := range proof.Commitments {
if val == nil {
return false
}
}
for _, val := range proof.Responses {
if val == nil {
return false
}
}
return true
}
func AlmostSafePrimeProductExtractCommitments(list []*big.Int, proof AlmostSafePrimeProductProof) []*big.Int {
return append(list, proof.Commitments...)
}
func AlmostSafePrimeProductVerifyProof(N *big.Int, challenge *big.Int, index *big.Int, proof AlmostSafePrimeProductProof) bool {
// Verify N=1(mod 3), as this decreases the error prob from 9/10 to 4/5
if new(big.Int).Mod(N, big.NewInt(3)).Cmp(big.NewInt(1)) != 0 {
return false
}
// Prepare gamma
gamma := new(big.Int).Lsh(big.NewInt(1), uint(N.BitLen()))
// Check responses
for i := 0; i < almostSafePrimeProductIters; i++ {
// Generate base
base := common.GetHashNumber(proof.Nonce, nil, i, uint(N.BitLen()))
base.Mod(base, N)
// Generate challenge
x := common.GetHashNumber(challenge, index, i, uint(2*N.BitLen()))
y := new(big.Int).Mod(
new(big.Int).Mul(
proof.Commitments[i],
new(big.Int).Exp(base, x, N)),
N)
// Verify
yg := new(big.Int).Exp(y, gamma, N)
t1 := new(big.Int).Exp(base, gamma, N)
t1.Exp(t1, proof.Responses[i], N)
t1.Exp(t1, proof.Responses[i], N)
t2 := new(big.Int).ModInverse(t1, N)
t3 := new(big.Int).Exp(t1, big.NewInt(2), N)
t4 := new(big.Int).ModInverse(t3, N)
ok1 := (t1.Cmp(yg) == 0)
ok2 := (t2.Cmp(yg) == 0)
ok3 := (t3.Cmp(yg) == 0)
ok4 := (t4.Cmp(yg) == 0)
if !ok1 && !ok2 && !ok3 && !ok4 {
return false
}
}
return true
} | qspp/almostsafeprimeproduct.go | 0.512937 | 0.423816 | almostsafeprimeproduct.go | starcoder |
package geometry
import (
"github.com/go-gl/mathgl/mgl32"
)
type Quad struct {
Vertices [4]mgl32.Vec3
VertexBuffer []float32
}
func NewQuad(corners [4]mgl32.Vec3) *Quad {
vertices := make([][]float32, 4)
for i := 0; i < 4; i++ {
vertices[i] = []float32{corners[i].X(), corners[i].Y(), corners[i].Z()}
}
vertexBuffer := make([]float32, 0)
// v0, v1, v2
tri1Indices := [3]int{0, 1, 2}
for _, index := range tri1Indices {
vertexBuffer = append(vertexBuffer, vertices[index]...)
}
// v0, v3, v2
tri2Indices := [3]int{0, 3, 2}
for _, index := range tri2Indices {
vertexBuffer = append(vertexBuffer, vertices[index]...)
}
return &Quad{
Vertices: corners,
VertexBuffer: vertexBuffer,
}
}
func NewQuadFourPoints(xzPairs [4][]float32) *Quad {
corners := [4]mgl32.Vec3{}
for i := 0; i < 4; i++ {
corners[i] = mgl32.Vec3{xzPairs[i][0], 0, xzPairs[i][1]}
}
return NewQuad(corners)
}
func NewRectangle(x float32, z float32, width float32, depth float32) *Quad {
corners := [4]mgl32.Vec3{
mgl32.Vec3{x, 0, z},
mgl32.Vec3{x, 0, z + depth},
mgl32.Vec3{x + width, 0, z + depth},
mgl32.Vec3{x + width, 0, z},
}
return NewQuad(corners)
}
func NewTexturedRectangle(vertices [4][]float32, uvs [4][]float32) *Quad {
vertexBuffer := make([]float32, 0)
// v0, v1, v2
tri1Indices := [3]int{0, 1, 2}
for _, index := range tri1Indices {
vertexBuffer = append(vertexBuffer, vertices[index]...)
vertexBuffer = append(vertexBuffer, uvs[index]...)
}
// v0, v3, v2
tri2Indices := [3]int{0, 3, 2}
for _, index := range tri2Indices {
vertexBuffer = append(vertexBuffer, vertices[index]...)
vertexBuffer = append(vertexBuffer, uvs[index]...)
}
return &Quad{
VertexBuffer: vertexBuffer,
}
}
func NewQuadMD1(vertices [4][]float32, uvs [4][]float32, normals [4][]float32) *Quad {
vertexBuffer := make([]float32, 0)
// MD1 vertex order is different (v0, v1, v3, v2)
// Vertex order of other quads is (v0, v1, v2, v3)
// v0, v1, v3
tri1Indices := [3]int{0, 1, 3}
for _, index := range tri1Indices {
vertexBuffer = append(vertexBuffer, vertices[index]...)
vertexBuffer = append(vertexBuffer, uvs[index]...)
vertexBuffer = append(vertexBuffer, normals[index]...)
}
// v0, v2, v3
tri2Indices := [3]int{0, 2, 3}
for _, index := range tri2Indices {
vertexBuffer = append(vertexBuffer, vertices[index]...)
vertexBuffer = append(vertexBuffer, uvs[index]...)
vertexBuffer = append(vertexBuffer, normals[index]...)
}
return &Quad{
VertexBuffer: vertexBuffer,
}
} | geometry/quad.go | 0.727201 | 0.528777 | quad.go | starcoder |
package sema
import (
"github.com/onflow/cadence/runtime/ast"
"github.com/onflow/cadence/runtime/common"
"github.com/onflow/cadence/runtime/errors"
)
func (checker *Checker) VisitBinaryExpression(expression *ast.BinaryExpression) ast.Repr {
// The left-hand side is always evaluated.
// However, the right-hand side might not necessarily be evaluated,
// e.g. in boolean logic or in nil-coalescing
leftType := expression.Left.Accept(checker).(Type)
leftIsInvalid := leftType.IsInvalidType()
operation := expression.Operation
operationKind := binaryOperationKind(operation)
unsupportedOperation := func() Type {
panic(&unsupportedOperation{
kind: common.OperationKindBinary,
operation: operation,
Range: ast.NewRangeFromPositioned(expression),
})
}
switch operationKind {
case BinaryOperationKindArithmetic,
BinaryOperationKindNonEqualityComparison,
BinaryOperationKindEquality,
BinaryOperationKindBitwise:
// Right hand side will always be evaluated
rightType := expression.Right.Accept(checker).(Type)
rightIsInvalid := rightType.IsInvalidType()
anyInvalid := leftIsInvalid || rightIsInvalid
switch operationKind {
case BinaryOperationKindArithmetic,
BinaryOperationKindNonEqualityComparison,
BinaryOperationKindBitwise:
return checker.checkBinaryExpressionArithmeticOrNonEqualityComparisonOrBitwise(
expression, operation, operationKind,
leftType, rightType,
leftIsInvalid, rightIsInvalid, anyInvalid,
)
case BinaryOperationKindEquality:
return checker.checkBinaryExpressionEquality(
expression, operation, operationKind,
leftType, rightType,
leftIsInvalid, rightIsInvalid, anyInvalid,
)
default:
return unsupportedOperation()
}
case BinaryOperationKindBooleanLogic,
BinaryOperationKindNilCoalescing:
// The evaluation of the right-hand side is not guaranteed.
// That means that resource invalidation and returns
// are not definite, but only potential.
rightType := checker.checkPotentiallyUnevaluated(func() Type {
return expression.Right.Accept(checker).(Type)
})
rightIsInvalid := rightType.IsInvalidType()
anyInvalid := leftIsInvalid || rightIsInvalid
switch operationKind {
case BinaryOperationKindBooleanLogic:
return checker.checkBinaryExpressionBooleanLogic(
expression, operation, operationKind,
leftType, rightType,
leftIsInvalid, rightIsInvalid, anyInvalid,
)
case BinaryOperationKindNilCoalescing:
resultType := checker.checkBinaryExpressionNilCoalescing(
expression, operation, operationKind,
leftType, rightType,
leftIsInvalid, rightIsInvalid, anyInvalid,
)
checker.Elaboration.BinaryExpressionResultTypes[expression] = resultType
checker.Elaboration.BinaryExpressionRightTypes[expression] = rightType
return resultType
default:
return unsupportedOperation()
}
default:
return unsupportedOperation()
}
}
func (checker *Checker) checkBinaryExpressionArithmeticOrNonEqualityComparisonOrBitwise(
expression *ast.BinaryExpression,
operation ast.Operation,
operationKind BinaryOperationKind,
leftType, rightType Type,
leftIsInvalid, rightIsInvalid, anyInvalid bool,
) Type {
// check both types are number/integer subtypes
var expectedSuperType Type
switch operationKind {
case BinaryOperationKindArithmetic,
BinaryOperationKindNonEqualityComparison:
expectedSuperType = &NumberType{}
case BinaryOperationKindBitwise:
expectedSuperType = &IntegerType{}
default:
panic(errors.NewUnreachableError())
}
leftIsNumber := IsSubType(leftType, expectedSuperType)
rightIsNumber := IsSubType(rightType, expectedSuperType)
reportedInvalidOperands := false
if !leftIsNumber && !rightIsNumber {
if !anyInvalid {
checker.report(
&InvalidBinaryOperandsError{
Operation: operation,
LeftType: leftType,
RightType: rightType,
Range: ast.NewRangeFromPositioned(expression),
},
)
reportedInvalidOperands = true
}
} else if !leftIsNumber {
if !leftIsInvalid {
checker.report(
&InvalidBinaryOperandError{
Operation: operation,
Side: common.OperandSideLeft,
ExpectedType: expectedSuperType,
ActualType: leftType,
Range: ast.NewRangeFromPositioned(expression.Left),
},
)
}
} else if !rightIsNumber {
if !rightIsInvalid {
checker.report(
&InvalidBinaryOperandError{
Operation: operation,
Side: common.OperandSideRight,
ExpectedType: expectedSuperType,
ActualType: rightType,
Range: ast.NewRangeFromPositioned(expression.Right),
},
)
}
}
// check both types are equal
if !reportedInvalidOperands &&
!anyInvalid &&
!leftType.Equal(rightType) {
checker.report(
&InvalidBinaryOperandsError{
Operation: operation,
LeftType: leftType,
RightType: rightType,
Range: ast.NewRangeFromPositioned(expression),
},
)
}
switch operationKind {
case BinaryOperationKindArithmetic,
BinaryOperationKindBitwise:
return leftType
case BinaryOperationKindNonEqualityComparison:
return &BoolType{}
default:
panic(errors.NewUnreachableError())
}
}
func (checker *Checker) checkBinaryExpressionEquality(
expression *ast.BinaryExpression,
operation ast.Operation,
operationKind BinaryOperationKind,
leftType, rightType Type,
leftIsInvalid, rightIsInvalid, anyInvalid bool,
) (resultType Type) {
resultType = &BoolType{}
if anyInvalid {
return
}
if !AreCompatibleEquatableTypes(leftType, rightType) {
checker.report(
&InvalidBinaryOperandsError{
Operation: operation,
LeftType: leftType,
RightType: rightType,
Range: ast.NewRangeFromPositioned(expression),
},
)
}
checker.checkUnusedExpressionResourceLoss(leftType, expression.Left)
checker.checkUnusedExpressionResourceLoss(rightType, expression.Right)
return
}
func (checker *Checker) checkBinaryExpressionBooleanLogic(
expression *ast.BinaryExpression,
operation ast.Operation,
operationKind BinaryOperationKind,
leftType, rightType Type,
leftIsInvalid, rightIsInvalid, anyInvalid bool,
) Type {
// check both types are boolean subtypes
leftIsBool := IsSubType(leftType, &BoolType{})
rightIsBool := IsSubType(rightType, &BoolType{})
if !leftIsBool && !rightIsBool {
if !anyInvalid {
checker.report(
&InvalidBinaryOperandsError{
Operation: operation,
LeftType: leftType,
RightType: rightType,
Range: ast.NewRangeFromPositioned(expression),
},
)
}
} else if !leftIsBool {
if !leftIsInvalid {
checker.report(
&InvalidBinaryOperandError{
Operation: operation,
Side: common.OperandSideLeft,
ExpectedType: &BoolType{},
ActualType: leftType,
Range: ast.NewRangeFromPositioned(expression.Left),
},
)
}
} else if !rightIsBool {
if !rightIsInvalid {
checker.report(
&InvalidBinaryOperandError{
Operation: operation,
Side: common.OperandSideRight,
ExpectedType: &BoolType{},
ActualType: rightType,
Range: ast.NewRangeFromPositioned(expression.Right),
},
)
}
}
return &BoolType{}
}
func (checker *Checker) checkBinaryExpressionNilCoalescing(
expression *ast.BinaryExpression,
operation ast.Operation,
operationKind BinaryOperationKind,
leftType, rightType Type,
leftIsInvalid, rightIsInvalid, anyInvalid bool,
) Type {
leftOptional, leftIsOptional := leftType.(*OptionalType)
if !leftIsInvalid {
checker.recordResourceInvalidation(
expression.Left,
leftType,
ResourceInvalidationKindMoveDefinite,
)
if !leftIsOptional {
checker.report(
&InvalidBinaryOperandError{
Operation: operation,
Side: common.OperandSideLeft,
ExpectedType: &OptionalType{},
ActualType: leftType,
Range: ast.NewRangeFromPositioned(expression.Left),
},
)
}
}
if leftIsInvalid || !leftIsOptional {
return &InvalidType{}
}
leftInner := leftOptional.Type
if _, ok := leftInner.(*NeverType); ok {
return rightType
}
canNarrow := false
if !rightIsInvalid {
if rightType.IsResourceType() {
checker.report(
&InvalidNilCoalescingRightResourceOperandError{
Range: ast.NewRangeFromPositioned(expression.Right),
},
)
}
if !IsSubType(rightType, leftOptional) {
checker.report(
&InvalidBinaryOperandError{
Operation: operation,
Side: common.OperandSideRight,
ExpectedType: leftOptional,
ActualType: rightType,
Range: ast.NewRangeFromPositioned(expression.Right),
},
)
} else {
canNarrow = IsSubType(rightType, leftInner)
}
}
if !canNarrow {
return leftOptional
}
return leftInner
} | runtime/sema/check_binary_expression.go | 0.772101 | 0.492737 | check_binary_expression.go | starcoder |
package ast
// Location contains location information about where an AST type is in a document.
// Location contains location information about where an AST type is in a document.
type Location struct {
	Line   int // line within the source document (base not shown in this file — presumably 1-based; confirm against the lexer)
	Column int // column within that line (same caveat on the base)
}
// Enumerated PathNodeKind values; a path segment is either a string (field
// name) or an int (list index).
// @wg:field self
const (
	PathNodeKindString PathNodeKind = iota
	PathNodeKindInt
)

// PathNodeKind an enum type that defines the type of data stored in a PathNode.
type PathNodeKind uint8

// PathNode is an individual part of the path.
type PathNode struct {
	Kind   PathNodeKind
	String string
	Int    int
}

// NewStringPathNode returns a new PathNode with the given string as it's value.
func NewStringPathNode(s string) PathNode {
	node := PathNode{Kind: PathNodeKindString}
	node.String = s
	return node
}

// NewIntPathNode returns a new PathNode with the given int as it's value.
func NewIntPathNode(i int) PathNode {
	node := PathNode{Kind: PathNodeKindInt}
	node.Int = i
	return node
}
// 2.2 Document
// http://facebook.github.io/graphql/June2018/#sec-Language.Document

// Document is the root node of a parsed GraphQL document: the list of
// top-level definitions plus per-kind tallies.
type Document struct {
	Definitions *Definitions // all top-level definitions (type declared elsewhere in this package)
	// The counters below are presumably the number of definitions of each
	// kind encountered while parsing — TODO confirm against the parser.
	OperationDefinitions int32
	FragmentDefinitions int32
	DirectiveDefinitions int32
	SchemaDefinitions int32
	TypeDefinitions int32
	SchemaExtensions int32
	TypeExtensions int32
}
const (
	// @wg:field ExecutableDefinition
	DefinitionKindExecutable DefinitionKind = iota
	// @wg:field TypeSystemDefinition
	DefinitionKindTypeSystem
	// @wg:field TypeSystemExtension
	DefinitionKindTypeSystemExtension
)

// DefinitionKind discriminates which pointer of a Definition is populated.
type DefinitionKind int8

// String returns a human-readable label for the kind, or "invalid" for
// values outside the declared range.
func (k DefinitionKind) String() string {
	if k < DefinitionKindExecutable || k > DefinitionKindTypeSystemExtension {
		return "invalid"
	}
	return [...]string{"executable", "type system", "type system extension"}[k]
}
// Definition is a tagged union of the three top-level definition categories;
// Kind selects which pointer is populated.
type Definition struct {
	Location             Location
	ExecutableDefinition *ExecutableDefinition
	TypeSystemDefinition *TypeSystemDefinition
	TypeSystemExtension  *TypeSystemExtension
	Kind                 DefinitionKind
}

// 2.3 Operations
// http://facebook.github.io/graphql/June2018/#sec-Language.Operations
// 2.8 Fragments
// http://facebook.github.io/graphql/June2018/#sec-Language.Fragments
const (
	// @wg:field OperationDefinition
	ExecutableDefinitionKindOperation ExecutableDefinitionKind = iota
	// @wg:field FragmentDefinition
	ExecutableDefinitionKindFragment
)

// ExecutableDefinitionKind discriminates which pointer of an
// ExecutableDefinition is populated.
type ExecutableDefinitionKind int8

// String returns a human-readable label for the kind, or "invalid" for
// values outside the declared range.
func (k ExecutableDefinitionKind) String() string {
	if k < ExecutableDefinitionKindOperation || k > ExecutableDefinitionKindFragment {
		return "invalid"
	}
	return [...]string{"operation", "fragment"}[k]
}
// ExecutableDefinition is a tagged union of an operation or fragment
// definition, selected by Kind.
type ExecutableDefinition struct {
	FragmentDefinition  *FragmentDefinition
	OperationDefinition *OperationDefinition
	Kind                ExecutableDefinitionKind
}

// String returns the name of the wrapped definition, or
// "UnnamedExecutableDefinition" for an unrecognised kind.
// NOTE(review): panics if Kind is set but the matching pointer is nil —
// confirm the parser always upholds that invariant.
func (def ExecutableDefinition) String() string {
	switch def.Kind {
	case ExecutableDefinitionKindFragment:
		return def.FragmentDefinition.Name
	case ExecutableDefinitionKindOperation:
		return def.OperationDefinition.Name
	}
	return "UnnamedExecutableDefinition"
}

// FragmentDefinition is a named fragment with its type condition,
// directives, and selection set.
type FragmentDefinition struct {
	Name          string
	TypeCondition *TypeCondition
	Directives    *Directives
	SelectionSet  *Selections
}
// @wg:field self
const (
	OperationDefinitionKindQuery OperationDefinitionKind = iota
	OperationDefinitionKindMutation
	OperationDefinitionKindSubscription
)

// OperationDefinitionKind discriminates the three GraphQL operation types.
type OperationDefinitionKind int8

// String returns the operation keyword for the kind, or "invalid" for
// values outside the declared range.
func (t OperationDefinitionKind) String() string {
	if t < OperationDefinitionKindQuery || t > OperationDefinitionKindSubscription {
		return "invalid"
	}
	return [...]string{"query", "mutation", "subscription"}[t]
}
// OperationDefinition is a single query, mutation, or subscription operation
// (Kind selects which) with its variables, directives, and selection set.
type OperationDefinition struct {
	Name                string
	VariableDefinitions *VariableDefinitions
	Directives          *Directives
	SelectionSet        *Selections
	Kind                OperationDefinitionKind
}

// 2.4 Selection Sets
// http://facebook.github.io/graphql/June2018/#sec-Selection-Sets
// @wg:field self
const (
	SelectionKindField SelectionKind = iota
	SelectionKindFragmentSpread
	SelectionKindInlineFragment
)

// SelectionKind discriminates the three forms a Selection can take.
type SelectionKind int8

// String returns a human-readable label for the kind, or "unknown" for
// values outside the declared range.
func (k SelectionKind) String() string {
	if k < SelectionKindField || k > SelectionKindInlineFragment {
		return "unknown"
	}
	return [...]string{"field", "fragment spread", "inline fragment"}[k]
}
// Selection is one entry in a selection set. Kind selects whether it is a
// field, a fragment spread, or an inline fragment, and determines which of
// the optional fields are meaningful.
type Selection struct {
	Name  string // but not "on"
	Alias string
	// @wg:on_kinds InlineFragmentSelection
	TypeCondition *TypeCondition
	Arguments     *Arguments
	Directives    *Directives
	SelectionSet  *Selections
	Path          *PathNodes
	Kind          SelectionKind
}

// 2.6 Arguments
// http://facebook.github.io/graphql/June2018/#sec-Language.Arguments
// Argument :

// Argument is a named argument value attached to a field or directive.
type Argument struct {
	Name  string
	Value Value
}

// 2.8 Fragments
// http://facebook.github.io/graphql/June2018/#sec-Language.Fragments

// TypeCondition restricts a fragment to a named type.
type TypeCondition struct {
	NamedType Type // Only allow "TypeKindNamed" kind NamedType.
}
// @wg:field self
const (
	ValueKindVariable ValueKind = iota
	ValueKindInt
	ValueKindFloat
	ValueKindString
	ValueKindBoolean
	ValueKindNull
	ValueKindEnum
	ValueKindList
	ValueKindObject
)

// ValueKind discriminates which field of a Value carries the payload.
type ValueKind int8

// String returns a human-readable label for the kind, or "unknown value"
// for values outside the declared range.
func (k ValueKind) String() string {
	switch k {
	case ValueKindVariable:
		return "variable value"
	case ValueKindInt:
		// Fixed from "int8 value": the label describes the GraphQL value
		// category (like the sibling labels), not the Go type of the kind.
		return "int value"
	case ValueKindFloat:
		return "float value"
	case ValueKindString:
		return "string value"
	case ValueKindBoolean:
		return "boolean value"
	case ValueKindNull:
		return "null value"
	case ValueKindEnum:
		return "enum value"
	case ValueKindList:
		return "list value"
	case ValueKindObject:
		return "object value"
	}
	return "unknown value"
}
// Value is a tagged union of the GraphQL input value forms; Kind selects
// the populated field.
type Value struct {
	IntValue   int
	FloatValue float64
	// StringValue covers variables and enums, enums are names, but not `true`, `false`, or `null`.
	StringValue string
	// @wg:on_kinds ListValue
	ListValue []Value
	// @wg:on_kinds ObjectValue
	ObjectValue  []ObjectField
	BooleanValue bool
	Kind         ValueKind
}

// ObjectField is one name/value pair inside an object value literal.
type ObjectField struct {
	Name  string
	Value Value
}

// 2.10 Variables
// http://facebook.github.io/graphql/June2018/#sec-Language.Variables

// VariableDefinition declares an operation variable, its type, and an
// optional default value.
type VariableDefinition struct {
	Name         string
	Type         Type
	DefaultValue *Value
}

// 2.11 Type References
// http://facebook.github.io/graphql/June2018/#sec-Type-References
// @wg:field self
const (
	TypeKindNamed TypeKind = iota
	TypeKindList
)

// TypeKind discriminates named type references from list type references.
type TypeKind int8

// String returns a label for the kind, or "InvalidType" for values outside
// the declared range.
func (k TypeKind) String() string {
	if k < TypeKindNamed || k > TypeKindList {
		return "InvalidType"
	}
	return [...]string{"NamedType", "ListType"}[k]
}
// Type is a GraphQL type reference. For TypeKindNamed, NamedType holds the
// name; for TypeKindList, ListType holds the element type. NonNullable
// applies to the outermost reference.
type Type struct {
	NamedType   string
	ListType    *Type
	NonNullable bool
	Kind        TypeKind
}
// String renders the type reference in GraphQL syntax, e.g. "[Int!]!",
// recursing through nested list types.
func (t Type) String() string {
	if t.ListType == nil {
		return nonNullable(t.NonNullable, t.NamedType)
	}
	return nonNullable(t.NonNullable, "["+t.ListType.String()+"]")
}
// nonNullable appends the GraphQL "!" marker to typeName when isNN is true.
func nonNullable(isNN bool, typeName string) string {
	suffix := ""
	if isNN {
		suffix = "!"
	}
	return typeName + suffix
}
// 2.12 Directives
// http://facebook.github.io/graphql/June2018/#sec-Language.Directives
// Directive :

// Directive is a directive usage with its arguments and the location at
// which it appeared.
type Directive struct {
	Name      string
	Arguments *Arguments
	Location  DirectiveLocation
}

// 3.0 NamedType System
// http://facebook.github.io/graphql/June2018/#TypeSystemDefinition
const (
	// @wg:field SchemaDefinition
	TypeSystemDefinitionKindSchema TypeSystemDefinitionKind = iota
	// @wg:field TypeDefinition
	TypeSystemDefinitionKindType
	// @wg:field DirectiveDefinition
	TypeSystemDefinitionKindDirective
)

// TypeSystemDefinitionKind discriminates which pointer of a
// TypeSystemDefinition is populated.
type TypeSystemDefinitionKind int8

// String returns a human-readable label for the kind, or "invalid" for
// values outside the declared range.
func (k TypeSystemDefinitionKind) String() string {
	if k < TypeSystemDefinitionKindSchema || k > TypeSystemDefinitionKindDirective {
		return "invalid"
	}
	return [...]string{"schema", "type", "directive"}[k]
}
// TypeSystemDefinition is a tagged union of a schema, type, or directive
// definition, selected by Kind.
type TypeSystemDefinition struct {
	SchemaDefinition    *SchemaDefinition
	TypeDefinition      *TypeDefinition
	DirectiveDefinition *DirectiveDefinition
	Kind                TypeSystemDefinitionKind
}

// 3.1 Type System Extensions
const (
	// @wg:field SchemaExtension
	TypeSystemExtensionKindSchema TypeSystemExtensionKind = iota
	// @wg:field TypeExtension
	TypeSystemExtensionKindType
)

// TypeSystemExtensionKind discriminates which pointer of a
// TypeSystemExtension is populated.
type TypeSystemExtensionKind uint8

// String returns a human-readable label for the kind, or "invalid" for
// values outside the declared range. Added for consistency with the other
// kind enums in this package, which all implement fmt.Stringer.
func (k TypeSystemExtensionKind) String() string {
	switch k {
	case TypeSystemExtensionKindSchema:
		return "schema"
	case TypeSystemExtensionKindType:
		return "type"
	}
	return "invalid"
}
// TypeSystemExtension is a tagged union of a schema or type extension,
// selected by Kind.
type TypeSystemExtension struct {
	SchemaExtension *SchemaExtension
	TypeExtension   *TypeExtension
	Kind            TypeSystemExtensionKind
}
// 3.2 Schema
// http://facebook.github.io/graphql/June2018/#sec-Schema

// SchemaDefinition declares the schema's directives and its root operation
// types.
type SchemaDefinition struct {
	Directives               *Directives
	OperationTypeDefinitions *OperationTypeDefinitions
}

// 3.2.2 Schema Extension

// SchemaExtension extends an existing schema with additional directives
// and/or root operation types.
type SchemaExtension struct {
	Directives               *Directives
	OperationTypeDefinitions *OperationTypeDefinitions
}

// OperationTypeDefinition binds an operation kind (query/mutation/
// subscription) to a named type.
type OperationTypeDefinition struct {
	NamedType     Type
	OperationType OperationDefinitionKind
}

// 3.4 Types
// http://facebook.github.io/graphql/June2018/#sec-Types
// @wg:field self
const (
	TypeDefinitionKindScalar TypeDefinitionKind = iota
	TypeDefinitionKindObject
	TypeDefinitionKindInterface
	TypeDefinitionKindUnion
	TypeDefinitionKindEnum
	TypeDefinitionKindInputObject
)

// TypeDefinitionKind discriminates the six kinds of GraphQL type definition.
type TypeDefinitionKind int8

// String returns a human-readable label for the kind, or "unknown" for
// values outside the declared range.
func (k TypeDefinitionKind) String() string {
	if k < TypeDefinitionKindScalar || k > TypeDefinitionKindInputObject {
		return "unknown"
	}
	return [...]string{
		"scalar",
		"object",
		"interface",
		"union",
		"enum",
		"input object",
	}[k]
}
// TypeDefinition describes one named type. Only the subset of fields
// relevant to Kind is populated (e.g. FieldsDefinition for objects and
// interfaces, EnumValuesDefinition for enums).
type TypeDefinition struct {
	Description          string
	Name                 string
	ImplementsInterface  *Types // Only allow "TypeKindNamed" kind NamedType.
	Directives           *Directives
	FieldsDefinition     *FieldDefinitions
	UnionMemberTypes     *Types // Only allow "TypeKindNamed" kind NamedType.
	EnumValuesDefinition *EnumValueDefinitions
	InputFieldsDefinition *InputValueDefinitions
	Kind                 TypeDefinitionKind
}
// EnumValueDefinitionByName scans an enum type definition for the value
// named valueName. found is false when d is not an enum definition or no
// value matches.
// NOTE(review): this loop terminates on `i < ...Len()` while
// FieldDefinitionByName below terminates on `i >= 0` — confirm both match
// the generator's end-of-iteration contract, and whether a Len()==0 fast
// path is needed here as it is below.
func (d TypeDefinition) EnumValueDefinitionByName(valueName string) (EnumValueDefinition, bool) {
	var evd EnumValueDefinition
	var found bool
	if IsEnumTypeDefinition(&d) {
		gen := d.EnumValuesDefinition.Generator()
		for ievd, i := gen.Next(); i < d.EnumValuesDefinition.Len(); ievd, i = gen.Next() {
			if ievd.EnumValue == valueName {
				evd = ievd
				found = true
				break
			}
		}
	}
	return evd, found
}

// FieldDefinitionByName scans an object or interface type definition for
// the field named fieldName; the bool result is false on a miss or for
// other definition kinds.
func (d TypeDefinition) FieldDefinitionByName(fieldName string) (FieldDefinition, bool) {
	if IsObjectTypeDefinition(&d) || IsInterfaceTypeDefinition(&d) {
		if d.FieldsDefinition.Len() == 0 {
			return FieldDefinition{}, false
		}
		gen := d.FieldsDefinition.Generator()
		for ifd, i := gen.Next(); i >= 0; ifd, i = gen.Next() {
			if ifd.Name == fieldName {
				return ifd, true
			}
		}
	}
	return FieldDefinition{}, false
}
// IsScalarTypeDefinition reports whether def declares a scalar type.
func IsScalarTypeDefinition(def *TypeDefinition) bool {
	return def.Kind == TypeDefinitionKindScalar
}

// IsObjectTypeDefinition reports whether def declares an object type.
func IsObjectTypeDefinition(def *TypeDefinition) bool {
	return def.Kind == TypeDefinitionKindObject
}

// IsInterfaceTypeDefinition reports whether def declares an interface type.
func IsInterfaceTypeDefinition(def *TypeDefinition) bool {
	return def.Kind == TypeDefinitionKindInterface
}

// IsUnionTypeDefinition reports whether def declares a union type.
func IsUnionTypeDefinition(def *TypeDefinition) bool {
	return def.Kind == TypeDefinitionKindUnion
}

// IsEnumTypeDefinition reports whether def declares an enum type.
func IsEnumTypeDefinition(def *TypeDefinition) bool {
	return def.Kind == TypeDefinitionKindEnum
}

// IsInputObjectTypeDefinition reports whether def declares an input object type.
func IsInputObjectTypeDefinition(def *TypeDefinition) bool {
	return def.Kind == TypeDefinitionKindInputObject
}

// FieldDefinition declares one field of an object or interface type.
type FieldDefinition struct {
	Description         string
	Name                string
	ArgumentsDefinition *InputValueDefinitions
	Type                Type
	Directives          *Directives
}

// EnumValueDefinition declares one member of an enum type.
type EnumValueDefinition struct {
	Description string
	Directives  *Directives
	EnumValue   string // Name but not true or false or null.
}
// 3.4.3 Type Extensions
// @wg:field self
const (
	TypeExtensionKindScalar TypeExtensionKind = iota
	TypeExtensionKindObject
	TypeExtensionKindInterface
	TypeExtensionKindUnion
	TypeExtensionKindEnum
	TypeExtensionKindInputObject
)

// TypeExtensionKind discriminates the six kinds of GraphQL type extension.
// NOTE(review): unlike TypeDefinitionKind this enum has no String method —
// consider adding one for consistency.
type TypeExtensionKind int8

// TypeExtension extends an existing named type. Only the subset of fields
// relevant to Kind is populated, mirroring TypeDefinition.
type TypeExtension struct {
	Directives            *Directives
	ImplementsInterface   *Types // Only allow "TypeKindNamed" kind NamedType.
	FieldsDefinition      *FieldDefinitions
	UnionMemberTypes      *Types // Only allow "TypeKindNamed" kind NamedType.
	EnumValuesDefinition  *EnumValueDefinitions
	InputFieldsDefinition *InputValueDefinitions
	Name                  string
	Kind                  TypeExtensionKind
}
// IsScalarTypeExtension reports whether def extends a scalar type.
func IsScalarTypeExtension(def *TypeExtension) bool {
	return def.Kind == TypeExtensionKindScalar
}

// IsObjectTypeExtension reports whether def extends an object type.
func IsObjectTypeExtension(def *TypeExtension) bool {
	return def.Kind == TypeExtensionKindObject
}

// IsInterfaceTypeExtension reports whether def extends an interface type.
func IsInterfaceTypeExtension(def *TypeExtension) bool {
	return def.Kind == TypeExtensionKindInterface
}

// IsUnionTypeExtension reports whether def extends a union type.
func IsUnionTypeExtension(def *TypeExtension) bool {
	return def.Kind == TypeExtensionKindUnion
}

// IsEnumTypeExtension reports whether def extends an enum type.
func IsEnumTypeExtension(def *TypeExtension) bool {
	return def.Kind == TypeExtensionKindEnum
}

// IsInputObjectTypeExtension reports whether def extends an input object type.
func IsInputObjectTypeExtension(def *TypeExtension) bool {
	return def.Kind == TypeExtensionKindInputObject
}

// 3.6 Objects
// http://facebook.github.io/graphql/June2018/#sec-Objects

// InputValueDefinition declares one input value (field argument or input
// object field) with its type and optional default.
type InputValueDefinition struct {
	Description  string
	Name         string
	Type         Type
	Directives   *Directives
	DefaultValue *Value
}
// 3.13 Directives
// http://facebook.github.io/graphql/June2018/#sec-Type-System.Directives

// DirectiveLocationsByName maps spec-defined location names to their bit
// values. It must remain the exact inverse of NamesByDirectiveLocations.
var DirectiveLocationsByName = map[string]DirectiveLocation{
	"QUERY":                  DirectiveLocationKindQuery,
	"MUTATION":               DirectiveLocationKindMutation,
	"SUBSCRIPTION":           DirectiveLocationKindSubscription,
	"FIELD":                  DirectiveLocationKindField,
	"FRAGMENT_DEFINITION":    DirectiveLocationKindFragmentDefinition,
	"FRAGMENT_SPREAD":        DirectiveLocationKindFragmentSpread,
	"INLINE_FRAGMENT":        DirectiveLocationKindInlineFragment,
	"VARIABLE_DEFINITION":    DirectiveLocationKindVariableDefinition,
	"SCHEMA":                 DirectiveLocationKindSchema,
	"SCALAR":                 DirectiveLocationKindScalar,
	"OBJECT":                 DirectiveLocationKindObject,
	"FIELD_DEFINITION":       DirectiveLocationKindFieldDefinition,
	"ARGUMENT_DEFINITION":    DirectiveLocationKindArgumentDefinition,
	"INTERFACE":              DirectiveLocationKindInterface,
	"UNION":                  DirectiveLocationKindUnion,
	"ENUM":                   DirectiveLocationKindEnum,
	"ENUM_VALUE":             DirectiveLocationKindEnumValue,
	"INPUT_OBJECT":           DirectiveLocationKindInputObject,
	"INPUT_FIELD_DEFINITION": DirectiveLocationKindInputFieldDefinition,
}

// NamesByDirectiveLocations is the inverse of DirectiveLocationsByName and
// backs DirectiveLocation.String.
var NamesByDirectiveLocations = map[DirectiveLocation]string{
	DirectiveLocationKindQuery:                "QUERY",
	DirectiveLocationKindMutation:             "MUTATION",
	DirectiveLocationKindSubscription:         "SUBSCRIPTION",
	DirectiveLocationKindField:                "FIELD",
	DirectiveLocationKindFragmentDefinition:   "FRAGMENT_DEFINITION",
	DirectiveLocationKindFragmentSpread:       "FRAGMENT_SPREAD",
	DirectiveLocationKindInlineFragment:       "INLINE_FRAGMENT",
	DirectiveLocationKindVariableDefinition:   "VARIABLE_DEFINITION",
	DirectiveLocationKindSchema:               "SCHEMA",
	DirectiveLocationKindScalar:               "SCALAR",
	DirectiveLocationKindObject:               "OBJECT",
	DirectiveLocationKindFieldDefinition:      "FIELD_DEFINITION",
	DirectiveLocationKindArgumentDefinition:   "ARGUMENT_DEFINITION",
	DirectiveLocationKindInterface:            "INTERFACE",
	DirectiveLocationKindUnion:                "UNION",
	DirectiveLocationKindEnum:                 "ENUM",
	DirectiveLocationKindEnumValue:            "ENUM_VALUE",
	DirectiveLocationKindInputObject:          "INPUT_OBJECT",
	DirectiveLocationKindInputFieldDefinition: "INPUT_FIELD_DEFINITION",
}
const (
	// Each location is a distinct single-bit flag (1 << iota) so a
	// DirectiveDefinition can OR together every location it permits.
	DirectiveLocationKindQuery DirectiveLocation = 1 << iota
	DirectiveLocationKindMutation
	DirectiveLocationKindSubscription
	DirectiveLocationKindField
	DirectiveLocationKindFragmentDefinition
	DirectiveLocationKindFragmentSpread
	DirectiveLocationKindInlineFragment
	DirectiveLocationKindVariableDefinition
	DirectiveLocationKindSchema
	DirectiveLocationKindScalar
	DirectiveLocationKindObject
	DirectiveLocationKindFieldDefinition
	DirectiveLocationKindArgumentDefinition
	DirectiveLocationKindInterface
	DirectiveLocationKindUnion
	DirectiveLocationKindEnum
	DirectiveLocationKindEnumValue
	DirectiveLocationKindInputObject
	DirectiveLocationKindInputFieldDefinition
)

// DirectiveLocation is a bit set of directive locations.
type DirectiveLocation int32

// String returns the spec name of a single-bit location; combined or
// unknown masks yield "" (missing map entry).
func (l DirectiveLocation) String() string {
	return NamesByDirectiveLocations[l]
}
// DirectiveDefinition declares a directive: its arguments and the bit set
// of locations where it may be used.
type DirectiveDefinition struct {
	Description         string
	Name                string
	ArgumentsDefinition *InputValueDefinitions
	DirectiveLocations  DirectiveLocation
}
package buildroot
import (
"github.com/0xPolygon/polygon-edge/helper/keccak"
itrie "github.com/0xPolygon/polygon-edge/state/immutable-trie"
"github.com/0xPolygon/polygon-edge/types"
"github.com/umbracle/fastrlp"
)
// arenaPool recycles fastrlp arenas across root calculations to avoid
// per-call allocation.
var arenaPool fastrlp.ArenaPool

// CalculateReceiptsRoot calculates the root of a list of receipts
func CalculateReceiptsRoot(receipts []*types.Receipt) types.Hash {
	ar := arenaPool.Get()
	res := calculateRootWithRlp(len(receipts), func(i int) *fastrlp.Value {
		// Reset before each marshal so one arena is reused for every
		// receipt; the returned value is consumed (MarshalTo'd) before the
		// next callback invocation.
		ar.Reset()
		return receipts[i].MarshalRLPWith(ar)
	})
	// NOTE(review): Put is not deferred, so a panic inside MarshalRLPWith
	// would keep the arena out of the pool — confirm this is acceptable.
	arenaPool.Put(ar)
	return res
}
// CalculateTransactionsRoot calculates the root of a list of transactions
// (same arena-reuse pattern as CalculateReceiptsRoot).
func CalculateTransactionsRoot(transactions []*types.Transaction) types.Hash {
	ar := arenaPool.Get()
	res := calculateRootWithRlp(len(transactions), func(i int) *fastrlp.Value {
		ar.Reset()
		return transactions[i].MarshalRLPWith(ar)
	})
	arenaPool.Put(ar)
	return res
}
// CalculateUncleRoot calculates the root of a list of uncles
func CalculateUncleRoot(uncles []*types.Header) types.Hash {
	// Fast path: the empty uncle list has a well-known constant hash.
	if len(uncles) == 0 {
		return types.EmptyUncleHash
	}
	a := arenaPool.Get()
	v := a.NewArray()
	for _, i := range uncles {
		v.Set(i.MarshalRLPWith(a))
	}
	// Uncles are hashed as a single RLP list, not derived via a trie.
	root := keccak.Keccak256Rlp(nil, v)
	arenaPool.Put(a)
	return types.BytesToHash(root)
}
// calculateRootWithRlp adapts an RLP-value callback into the byte-slice
// callback expected by CalculateRoot.
func calculateRootWithRlp(num int, h func(indx int) *fastrlp.Value) types.Hash {
	return CalculateRoot(num, func(indx int) []byte {
		return h(indx).MarshalTo(nil)
	})
}
// CalculateRoot calculates a root with a callback
//
// h must return the RLP-encoded payload for element indx. Lists of up to
// 128 elements first try a pooled fast hasher; if it declines (ok false)
// the trie-based slow path is used.
func CalculateRoot(num int, h func(indx int) []byte) types.Hash {
	if num == 0 {
		return types.EmptyRootHash
	}
	if num <= 128 {
		fastH := acquireFastHasher()
		dst, ok := fastH.Hash(num, h)
		// important to copy the return before releasing the hasher
		res := types.BytesToHash(dst)
		releaseFastHasher(fastH)
		if ok {
			return res
		}
	}
	// fallback to slow hash
	return types.BytesToHash(deriveSlow(num, h))
}
// numArenaPool recycles the arenas used to RLP-encode trie keys below.
var numArenaPool fastrlp.ArenaPool

// deriveSlow computes the canonical root by inserting every element into a
// fresh immutable trie, keyed by the RLP encoding of its index.
func deriveSlow(num int, h func(indx int) []byte) []byte {
	t := itrie.NewTrie()
	txn := t.Txn()
	ar := numArenaPool.Get()
	for i := 0; i < num; i++ {
		indx := ar.NewUint(uint64(i))
		txn.Insert(indx.MarshalTo(nil), h(i))
		ar.Reset()
	}
	numArenaPool.Put(ar)
	x, _ := txn.Hash()
	return x
}
package ot
// OpType identifies the kind of a primitive text operation.
type OpType int

const (
	OpRetain OpType = iota // keep a span unchanged
	OpInsert               // insert new text
	OpDelete               // remove a span
)

// Op is a single primitive operation in an operational-transformation
// sequence.
type Op interface {
	// Type reports the operation kind.
	Type() OpType
	// Span is the length the operation covers (bytes, for inserts).
	Span() int
	// IsZero reports whether the operation is empty (a no-op).
	IsZero() bool
}
// RetainOp keeps the next n characters of the document unchanged.
type RetainOp int

// Type reports OpRetain.
func (p RetainOp) Type() OpType {
	return OpRetain
}

// Span is the number of characters retained.
func (p RetainOp) Span() int {
	return int(p)
}

// IsZero reports whether the retain covers nothing.
func (p RetainOp) IsZero() bool {
	return p == 0
}
// ComposeRetain composes two sequential retains: the overlapping span is
// merged and whichever side is longer keeps the excess as its remainder.
func (p RetainOp) ComposeRetain(q RetainOp) (Op, Op, Op) {
	if p > q {
		return q, p - q, Noop
	}
	return p, Noop, q - p
}

// ComposeDelete composes a retain with a following delete: the delete
// consumes up to p retained characters; the longer side keeps the
// remainder.
func (p RetainOp) ComposeDelete(q DeleteOp) (Op, Op, Op) {
	if int(p) < int(q) {
		return DeleteOp(p), Noop, q - DeleteOp(p)
	}
	return q, p - RetainOp(q), Noop
}
// TransformRetain resolves p against a concurrent retain q: the common span
// min(p, q) is emitted for both sides and the longer op keeps the excess.
// The result order appears to be (pHead, qHead, pRest, qRest) — confirm
// against the transform driver.
func (p RetainOp) TransformRetain(q RetainOp) (Op, Op, Op, Op) {
	switch {
	case p > q:
		return q, q, p - q, Noop
	default:
		return p, p, Noop, q - p
	}
}

// TransformDelete resolves this retain against a concurrent delete: the
// deleted span cancels the retain (no retain head is emitted) and whichever
// op is longer keeps its remainder.
func (p RetainOp) TransformDelete(q DeleteOp) (Op, Op, Op, Op) {
	switch {
	case int(p) > int(q):
		return Noop, q, p - RetainOp(q), Noop
	case int(p) < int(q):
		return Noop, DeleteOp(p), Noop, q - DeleteOp(p)
	default:
		return Noop, DeleteOp(p), Noop, Noop
	}
}

// Noop is the empty operation: a retain of zero characters.
var Noop = RetainOp(0)
// InsertOp inserts its text into the document.
type InsertOp string

// Type reports OpInsert.
func (p InsertOp) Type() OpType {
	return OpInsert
}

// Span is the length of the inserted text in bytes.
func (p InsertOp) Span() int {
	return len(p)
}

// IsZero reports whether the insert is empty.
func (p InsertOp) IsZero() bool {
	return p == ""
}

// ComposeRetain composes the insert with a following retain: the retain
// consumes up to q bytes of the inserted text; the longer side keeps the
// remainder.
func (p InsertOp) ComposeRetain(q RetainOp) (Op, Op, Op) {
	switch {
	case len(p) > int(q):
		return p[:int(q)], p[int(q):], Noop
	default:
		return p, Noop, q - RetainOp(len(p))
	}
}

// ComposeDelete composes the insert with a following delete: the delete
// cancels the first q bytes of the inserted text, so nothing is emitted
// for the cancelled portion.
func (p InsertOp) ComposeDelete(q DeleteOp) (Op, Op, Op) {
	switch {
	case len(p) > int(q):
		return Noop, p[int(q):], Noop
	case len(p) < int(q):
		return Noop, Noop, q - DeleteOp(len(p))
	default:
		return Noop, Noop, Noop
	}
}
// DeleteOp removes the next n characters of the document.
type DeleteOp int

// Type reports OpDelete.
func (p DeleteOp) Type() OpType {
	return OpDelete
}

// Span is the number of characters deleted.
func (p DeleteOp) Span() int {
	return int(p)
}

// IsZero reports whether the delete covers nothing.
func (p DeleteOp) IsZero() bool {
	return p == 0
}

// TransformRetain resolves this delete against a concurrent retain: the
// retained span up to min(p, q) is deleted, and the longer op keeps its
// remainder.
func (p DeleteOp) TransformRetain(q RetainOp) (Op, Op, Op, Op) {
	switch {
	case int(p) > int(q):
		return DeleteOp(q), Noop, p - DeleteOp(q), Noop
	default:
		return p, Noop, Noop, q - RetainOp(p)
	}
}

// TransformDelete resolves two concurrent deletes of the same span: the
// overlap is dropped on both sides and only the longer delete keeps a
// remainder.
func (p DeleteOp) TransformDelete(q DeleteOp) (Op, Op, Op, Op) {
	switch {
	case p > q:
		return Noop, Noop, p - q, Noop
	case p < q:
		return Noop, Noop, Noop, q - p
	default:
		return Noop, Noop, Noop, Noop
	}
}
// RetainComposer is implemented by ops that can be composed with a
// following RetainOp.
type RetainComposer interface {
	ComposeRetain(q RetainOp) (Op, Op, Op)
}

// DeleteComposer is implemented by ops that can be composed with a
// following DeleteOp.
type DeleteComposer interface {
	ComposeDelete(q DeleteOp) (Op, Op, Op)
}

// RetainTransformer is implemented by ops that can be transformed against
// a concurrent RetainOp.
type RetainTransformer interface {
	TransformRetain(q RetainOp) (Op, Op, Op, Op)
}

// DeleteTransformer is implemented by ops that can be transformed against
// a concurrent DeleteOp.
type DeleteTransformer interface {
	TransformDelete(q DeleteOp) (Op, Op, Op, Op)
}
package chart
import (
"math"
util "github.com/leesjensen/go-chart/util"
)
// YAxis is a vertical rule of the range.
// There can be (2) y-axes; a primary and secondary.
type YAxis struct {
	Name      string
	NameStyle Style
	Style     Style
	// Zero is an optional extra gridline, rendered when its style is Show.
	Zero     GridLine
	AxisType YAxisType
	// NOTE(review): Ascending is not referenced by any method in this file —
	// confirm where it is consumed.
	Ascending      bool
	ValueFormatter ValueFormatter
	Range          Range
	TickStyle Style
	// Ticks, when non-empty, overrides generated ticks (see GetTicks).
	Ticks []Tick
	// GridLines, when non-empty, overrides generated gridlines (see GetGridLines).
	GridLines      []GridLine
	GridMajorStyle Style
	GridMinorStyle Style
}
// GetName returns the name.
func (ya YAxis) GetName() string {
	return ya.Name
}

// GetNameStyle returns the name style.
func (ya YAxis) GetNameStyle() Style {
	return ya.NameStyle
}

// GetStyle returns the style.
func (ya YAxis) GetStyle() Style {
	return ya.Style
}

// GetValueFormatter returns the value formatter for the axis, falling back
// to FloatValueFormatter when none is configured.
func (ya YAxis) GetValueFormatter() ValueFormatter {
	if ya.ValueFormatter != nil {
		return ya.ValueFormatter
	}
	return FloatValueFormatter
}

// GetTickStyle returns the tick style.
func (ya YAxis) GetTickStyle() Style {
	return ya.TickStyle
}
// GetTicks returns the ticks for a series.
// The coalesce priority is:
// - User Supplied Ticks (i.e. Ticks array on the axis itself).
// - Range ticks (i.e. if the range provides ticks).
// - Generating continuous ticks based on minimum spacing and canvas width.
func (ya YAxis) GetTicks(r Renderer, ra Range, defaults Style, vf ValueFormatter) []Tick {
	if len(ya.Ticks) > 0 {
		return ya.Ticks
	}
	if tp, isTickProvider := ra.(TicksProvider); isTickProvider {
		return tp.GetTicks(r, defaults, vf)
	}
	// true: ticks are generated for a vertical axis.
	tickStyle := ya.Style.InheritFrom(defaults)
	return GenerateContinuousTicks(r, ra, true, tickStyle, vf)
}
// GetGridLines returns the user supplied gridlines when any were set,
// otherwise gridlines generated from the given ticks.
func (ya YAxis) GetGridLines(ticks []Tick) []GridLine {
	if len(ya.GridLines) == 0 {
		return GenerateGridLines(ticks, ya.GridMajorStyle, ya.GridMinorStyle)
	}
	return ya.GridLines
}
// Measure returns the bounds of the axis.
//
// It measures each tick label at its translated y position and accumulates
// the smallest box containing the rule, the labels, and (when shown) space
// for the rotated axis name.
func (ya YAxis) Measure(r Renderer, canvasBox Box, ra Range, defaults Style, ticks []Tick) Box {
	// tx is where tick labels are drawn: right of the plot for the primary
	// axis, left of it for the secondary axis.
	var tx int
	if ya.AxisType == YAxisPrimary {
		tx = canvasBox.Right + DefaultYAxisMargin
	} else if ya.AxisType == YAxisSecondary {
		tx = canvasBox.Left - DefaultYAxisMargin
	}
	ya.TickStyle.InheritFrom(ya.Style.InheritFrom(defaults)).WriteToRenderer(r)
	// NOTE(review): with zero ticks the MaxInt32 seeds are never lowered and
	// the returned box is degenerate — confirm callers always pass ticks.
	var minx, maxx, miny, maxy = math.MaxInt32, 0, math.MaxInt32, 0
	var maxTextHeight int
	for _, t := range ticks {
		v := t.Value
		ly := canvasBox.Bottom - ra.Translate(v)
		tb := r.MeasureText(t.Label)
		tbh2 := tb.Height() >> 1
		// Secondary-axis labels are right-aligned against tx.
		finalTextX := tx
		if ya.AxisType == YAxisSecondary {
			finalTextX = tx - tb.Width()
		}
		maxTextHeight = util.Math.MaxInt(tb.Height(), maxTextHeight)
		if ya.AxisType == YAxisPrimary {
			minx = canvasBox.Right
			maxx = util.Math.MaxInt(maxx, tx+tb.Width())
		} else if ya.AxisType == YAxisSecondary {
			minx = util.Math.MinInt(minx, finalTextX)
			maxx = util.Math.MaxInt(maxx, tx)
		}
		miny = util.Math.MinInt(miny, ly-tbh2)
		maxy = util.Math.MaxInt(maxy, ly+tbh2)
	}
	// Reserve a margin plus one text height for the rotated axis name.
	if ya.NameStyle.Show && len(ya.Name) > 0 {
		maxx += (DefaultYAxisMargin + maxTextHeight)
	}
	return Box{
		Top:    miny,
		Left:   minx,
		Right:  maxx,
		Bottom: maxy,
	}
}
// Render renders the axis.
//
// Drawing order: the vertical rule, then each tick (line across the plot
// plus its label), then the optional rotated axis name, the optional zero
// gridline, and finally major/minor gridlines.
func (ya YAxis) Render(r Renderer, canvasBox Box, ra Range, defaults Style, ticks []Tick) {
	tickStyle := ya.TickStyle.InheritFrom(ya.Style.InheritFrom(defaults))
	tickStyle.WriteToRenderer(r)
	sw := tickStyle.GetStrokeWidth(defaults.StrokeWidth)
	// lx is the x of the axis rule; tx is where labels start, offset by the
	// margin on the appropriate side.
	var lx int
	var tx int
	if ya.AxisType == YAxisPrimary {
		lx = canvasBox.Right + int(sw)
		tx = lx + DefaultYAxisMargin
	} else if ya.AxisType == YAxisSecondary {
		lx = canvasBox.Left - int(sw)
		tx = lx - DefaultYAxisMargin
	}
	r.MoveTo(lx, canvasBox.Bottom)
	r.LineTo(lx, canvasBox.Top)
	r.Stroke()
	var maxTextWidth int
	var finalTextX, finalTextY int
	for _, t := range ticks {
		v := t.Value
		ly := canvasBox.Bottom - ra.Translate(v)
		tb := Draw.MeasureText(r, t.Label, tickStyle)
		// Track the widest label so the axis name can be placed beyond it.
		if tb.Width() > maxTextWidth {
			maxTextWidth = tb.Width()
		}
		if ya.AxisType == YAxisSecondary {
			finalTextX = tx - tb.Width()
		} else {
			finalTextX = tx
		}
		// Unrotated labels are vertically centered on the tick.
		if tickStyle.TextRotationDegrees == 0 {
			finalTextY = ly + tb.Height()>>1
		} else {
			finalTextY = ly
		}
		tickStyle.WriteToRenderer(r)
		r.MoveTo(lx, ly)
		if ya.AxisType == YAxisPrimary {
			// r.LineTo(lx+DefaultHorizontalTickWidth, ly)
			r.LineTo(canvasBox.Left, ly)
		} else if ya.AxisType == YAxisSecondary {
			// r.LineTo(lx-DefaultHorizontalTickWidth, ly)
			r.LineTo(canvasBox.Right, ly)
		}
		r.Stroke()
		Draw.Text(r, t.Label, finalTextX, finalTextY, tickStyle)
	}
	// The axis name defaults to 90-degree rotation alongside the labels.
	nameStyle := ya.NameStyle.InheritFrom(defaults.InheritFrom(Style{TextRotationDegrees: 90}))
	if ya.NameStyle.Show && len(ya.Name) > 0 {
		nameStyle.GetTextOptions().WriteToRenderer(r)
		tb := Draw.MeasureText(r, ya.Name, nameStyle)
		var tx int
		if ya.AxisType == YAxisPrimary {
			tx = canvasBox.Right + int(sw) + DefaultYAxisMargin + maxTextWidth + DefaultYAxisMargin
		} else if ya.AxisType == YAxisSecondary {
			tx = canvasBox.Left - (DefaultYAxisMargin + int(sw) + maxTextWidth + DefaultYAxisMargin)
		}
		var ty int
		if nameStyle.TextRotationDegrees == 0 {
			ty = canvasBox.Top + (canvasBox.Height()>>1 - tb.Width()>>1)
		} else {
			ty = canvasBox.Top + (canvasBox.Height()>>1 - tb.Height()>>1)
		}
		Draw.Text(r, ya.Name, tx, ty, nameStyle)
	}
	if ya.Zero.Style.Show {
		ya.Zero.Render(r, canvasBox, ra, false, Style{})
	}
	if ya.GridMajorStyle.Show || ya.GridMinorStyle.Show {
		for _, gl := range ya.GetGridLines(ticks) {
			if (gl.IsMinor && ya.GridMinorStyle.Show) || (!gl.IsMinor && ya.GridMajorStyle.Show) {
				defaults := ya.GridMajorStyle
				if gl.IsMinor {
					defaults = ya.GridMinorStyle
				}
				gl.Render(r, canvasBox, ra, false, gl.Style.InheritFrom(defaults))
			}
		}
	}
}
package lib
import "math"
import "sort"
import "fmt"
import "strings"
import "strconv"
// HistogramInt64 statistical histogram.
//
// It maintains running moments (count, sum, sum of squares), min/max, and a
// fixed-width bucket array with dedicated underflow/overflow buckets at the
// ends. There is no internal locking.
type HistogramInt64 struct {
	// stats
	n      int64
	minval int64
	maxval int64
	sum    int64
	sumsq  float64
	// histogram layout: [underflow, bucket0 .. bucketN, overflow] counts.
	histogram []int64
	// setup
	init  bool // true once the first sample has seeded minval/maxval
	from  int64
	till  int64
	width int64
}

// NewhistorgramInt64 return a new histogram object.
//
// Samples in [from,till) land in fixed-width buckets; samples below `from`
// and at or above `till` land in the under/overflow buckets. Both bounds
// are first truncated to multiples of width.
func NewhistorgramInt64(from, till, width int64) *HistogramInt64 {
	from = (from / width) * width
	till = (till / width) * width
	h := &HistogramInt64{from: from, till: till, width: width}
	// one underflow bucket + range buckets + one overflow bucket
	h.histogram = make([]int64, 1+((till-from)/width)+1)
	return h
}

// Add a sample to this histogram: updates the moments, min/max, and the
// bucket the sample falls into.
func (h *HistogramInt64) Add(sample int64) {
	h.n++
	h.sum += sample
	f := float64(sample)
	h.sumsq += f * f
	// Seed BOTH extremes from the first sample. Previously only minval was
	// guarded by init, so maxval stayed at its zero value when every sample
	// was negative and Max() incorrectly reported 0.
	if !h.init {
		h.minval = sample
		h.maxval = sample
		h.init = true
	} else {
		if sample < h.minval {
			h.minval = sample
		}
		if sample > h.maxval {
			h.maxval = sample
		}
	}
	switch {
	case sample < h.from:
		h.histogram[0]++
	case sample >= h.till:
		h.histogram[len(h.histogram)-1]++
	default:
		h.histogram[((sample-h.from)/h.width)+1]++
	}
}
// Min return minimum value from sample.
// Returns the zero value until the first Add.
func (h *HistogramInt64) Min() int64 {
	return h.minval
}

// Max return maximum value from sample.
// Returns the zero value until the first Add.
func (h *HistogramInt64) Max() int64 {
	return h.maxval
}

// Samples return total number of samples in the set.
func (h *HistogramInt64) Samples() int64 {
	return h.n
}

// Sum return the sum of all sample values.
func (h *HistogramInt64) Sum() int64 {
	return h.sum
}

// Mean return the average value of all samples.
// The division is performed in float64 and truncated back to int64;
// returns 0 when there are no samples.
func (h *HistogramInt64) Mean() int64 {
	if h.n == 0 {
		return 0
	}
	return int64(float64(h.sum) / float64(h.n))
}
// Variance return the squared deviation of a random sample from
// its mean.
// Computed from the running moments as E[x^2] - mean^2, truncated to int64.
// NOTE(review): meanF uses the already-truncated Mean(), which introduces a
// small bias for non-integer means — confirm this precision is acceptable.
func (h *HistogramInt64) Variance() int64 {
	if h.n == 0 {
		return 0
	}
	nF, meanF := float64(h.n), float64(h.Mean())
	return int64((h.sumsq / nF) - (meanF * meanF))
}

// SD return by how much the samples differ from the mean value of
// sample set (the square root of Variance, truncated to int64).
func (h *HistogramInt64) SD() int64 {
	if h.n == 0 {
		return 0
	}
	return int64(math.Sqrt(float64(h.Variance())))
}
// Clone copies the entire instance, including a deep copy of the bucket
// slice so the clone can be mutated independently.
func (h *HistogramInt64) Clone() *HistogramInt64 {
	clone := *h
	clone.histogram = append([]int64(nil), h.histogram...)
	return &clone
}
// Stats return a map of histogram.
//
// Each bucket up to the highest non-empty bucket is keyed by its lower
// bound (as a decimal string) and maps to the cumulative sample count up to
// and including that bucket; the highest non-empty bucket itself is keyed
// "+". Returns an empty map when no bucket is populated.
// NOTE(review): bucket 0 is the underflow bucket (samples < from) but is
// keyed with `from` itself — confirm that labelling is intended.
func (h *HistogramInt64) Stats() map[string]int64 {
	m := make(map[string]int64)
	cumm := int64(0)
	// Scan down to locate the highest non-empty bucket i, then accumulate
	// from the bottom up to it.
	for i := len(h.histogram) - 1; i >= 0; i-- {
		if h.histogram[i] == 0 {
			continue
		}
		for j := 0; j <= i; j++ {
			v := h.histogram[j]
			key := strconv.Itoa(int(h.from + (int64(j) * h.width)))
			cumm += v
			if j == i {
				m["+"] = cumm
			} else {
				m[key] = cumm
			}
		}
		break
	}
	return m
}
// Fullstats includes mean,variance,stddeviance in the Stats().
func (h *HistogramInt64) Fullstats() map[string]interface{} {
	histogram := make(map[string]interface{})
	for key, count := range h.Stats() {
		histogram[key] = count
	}
	return map[string]interface{}{
		"samples":     h.Samples(),
		"min":         h.Min(),
		"max":         h.Max(),
		"mean":        h.Mean(),
		"variance":    h.Variance(),
		"stddeviance": h.SD(),
		"histogram":   histogram,
	}
}
// Logstring return Fullstats as loggable string.
//
// Output is a JSON-shaped object: scalar stats sorted by key, followed by a
// "histogram" object whose numeric keys are sorted ascending with the "+"
// key last.
// NOTE(review): with zero samples Stats() is empty and the "+" entry prints
// as <nil> — confirm that is acceptable for the log consumer.
func (h *HistogramInt64) Logstring() string {
	stats, keys := h.Fullstats(), []string{}
	// everything except histogram
	for k := range stats {
		if k == "histogram" {
			continue
		}
		keys = append(keys, k)
	}
	sort.Strings(keys)
	ss := []string{}
	for _, key := range keys {
		ss = append(ss, fmt.Sprintf(`"%v": %v`, key, stats[key]))
	}
	// sort historgram
	hkeys := []int{}
	histogram := stats["histogram"].(map[string]interface{})
	for k := range histogram {
		if k == "+" {
			continue
		}
		n, _ := strconv.Atoi(k)
		hkeys = append(hkeys, n)
	}
	sort.Ints(hkeys)
	hs := []string{}
	for _, k := range hkeys {
		ks := strconv.Itoa(k)
		hs = append(hs, fmt.Sprintf(`"%v": %v`, ks, histogram[ks]))
	}
	hs = append(hs, fmt.Sprintf(`"%v": %v`, "+", histogram["+"]))
	s := "{" + strings.Join(hs, ",") + "}"
	ss = append(ss, fmt.Sprintf(`"histogram": %v`, s))
	return "{" + strings.Join(ss, ",") + "}"
}
package atlas
import (
"image"
"github.com/PieterD/crap/roguelike/game/atlas/aspect"
"github.com/PieterD/crap/roguelike/grid"
"github.com/PieterD/crap/roguelike/vision"
"github.com/PieterD/crap/roguelike/wallify"
"math/rand"
"time"
)
// Glyph is one renderable screen cell: a glyph code plus foreground and
// background colors.
type Glyph struct {
	Code int
	Fore grid.Color
	Back grid.Color
}
// Translate computes the top-left atlas coordinate of the viewport so that
// center sits as close to the middle of screen as possible without
// scrolling past the atlas edges. When the screen is larger than the atlas
// along an axis, the atlas is centered on that axis instead (negative
// offset).
func Translate(screen image.Rectangle, center image.Point, atlas image.Rectangle) image.Point {
	return image.Point{
		X: translateAxis(screen.Max.X, atlas.Max.X, center.X),
		Y: translateAxis(screen.Max.Y, atlas.Max.Y, center.Y),
	}
}

// translateAxis clamps a single axis of the viewport origin.
func translateAxis(screenMax, atlasMax, center int) int {
	if screenMax > atlasMax {
		return -(screenMax - atlasMax) / 2
	}
	origin := center - screenMax/2
	if origin < 0 {
		origin = 0
	}
	if origin >= atlasMax-screenMax {
		origin = atlasMax - screenMax
	}
	return origin
}
// Atlas is the game map: a bounds.Max.X by bounds.Max.Y grid of cells
// stored row-major in a flat slice. visible is a generation counter bumped
// on each Vision pass so per-cell visibility never needs clearing.
type Atlas struct {
	cells   []Cell
	bounds  image.Rectangle
	visible uint64
}

// cell returns a pointer to the cell at p. Out-of-bounds points yield a
// pointer to a throwaway zero Cell, so reads see defaults and writes are
// silently discarded.
func (atlas *Atlas) cell(p image.Point) *Cell {
	if !p.In(atlas.bounds) {
		return &Cell{}
	}
	return &atlas.cells[p.X+p.Y*atlas.bounds.Max.X]
}
// New builds the starting 100x60 atlas: every cell floored, the outer
// border walled, then cave generation applied on top.
func New() *Atlas {
	// NOTE(review): seeding the global RNG here is a construction side
	// effect, and rand.Seed is deprecated as of Go 1.20 — consider a local
	// rand.Rand instead.
	rand.Seed(time.Now().UnixNano())
	w := 100
	h := 60
	atlas := &Atlas{
		cells: make([]Cell, w*h),
		bounds: image.Rectangle{
			Min: image.Point{X: 0, Y: 0},
			Max: image.Point{X: w, Y: h},
		},
		// Start the visibility generation at 1 so the zero-valued cells are
		// not considered visible.
		visible: 1,
	}
	for x := 0; x < w; x++ {
		for y := 0; y < h; y++ {
			atlas.setFeature(x, y, aspect.Floor)
		}
	}
	for x := 0; x < w; x++ {
		atlas.setFeature(x, 0, aspect.Wall)
		atlas.setFeature(x, h-1, aspect.Wall)
	}
	for y := 0; y < h; y++ {
		atlas.setFeature(0, y, aspect.Wall)
		atlas.setFeature(w-1, y, aspect.Wall)
	}
	//GenTestlevel(atlas)
	GenCave(atlas)
	return atlas
}
// ExploreAll marks every cell of the atlas as seen (fully explored map).
func (atlas *Atlas) ExploreAll() {
	max := atlas.bounds.Max
	for x := 0; x < max.X; x++ {
		for y := 0; y < max.Y; y++ {
			atlas.cell(image.Point{X: x, Y: y}).seen = true
		}
	}
}
// Bounds returns the atlas rectangle (origin at 0,0).
func (atlas *Atlas) Bounds() image.Rectangle {
	return atlas.bounds
}

// GetFeature returns the terrain feature at pos (the zero Feature for
// out-of-bounds points).
func (atlas *Atlas) GetFeature(pos image.Point) aspect.Feature {
	return atlas.cell(pos).feature
}

// SetFeature sets the terrain feature at pos; out-of-bounds writes are
// silently dropped by cell.
func (atlas *Atlas) SetFeature(pos image.Point, feature aspect.Feature) {
	atlas.cell(pos).feature = feature
}

// setFeature is a convenience wrapper over SetFeature taking raw coordinates.
func (atlas *Atlas) setFeature(x, y int, ft aspect.Feature) {
	atlas.SetFeature(image.Point{X: x, Y: y}, ft)
}
// Glyph maps the cell at p to a drawable glyph. Unexplored cells render as
// blank; explored features select a code and color; explored but currently
// out-of-sight cells are dimmed to VeryDarkGray on black.
func (atlas *Atlas) Glyph(p image.Point) Glyph {
	cell := atlas.cell(p)
	glyph := Glyph{
		Code: 32, // space: unexplored or featureless
		Fore: grid.Black,
		Back: grid.Black,
	}
	if atlas.cell(p).seen {
		switch cell.feature {
		case aspect.Wall:
			glyph = Glyph{
				// Wall glyph choice is delegated to wallify, which inspects
				// the surroundings of p.
				Code: wallify.Wallify(atlas, p, wallify.SingleWall),
				Fore: grid.Gray,
				Back: grid.Black,
			}
		case aspect.Floor:
			glyph = Glyph{
				// Deterministic per-tile texture rune.
				Code: atlas.floorrune(p, floorRune),
				Fore: grid.DarkGray,
				Back: grid.Black,
			}
		case aspect.ClosedDoor:
			glyph = Glyph{
				Code: 43, // '+'
				Fore: grid.DarkRed,
				Back: grid.Black,
			}
		case aspect.OpenDoor:
			glyph = Glyph{
				Code: 47, // '/'
				Fore: grid.DarkRed,
				Back: grid.Black,
			}
		}
		if !atlas.IsVisible(p) {
			// Remembered but out of sight: dim everything except blanks.
			if glyph.Fore != grid.Black {
				glyph.Fore = grid.VeryDarkGray
			}
			glyph.Back = grid.Black
		}
	}
	return glyph
}
// IsPassable reports whether the feature at p can be walked through.
func (atlas *Atlas) IsPassable(p image.Point) bool { return atlas.cell(p).feature.Passable }

// IsTransparent reports whether the feature at p lets sight pass.
func (atlas *Atlas) IsTransparent(p image.Point) bool { return atlas.cell(p).feature.Transparent }

// SetVisible marks p as visible this frame and permanently as seen.
func (atlas *Atlas) SetVisible(p image.Point) {
    c := atlas.cell(p)
    c.visible = atlas.visible
    c.seen = true
}

// IsVisible reports whether p was marked visible in the current frame.
func (atlas *Atlas) IsVisible(p image.Point) bool { return atlas.cell(p).visible == atlas.visible }

// IsWallable reports whether a wall may be placed at p.
func (atlas *Atlas) IsWallable(p image.Point) bool { return atlas.cell(p).feature.Wallable }

// IsSeen reports whether p has ever been seen by the player.
func (atlas *Atlas) IsSeen(p image.Point) bool { return atlas.cell(p).seen }
// RandomFloor picks a uniformly random floor cell by rejection sampling,
// panicking if none is found within 1000 attempts.
func (atlas *Atlas) RandomFloor() image.Point {
    for try := 0; try < 1000; try++ {
        p := image.Point{
            X: rand.Intn(atlas.bounds.Max.X),
            Y: rand.Intn(atlas.bounds.Max.Y),
        }
        if atlas.cell(p).feature == aspect.Floor {
            return p
        }
    }
    panic("Couldn't find random floor after 1000 tries!")
}
// Vision recomputes the field of view from source. Incrementing the
// generation counter implicitly invalidates the previous frame's visibility
// marks without touching every cell (IsVisible compares against it).
func (atlas *Atlas) Vision(source image.Point) {
    atlas.visible++
    vision.ShadowCastPar(atlas, vision.EndlessRadius(), source)
}
//var floorRune = []int{44, 46, 96, 249, 250}
//var floorRune = []int{44, 46, 96, 249, 39}
//var floorRune = []int{44, 46, 96, 249, 39, 250, 250}
var floorRune = []int{250, 44, 250, 46, 250, 96, 250, 249, 250, 39, 250}
//var floorRune = []int{250}
func (atlas *Atlas) floorrune(p image.Point, runes []int) int {
x := uint64(p.X)
y := uint64(p.Y)
ui := ((x<<32)|y)*(x^y) + y - x
return runes[ui%uint64(len(runes))]
} | roguelike/game/atlas/atlas.go | 0.611382 | 0.449211 | atlas.go | starcoder |
package selectpeers
import (
"fmt"
"math"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/types"
)
// minValidators is a minimum number of validators needed in order to execute
// the selection algorithm. For less than this number, we connect to all
// validators.
const minValidators = 5

// dip6PeerSelector selects validators from a validator set based on the
// algorithm described in DIP-6
// https://github.com/dashpay/dips/blob/master/dip-0006.md
type dip6PeerSelector struct {
    // quorumHash salts the per-validator hashes, making the deterministic
    // ordering unique to this quorum.
    quorumHash bytes.HexBytes
}

// NewDIP6ValidatorSelector creates a new implementation of the DIP-6
// validator selection algorithm for the given quorum hash.
func NewDIP6ValidatorSelector(quorumHash bytes.HexBytes) ValidatorSelector {
    return &dip6PeerSelector{quorumHash: quorumHash}
}
// SelectValidators implements ValidtorSelector.
// SelectValidators selects some validators from `validatorSetMembers`, according to the algorithm
// described in DIP-6 https://github.com/dashpay/dips/blob/master/dip-0006.md
func (s *dip6PeerSelector) SelectValidators(
validatorSetMembers []*types.Validator,
me *types.Validator,
) ([]*types.Validator, error) {
if len(validatorSetMembers) < 2 {
return nil, fmt.Errorf("not enough validators: got %d, need 2", len(validatorSetMembers))
}
// Build the deterministic list of quorum members:
// 1. Retrieve the deterministic masternode list which is valid at quorumHeight
// 2. Calculate SHA256(proTxHash, quorumHash) for each entry in the list
// 3. Sort the resulting list by the calculated hashes
sortedValidators := newSortedValidatorList(validatorSetMembers, s.quorumHash)
// Loop through the list until the member finds itself in the list. The index at which it finds itself is called i.
meSortable := newSortableValidator(*me, s.quorumHash)
myIndex := sortedValidators.index(meSortable)
if myIndex < 0 {
return []*types.Validator{}, fmt.Errorf("current node is not a member of provided validator set")
}
// Fallback if we don't have enough validators, we connect to all of them
if sortedValidators.Len() < minValidators {
ret := make([]*types.Validator, 0, len(validatorSetMembers)-1)
// We connect to all validators
for index, val := range sortedValidators {
if index != myIndex {
ret = append(ret, val.Copy())
}
}
return ret, nil
}
// Calculate indexes (i+2^k)%n where k is in the range 0..floor(log2(n-1))-1
// and n is equal to the size of the list.
n := float64(sortedValidators.Len())
count := math.Floor(math.Log2(n-1.0)) - 1.0
ret := make([]*types.Validator, 0, int(count))
i := float64(myIndex)
for k := float64(0); k <= count; k++ {
index := int(math.Mod(i+math.Pow(2, k), n))
// Add addresses of masternodes at indexes calculated at previous step
// to the set of deterministic connections.
ret = append(ret, sortedValidators[index].Validator.Copy())
}
return ret, nil
} | dash/quorum/selectpeers/dip6.go | 0.855972 | 0.477067 | dip6.go | starcoder |
package pigo
import (
"bytes"
"encoding/binary"
"math"
"math/rand"
"sort"
"unsafe"
)
// Puploc contains all the information resulted from the pupil detection
// needed for accessing from a global scope.
type Puploc struct {
    Row      int     // row (y) of the detection center, in pixels
    Col      int     // column (x) of the detection center, in pixels
    Scale    float32 // size of the search region (see RunDetector's perturbations)
    Perturbs int     // number of random perturbations RunDetector averages over
}
// PuplocCascade is a general struct for storing
// the cascade tree values encoded into the binary file.
type PuplocCascade struct {
    stages    uint32    // number of boosting stages in the cascade
    scales    float32   // scale multiplier applied after each stage
    trees     uint32    // number of trees per stage
    treeDepth uint32    // depth of each binary tree
    treeCodes []int8    // packed node test codes for all trees
    treePreds []float32 // packed leaf predictions for all trees
}

// NewPuplocCascade initializes the PuplocCascade constructor method.
func NewPuplocCascade() *PuplocCascade {
    return &PuplocCascade{}
}
// UnpackCascade unpacks the pupil localization cascade file.
//
// Binary layout (little-endian): uint32 stage count, float32 per-stage scale
// multiplier, uint32 trees per stage, uint32 tree depth, followed by the
// packed tree node codes and leaf predictions for every stage/tree.
//
// NOTE(review): dataView is written to throughout but its contents are never
// read back; the writes look vestigial — confirm before removing. Note also
// that the first 4-byte write happens at pos 0 *before* stages is read from
// the same offset, so no header bytes are actually skipped.
func (plc *PuplocCascade) UnpackCascade(packet []byte) (*PuplocCascade, error) {
    var (
        stages    uint32
        scales    float32
        trees     uint32
        treeDepth uint32
        treeCodes []int8
        treePreds []float32
    )
    pos := 0

    buff := make([]byte, 4)
    dataView := bytes.NewBuffer(buff)

    // Read the depth (size) of each tree and write it into the buffer array.
    _, err := dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
    if err != nil {
        return nil, err
    }

    if dataView.Len() > 0 {
        // Get the number of stages as 32-bit uint and write it into the buffer array.
        stages = binary.LittleEndian.Uint32(packet[pos:])
        _, err := dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
        if err != nil {
            return nil, err
        }
        pos += 4

        // Obtain the scale multiplier (applied after each stage) and write it into the buffer array.
        u32scales := binary.LittleEndian.Uint32(packet[pos:])
        // Reinterpret the uint32 bits as a float32.
        scales = *(*float32)(unsafe.Pointer(&u32scales))
        _, err = dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
        if err != nil {
            return nil, err
        }
        pos += 4

        // Obtain the number of trees per stage and write it into the buffer array.
        trees = binary.LittleEndian.Uint32(packet[pos:])
        _, err = dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
        if err != nil {
            return nil, err
        }
        pos += 4

        // Obtain the depth of each tree and write it into the buffer array.
        treeDepth = binary.LittleEndian.Uint32(packet[pos:])
        _, err = dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
        if err != nil {
            return nil, err
        }
        pos += 4

        // Traverse all the stages of the binary tree
        for s := 0; s < int(stages); s++ {
            // Traverse the branches of each stage
            for t := 0; t < int(trees); t++ {
                // depth = 2^treeDepth nodes; each non-leaf node carries 4 code bytes.
                depth := int(math.Pow(2, float64(treeDepth)))
                code := packet[pos : pos+4*depth-4]
                // Convert unsigned bytecodes to signed ones.
                i8code := *(*[]int8)(unsafe.Pointer(&code))
                treeCodes = append(treeCodes, i8code...)

                pos += 4*depth - 4

                // Read predictions from the tree's leaf nodes (two float32 each).
                for i := 0; i < depth; i++ {
                    for l := 0; l < 2; l++ {
                        _, err := dataView.Write([]byte{packet[pos+0], packet[pos+1], packet[pos+2], packet[pos+3]})
                        if err != nil {
                            return nil, err
                        }
                        u32pred := binary.LittleEndian.Uint32(packet[pos:])
                        // Reinterpret the uint32 bits as a float32.
                        f32pred := *(*float32)(unsafe.Pointer(&u32pred))
                        treePreds = append(treePreds, f32pred)
                        pos += 4
                    }
                }
            }
        }
    }
    return &PuplocCascade{
        stages:    stages,
        scales:    scales,
        trees:     trees,
        treeDepth: treeDepth,
        treeCodes: treeCodes,
        treePreds: treePreds,
    }, nil
}
// classifyRegion runs the cascade over the region centered at (r, c) with
// scale s and returns the refined {row, col, scale}. Coordinates are handled
// in 8-bit fixed point (the *256 / >>8 pairs). When flipV is true, the column
// offsets in the tree nodes are mirrored, which is used when running the
// landmark detector over the right side of a detected face.
func (plc *PuplocCascade) classifyRegion(r, c, s float32, nrows, ncols int, pixels []uint8, dim int, flipV bool) []float32 {
    var c1, c2 int
    root := 0
    // Number of nodes per tree (2^depth).
    treeDepth := int(math.Pow(2, float64(plc.treeDepth)))

    for i := 0; i < int(plc.stages); i++ {
        // Accumulated row/column displacement predicted by this stage.
        var dr, dc float32 = 0.0, 0.0
        for j := 0; j < int(plc.trees); j++ {
            idx := 0
            // Walk the binary tree: at each level compare two pixels chosen
            // by the node's offset codes, clamped to the image bounds.
            for k := 0; k < int(plc.treeDepth); k++ {
                r1 := min(nrows-1, max(0, (256*int(r)+int(plc.treeCodes[root+4*idx+0])*int(round(float64(s))))>>8))
                r2 := min(nrows-1, max(0, (256*int(r)+int(plc.treeCodes[root+4*idx+2])*int(round(float64(s))))>>8))
                // flipV means that we wish to flip the column coordinates sign in the tree nodes.
                // This is required at running the facial landmark detector over the right side of the detected face.
                if flipV {
                    c1 = min(ncols-1, max(0, (256*int(c)+int(-plc.treeCodes[root+4*idx+1])*int(round(float64(s))))>>8))
                    c2 = min(ncols-1, max(0, (256*int(c)+int(-plc.treeCodes[root+4*idx+3])*int(round(float64(s))))>>8))
                } else {
                    c1 = min(ncols-1, max(0, (256*int(c)+int(plc.treeCodes[root+4*idx+1])*int(round(float64(s))))>>8))
                    c2 = min(ncols-1, max(0, (256*int(c)+int(plc.treeCodes[root+4*idx+3])*int(round(float64(s))))>>8))
                }
                bintest := func(p1, p2 uint8) uint8 {
                    if p1 > p2 {
                        return 1
                    }
                    return 0
                }
                // Descend left/right based on the pixel comparison.
                idx = 2*idx + 1 + int(bintest(pixels[r1*dim+c1], pixels[r2*dim+c2]))
            }
            // Map the reached leaf into the flat prediction table.
            lutIdx := 2 * (int(plc.trees)*treeDepth*i + treeDepth*j + idx - (treeDepth - 1))

            dr += plc.treePreds[lutIdx+0]
            if flipV {
                dc += -plc.treePreds[lutIdx+1]
            } else {
                dc += plc.treePreds[lutIdx+1]
            }
            root += 4*treeDepth - 4
        }
        // Apply the stage displacement and shrink the search scale.
        r += dr * s
        c += dc * s
        s *= plc.scales
    }
    return []float32{r, c, s}
}
// classifyRotatedRegion is the rotated-image counterpart of classifyRegion:
// node pixel offsets are rotated by angle a (expressed as a fraction of a
// full turn, quantized via 32-entry sin/cos tables scaled by 256) before
// being applied, using 16-bit fixed point for the coordinate math.
//
// NOTE(review): the bintest here returns 1 when px1 <= px2, the opposite
// comparison direction of classifyRegion's bintest — confirm this asymmetry
// is intended.
func (plc *PuplocCascade) classifyRotatedRegion(r, c, s float32, a float64, nrows, ncols int, pixels []uint8, dim int, flipV bool) []float32 {
    var row1, col1, row2, col2 int
    root := 0
    // Number of nodes per tree (2^depth).
    treeDepth := int(math.Pow(2, float64(plc.treeDepth)))

    // Quantized cos/sin lookup tables, scaled by 256.
    qCosTable := []float32{256, 251, 236, 212, 181, 142, 97, 49, 0, -49, -97, -142, -181, -212, -236, -251, -256, -251, -236, -212, -181, -142, -97, -49, 0, 49, 97, 142, 181, 212, 236, 251, 256}
    qSinTable := []float32{0, 49, 97, 142, 181, 212, 236, 251, 256, 251, 236, 212, 181, 142, 97, 49, 0, -49, -97, -142, -181, -212, -236, -251, -256, -251, -236, -212, -181, -142, -97, -49, 0}

    qsin := s * qSinTable[int(32.0*a)] //s*(256.0*math.Sin(2*math.Pi*a))
    qcos := s * qCosTable[int(32.0*a)] //s*(256.0*math.Cos(2*math.Pi*a))

    for i := 0; i < int(plc.stages); i++ {
        // Accumulated row/column displacement predicted by this stage.
        var dr, dc float32 = 0.0, 0.0
        for j := 0; j < int(plc.trees); j++ {
            idx := 0
            for k := 0; k < int(plc.treeDepth); k++ {
                row1 = int(plc.treeCodes[root+4*idx+0])
                row2 = int(plc.treeCodes[root+4*idx+2])
                // flipV means that we wish to flip the column coordinates sign in the tree nodes.
                // This is required at running the facial landmark detector over the right side of the detected face.
                if flipV {
                    col1 = int(-plc.treeCodes[root+4*idx+1])
                    col2 = int(-plc.treeCodes[root+4*idx+3])
                } else {
                    col1 = int(plc.treeCodes[root+4*idx+1])
                    col2 = int(plc.treeCodes[root+4*idx+3])
                }

                // Rotate each offset and clamp the resulting coordinates.
                r1 := min(nrows-1, max(0, 65536*int(r)+int(qcos)*row1-int(qsin)*col1)>>16)
                c1 := min(ncols-1, max(0, 65536*int(c)+int(qsin)*row1+int(qcos)*col1)>>16)
                r2 := min(nrows-1, max(0, 65536*int(r)+int(qcos)*row2-int(qsin)*col2)>>16)
                c2 := min(ncols-1, max(0, 65536*int(c)+int(qsin)*row2+int(qcos)*col2)>>16)

                bintest := func(px1, px2 uint8) int {
                    if px1 <= px2 {
                        return 1
                    }
                    return 0
                }
                // Descend left/right based on the pixel comparison.
                idx = 2*idx + 1 + bintest(pixels[r1*dim+c1], pixels[r2*dim+c2])
            }
            // Map the reached leaf into the flat prediction table.
            lutIdx := 2 * (int(plc.trees)*treeDepth*i + treeDepth*j + idx - (treeDepth - 1))

            dr += plc.treePreds[lutIdx+0]
            if flipV {
                dc += -plc.treePreds[lutIdx+1]
            } else {
                dc += plc.treePreds[lutIdx+1]
            }
            root += 4*treeDepth - 4
        }
        // Apply the stage displacement and shrink the search scale.
        r += dr * s
        c += dc * s
        s *= plc.scales
    }
    return []float32{r, c, s}
}
// RunDetector runs the pupil localization function.
//
// It classifies pl.Perturbs randomly jittered copies of the initial region
// (position jittered by up to ±7.5% of the scale, scale by ±7.5%) and returns
// the median of the per-perturbation results as the final detection. angle
// in (0, 1] selects the rotated classifier; flipV mirrors column offsets for
// the right side of a face.
func (plc *PuplocCascade) RunDetector(pl Puploc, img ImageParams, angle float64, flipV bool) *Puploc {
    rows, cols, scale := []float32{}, []float32{}, []float32{}
    res := []float32{}

    for i := 0; i < pl.Perturbs; i++ {
        // Random perturbation of the starting region.
        row := float32(pl.Row) + float32(pl.Scale)*0.15*(0.5-rand.Float32())
        col := float32(pl.Col) + float32(pl.Scale)*0.15*(0.5-rand.Float32())
        sc := float32(pl.Scale) * (0.925 + 0.15*rand.Float32())

        if angle > 0.0 {
            if angle > 1.0 {
                angle = 1.0
            }
            res = plc.classifyRotatedRegion(row, col, sc, angle, img.Rows, img.Cols, img.Pixels, img.Dim, flipV)
        } else {
            res = plc.classifyRegion(row, col, sc, img.Rows, img.Cols, img.Pixels, img.Dim, flipV)
        }

        rows = append(rows, res[0])
        cols = append(cols, res[1])
        scale = append(scale, res[2])
    }

    // Sorting the perturbations in ascendent order
    sort.Sort(plocSort(rows))
    sort.Sort(plocSort(cols))
    sort.Sort(plocSort(scale))

    // Get the median value of the sorted perturbation results
    return &Puploc{
        Row:   int(rows[int(round(float64(pl.Perturbs)/2))]),
        Col:   int(cols[int(round(float64(pl.Perturbs)/2))]),
        Scale: scale[int(round(float64(pl.Perturbs)/2))],
    }
}
// Implement custom sorting function on detection values.
type plocSort []float32
func (q plocSort) Len() int { return len(q) }
func (q plocSort) Less(i, j int) bool { return q[i] < q[j] }
func (q plocSort) Swap(i, j int) { q[i], q[j] = q[j], q[i] } | core/puploc.go | 0.550124 | 0.508544 | puploc.go | starcoder |
package raycast
import (
"image"
"image/color"
"image/draw"
"math"
"github.com/faiface/pixel"
)
// Player is the camera
type Player struct {
    Position  pixel.Vec // world position in map-cell units
    Direction pixel.Vec // facing direction
    Plane     pixel.Vec // camera plane; rays fan along it across the screen (see Cast)
}
// NewPlayer assembles a Player from raw position, direction and camera
// plane components.
func NewPlayer(posX, posY, dirX, dirY, planeX, planeY float64) Player {
    var p Player
    p.Position = pixel.V(posX, posY)
    p.Direction = pixel.V(dirX, dirY)
    p.Plane = pixel.V(planeX, planeY)
    return p
}
// Turn rotates the player's facing direction and camera plane by s radians.
//
// Bug fix: the previous implementation assigned the new Y component first
// and then computed the new X component from the already-rotated Y, which
// distorts the rotation (and gradually deforms the camera). Both components
// must be computed from the pre-rotation values, so they are saved first.
func (p *Player) Turn(s float64) {
    sin, cos := math.Sin(s), math.Cos(s)

    oldDirX, oldDirY := p.Direction.X, p.Direction.Y
    p.Direction.X = oldDirX*cos - oldDirY*sin
    p.Direction.Y = oldDirX*sin + oldDirY*cos

    oldPlaneX, oldPlaneY := p.Plane.X, p.Plane.Y
    p.Plane.X = oldPlaneX*cos - oldPlaneY*sin
    p.Plane.Y = oldPlaneX*sin + oldPlaneY*cos
}
// MoveFront advances the player s units along its facing direction. X and Y
// are collision-checked against grid independently, so the player slides
// along walls instead of stopping dead.
func (p *Player) MoveFront(s float64, grid [][]string) {
    nextX := p.Position.X + p.Direction.X*s
    nextY := p.Position.Y + p.Direction.Y*s
    if grid[int(nextX)][int(p.Position.Y)] == "0" {
        p.Position.X = nextX
    }
    if grid[int(p.Position.X)][int(nextY)] == "0" {
        p.Position.Y = nextY
    }
}
// MoveBack retreats the player s units opposite its facing direction, with
// the same per-axis collision checks as MoveFront.
func (p *Player) MoveBack(s float64, grid [][]string) {
    nextX := p.Position.X - p.Direction.X*s
    nextY := p.Position.Y - p.Direction.Y*s
    if grid[int(nextX)][int(p.Position.Y)] == "0" {
        p.Position.X = nextX
    }
    if grid[int(p.Position.X)][int(nextY)] == "0" {
        p.Position.Y = nextY
    }
}
// MoveSideWays strafes the player s units against the camera plane (the sign
// of s picks the side), with per-axis collision checks against grid.
func (p *Player) MoveSideWays(s float64, grid [][]string) {
    nextX := p.Position.X - p.Plane.X*s
    nextY := p.Position.Y - p.Plane.Y*s
    if grid[int(nextX)][int(p.Position.Y)] == "0" {
        p.Position.X = nextX
    }
    if grid[int(p.Position.X)][int(nextY)] == "0" {
        p.Position.Y = nextY
    }
}
// Cast renders one frame by casting a ray per screen column (classic DDA
// grid raycasting) and returns the result as a sprite.
//
// NOTE(review): mapX/mapY are kept as the player's fractional position
// rather than being truncated to integer cell indices, so the initial side
// distances are zero; the textbook algorithm truncates first — confirm this
// variation is intentional.
func (p *Player) Cast(grid [][]string) *pixel.Sprite {
    img := image.NewRGBA(image.Rect(0, 0, int(WindowWidth), int(WindowHeight)))
    ww := int(WindowWidth)
    wh := int(WindowHeight)
    // Backdrop: white ceiling on the top half, dark gray floor below.
    draw.Draw(img, image.Rect(0, 0, ww, wh/2), &image.Uniform{color.RGBA{255, 255, 255, 255}}, image.Point{}, draw.Src)
    draw.Draw(img, image.Rect(0, wh/2, ww, wh), &image.Uniform{color.RGBA{64, 64, 64, 255}}, image.Point{}, draw.Src)
    for i := 0.0; i < WindowWidth; i++ {
        // cameraX runs from -1 (left edge) to +1 (right edge).
        cameraX := (2 * i / WindowWidth) - 1
        stepX := 0.0
        sideDistX := 0.0
        mapX := p.Position.X
        rayDirX := p.Direction.X + p.Plane.X*cameraX
        deltaDistX := math.Abs(1 / rayDirX)
        if rayDirX < 0 {
            stepX = -1
            sideDistX = (p.Position.X - mapX) * deltaDistX
        } else {
            stepX = 1
            sideDistX = (mapX + 1 - p.Position.X) * deltaDistX
        }
        stepY := 0.0
        sideDistY := 0.0
        mapY := p.Position.Y
        rayDirY := p.Direction.Y + p.Plane.Y*cameraX
        deltaDistY := math.Abs(1 / rayDirY)
        if rayDirY < 0 {
            stepY = -1
            sideDistY = (p.Position.Y - mapY) * deltaDistY
        } else {
            stepY = 1
            sideDistY = (mapY + 1 - p.Position.Y) * deltaDistY
        }
        hit := 0.0
        side := 0.0
        // DDA: step cell by cell along the ray until a non-"0" cell is hit.
        for hit == 0 {
            if sideDistX < sideDistY {
                sideDistX += deltaDistX
                mapX += stepX
                side = 0
            } else {
                sideDistY += deltaDistY
                mapY += stepY
                side = 1
            }
            if grid[int(mapX)][int(mapY)] != "0" {
                hit = 1
            }
        }
        // Perpendicular distance avoids the fisheye effect.
        perpWallDist := 0.0
        if side == 0 {
            perpWallDist = (mapX - p.Position.X + (1-stepX)/2) / rayDirX
        } else {
            perpWallDist = (mapY - p.Position.Y + (1-stepY)/2) / rayDirY
        }
        // Wall slice height is inversely proportional to distance; clamp to
        // the screen.
        lineHeight := WindowHeight / perpWallDist
        drawStart := -lineHeight/2 + WindowHeight/2
        if drawStart < 0 {
            drawStart = 0
        }
        drawEnd := lineHeight/2 + WindowHeight/2
        if drawEnd >= WindowHeight {
            drawEnd = WindowHeight - 1
        }
        // Local `color` shadows the image/color package from here on.
        color := Colors[grid[int(mapX)][int(mapY)]]
        // Y-side hits are darkened for a simple shading effect.
        if side == 1 {
            color.R = color.R / 2
            color.G = color.G / 2
            color.B = color.B / 2
        }
        for j := drawStart; j < drawEnd-1; j++ {
            img.Set(int(i), int(j), color)
        }
    }
    pic := pixel.PictureDataFromImage(img)
    return pixel.NewSprite(pic, pic.Bounds())
}
package functions
import (
"reflect"
)
// functionType enumerates the function shapes recognized by IsValid.
type functionType int

const (
    // Predicate is any function taking one argument and returning a bool.
    // f: X -> bool
    Predicate functionType = iota
    // Consumer is any function taking one argument and returning none.
    // Use this to produce side effects.
    Consumer
    // Map is any function taking one argument and returning another.
    // f: X -> Y
    Map
    // Supplier is any function taking no argument and returning one.
    // f: Ø -> X
    Supplier
)
// CallFunction calls function with argument via reflection and returns the
// first result as a reflect.Value. Panics if the call is not possible.
func CallFunction(function interface{}, argument interface{}) reflect.Value {
    results := reflect.ValueOf(function).Call([]reflect.Value{reflect.ValueOf(argument)})
    return results[0]
}
// Consume calls consumer with argument for its side effects, discarding any
// results. Panics if the call is not possible.
func Consume(consumer interface{}, argument interface{}) {
    args := []reflect.Value{reflect.ValueOf(argument)}
    reflect.ValueOf(consumer).Call(args)
}
// CallSupplier invokes the zero-argument supplier and returns its result.
func CallSupplier(supplier interface{}) interface{} {
    out := reflect.ValueOf(supplier).Call(nil)
    return out[0].Interface()
}
// TakesArgument reports whether function is callable with argument as its
// first parameter.
func TakesArgument(function interface{}, argument interface{}) bool {
    fn := reflect.TypeOf(function)
    arg := reflect.TypeOf(argument)
    return isFunction(fn) && arg.AssignableTo(fn.In(0))
}
// IsValid reports whether function conforms to the shape named by
// functionType (Predicate, Consumer, Map or Supplier).
func IsValid(functionType functionType, function interface{}) bool {
    t := reflect.ValueOf(function).Type()
    switch functionType {
    case Predicate:
        return isPredicate(t)
    case Consumer:
        return isConsumer(t)
    case Map:
        return isMap(t)
    case Supplier:
        return isSupplier(t)
    }
    return false
}
func isSupplier(funcType reflect.Type) bool {
return isFunction(funcType) &&
funcType.NumIn() == 0 && funcType.NumOut() == 1
}
func isPredicate(funcType reflect.Type) bool {
return isFunction(funcType) &&
isOneToOneFunction(funcType) &&
funcType.Out(0).Kind() == reflect.Bool
}
func isConsumer(consumer reflect.Type) bool {
return isFunction(consumer) &&
consumer.NumIn() == 1 && consumer.NumOut() == 0
}
func isMap(funcType reflect.Type) bool {
return isFunction(funcType) && isOneToOneFunction(funcType)
}
func isFunction(funcType reflect.Type) bool {
a := funcType.Kind() == reflect.Func
return a
}
func isOneToOneFunction(funcType reflect.Type) bool {
return funcType.NumIn() == 1 && funcType.NumOut() == 1
} | functions/functions.go | 0.678433 | 0.59561 | functions.go | starcoder |
package content
import (
"github.com/nboughton/go-roll"
"github.com/nboughton/swnt/content/format"
"github.com/nboughton/swnt/content/table"
)
// Encounter represents an encounter
type Encounter struct {
    Type   string     // "Urban" or "Wilderness"
    Fields [][]string // rolled table rows, rendered by Format
}
// NewEncounter rolls a fresh encounter, drawing from the wilderness table
// when wilderness is true and from the urban table otherwise.
func NewEncounter(wilderness bool) Encounter {
    if wilderness {
        return Encounter{Type: "Wilderness", Fields: wildernessEncounterTable.Roll()}
    }
    return Encounter{Type: "Urban", Fields: urbanEncounterTable.Roll()}
}

// Format renders e as output type t.
func (e Encounter) Format(t format.OutputType) string {
    return format.Table(t, []string{e.Type + " Encounter", ""}, e.Fields)
}

// String renders e as plain text.
func (e Encounter) String() string {
    return e.Format(format.TEXT)
}
// urbanEncounterTable holds the OneRoll tables (d4/d6/d8/d10/d12/d20) for
// rolling quick urban encounters.
var urbanEncounterTable = table.OneRoll{
    D4: roll.List{
        Name: "What's the Conflict About?",
        Items: []string{
            "Money, extortion, payment due, debts",
            "Respect, submission to social authority",
            "Grudges, ethnic resentment, gang payback",
            "Politics, religion, or other ideology",
        },
    },
    D6: roll.List{
        Name: "General Venue of the Event",
        Items: []string{
            "In the middle of the street",
            "In a public plaza",
            "Down a side alley",
            "Inside a local business",
            "Next to or in a public park",
            "At a mass-transit station",
        },
    },
    D8: roll.List{
        Name: "Why are the PCs Involved?",
        Items: []string{
            "A sympathetic participant appeals to them",
            "Ways around it are all dangerous/blocked",
            "It happens immediately around them",
            "A valuable thing looks snatchable amid it",
            "A participant offers a reward for help",
            "Someone mistakenly involves the PCs in it",
            "The seeming way out just leads deeper in",
            "Responsibility is somehow pinned on them",
        },
    },
    D10: roll.List{
        Name: "What's the Nature of the Event?",
        Items: []string{
            "A parade or festival is being disrupted",
            "Innocents are being assaulted",
            "An establishment is being robbed",
            "A disturbance over local politics happens",
            "Someone is being blamed for something",
            "Fires or building collapses are happening",
            "A medical emergency is happening",
            "Someone’s trying to cheat the PCs",
            "A vehicle accident is happening",
            "A religious ceremony is being disrupted",
        },
    },
    D12: roll.List{
        Name: "What Antagonists are Involved?",
        Items: []string{
            "A local bully and their thugs",
            "A ruthless political boss and their zealots",
            "Violent criminals",
            "Religious fanatics",
            "A blisteringly obnoxious offworlder",
            "Corrupt or over-strict government official",
            "A mob of intoxicated locals",
            "A ranting demagogue and their followers",
            "A stupidly bull-headed local grandee",
            "A very capable assassin or strong-arm",
            "A self-centered local scion of power",
            "A confused foreigner or backwoodsman",
        },
    },
    D20: roll.List{
        Name: "Relevant Urban Features",
        Items: []string{
            "Heavy traffic running through the place",
            "Music blaring at deafening volumes",
            "Two groups present that detest each other",
            "Large delivery taking place right there",
            "Swarm of schoolkids or feral youth",
            "Insistent soapbox preacher here",
            "Several pickpockets working the crowd",
            "A kiosk is tipping over and spilling things",
            "Streetlights are out or visibility is low",
            "A cop patrol is here and reluctant to act",
            "PC-hostile reporters are recording here",
            "Someone’s trying to sell something to PCs",
            "Feral dogs or other animals crowd here",
            "Unrelated activists are protesting here",
            "Street kids are trying to steal from the PCs",
            "GPS maps are dangerously wrong here",
            "Downed power lines are a danger here",
            "Numerous open manholes and utility holes",
            "The street’s blockaded by something",
            "Crowds so thick one can barely move",
        },
    },
}

// wildernessEncounterTable holds the OneRoll tables (d4/d6/d8/d10/d12/d20)
// for generating wilderness encounters.
var wildernessEncounterTable = table.OneRoll{
    D4: roll.List{
        Name: "Initial Encounter Range",
        Items: []string{
            "Visible from a long distance away",
            "Noticed 1d4 hundred meters away",
            "Noticed only within 1d6 x 10 meters",
            "Noticed only when adjacent to the event",
        },
    },
    D6: roll.List{
        Name: "Weather and Lighting",
        Items: []string{
            "Takes place in daylight and clear weather",
            "Daylight, but fog, mist, rain or the like",
            "Daylight, but harsh seasonal weather",
            "Night encounter, but clear weather",
            "Night, with rain or other obscuring effects",
            "Night, with terrible weather and wind",
        },
    },
    D8: roll.List{
        Name: "Basic Nature of the Encounter",
        Items: []string{
            "Attack by pack of hostiles",
            "Ambush by single lone hostile",
            "Meet people who don’t want to be met",
            "Encounter people in need of aid",
            "Encounter hostile creatures",
            "Nearby feature is somehow dangerous",
            "Nearby feature promises useful loot",
            "Meet hostiles that aren’t immediately so",
        },
    },
    D10: roll.List{
        Name: "Types of Friendly Creatures",
        Items: []string{
            "Affable but reclusive hermit",
            "Local herd animal let loose to graze",
            "Government ranger or circuit judge",
            "Curious local animal",
            "Remote homesteader and family",
            "Working trapper or hunter",
            "Back-country villager or native",
            "Hiker or wilderness tourist",
            "Religious recluse or holy person",
            "Impoverished social exile",
        },
    },
    D12: roll.List{
        Name: "Types of Hostile Creatures",
        Items: []string{
            "Bandits in their wilderness hideout",
            "Dangerous locals looking for easy marks",
            "Rabid or diseased large predator",
            "Pack of hungry hunting beasts",
            "Herd of potentially dangerous prey animals",
            "Swarm of dangerous vermin",
            "Criminal seeking to evade the law",
            "Brutal local landowner and their men",
            "Crazed hermit seeking enforced solitude",
            "Friendly-seeming guide into lethal danger",
            "Harmless-looking but dangerous beast",
            "Confidence man seeking to gull the PCs",
        },
    },
    D20: roll.List{
        Name: "Specific Nearby Feature of Relevance",
        Items: []string{
            "Overgrown homestead",
            "Stream prone to flash-flooding",
            "Narrow bridge or beam over deep cleft",
            "Box canyon with steep sides",
            "Unstable hillside that slides if disturbed",
            "Long-lost crash site of a gravflyer",
            "Once-inhabited cave or tunnel",
            "Steep and dangerous cliff",
            "Quicksand-laden swamp or dust pit",
            "Ruins of a ghost town or lost hamlet",
            "Hunting cabin with necessities",
            "Ill-tended graveyard of a lost family stead",
            "Narrow pass that’s easily blocked",
            "Dilapidated resort building",
            "Remote government monitoring outpost",
            "Illicit substance farm or processing center",
            "Old and forgotten battleground",
            "Zone overrun by dangerous plants",
            "Thick growth that lights up at a spark",
            "Abandoned vehicle",
        },
    },
}
package mlp3
import (
"encoding/json"
"errors"
"fmt"
"github.com/r9y9/nnet"
"math/rand"
"os"
"time"
)
const (
    // Bias is the constant activation fed into the bias unit appended to
    // the input and hidden layers.
    Bias = 1.0
)

// NeuralNetwork represents a Feed-forward Neural Network with one hidden
// layer.
type NeuralNetwork struct {
    OutputLayer  []float64      // most recent output activations
    HiddenLayer  []float64      // hidden activations; the last element is the bias unit
    InputLayer   []float64      // input values plus a trailing bias unit
    OutputWeight [][]float64    // [hidden][output] weights
    HiddenWeight [][]float64    // [input+1][hidden] weights
    Option       TrainingOption // hyper-parameters set by Train
}

// TrainingOption bundles the hyper-parameters used when training the network.
type TrainingOption struct {
    LearningRate  float64 // SGD step size; must be non-zero
    Epoches       int     // number of SGD updates; must be > 0
    MiniBatchSize int     // not referenced by the training code in this file
    Monitoring    bool    // when true, print the objective every epoch
}
// Load reads a JSON network dump from filename and returns the restored
// network instance.
func Load(filename string) (*NeuralNetwork, error) {
    file, err := os.Open(filename)
    if err != nil {
        return nil, err
    }
    defer file.Close()

    net := new(NeuralNetwork)
    if err := json.NewDecoder(file).Decode(net); err != nil {
        return nil, err
    }
    return net, nil
}
// NewNeuralNetwork builds a network with the given number of input, hidden
// and output units, and randomly initializes its weights.
func NewNeuralNetwork(numInputUnits,
    numHiddenUnits, numOutputUnits int) *NeuralNetwork {
    rand.Seed(time.Now().UnixNano())

    net := &NeuralNetwork{
        // Layers: the input layer gets one extra slot for the bias unit.
        InputLayer:  make([]float64, numInputUnits+1),
        HiddenLayer: make([]float64, numHiddenUnits),
        OutputLayer: make([]float64, numOutputUnits),
        // Weights
        OutputWeight: nnet.MakeMatrix(numHiddenUnits, numOutputUnits),
        HiddenWeight: nnet.MakeMatrix(numInputUnits+1, numHiddenUnits),
    }
    net.InitParam()
    return net
}
// Dump writes the network parameters to filename in JSON format.
func (net *NeuralNetwork) Dump(filename string) error {
    return nnet.DumpAsJson(filename, net)
}

// InitParam fills both weight matrices with uniform noise in [-0.5, 0.5).
func (net *NeuralNetwork) InitParam() {
    for _, row := range net.HiddenWeight {
        for j := range row {
            row[j] = rand.Float64() - 0.5
        }
    }
    for _, row := range net.OutputWeight {
        for j := range row {
            row[j] = rand.Float64() - 0.5
        }
    }
}
// Forward runs one forward pass over the network and returns (and stores)
// the output activations. Panics if input does not match the input layer
// size (minus the bias unit).
func (net *NeuralNetwork) Forward(input []float64) []float64 {
    output := make([]float64, len(net.OutputLayer))
    if len(input)+1 != len(net.InputLayer) {
        panic("Dimention doesn't match: The number units of input layer")
    }

    // Load the input and pin the trailing bias unit.
    copy(net.InputLayer, input)
    net.InputLayer[len(net.InputLayer)-1] = Bias

    // Input -> hidden; the last hidden unit is reserved for the bias.
    for i := 0; i < len(net.HiddenLayer)-1; i++ {
        acc := 0.0
        for j := range net.InputLayer {
            acc += net.HiddenWeight[j][i] * net.InputLayer[j]
        }
        net.HiddenLayer[i] = nnet.Sigmoid(acc)
    }
    net.HiddenLayer[len(net.HiddenLayer)-1] = Bias

    // Hidden -> output.
    for i := 0; i < len(net.OutputLayer); i++ {
        acc := 0.0
        for j := range net.HiddenLayer {
            acc += net.OutputWeight[j][i] * net.HiddenLayer[j]
        }
        output[i] = nnet.Sigmoid(acc)
    }
    net.OutputLayer = output
    return output
}
// ComputeDelta computes the backpropagation error terms for the output and
// hidden layers given a prediction and its target.
func (net *NeuralNetwork) ComputeDelta(predicted,
    target []float64) ([]float64, []float64) {
    outputDelta := make([]float64, len(net.OutputLayer))
    hiddenDelta := make([]float64, len(net.HiddenLayer))

    // Output delta: (prediction - target) scaled by the sigmoid derivative.
    for i := range net.OutputLayer {
        outputDelta[i] = (predicted[i] - target[i]) *
            nnet.DSigmoid(predicted[i])
    }

    // Hidden delta: back-propagate output deltas through the output weights.
    for i := range net.HiddenLayer {
        acc := 0.0
        for j := range net.OutputLayer {
            acc += net.OutputWeight[i][j] * outputDelta[j]
        }
        hiddenDelta[i] = acc * nnet.DSigmoid(net.HiddenLayer[i])
    }

    return outputDelta, hiddenDelta
}
// Feedback performs one backpropagation step, updating both weight matrices
// by gradient descent with the configured learning rate.
func (net *NeuralNetwork) Feedback(predicted, target []float64) {
    outputDelta, hiddenDelta := net.ComputeDelta(predicted, target)
    lr := net.Option.LearningRate

    // Hidden -> output weights.
    for i := range net.OutputLayer {
        for j := range net.HiddenLayer {
            net.OutputWeight[j][i] -= lr * outputDelta[i] * net.HiddenLayer[j]
        }
    }

    // Input -> hidden weights.
    for i := range net.HiddenLayer {
        for j := range net.InputLayer {
            net.HiddenWeight[j][i] -= lr * hiddenDelta[i] * net.InputLayer[j]
        }
    }
}
// Objective returns the half sum-of-squares error between input (a
// prediction) and target.
func (net *NeuralNetwork) Objective(input, target []float64) float64 {
    sum := 0.0
    for i, t := range target {
        diff := input[i] - t
        sum += diff * diff
    }
    return 0.5 * sum
}

// ObjectiveForAllData returns the mean objective over all sample/target pairs.
func (net *NeuralNetwork) ObjectiveForAllData(input,
    target [][]float64) float64 {
    total := 0.0
    for i, x := range input {
        total += net.Objective(x, target[i])
    }
    return total / float64(len(input))
}
// ParseTrainingOption stores option on the network and validates the
// required hyper-parameters.
func (net *NeuralNetwork) ParseTrainingOption(option TrainingOption) error {
    net.Option = option
    switch {
    case net.Option.Epoches <= 0:
        return errors.New("Epoches must be larger than zero.")
    case net.Option.LearningRate == 0:
        return errors.New("Learning rate must be specified to train NN.")
    }
    return nil
}
// SupervisedSGD optimizes the network by stochastic gradient descent: each
// epoch draws one random sample, runs a forward pass and backpropagates.
func (net *NeuralNetwork) SupervisedSGD(input [][]float64, target [][]float64) {
    for epoch := 0; epoch < net.Option.Epoches; epoch++ {
        // Pick a random training sample.
        k := rand.Intn(len(input))
        x, t := input[k], target[k]

        predicted := net.Forward(x)
        net.Feedback(predicted, t)

        // Optionally report the objective for this sample.
        if net.Option.Monitoring {
            fmt.Println(epoch, net.Objective(predicted, t))
        }
    }
}
// Train performs supervised network training.
func (net *NeuralNetwork) Train(input [][]float64,
target [][]float64, option TrainingOption) error {
err := net.ParseTrainingOption(option)
if err != nil {
return err
}
// Perform SupervisedSGD
net.SupervisedSGD(input, target)
return nil
} | mlp3/mlp3.go | 0.772015 | 0.403156 | mlp3.go | starcoder |
package aug
import (
ts "github.com/sugarme/gotch/tensor"
)
// Normalize normalizes a tensor image with mean and standard deviation.
// Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``
// channels, this transform will normalize each channel of the input
// ``torch.*Tensor`` i.e.,
// ``output[channel] = (input[channel] - mean[channel]) / std[channel]``
// .. note::
// This transform acts out of place, i.e., it does not mutate the input tensor.
// Args:
// - mean (sequence): Sequence of means for each channel.
// - std (sequence): Sequence of standard deviations for each channel.
// Normalize holds per-channel statistics used to standardize an image tensor.
type Normalize struct {
	mean []float64 // per-channel means; should be from 0 to 1
	std []float64 // per-channel standard deviations; should be > 0 and <= 1
}
// normalizeOptions is the mutable option bag consumed by NormalizeOption funcs.
type normalizeOptions struct {
	mean []float64
	std []float64
}
// NormalizeOption mutates a normalizeOptions (functional-options pattern).
type NormalizeOption func(*normalizeOptions)
// Mean and SD can be calculated for specific dataset as follow:
/*
mean = 0.0
meansq = 0.0
count = 0
for index, data in enumerate(train_loader):
    mean = mean + data.sum()
meansq = meansq + (data**2).sum()
count += np.prod(data.shape)
total_mean = mean/count
total_var = (meansq/count) - (total_mean**2)
total_std = torch.sqrt(total_var)
print("mean: " + str(total_mean))
print("std: " + str(total_std))
*/
// For example. ImageNet dataset has RGB mean and standard error:
// meanVals := []float64{0.485, 0.456, 0.406}
// sdVals := []float64{0.229, 0.224, 0.225}
// defaultNormalizeOptions returns the identity normalization:
// zero mean and unit standard deviation for three channels.
func defaultNormalizeOptions() *normalizeOptions {
	opts := new(normalizeOptions)
	opts.mean = []float64{0, 0, 0}
	opts.std = []float64{1, 1, 1}
	return opts
}
// WithNormalizeStd overrides the per-channel standard deviations.
func WithNormalizeStd(std []float64) NormalizeOption {
	return func(opts *normalizeOptions) { opts.std = std }
}
// WithNormalizeMean overrides the per-channel means.
func WithNormalizeMean(mean []float64) NormalizeOption {
	return func(opts *normalizeOptions) { opts.mean = mean }
}
// newNormalize builds a Normalize transform from the supplied options,
// falling back to the defaults for anything not overridden.
func newNormalize(opts ...NormalizeOption) *Normalize {
	cfg := defaultNormalizeOptions()
	for _, apply := range opts {
		apply(cfg)
	}
	return &Normalize{mean: cfg.mean, std: cfg.std}
}
// Forward applies the normalization to a byte image tensor x, out of place.
// The input is converted to float, normalized, and converted back to bytes.
func (n *Normalize) Forward(x *ts.Tensor) *ts.Tensor {
	fx := Byte2FloatImage(x)
	out := normalize(fx, n.mean, n.std)
	bx := Float2ByteImage(out)
	// Release the intermediate float tensors; only bx is returned to the caller.
	fx.MustDrop()
	out.MustDrop()
	return bx
}
// WithNormalize registers a Normalize transform on the augmentation Options.
func WithNormalize(opts ...NormalizeOption) Option {
	norm := newNormalize(opts...)
	return func(o *Options) { o.normalize = norm }
} | vision/aug/normalize.go | 0.86378 | 0.510313 | normalize.go | starcoder |
package tetra3d
import (
"image/color"
"math"
)
// Color represents a color, containing R, G, B, and A components, each expected to range from 0 to 1.
type Color struct {
	R, G, B, A float32 // red, green, blue, alpha; nominal range [0, 1]
}
// NewColor returns a new Color with the given components, each expected
// to lie in [0, 1].
func NewColor(r, g, b, a float32) *Color {
	c := Color{R: r, G: g, B: b, A: a}
	return &c
}
// Clone returns a new Color with the same component values as this one.
func (color *Color) Clone() *Color {
	dup := *color
	return &dup
}
// Set overwrites all four components of the Color with r, g, b, and a.
func (color *Color) Set(r, g, b, a float32) {
	*color = Color{R: r, G: g, B: b, A: a}
}
// AddRGB adds value to each of the R, G, and B channels; A is untouched.
func (color *Color) AddRGB(value float32) {
	color.R, color.G, color.B = color.R+value, color.G+value, color.B+value
}
// ToFloat64s returns the four channels widened to float64, in R, G, B, A order.
func (color *Color) ToFloat64s() (r, g, b, a float64) {
	r = float64(color.R)
	g = float64(color.G)
	b = float64(color.B)
	a = float64(color.A)
	return r, g, b, a
}
// ToRGBA64 converts the color to a color.RGBA64. Each channel is clamped to
// [0, 1] before scaling: Go's float-to-integer conversion is
// implementation-specific when the value does not fit in the target type, so
// unclamped components outside the expected range would yield garbage values.
func (c *Color) ToRGBA64() color.RGBA64 {
	clamp := func(v float32) uint16 {
		if v <= 0 {
			return 0
		}
		if v >= 1 {
			return math.MaxUint16
		}
		return uint16(v * math.MaxUint16)
	}
	return color.RGBA64{
		R: clamp(c.R),
		G: clamp(c.G),
		B: clamp(c.B),
		A: clamp(c.A),
	}
}
// ConvertTosRGB converts the color's R, G, and B components to the sRGB
// color space. This is used to convert colors from their values in GLTF to
// how they should appear on the screen. See: https://en.wikipedia.org/wiki/SRGB
func (color *Color) ConvertTosRGB() {
	// Standard linear-to-sRGB transfer function, applied per channel.
	// Factored into one closure instead of three duplicated branches.
	toSRGB := func(v float32) float32 {
		if v <= 0.0031308 {
			return v * 12.92
		}
		return float32(1.055*math.Pow(float64(v), 1/2.4) - 0.055)
	}
	color.R = toSRGB(color.R)
	color.G = toSRGB(color.G)
	color.B = toSRGB(color.B)
} | color.go | 0.920781 | 0.732735 | color.go | starcoder |
package cryptypes
import (
	"database/sql/driver"
	"fmt"
)
// EncryptedByteSlice supports encrypting ByteSlice data
type EncryptedByteSlice struct {
	Field
	Raw []byte // plaintext value; encrypted transparently on save
}

// Scan converts the value from the DB into a usable EncryptedByteSlice value.
// Non-[]byte driver values yield an error instead of panicking, as the
// database/sql Scanner contract expects.
func (s *EncryptedByteSlice) Scan(value interface{}) error {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into EncryptedByteSlice", value)
	}
	return decrypt(b, &s.Raw)
}

// Value converts an initialized EncryptedByteSlice value into a value that can safely be stored in the DB
func (s EncryptedByteSlice) Value() (driver.Value, error) {
	return encrypt(s.Raw)
}
// NullEncryptedByteSlice supports encrypting nullable ByteSlice data
type NullEncryptedByteSlice struct {
	Field
	Raw   []byte // plaintext value; encrypted transparently on save
	Empty bool   // true when the DB value is NULL
}

// Scan converts the value from the DB into a usable NullEncryptedByteSlice value.
// NULL maps to an empty Raw with Empty set; non-[]byte values yield an error
// instead of panicking.
func (s *NullEncryptedByteSlice) Scan(value interface{}) error {
	if value == nil {
		s.Raw = []byte{}
		s.Empty = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullEncryptedByteSlice", value)
	}
	return decrypt(b, &s.Raw)
}

// Value converts an initialized NullEncryptedByteSlice value into a value that can safely be stored in the DB
func (s NullEncryptedByteSlice) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encrypt(s.Raw)
}
// SignedByteSlice supports signing ByteSlice data
type SignedByteSlice struct {
	Field
	Raw   []byte // payload value
	Valid bool   // true when the stored signature verified
}

// Scan converts the value from the DB into a usable SignedByteSlice value.
// Non-[]byte driver values yield an error instead of panicking.
func (s *SignedByteSlice) Scan(value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into SignedByteSlice", value)
	}
	s.Valid, err = verify(b, &s.Raw)
	return
}

// Value converts an initialized SignedByteSlice value into a value that can safely be stored in the DB
func (s SignedByteSlice) Value() (driver.Value, error) {
	return sign(s.Raw)
}
// NullSignedByteSlice supports signing nullable ByteSlice data
type NullSignedByteSlice struct {
	Field
	Raw   []byte // payload value
	Empty bool   // true when the DB value is NULL
	Valid bool   // true when the stored signature verified (NULL counts as valid)
}

// Scan converts the value from the DB into a usable NullSignedByteSlice value.
// NULL maps to an empty, valid value; non-[]byte values yield an error
// instead of panicking.
func (s *NullSignedByteSlice) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = []byte{}
		s.Empty = true
		s.Valid = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullSignedByteSlice", value)
	}
	s.Valid, err = verify(b, &s.Raw)
	return
}

// Value converts an initialized NullSignedByteSlice value into a value that can safely be stored in the DB
func (s NullSignedByteSlice) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return sign(s.Raw)
}
// SignedEncryptedByteSlice supports signing and encrypting ByteSlice data
type SignedEncryptedByteSlice struct {
	Field
	Raw   []byte // plaintext payload
	Valid bool   // true when the stored signature verified
}

// Scan converts the value from the DB into a usable SignedEncryptedByteSlice value.
// Non-[]byte driver values yield an error instead of panicking.
func (s *SignedEncryptedByteSlice) Scan(value interface{}) (err error) {
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into SignedEncryptedByteSlice", value)
	}
	s.Valid, err = decryptVerify(b, &s.Raw)
	return
}

// Value converts an initialized SignedEncryptedByteSlice value into a value that can safely be stored in the DB
func (s SignedEncryptedByteSlice) Value() (driver.Value, error) {
	return encryptSign(s.Raw)
}
// NullSignedEncryptedByteSlice supports signing and encrypting nullable ByteSlice data
type NullSignedEncryptedByteSlice struct {
	Field
	Raw   []byte // plaintext payload
	Empty bool   // true when the DB value is NULL
	Valid bool   // true when the stored signature verified (NULL counts as valid)
}

// Scan converts the value from the DB into a usable NullSignedEncryptedByteSlice value.
// NULL maps to an empty, valid value; non-[]byte values yield an error
// instead of panicking.
func (s *NullSignedEncryptedByteSlice) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = []byte{}
		s.Empty = true
		s.Valid = true
		return nil
	}
	b, ok := value.([]byte)
	if !ok {
		return fmt.Errorf("cryptypes: cannot scan %T into NullSignedEncryptedByteSlice", value)
	}
	s.Valid, err = decryptVerify(b, &s.Raw)
	return
}

// Value converts an initialized NullSignedEncryptedByteSlice value into a value that can safely be stored in the DB
func (s NullSignedEncryptedByteSlice) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encryptSign(s.Raw)
} | cryptypes/type_byte_slice.go | 0.816918 | 0.677724 | type_byte_slice.go | starcoder |
package algebra
import "constraints"
// Additive is a type that can use `+` operator.
// Note that strings are included: `+` on strings is concatenation.
type Additive interface {
	constraints.Integer | constraints.Float | constraints.Complex | ~string
}
// Multiplicative is a type that can use `*` operator.
type Multiplicative interface {
	constraints.Integer | constraints.Float | constraints.Complex
}
// Semigroup is a set of type `T` and its associative binary operation `Combine(T, T) T`.
// Implementations must satisfy associativity:
// Combine(a, Combine(b, c)) == Combine(Combine(a, b), c).
type Semigroup[T any] interface {
	Combine(T, T) T
}
// DeriveAdditiveSemigroup derives Semigroup using `+` operator.
func DeriveAdditiveSemigroup[T Additive]() Semigroup[T] {
	return additiveSemigroup[T]{}
}
// additiveSemigroup is the stateless Semigroup whose operation is `+`.
type additiveSemigroup[T Additive] struct{}
// Combine returns x + y.
func (additiveSemigroup[T]) Combine(x, y T) T {
	return x + y
}
// DeriveMultiplicativeSemigroup derives Semigroup using `*` operator.
func DeriveMultiplicativeSemigroup[T Multiplicative]() Semigroup[T] {
	return multiplicativeSemigroup[T]{}
}
// multiplicativeSemigroup is the stateless Semigroup whose operation is `*`.
type multiplicativeSemigroup[T Multiplicative] struct{}
// Combine returns x * y.
func (multiplicativeSemigroup[T]) Combine(x, y T) T {
	return x * y
}
// DefaultSemigroup is a default implementation of Semigroup.
// CombineImpl supplies the associative operation; it must be non-nil before use.
type DefaultSemigroup[T any] struct {
	CombineImpl func(T, T) T
}
// Combine delegates to CombineImpl.
func (s *DefaultSemigroup[T]) Combine(x, y T) T {
	return s.CombineImpl(x, y)
}
// Monoid is a Semigroup with identity.
// Empty must return the identity of Combine:
// Combine(Empty(), x) == Combine(x, Empty()) == x.
type Monoid[T any] interface {
	Semigroup[T]
	Empty() T
}
// DeriveAdditiveMonoid derives Monoid using `+` and zero value.
func DeriveAdditiveMonoid[T Additive]() Monoid[T] {
	return additiveMonoid[T]{}
}
// additiveMonoid is the stateless Monoid over `+` with the zero value as identity.
type additiveMonoid[T Additive] struct{}
// Combine returns x + y.
func (additiveMonoid[T]) Combine(x, y T) T {
	return x + y
}
// Empty returns T's zero value (0 for numbers, "" for strings), the identity of `+`.
func (additiveMonoid[T]) Empty() (zero T) {
	return
}
// DeriveMultiplicativeMonoid derives Monoid using `*` and `1`.
func DeriveMultiplicativeMonoid[T Multiplicative]() Monoid[T] {
	return multiplicativeMonoid[T]{}
}
// multiplicativeMonoid is the stateless Monoid over `*` with 1 as identity.
type multiplicativeMonoid[T Multiplicative] struct{}
// Combine returns x * y.
func (multiplicativeMonoid[T]) Combine(x, y T) T {
	return x * y
}
// Empty returns 1, the identity of `*`.
func (multiplicativeMonoid[T]) Empty() T {
	return 1
}
// DefaultMonoid is a default implementation of Monoid.
// It embeds a Semigroup for Combine; EmptyImpl supplies the identity and
// must be non-nil before use.
type DefaultMonoid[T any] struct {
	Semigroup[T]
	EmptyImpl func() T
}
// Empty delegates to EmptyImpl.
func (m *DefaultMonoid[T]) Empty() T {
	return m.EmptyImpl()
} | classes/algebra/algebra.go | 0.858689 | 0.525978 | algebra.go | starcoder |
package similgraph
import (
"sort"
"github.com/pkg/errors"
)
//go:generate gorewrite
//SimilGraph represents a cosine similarity graph whose edges are computed on the fly
type SimilGraph struct {
	bigraphEdges []implicitEdge // concatenated adjacency lists of the underlying bipartite graph
	vertexSlices []uint32 // vertexSlices[v]:vertexSlices[v+1] bounds v's span in bigraphEdges (CSR-style)
	vertexCount uint32 // number of vertices; valid ids are [0, vertexCount)
}
//VertexCount returns the number of vertices in the graph.
func (g SimilGraph) VertexCount() uint32 {
	return g.vertexCount
}
//AreCorrelated reports the cosine-similarity weight between vertices v1 and v2 (implementation currently commented out below).
/*func (g SimilGraph) AreCorrelated(v1, v2 uint32) (weight float32, err error) {
if v1 >= g.vertexCount {
return 0, errors.Errorf("similgraph: v1 (%v) is an invalid vertex, vertex limit is %v.", v1, g.vertexCount)
}
if v2 >= g.vertexCount {
return 0, errors.Errorf("similgraph: v2 (%v) is an invalid vertex, vertex limit is %v.", v2, g.vertexCount)
}
if v1 == v2 {
return 0, errors.Errorf("similgraph: v1 and v2 are the same vertex (%v).", v1)
}
edgegroup1 := g.bigraphEdges[g.vertexSlices[v1]:g.vertexSlices[v1+1]]
edgegroup2 := g.bigraphEdges[g.vertexSlices[v2]:g.vertexSlices[v2+1]]
f64weight := float64(0)
for {
edge1 := edgegroup1[0]
edge2 := edgegroup2[0]
switch {
case edge1.Vertex < edge2.Vertex:
if len(edgegroup1) > 1 {
edgegroup1 = edgegroup1[1:]
} else {
return float32(f64weight), nil
}
case edge1.Vertex > edge2.Vertex:
if len(edgegroup2) > 1 {
edgegroup2 = edgegroup2[1:]
} else {
return float32(f64weight), nil
}
default:
f64weight += float64(edge1.Weight) * float64(edge2.Weight)
if len(edgegroup1) > 1 && len(edgegroup2) > 1 {
edgegroup1 = edgegroup1[1:]
edgegroup2 = edgegroup2[1:]
} else {
return float32(f64weight), nil
}
}
}
}*/
//EdgeIterator iterate over the edges of the cosine similarity graph, computed on the fly.
// It returns two iterators over v's similarity edges: one for neighbours with
// smaller vertex ids than v and one for those with bigger ids. Each iterator
// yields per-neighbour products of shared bipartite-edge weights, merged and
// summed by NextSum.
func (g SimilGraph) EdgeIterator(v uint32) (smallerIterator, biggerIterator func() (info Edge, ok bool), err error) {
	if v >= g.vertexCount {
		return nil, nil, errors.Errorf("similgraph: v (%v) is an invalid vertex, vertex limit is %v.", v, g.vertexCount)
	}
	smallerh := make([]pivotedEdgesSpan, 0, 4)
	biggerh := make([]pivotedEdgesSpan, 0, 4)
	// For every bipartite neighbour of v, split its adjacency list around v:
	// entries before v (smaller ids) and entries after v (bigger ids).
	for _, e := range g.bigraphEdges[g.vertexSlices[v]:g.vertexSlices[v+1]] {
		edgegroup := g.bigraphEdges[g.vertexSlices[e.Vertex]:g.vertexSlices[e.Vertex+1]]
		// Adjacency lists appear sorted by Vertex here (binary search relies on it).
		index := sort.Search(len(edgegroup), func(i int) bool { return edgegroup[i].Vertex >= v })
		if index >= len(edgegroup) || edgegroup[index].Vertex != v {
			// The bipartite graph must be symmetric; v has to appear in its neighbour's list.
			return nil, nil, errors.Errorf("similgraph internal error: vertex (%v) was not found where it should be.", v)
		}
		if index > 0 {
			smallerh = append(smallerh, pivotedEdgesSpan{edgegroup[index], edgegroup[:index]})
		}
		if index+1 < len(edgegroup) {
			biggerh = append(biggerh, pivotedEdgesSpan{edgegroup[index], edgegroup[index+1:]})
		}
	}
	smallerIterator = edgeIterMergeFrom(pES2EdgeIter(smallerh...)...).NextSum
	biggerIterator = edgeIterMergeFrom(pES2EdgeIter(biggerh...)...).NextSum
	return smallerIterator, biggerIterator, nil
}
//Edge represent a weighted edge
type Edge struct {
	VertexA uint32 // first endpoint
	VertexB uint32 // second endpoint
	Weight float32 // similarity contribution for this vertex pair
}
//Less implement the Lexicographical order between two edges.
// True when ei precedes ej, ordered first by VertexA and then by VertexB.
func (ei Edge) Less(ej Edge) bool {
	return ei.VertexA < ej.VertexA ||
		(ei.VertexA == ej.VertexA && ei.VertexB < ej.VertexB)
}
// implicitEdge is one entry of a bipartite adjacency list.
type implicitEdge struct {
	Vertex uint32 // neighbour vertex id
	Weight float32 // weight of the bipartite edge
}
// pivotedEdgesSpan pairs the edge to the pivot vertex with a slice of its siblings.
type pivotedEdgesSpan struct {
	Pivot implicitEdge // the edge back to the vertex being iterated
	Edges []implicitEdge // remaining entries of the same adjacency list
}
// pES2EdgeIter turns each pivoted span into an iterator of Edge values whose
// weight is the product Pivot.Weight * sibling.Weight (one cosine term).
func pES2EdgeIter(pes ...pivotedEdgesSpan) (nexts []func() (Edge, bool)) {
	for _, s := range pes {
		s := s // capture a per-iteration copy so each closure owns its own span
		nexts = append(nexts, func() (e Edge, ok bool) {
			if len(s.Edges) == 0 {
				return
			}
			// Pop the head of the span and emit the pivot/sibling product.
			p, ie := s.Pivot, s.Edges[0]
			s.Edges = s.Edges[1:]
			return Edge{p.Vertex, ie.Vertex, p.Weight * ie.Weight}, true
		})
	}
	return
}
// NextSum returns the next merged edge, summing the weights of all
// consecutive edges that share the same (VertexA, VertexB) pair.
// The accumulation is done in float64 to limit rounding error.
func (m *edgeIterMerge) NextSum() (e Edge, ok bool) {
	e, ok = m.Next()
	if !ok {
		return
	}
	f64eWeight := float64(e.Weight)
	for e1, ok := m.Peek(); ok && equalIndexes(e, e1); e1, ok = m.Peek() {
		// The inner `e` deliberately shadows the result: only its Weight is
		// consumed, while the outer `e` keeps the vertex pair being returned.
		e, _ := m.Next()
		f64eWeight += float64(e.Weight)
	}
	e.Weight = float32(f64eWeight)
	return
}
// equalIndexes reports whether a and b join the same vertex pair.
func equalIndexes(a, b Edge) bool {
	sameA := a.VertexA == b.VertexA
	sameB := a.VertexB == b.VertexB
	return sameA && sameB
} | similgraph.go | 0.675872 | 0.691562 | similgraph.go | starcoder |
package daycount
import (
"math"
"time"
"github.com/fxtlabs/date"
)
// DayCounter computes the year fraction between a from and a to date
// according to a predefined day-count convention.
// All DayCounter functions assume that from is never later than to.
type DayCounter func(from, to date.Date) float64
// NewDayCounter returns a DayCounter based on the input convention.
// Unknown conventions fall back to ActualActual.
func NewDayCounter(convention Convention) DayCounter {
	counters := map[Convention]DayCounter{
		ActualActual:              yearFractionActualActual,
		ActualActualAFB:           yearFractionActualActualAFB,
		ActualThreeSixty:          yearFractionActualThreeSixty,
		ActualThreeSixtyFiveFixed: yearFractionActualThreeSixtyFiveFixed,
		ThirtyThreeSixtyUS:        yearFractionThirtyThreeSixtyUS,
		ThirtyThreeSixtyEuropean:  yearFractionThirtyThreeSixtyEuropean,
		ThirtyThreeSixtyItalian:   yearFractionThirtyThreeSixtyItalian,
		ThirtyThreeSixtyGerman:    yearFractionThirtyThreeSixtyGerman,
	}
	if counter, ok := counters[convention]; ok {
		return counter
	}
	return yearFractionActualActual
}
// YearFraction returns the year fraction difference between two dates
// according to the input convention.
// If the convention is not recognized, it defaults to ActualActual.
func YearFraction(from, to date.Date, convention Convention) float64 {
	return NewDayCounter(convention)(from, to)
}
// Year-length bases used by the various day-count conventions.
const (
	threeSixtyDays = 360.0
	threeSixtyFiveDays = 365.0
	threeSixtySixDays = 366.0
)
// yearFractionActualActual implements the Actual/Actual (ISDA) convention:
// actual days in each calendar year divided by that year's length, plus one
// for every whole intermediate year.
func yearFractionActualActual(from, to date.Date) float64 {
	fromYear, toYear := from.Year(), to.Year()
	if fromYear == toYear {
		return float64(to.Sub(from)) / daysPerYear(fromYear)
	}
	// Split at the January 1st boundaries of the first and last years.
	firstFraction := float64(date.New(fromYear+1, time.January, 1).Sub(from)) / daysPerYear(fromYear)
	lastFraction := float64(to.Sub(date.New(toYear, time.January, 1))) / daysPerYear(toYear)
	return firstFraction + lastFraction + float64(toYear-fromYear-1)
}
// yearFractionActualActualAFB implements the Actual/Actual AFB convention:
// count whole years backwards from `to`, then divide the remaining stub by
// its AFB year basis (365 or 366).
func yearFractionActualActualAFB(from, to date.Date) float64 {
	nbFullYears := 0
	remaining := to
	for tmp := to; tmp.After(from); {
		tmp = tmp.AddDate(-1, 0, 0)
		// Stepping back from Feb 29 lands on Feb 28 of a leap year; AFB keeps Feb 29.
		if tmp.Day() == 28 && tmp.Month() == time.February && isLeapYear(tmp.Year()) {
			tmp = tmp.Add(1)
		}
		if !tmp.Before(from) {
			nbFullYears++
			remaining = tmp
		}
	}
	return float64(nbFullYears) + float64(remaining.Sub(from))/computeYearDurationAFB(from, remaining)
}
// computeYearDurationAFB returns the AFB year basis for the stub period:
// 366 when a February 29 lies in (from, remaining], 365 otherwise.
// The local variable was renamed from `date`, which shadowed the imported
// `date` package; behavior is unchanged.
func computeYearDurationAFB(from, remaining date.Date) float64 {
	if isLeapYear(remaining.Year()) {
		feb29 := date.New(remaining.Year(), time.February, 29)
		if remaining.After(feb29) && !from.After(feb29) {
			return threeSixtySixDays
		}
	}
	if isLeapYear(from.Year()) {
		feb29 := date.New(from.Year(), time.February, 29)
		if remaining.After(feb29) && !from.After(feb29) {
			return threeSixtySixDays
		}
	}
	return threeSixtyFiveDays
}
// yearFractionActualThreeSixty implements Actual/360: actual days over a 360-day year.
func yearFractionActualThreeSixty(from, to date.Date) float64 {
	return float64(to.Sub(from)) / threeSixtyDays
}
// yearFractionActualThreeSixtyFiveFixed implements Actual/365 Fixed:
// actual days over a fixed 365-day year, leap years included.
func yearFractionActualThreeSixtyFiveFixed(from, to date.Date) float64 {
	return float64(to.Sub(from)) / threeSixtyFiveDays
}
// yearFractionThirtyThreeSixtyUS implements the 30/360 US variant: when the
// end date is the 31st and the start day is before the 30th, the end date is
// rolled forward one day before applying the base 30/360 rule.
func yearFractionThirtyThreeSixtyUS(from, to date.Date) float64 {
	if to.Day() == 31 && from.Day() < 30 {
		to = to.Add(1)
	}
	return yearFractionThirtyThreeSixty(from, to, 0.0)
}
// yearFractionThirtyThreeSixtyEuropean implements 30E/360: the base 30/360
// rule with no day-shift adjustments.
func yearFractionThirtyThreeSixtyEuropean(from, to date.Date) float64 {
	return yearFractionThirtyThreeSixty(from, to, 0.0)
}
// yearFractionThirtyThreeSixtyItalian implements the 30/360 Italian variant:
// dates on Feb 28/29 are treated as if they fell on the 30th.
func yearFractionThirtyThreeSixtyItalian(from, to date.Date) float64 {
	// shift returns the day adjustment that moves a late-February date to the 30th.
	shift := func(d date.Date) int {
		if d.Month() == time.February && d.Day() > 27 {
			return 30 - d.Day()
		}
		return 0
	}
	dayShift := shift(from) + shift(to)
	return yearFractionThirtyThreeSixty(from, to, dayShift)
}
// yearFractionThirtyThreeSixtyGerman implements the 30/360 German variant:
// a date that is the last day of February counts as the 30th (shift of 1).
func yearFractionThirtyThreeSixtyGerman(from, to date.Date) float64 {
	// shift returns 1 when d is the last day of February (the next day is March 1st).
	shift := func(d date.Date) int {
		if tmp := d.Add(1); tmp.Month() == time.March && tmp.Day() == 1 {
			return 1
		}
		return 0
	}
	dayShift := shift(from) + shift(to)
	return yearFractionThirtyThreeSixty(from, to, dayShift)
}
// yearFractionThirtyThreeSixty is the shared 30/360 core:
// 360*(yearDiff) + 30*(monthDiff-1) + clamped day terms, plus the
// variant-specific dayShift, all over a 360-day year.
func yearFractionThirtyThreeSixty(from, to date.Date, dayShift int) float64 {
	yearDiff := float64(360 * (to.Year() - from.Year()))
	monthDiff := float64(30 * (to.Month() - from.Month() - 1))
	// Days in the start month are capped below at 0, in the end month above at 30.
	dayDiff := math.Max(0, float64(30-from.Day())) + math.Min(30, float64(to.Day()))
	return (yearDiff + monthDiff + dayDiff + float64(dayShift)) / threeSixtyDays
}
// isLeapYear reports whether year is a leap year in the Gregorian calendar:
// divisible by 4, except century years not divisible by 400.
func isLeapYear(year int) bool {
	switch {
	case year%400 == 0:
		return true
	case year%100 == 0:
		return false
	default:
		return year%4 == 0
	}
}
// daysPerYear returns the number of days in the given calendar year
// (366 for leap years, 365 otherwise).
func daysPerYear(year int) float64 {
	if isLeapYear(year) {
		return threeSixtySixDays
	}
	return threeSixtyFiveDays
} | daycount.go | 0.779028 | 0.665832 | daycount.go | starcoder |
package dsp
// region Complex Fir Filter
// CTFirFilter is a complex-valued FIR filter with optional decimation.
// It retains the most recent samples between calls so that successive
// buffers are filtered as one continuous stream.
type CTFirFilter struct {
	taps []complex64 // FIR coefficients
	sampleHistory []complex64 // samples carried over from the previous call
	tapsLen int // cached len(taps)
	decimation int // output decimation factor; 1 means no decimation
}
// MakeCTFirFilter builds a complex FIR filter with no decimation from taps.
func MakeCTFirFilter(taps []complex64) *CTFirFilter {
	n := len(taps)
	return &CTFirFilter{
		taps:          taps,
		sampleHistory: make([]complex64, n),
		tapsLen:       n,
		decimation:    1,
	}
}
// MakeDecimationCTFirFilter builds a complex FIR filter that decimates its
// output by the given factor.
func MakeDecimationCTFirFilter(decimation int, taps []complex64) *CTFirFilter {
	n := len(taps)
	return &CTFirFilter{
		taps:          taps,
		sampleHistory: make([]complex64, n),
		tapsLen:       n,
		decimation:    decimation,
	}
}
// Filter convolves the first `length` samples with the taps, writing the
// results back into data in place. The saved history is prepended so the
// stream stays continuous across calls (append copies into a fresh array,
// so overwriting data does not disturb samples).
// NOTE(review): the new history is taken from `data` *after* it has been
// overwritten with filtered output, unlike FilterOut/FilterBuffer which keep
// the raw input tail — confirm this asymmetry is intentional.
func (f *CTFirFilter) Filter(data []complex64, length int) {
	var samples = append(f.sampleHistory, data...)
	for i := 0; i < length; i++ {
		ComplexDotProduct(&data[i], samples[i:i+f.tapsLen], f.taps)
	}
	f.sampleHistory = data[len(data)-f.tapsLen:]
}
// FilterOut convolves data (prefixed by the saved history) with the taps and
// returns the result in a newly allocated slice; the unfiltered input tail
// becomes the history for the next call.
func (f *CTFirFilter) FilterOut(data []complex64) []complex64 {
	var samples = append(f.sampleHistory, data...)
	var output = make([]complex64, len(data))
	// length is the number of full tap-spans available in samples.
	var length = len(samples) - f.tapsLen
	for i := 0; i < length; i++ {
		output[i] = ComplexDotProductResult(samples[i:], f.taps)
	}
	f.sampleHistory = samples[length:]
	return output
}
// FilterBuffer behaves like FilterOut but writes into the caller-supplied
// output slice and returns the number of samples produced.
// It panics when output is too small to hold the result.
func (f *CTFirFilter) FilterBuffer(input, output []complex64) int {
	var samples = append(f.sampleHistory, input...)
	var length = len(samples) - f.tapsLen
	if len(output) < length {
		panic("There is not enough space in output buffer")
	}
	for i := 0; i < length; i++ {
		output[i] = ComplexDotProductResult(samples[i:], f.taps)
	}
	f.sampleHistory = samples[length:]
	return length
}
// Work filters data into a new slice, decimating when the filter was built
// with a decimation factor greater than one.
func (f *CTFirFilter) Work(data []complex64) []complex64 {
	if f.decimation <= 1 {
		return f.FilterOut(data)
	}
	return f.FilterDecimateOut(data, f.decimation)
}
// WorkBuffer filters input into the caller-supplied output slice, decimating
// when the filter was built with a decimation factor greater than one.
// It returns the number of samples produced.
func (f *CTFirFilter) WorkBuffer(input, output []complex64) int {
	if f.decimation <= 1 {
		return f.FilterBuffer(input, output)
	}
	return f.FilterDecimateBuffer(input, output, f.decimation)
}
// FilterSingle computes one output sample from data without touching the
// filter's history (stateless dot product with the taps).
func (f *CTFirFilter) FilterSingle(data []complex64) complex64 {
	return ComplexDotProductResult(data, f.taps)
}
// FilterDecimate writes `length` decimated output samples into the front of
// data, advancing the input by `decimate` samples per output.
// NOTE(review): as in Filter, the new history is taken from `data` after it
// has been overwritten with output — confirm this is intentional.
func (f *CTFirFilter) FilterDecimate(data []complex64, decimate int, length int) {
	var samples = append(f.sampleHistory, data...)
	var j = 0
	for i := 0; i < length; i++ {
		ComplexDotProduct(&data[i], samples[j:], f.taps)
		j += decimate
	}
	f.sampleHistory = data[len(data)-f.tapsLen:]
}
// FilterDecimateOut filters and decimates data into a new slice, taking every
// decimate-th sample position as an output. Samples too close to the end to
// cover a full tap span are deferred to the next call via sampleHistory.
// NOTE(review): when the loop exits early, `output` keeps its original
// allocated length, so trailing slots stay zero — unlike FilterDecimateBuffer,
// which reports the adjusted length. Confirm callers expect this.
func (f *CTFirFilter) FilterDecimateOut(data []complex64, decimate int) []complex64 {
	var samples = append(f.sampleHistory, data...)
	var length = len(samples) / decimate
	var remainder = len(samples) % decimate
	var output = make([]complex64, length)
	for i := 0; i < length; i++ {
		var srcIdx = decimate * i
		var sl = samples[srcIdx:]
		if len(sl) < len(f.taps) {
			// Not enough samples left for a full tap span: carry the tail
			// (rounded up to a multiple of decimate) over to the next call.
			div := len(sl) / decimate
			if len(sl)%decimate > 0 {
				div++
			}
			length -= div
			remainder += div * decimate
			break
		}
		output[i] = ComplexDotProductResult(sl, f.taps)
	}
	f.sampleHistory = samples[len(samples)-remainder:]
	return output
}
// FilterDecimateBuffer behaves like FilterDecimateOut but writes into the
// caller-supplied output slice and returns the number of samples produced.
// It panics when output is too small for the initial length estimate.
func (f *CTFirFilter) FilterDecimateBuffer(input, output []complex64, decimate int) int {
	var samples = append(f.sampleHistory, input...)
	var length = len(samples) / decimate
	var remainder = len(samples) % decimate
	if len(output) < length {
		panic("There is not enough space in output buffer")
	}
	for i := 0; i < length; i++ {
		var srcIdx = decimate * i
		var sl = samples[srcIdx:]
		if len(sl) < len(f.taps) {
			// Not enough samples left for a full tap span: carry the tail
			// (rounded up to a multiple of decimate) over to the next call.
			div := len(sl) / decimate
			if len(sl)%decimate > 0 {
				div++
			}
			length -= div
			remainder += div * decimate
			break
		}
		output[i] = ComplexDotProductResult(sl, f.taps)
	}
	f.sampleHistory = samples[len(samples)-remainder:]
	return length
}
// SetTaps replaces the filter coefficients and refreshes the cached length.
// Note that sampleHistory is not resized here.
func (f *CTFirFilter) SetTaps(taps []complex64) {
	f.taps, f.tapsLen = taps, len(taps)
}
// PredictOutputSize returns an upper bound on the number of output samples
// produced for inputLength input samples (one extra slot of slack).
func (f *CTFirFilter) PredictOutputSize(inputLength int) int {
	return 1 + inputLength/f.decimation
}
// endregion | dsp/ComplexFir.go | 0.535584 | 0.448607 | ComplexFir.go | starcoder |
package finverse
import (
"encoding/json"
)
// IncomeTotal struct for IncomeTotal
type IncomeTotal struct {
	// Estimated monthly income; optional, omitted from JSON when unset.
	EstimatedMonthlyIncome *IncomeEstimate `json:"estimated_monthly_income,omitempty"`
	// Number of transactions counted towards income
	TransactionCount float32 `json:"transaction_count"`
	// Month-by-month income estimates; required by the API.
	MonthlyHistory []MonthlyIncomeEstimate `json:"monthly_history"`
}
// NewIncomeTotal instantiates a new IncomeTotal object with the required
// properties set. The argument list tracks the set of required properties
// and may change when that set changes.
func NewIncomeTotal(transactionCount float32, monthlyHistory []MonthlyIncomeEstimate) *IncomeTotal {
	return &IncomeTotal{
		TransactionCount: transactionCount,
		MonthlyHistory:   monthlyHistory,
	}
}
// NewIncomeTotalWithDefaults instantiates a new IncomeTotal object with only
// defaulted properties assigned; required API properties are not guaranteed
// to be set.
func NewIncomeTotalWithDefaults() *IncomeTotal {
	return &IncomeTotal{}
}
// GetEstimatedMonthlyIncome returns the EstimatedMonthlyIncome field value if set, zero value otherwise.
func (o *IncomeTotal) GetEstimatedMonthlyIncome() IncomeEstimate {
	if v, ok := o.GetEstimatedMonthlyIncomeOk(); ok {
		return *v
	}
	var zero IncomeEstimate
	return zero
}
// GetEstimatedMonthlyIncomeOk returns a tuple with the EstimatedMonthlyIncome field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *IncomeTotal) GetEstimatedMonthlyIncomeOk() (*IncomeEstimate, bool) {
	if o == nil || o.EstimatedMonthlyIncome == nil {
		return nil, false
	}
	return o.EstimatedMonthlyIncome, true
}
// HasEstimatedMonthlyIncome returns a boolean if a field has been set.
func (o *IncomeTotal) HasEstimatedMonthlyIncome() bool {
	return o != nil && o.EstimatedMonthlyIncome != nil
}
// SetEstimatedMonthlyIncome gets a reference to the given IncomeEstimate and assigns it to the EstimatedMonthlyIncome field.
func (o *IncomeTotal) SetEstimatedMonthlyIncome(v IncomeEstimate) {
	o.EstimatedMonthlyIncome = &v
}
// GetTransactionCount returns the TransactionCount field value.
func (o *IncomeTotal) GetTransactionCount() float32 {
	if o == nil {
		var zero float32
		return zero
	}
	return o.TransactionCount
}
// GetTransactionCountOk returns a tuple with the TransactionCount field value
// and a boolean to check if the value has been set.
func (o *IncomeTotal) GetTransactionCountOk() (*float32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.TransactionCount, true
}
// SetTransactionCount sets field value.
func (o *IncomeTotal) SetTransactionCount(v float32) {
	o.TransactionCount = v
}
// GetMonthlyHistory returns the MonthlyHistory field value.
func (o *IncomeTotal) GetMonthlyHistory() []MonthlyIncomeEstimate {
	if o == nil {
		var zero []MonthlyIncomeEstimate
		return zero
	}
	return o.MonthlyHistory
}
// GetMonthlyHistoryOk returns a tuple with the MonthlyHistory field value
// and a boolean to check if the value has been set.
func (o *IncomeTotal) GetMonthlyHistoryOk() ([]MonthlyIncomeEstimate, bool) {
	if o == nil {
		return nil, false
	}
	return o.MonthlyHistory, true
}
// SetMonthlyHistory sets field value.
func (o *IncomeTotal) SetMonthlyHistory(v []MonthlyIncomeEstimate) {
	o.MonthlyHistory = v
}
// MarshalJSON serializes IncomeTotal, omitting EstimatedMonthlyIncome when unset.
func (o IncomeTotal) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.EstimatedMonthlyIncome != nil {
		toSerialize["estimated_monthly_income"] = o.EstimatedMonthlyIncome
	}
	// Required fields are always serialized; the generator's redundant
	// `if true` guards were removed (behavior unchanged).
	toSerialize["transaction_count"] = o.TransactionCount
	toSerialize["monthly_history"] = o.MonthlyHistory
	return json.Marshal(toSerialize)
}
// NullableIncomeTotal wraps an *IncomeTotal together with an explicit
// "has been set" flag, distinguishing unset from JSON null.
type NullableIncomeTotal struct {
	value *IncomeTotal
	isSet bool
}
// Get returns the wrapped value (possibly nil).
func (v NullableIncomeTotal) Get() *IncomeTotal {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableIncomeTotal) Set(val *IncomeTotal) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (even a nil one) has been stored.
func (v NullableIncomeTotal) IsSet() bool {
	return v.isSet
}
// Unset clears the value and the set flag.
func (v *NullableIncomeTotal) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableIncomeTotal returns a wrapper already marked as set.
func NewNullableIncomeTotal(val *IncomeTotal) *NullableIncomeTotal {
	return &NullableIncomeTotal{value: val, isSet: true}
}
// MarshalJSON serializes the wrapped value (nil encodes as JSON null).
func (v NullableIncomeTotal) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks it as set.
func (v *NullableIncomeTotal) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | finverse/model_income_total.go | 0.769514 | 0.560734 | model_income_total.go | starcoder |
package keyproof
import (
"strings"
"github.com/privacybydesign/gabi/big"
)
type (
	// expStepAStructure proves that an exponentiation step is a no-op:
	// the exponent bit is 0 and the pre/post accumulators are equal.
	expStepAStructure struct {
		bitname string
		prename string
		postname string
		myname string
		bitRep RepresentationProofStructure
		equalityRep RepresentationProofStructure
	}

	// ExpStepAProof is the proof produced for an expStepAStructure.
	ExpStepAProof struct {
		Bit Proof // Needed to make sure we can fake these proofs, which is needed for the OR in expstep
		EqualityHider Proof
	}

	// expStepACommit holds the prover-side commitments for one proof round.
	expStepACommit struct {
		bit secret // Needed to make sure we can fake these proofs, which is needed for the OR in expstep
		equalityHider secret
	}
)
// newExpStepAStructure builds the proof structure for the names given,
// wiring up the representation proofs that (1) the bit commitment hides 0
// and (2) pre and post commit to the same value.
func newExpStepAStructure(bitname, prename, postname string) expStepAStructure {
	structure := expStepAStructure{
		bitname: bitname,
		prename: prename,
		postname: postname,
		myname: strings.Join([]string{bitname, prename, postname, "expa"}, "_"),
	}
	// bit = h^bithider, i.e. the bit commitment opens to 0.
	structure.bitRep = RepresentationProofStructure{
		[]LhsContribution{
			LhsContribution{bitname, big.NewInt(1)},
		},
		[]RhsContribution{
			RhsContribution{"h", strings.Join([]string{bitname, "hider"}, "_"), 1},
		},
	}
	// pre * post^-1 = h^eqhider, i.e. pre and post commit to the same value.
	structure.equalityRep = RepresentationProofStructure{
		[]LhsContribution{
			LhsContribution{prename, big.NewInt(1)},
			LhsContribution{postname, big.NewInt(-1)},
		},
		[]RhsContribution{
			RhsContribution{"h", strings.Join([]string{structure.myname, "eqhider"}, "_"), 1},
		},
	}
	return structure
}
// commitmentsFromSecrets generates the prover-side commitments for both inner
// representation proofs and appends them to list, returning the commit state
// needed later by buildProof.
func (s *expStepAStructure) commitmentsFromSecrets(g group, list []*big.Int, bases BaseLookup, secretdata SecretLookup) ([]*big.Int, expStepACommit) {
	var commit expStepACommit
	// Build commit structure: the bit hider, and the equality hider which is
	// the difference of the pre and post hiders mod the group order.
	commit.bit = newSecret(g, strings.Join([]string{s.bitname, "hider"}, "_"), secretdata.Secret(strings.Join([]string{s.bitname, "hider"}, "_")))
	commit.equalityHider = newSecret(g, strings.Join([]string{s.myname, "eqhider"}, "_"), new(big.Int).Mod(
		new(big.Int).Sub(
			secretdata.Secret(strings.Join([]string{s.prename, "hider"}, "_")),
			secretdata.Secret(strings.Join([]string{s.postname, "hider"}, "_"))),
		g.order))
	// inner secrets: merge our fresh secrets with the caller-provided lookup.
	secrets := NewSecretMerge(&commit.bit, &commit.equalityHider, secretdata)
	// Generate commitments
	list = s.bitRep.commitmentsFromSecrets(g, list, bases, &secrets)
	list = s.equalityRep.commitmentsFromSecrets(g, list, bases, &secrets)
	return list, commit
}
// buildProof finalizes the proof for the given challenge from the commit
// state produced by commitmentsFromSecrets.
func (s *expStepAStructure) buildProof(g group, challenge *big.Int, commit expStepACommit, secretdata SecretLookup) ExpStepAProof {
	return ExpStepAProof{
		Bit: commit.bit.buildProof(g, challenge),
		EqualityHider: commit.equalityHider.buildProof(g, challenge),
	}
}
// fakeProof produces a simulated proof with random responses, used by the
// OR-composition in expstep to hide which branch is real.
func (s *expStepAStructure) fakeProof(g group) ExpStepAProof {
	return ExpStepAProof{
		Bit: fakeProof(g),
		EqualityHider: fakeProof(g),
	}
}
// verifyProofStructure checks that both sub-proofs are structurally well-formed.
func (s *expStepAStructure) verifyProofStructure(proof ExpStepAProof) bool {
	if !proof.Bit.verifyStructure() {
		return false
	}
	return proof.EqualityHider.verifyStructure()
}
// commitmentsFromProof recomputes the verifier-side commitments for both
// inner representation proofs from the proof responses and challenge.
func (s *expStepAStructure) commitmentsFromProof(g group, list []*big.Int, challenge *big.Int, bases BaseLookup, proof ExpStepAProof) []*big.Int {
	// inner proof data: name the responses so the representation proofs can
	// look them up through the merged proof lookup.
	proof.Bit.setName(strings.Join([]string{s.bitname, "hider"}, "_"))
	proof.EqualityHider.setName(strings.Join([]string{s.myname, "eqhider"}, "_"))
	proofMerge := NewProofMerge(&proof.Bit, &proof.EqualityHider)
	// Generate commitments
	list = s.bitRep.commitmentsFromProof(g, list, challenge, bases, &proofMerge)
	list = s.equalityRep.commitmentsFromProof(g, list, challenge, bases, &proofMerge)
	return list
}
// isTrue reports whether the relation proven by this structure actually holds
// for the given secrets: the bit is zero and pre equals post.
func (s *expStepAStructure) isTrue(secretdata SecretLookup) bool {
	if secretdata.Secret(s.bitname).Cmp(big.NewInt(0)) != 0 {
		return false
	}
	return secretdata.Secret(s.prename).Cmp(secretdata.Secret(s.postname)) == 0
}
// numRangeProofs returns 0: this structure contains no range proofs.
func (s *expStepAStructure) numRangeProofs() int {
	return 0
}
// numCommitments returns the commitment count contributed by both inner
// representation proofs.
func (s *expStepAStructure) numCommitments() int {
	total := s.bitRep.numCommitments()
	total += s.equalityRep.numCommitments()
	return total
} | keyproof/expstepa.go | 0.612657 | 0.457985 | expstepa.go | starcoder |
package convnet
import (
"errors"
"math"
)
// MaxPool apply max pooling to the matrix with a max-pool filter of size filter_size (square)
// and a stride (movement of the filter); windows extending past the matrix edge are
// truncated to the matrix bounds ("0 padding" behavior for non-negative inputs).
// Returns an error for non-positive filterSize or stride.
// NOTE(review): the window clamping round-trips through float64 via math.Min;
// integer comparison would be cleaner — left as-is pending a check that `math`
// is used elsewhere in this file.
func (matrix *Matrix) MaxPool(filterSize int, stride int) (*Matrix, error) {
	if filterSize <= 0 {
		return nil, errors.New("filterSize is invalid")
	}
	if stride <= 0 {
		return nil, errors.New("stride is invalid")
	}
	result := &Matrix{
		content: [][]float64{},
	}
	resIndex := 0
	// iterate through the main matrix rows, jumping by stride
	for i := 0; i < len(matrix.content); i += stride {
		// create new result row
		result.content = append(result.content, []float64{})
		// iterate through the main matrix columns, jumping by stride
		for j := 0; j < len(matrix.content[0]); j += stride {
			// clamp the window end indices to the matrix bounds
			endRow := int(math.Min(float64(i+filterSize-1), float64(len(matrix.content)-1)))
			endCol := int(math.Min(float64(j+filterSize-1), float64(len(matrix.content[0])-1)))
			// rows covered by the current window
			subset := matrix.content[i : endRow+1]
			// seed the max with the window's first element
			max := subset[0][j]
			// scan each window row for its maximum
			for k := 0; k < len(subset); k++ {
				// get max of the subset-array
				_, maxRow, err := minAndMaxOfSlice(subset[k][j : endCol+1])
				panicIfErr(err)
				if max < maxRow {
					max = maxRow
				}
			}
			result.content[resIndex] = append(result.content[resIndex], max)
		}
		// update result index
		resIndex++
	}
	return result, nil
}
// ReLU (rectified linear unit) applies non-satutaring activation functions => f(x) = max(0, x)
// it removes negative value from an activation map setting to 0s
func (matrix *Matrix) ReLU() (*Matrix, error) {
result := &Matrix{
content: [][]float64{},
}
for i := 0; i < len(matrix.content); i++ {
result.content = append(result.content, []float64{})
for j := 0; j < len(matrix.content[0]); j++ {
val := matrix.content[i][j]
if matrix.content[i][j] < 0.0 {
val = 0.0
}
result.content[i] = append(result.content[i], val)
}
}
return result, nil
} | cnn_functions.go | 0.747155 | 0.490846 | cnn_functions.go | starcoder |
package league
import (
"fmt"
"net/http"
pandascore "github.com/vahill-corp/pandascore-go"
)
// League is the client for the league section of the pandascore API.
// All calls are delegated to the shared Backend.
// More info here : https://developers.pandascore.co/doc/#tag/Leagues
type League struct {
	Backend *pandascore.Backend
}
// List returns leagues as described here : https://developers.pandascore.co/doc/#tag/Leagues
func (l *League) List(params *pandascore.LeagueListParams) ([]pandascore.League, error) {
	p := params
	if p == nil {
		// Default to all video games when the caller supplies no parameters.
		p = &pandascore.LeagueListParams{VideoGame: pandascore.VideoGameAll}
	}
	leagues := make([]pandascore.League, 0)
	err := l.Backend.Call(http.MethodGet, fmt.Sprintf("%s/leagues", p.VideoGame.GetURLPath()), p, &leagues)
	return leagues, err
}
// GetByID returns the league identified by the given ID.
func (l *League) GetByID(ID int) (*pandascore.League, error) {
	league := &pandascore.League{}
	path := fmt.Sprintf("/leagues/%d", ID)
	err := l.Backend.Call(http.MethodGet, path, &pandascore.EmptyParams{}, league)
	return league, err
}
// ListSeriesForLeagueID returns the series belonging to the given league,
// as described here : https://developers.pandascore.co/doc/#tag/Series
func (l *League) ListSeriesForLeagueID(LeagueID int, params *pandascore.SerieListParams) ([]pandascore.Serie, error) {
	if params == nil {
		params = &pandascore.SerieListParams{
			// In case no parameters are given, we want to find results for all games.
			VideoGame: pandascore.VideoGameAll,
		}
	}
	path := fmt.Sprintf("/leagues/%d/series", LeagueID)
	seriesResponse := make([]pandascore.Serie, 0)
	err := l.Backend.Call(http.MethodGet, path, params, &seriesResponse)
	return seriesResponse, err
}
// ListTournamentsForLeagueID is returning tournaments as described here : https://developers.pandascore.co/doc/#tag/Tournaments
func (l *League) ListTournamentsForLeagueID(LeagueID int, params *pandascore.TournamentListParams) ([]pandascore.Tournament, error) {
	p := params
	if p == nil {
		// Default to all video games when the caller supplies no parameters.
		p = &pandascore.TournamentListParams{VideoGame: pandascore.VideoGameAll}
	}
	tournaments := make([]pandascore.Tournament, 0)
	err := l.Backend.Call(http.MethodGet, fmt.Sprintf("/leagues/%d/tournaments", LeagueID), p, &tournaments)
	return tournaments, err
}
// ListMatchesForLeagueID is returning matches as described here : https://developers.pandascore.co/doc/#tag/Matches
func (l *League) ListMatchesForLeagueID(LeagueID int, params *pandascore.MatchListParams) ([]pandascore.Match, error) {
	p := params
	if p == nil {
		// Default to all video games when the caller supplies no parameters.
		p = &pandascore.MatchListParams{VideoGame: pandascore.VideoGameAll}
	}
	matches := make([]pandascore.Match, 0)
	err := l.Backend.Call(http.MethodGet, fmt.Sprintf("/leagues/%d/matches", LeagueID), p, &matches)
	return matches, err
}
// ListPastMatchesForLeagueID is returning past matches as described here : https://developers.pandascore.co/doc/#tag/Matches
func (l *League) ListPastMatchesForLeagueID(LeagueID int, params *pandascore.MatchListParams) ([]pandascore.Match, error) {
	p := params
	if p == nil {
		// Default to all video games when the caller supplies no parameters.
		p = &pandascore.MatchListParams{VideoGame: pandascore.VideoGameAll}
	}
	matches := make([]pandascore.Match, 0)
	err := l.Backend.Call(http.MethodGet, fmt.Sprintf("/leagues/%d/matches/past", LeagueID), p, &matches)
	return matches, err
}
// ListRunningMatchesForLeagueID is returning running matches as described here : https://developers.pandascore.co/doc/#tag/Matches
func (l *League) ListRunningMatchesForLeagueID(LeagueID int, params *pandascore.MatchListParams) ([]pandascore.Match, error) {
	p := params
	if p == nil {
		// Default to all video games when the caller supplies no parameters.
		p = &pandascore.MatchListParams{VideoGame: pandascore.VideoGameAll}
	}
	matches := make([]pandascore.Match, 0)
	err := l.Backend.Call(http.MethodGet, fmt.Sprintf("/leagues/%d/matches/running", LeagueID), p, &matches)
	return matches, err
}
// ListUpcomingMatchesForLeagueID is returning matches as described here : https://developers.pandascore.co/doc/#tag/Matches
func (l *League) ListUpcomingMatchesForLeagueID(LeagueID int, params *pandascore.MatchListParams) ([]pandascore.Match, error) {
if params == nil {
params = &pandascore.MatchListParams{
// In case no parameters are given, we want to find results for all games.
VideoGame: pandascore.VideoGameAll,
}
}
path := fmt.Sprintf("/leagues/%d/matches/upcoming", LeagueID)
matchesResponse := make([]pandascore.Match, 0)
err := l.Backend.Call(http.MethodGet, path, params, &matchesResponse)
return matchesResponse, err
} | league/client.go | 0.625095 | 0.431584 | client.go | starcoder |
package main
// search returns the index of target in the rotated sorted array nums, or -1
// when target is absent, by delegating to helper3 over the full range
// [0, len(nums)).
func search(nums []int, target int) int {
	return helper3(nums, 0, len(nums), target)
}
// helper3 searches for target in the rotated sorted slice nums restricted to
// the half-open range [a, b) and returns its index, or -1 when absent.
// Invariant: 0 <= a <= b <= len(nums). Sub-ranges that do not contain the
// rotation pivot are sorted ascending and can be handed to the plain binary
// search helper2.
//
// Improvements over the previous version: else-after-return branches are
// flattened and the duplicated "left, then right, then -1" fall-through is
// collapsed (a failing second search already yields -1).
func helper3(nums []int, a, b int, target int) int {
	lenAB := b - a
	// Small ranges: a linear scan is simpler than further partitioning.
	if lenAB <= 4 {
		for i := a; i < b; i++ {
			if nums[i] == target {
				return i
			}
		}
		return -1
	}
	// Split the range into thirds: [a, p), [p, q), [q, b).
	p := a + lenAB/3
	q := p + lenAB/3
	// Check the boundary elements directly before recursing.
	switch target {
	case nums[a]:
		return a
	case nums[p]:
		return p
	case nums[q-1]:
		return q - 1
	case nums[b-1]:
		return b - 1
	}
	if nums[p] > nums[q-1] {
		// The rotation pivot lies inside [p, q), so [a, p) and [q, b) are sorted.
		if target < nums[p] && target > nums[q-1] {
			// target cannot be inside the pivot segment; binary-search the
			// two sorted side ranges.
			if idx := helper2(nums, a, p, target); idx != -1 {
				return idx
			}
			return helper2(nums, q, b, target)
		}
		return helper3(nums, p, q, target)
	}
	// The pivot (if any) lies in [a, p) or [q, b); [p, q) is sorted.
	if nums[p] < target && target < nums[q-1] {
		return helper3(nums, p, q, target)
	}
	if idx := helper3(nums, a, p, target); idx != -1 {
		return idx
	}
	return helper3(nums, q, b, target)
}
// helper2 performs a classic binary search for target in the sorted half-open
// range [a, b) of nums, returning its index or -1 when absent.
//
// Improvement over the previous version: the else-after-return branch is
// flattened into a guard-clause shape.
func helper2(nums []int, a, b int, target int) int {
	// Small ranges: scan directly.
	if b-a <= 2 {
		for i := a; i < b; i++ {
			if nums[i] == target {
				return i
			}
		}
		return -1
	}
	p := (a + b) / 2
	// Check the boundary and midpoint elements before recursing.
	switch target {
	case nums[a]:
		return a
	case nums[p]:
		return p
	case nums[b-1]:
		return b - 1
	}
	if target < nums[p] {
		return helper2(nums, a, p, target)
	}
	return helper2(nums, p+1, b, target)
}
// main exercises search on the sample cases; the expected index for each
// call is noted in the trailing comment.
func main() {
	// nums[3] = 7, pivot = 3
	println(search([]int{4, 5, 6, 7, 0, 1, 2}, 0)) // 4
	println(search([]int{4, 5, 6, 7, 0, 1, 2}, 3)) // -1
	println(search([]int{4, 5, 6, 7, 0, 1, 2}, 1)) // 5
	println(search([]int{6, 7, 8, 1, 2, 3, 4, 5}, 3)) // 5
}
package Solution
// lengthOfLongestSubstring_3 returns the length of the longest substring of s
// without repeating characters. It slides a window whose left edge jumps just
// past the previous occurrence of a duplicated rune.
func lengthOfLongestSubstring_3(s string) int {
	best, start := 0, 0
	lastSeen := map[rune]int{}
	for i, r := range s {
		// A repeat inside the current window forces the window start forward.
		if prev, ok := lastSeen[r]; ok && prev+1 > start {
			start = prev + 1
		}
		lastSeen[r] = i
		if width := i - start + 1; width > best {
			best = width
		}
	}
	return best
}
// lengthOfLongestSubstring_1 returns the length of the longest substring of s
// without repeating bytes. For every left edge i it extends the right edge rk
// as far as possible without creating a duplicate, then records the window
// length.
func lengthOfLongestSubstring_1(s string) int {
	// Set of bytes currently inside the window [i, rk].
	window := map[byte]bool{}
	n := len(s)
	best := 0
	rk := -1 // right edge; -1 means the window is still empty
	for i := 0; i < n; i++ {
		if i != 0 {
			// Slide the left edge right by one, dropping that byte.
			delete(window, s[i-1])
		}
		// Extend the right edge while the next byte is not yet present.
		for rk+1 < n && !window[s[rk+1]] {
			window[s[rk+1]] = true
			rk++
		}
		if rk-i+1 > best {
			best = rk - i + 1
		}
	}
	return best
}
// max returns the larger of x and y.
func max(x, y int) int {
	if x >= y {
		return x
	}
	return y
}
// lengthOfLongestSubstring_2 returns the length of the longest substring of s
// without repeating characters, storing for every rune the index just past
// its most recent occurrence so the window start can jump there directly.
func lengthOfLongestSubstring_2(s string) int {
	best, windowStart := 0, 0
	next := map[rune]int{}
	for i, r := range s {
		if next[r] > windowStart {
			windowStart = next[r]
		}
		next[r] = i + 1
		if width := i - windowStart + 1; width > best {
			best = width
		}
	}
	return best
}
// lengthOfLongestSubstring returns the length of the longest substring of s
// without repeating bytes in O(n) time and O(1) extra space: chPosition[b]
// holds 1 + the index of the latest occurrence of byte b (0 = unseen).
func lengthOfLongestSubstring(s string) int {
	var chPosition [256]int
	best, curLen, lastDup := 0, 0, 0
	for i := 0; i < len(s); i++ {
		prev := chPosition[s[i]]
		if prev == 0 {
			// First occurrence in the current run: the run simply grows.
			curLen++
			chPosition[s[i]] = i + 1
			continue
		}
		// Repeated byte: close off the current run, then restart it just
		// past the right-most repeat position seen so far.
		if curLen > best {
			best = curLen
		}
		chPosition[s[i]] = i + 1
		if prev > lastDup {
			lastDup = prev
		}
		curLen = i + 1 - lastDup
	}
	if curLen > best {
		return curLen
	}
	return best
}
// lengthOfLongestSubstring2 is the brute-force variant (too slow for the
// online judge): it inspects every substring s[i:j] and keeps the length of
// the longest one whose bytes are pairwise distinct.
func lengthOfLongestSubstring2(s string) int {
	best := 0
	for i := 0; i < len(s); i++ {
		for j := i + 1; j <= len(s); j++ {
			// Check s[i:j] for duplicate bytes.
			seen := map[byte]bool{}
			distinct := true
			for k := i; k < j; k++ {
				if seen[s[k]] {
					distinct = false
					break
				}
				seen[s[k]] = true
			}
			if distinct && j-i > best {
				best = j - i
			}
		}
	}
	return best
}
// allUnique reports whether the bytes of s in the half-open range
// [start, end) are pairwise distinct.
//
// Improvement over the previous version: membership is tracked in a fixed
// [256]bool table indexed directly by byte instead of a map keyed by
// one-character strings, removing the map allocation and the per-byte
// string conversions.
func allUnique(s string, start int, end int) bool {
	var seen [256]bool
	for i := start; i < end; i++ {
		if seen[s[i]] {
			return false
		}
		seen[s[i]] = true
	}
	return true
}
// Max returns the greater of x and y.
func Max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
package main
import (
"bufio"
"fmt"
"math"
"os"
"strconv"
"strings"
)
/**
Let us assume the following formula for displacement s as a function of time t, acceleration a, initial velocity vo, and initial displacement so.
s =½ a t2 + vot + so
Write a program which first prompts the user to enter values for acceleration, initial velocity, and initial displacement. Then the program should prompt the user to enter a value for time and the program should compute the displacement after the entered time.
You will need to define and use a function called GenDisplaceFn() which takes three float64 arguments, acceleration a, initial velocity vo, and initial displacement so. GenDisplaceFn() should return a function which computes displacement as a function of time, assuming the given values acceleration, initial velocity, and initial displacement. The function returned by GenDisplaceFn() should take one float64 argument t, representing time, and return one float64 argument which is the displacement travelled after time t.
For example, let’s say that I want to assume the following values for acceleration, initial velocity, and initial displacement: a = 10, vo = 2, so = 1. I can use the following statement to call GenDisplaceFn() to generate a function fn which will compute displacement as a function of time.
fn := GenDisplaceFn(10, 2, 1)
Then I can use the following statement to print the displacement after 3 seconds.
fmt.Println(fn(3))
And I can use the following statement to print the displacement after 5 seconds.
fmt.Println(fn(5))
*/
// GenDisplaceFn specializes the displacement formula
// s(t) = 0.5*a*t^2 + v0*t + s0 to the given acceleration, initial velocity,
// and initial displacement, returning a closure that maps a time t to the
// displacement travelled after t.
func GenDisplaceFn(acceleration, initialVelocity, initialDisplacement float64) func(float64) float64 {
	return func(t float64) float64 {
		return 0.5*acceleration*math.Pow(t, 2) + initialVelocity*t + initialDisplacement
	}
}
// getFloatInput reads one line from standard input, strips the trailing
// newline, and parses it as a float64.
// NOTE(review): both the read and parse errors are silently dropped, so
// invalid input is indistinguishable from an entered 0 — acceptable for
// this exercise, but worth confirming for real use.
func getFloatInput() float64 {
	reader := bufio.NewReader(os.Stdin)
	input, _ := reader.ReadString('\n')
	input = strings.TrimSuffix(input, "\n")
	floatValue, _ := strconv.ParseFloat(input, 64)
	return floatValue
}
// promptAndGetValue prints a prompt naming the requested quantity, then
// reads and returns one float64 from standard input.
func promptAndGetValue(name string) float64 {
	fmt.Println("Enter value for " + name + ": ")
	return getFloatInput()
}
// main collects acceleration, initial velocity, initial displacement, and a
// time value from the user, then prints the displacement at that time using
// a function generated by GenDisplaceFn.
func main() {
	acceleration := promptAndGetValue("acceleration")
	initialVelocity := promptAndGetValue("initialVelocity")
	initialDisplacement := promptAndGetValue("initialDisplacement")
	time := promptAndGetValue("time")
	function := GenDisplaceFn(acceleration, initialVelocity, initialDisplacement)
	fmt.Println(function(time))
}
package xy
import (
"sort"
"github.com/twpayne/go-geom"
"github.com/twpayne/go-geom/bigxy"
"github.com/twpayne/go-geom/sorting"
"github.com/twpayne/go-geom/xy/internal"
"github.com/twpayne/go-geom/xy/orientation"
)
// convexHullCalculator computes a 2D convex hull over flat coordinate data:
// inputPts packs the points as consecutive float64 values, stride values per
// point, interpreted according to layout.
type convexHullCalculator struct {
	layout geom.Layout
	stride int
	inputPts []float64
}
// lineOrPolygon converts the hull ring coordinates into a geometry. After
// cleaning repeated/collinear points, a ring of exactly 3 points (a
// degenerate hull) becomes a LineString with the closing point dropped;
// anything larger becomes a single-ring Polygon.
func (calc *convexHullCalculator) lineOrPolygon(coordinates []float64) geom.T {
	cleanCoords := calc.cleanRing(coordinates)
	if len(cleanCoords) == 3*calc.stride {
		return geom.NewLineStringFlat(calc.layout, cleanCoords[0:len(cleanCoords)-calc.stride])
	}
	return geom.NewPolygonFlat(calc.layout, cleanCoords, []int{len(cleanCoords)})
}
// cleanRing removes consecutive duplicate points and points that lie on the
// segment between their distinct neighbours from the flat ring coordinates,
// always keeping the final (closing) coordinate so the ring stays closed.
func (calc *convexHullCalculator) cleanRing(original []float64) []float64 {
	cleanedRing := []float64{}
	var previousDistinctCoordinate []float64
	for i := 0; i < len(original)-calc.stride; i += calc.stride {
		// Skip a point identical to its successor.
		if internal.Equal(original, i, original, i+calc.stride) {
			continue
		}
		currentCoordinate := original[i : i+calc.stride]
		nextCoordinate := original[i+calc.stride : i+calc.stride+calc.stride]
		// Drop a point that sits on the segment between its neighbours.
		if previousDistinctCoordinate != nil && calc.isBetween(previousDistinctCoordinate, currentCoordinate, nextCoordinate) {
			continue
		}
		cleanedRing = append(cleanedRing, currentCoordinate...)
		previousDistinctCoordinate = currentCoordinate
	}
	// Keep the last coordinate unconditionally (ring closure).
	return append(cleanedRing, original[len(original)-calc.stride:]...)
}
// isBetween reports whether c2 lies on the closed segment from c1 to c3:
// the three points must be collinear and c2 must fall between the endpoints
// on an axis along which the endpoints actually differ.
func (calc *convexHullCalculator) isBetween(c1, c2, c3 []float64) bool {
	if bigxy.OrientationIndex(c1, c2, c3) != orientation.Collinear {
		return false
	}
	// Test along the x axis when the endpoints differ in x.
	if c1[0] != c3[0] {
		if (c1[0] <= c2[0] && c2[0] <= c3[0]) || (c3[0] <= c2[0] && c2[0] <= c1[0]) {
			return true
		}
	}
	// Test along the y axis when the endpoints differ in y.
	if c1[1] != c3[1] {
		if (c1[1] <= c2[1] && c2[1] <= c3[1]) || (c3[1] <= c2[1] && c2[1] <= c1[1]) {
			return true
		}
	}
	return false
}
// grahamScan runs the Graham scan over coordData (assumed sorted radially
// around coordData[0] by preSort) and returns the hull coordinates as a
// closed ring.
func (calc *convexHullCalculator) grahamScan(coordData []float64) []float64 {
	coordStack := internal.NewCoordStack(calc.layout)
	// Seed the stack with the first three points.
	coordStack.Push(coordData, 0)
	coordStack.Push(coordData, calc.stride)
	coordStack.Push(coordData, calc.stride*2)
	for i := 3 * calc.stride; i < len(coordData); i += calc.stride {
		p, remaining := coordStack.Pop()
		// Pop while the turn to the new point is counter-clockwise;
		// check for empty stack to guard against robustness problems.
		for remaining > 0 && bigxy.OrientationIndex(geom.Coord(coordStack.Peek()), geom.Coord(p), geom.Coord(coordData[i:i+calc.stride])) > 0 {
			p, _ = coordStack.Pop()
		}
		coordStack.Push(p, 0)
		coordStack.Push(coordData, i)
	}
	// Close the ring by repeating the first point.
	coordStack.Push(coordData, 0)
	return coordStack.Data
}
// preSort moves the point with the minimum y coordinate (ties broken by
// minimum x) to the front of pts, then sorts the remaining points radially
// around that focal point, as required by grahamScan.
func (calc *convexHullCalculator) preSort(pts []float64) {
	// find the lowest point in the set. If two or more points have
	// the same minimum y coordinate choose the one with the minimum x.
	// This focal point is put in array location pts[0].
	for i := calc.stride; i < len(pts); i += calc.stride {
		if pts[i+1] < pts[1] || (pts[i+1] == pts[1] && pts[i] < pts[0]) {
			for k := 0; k < calc.stride; k++ {
				pts[k], pts[i+k] = pts[i+k], pts[k]
			}
		}
	}
	// sort the points radially around the focal point.
	sort.Sort(NewRadialSorting(calc.layout, pts, geom.Coord{pts[0], pts[1]}))
}
// padArray3 returns pts extended to exactly 3 points (3*stride values);
// any slots beyond len(pts) are filled with pts[0], the first stored value.
func (calc *convexHullCalculator) padArray3(pts []float64) []float64 {
	pad := make([]float64, 3*calc.stride)
	copied := copy(pad, pts)
	for i := copied; i < len(pad); i++ {
		pad[i] = pts[0]
	}
	return pad
}
// computeOctRing builds a closed ring from the octant extreme points of
// inputPts, compacting consecutive duplicate points in place. It returns nil
// when the distinct extremes collapse to too few points (all input on a line).
func (calc *convexHullCalculator) computeOctRing(inputPts []float64) []float64 {
	stride := calc.stride
	octPts := calc.computeOctPts(inputPts)
	copyTo := 0
	// Compact runs of equal consecutive points in place.
	for i := stride; i < len(octPts); i += stride {
		if !internal.Equal(octPts, i-stride, octPts, i) {
			copyTo += stride
		}
		for j := 0; j < stride; j++ {
			octPts[copyTo+j] = octPts[i+j]
		}
	}
	// points must all lie in a line
	if copyTo < 6 {
		return nil
	}
	copyTo += stride
	octPts = octPts[0 : copyTo+stride]
	// close ring
	for j := 0; j < stride; j++ {
		octPts[copyTo+j] = octPts[j]
	}
	return octPts
}
// computeOctPts returns 8 candidate extreme points of inputPts, one per
// octant direction, in order: min x, min x-y, max y, max x+y, max x,
// max x-y, min y, min x+y. Every slot starts as the first input point and
// is replaced whenever a more extreme point is seen.
func (calc *convexHullCalculator) computeOctPts(inputPts []float64) []float64 {
	stride := calc.stride
	pts := make([]float64, 8*stride)
	// Initialize every octant slot with the first input point.
	for j := 0; j < len(pts); j += stride {
		for k := 0; k < stride; k++ {
			pts[j+k] = inputPts[k]
		}
	}
	for i := stride; i < len(inputPts); i += stride {
		// Slot 0: minimum x.
		if inputPts[i] < pts[0] {
			for k := 0; k < stride; k++ {
				pts[k] = inputPts[i+k]
			}
		}
		// Slot 1: minimum x-y.
		if inputPts[i]-inputPts[i+1] < pts[stride]-pts[stride+1] {
			for k := 0; k < stride; k++ {
				pts[stride+k] = inputPts[i+k]
			}
		}
		// Slot 2: maximum y.
		if inputPts[i+1] > pts[2*stride+1] {
			for k := 0; k < stride; k++ {
				pts[2*stride+k] = inputPts[i+k]
			}
		}
		// Slot 3: maximum x+y.
		if inputPts[i]+inputPts[i+1] > pts[3*stride]+pts[3*stride+1] {
			for k := 0; k < stride; k++ {
				pts[3*stride+k] = inputPts[i+k]
			}
		}
		// Slot 4: maximum x.
		if inputPts[i] > pts[4*stride] {
			for k := 0; k < stride; k++ {
				pts[4*stride+k] = inputPts[i+k]
			}
		}
		// Slot 5: maximum x-y.
		if inputPts[i]-inputPts[i+1] > pts[5*stride]-pts[5*stride+1] {
			for k := 0; k < stride; k++ {
				pts[5*stride+k] = inputPts[i+k]
			}
		}
		// Slot 6: minimum y.
		if inputPts[i+1] < pts[6*stride+1] {
			for k := 0; k < stride; k++ {
				pts[6*stride+k] = inputPts[i+k]
			}
		}
		// Slot 7: minimum x+y.
		if inputPts[i]+inputPts[i+1] < pts[7*stride]+pts[7*stride+1] {
			for k := 0; k < stride; k++ {
				pts[7*stride+k] = inputPts[i+k]
			}
		}
	}
	return pts
}
// comparator orders coordinates for the convex hull computation using the
// package's 2D comparison helpers.
type comparator struct{}

// IsEquals reports whether x and y are equal according to internal.Equal
// at their leading components.
func (c comparator) IsEquals(x, y geom.Coord) bool {
	return internal.Equal(x, 0, y, 0)
}

// IsLess reports whether x sorts before y in 2D.
func (c comparator) IsLess(x, y geom.Coord) bool {
	return sorting.IsLess2D(x, y)
}
package binaryheap
import (
"fmt"
"github.com/kevinpollet/go-datastructures/errors"
)
// node is a single heap entry pairing a stored value with its priority;
// smaller priority numbers are served first by the heap.
type node struct {
	value interface{}
	priority int
}
// BinaryHeap implements the PriorityQueue ADT as a slice-backed min-heap:
// the entry with the smallest priority value is always at tree[0].
type BinaryHeap struct {
	tree []*node
}
// Clear removes all values from the heap by dropping the backing slice.
// Complexity: O(1)
func (heap *BinaryHeap) Clear() {
	heap.tree = nil
}
// IsEmpty returns true if the heap holds no values, false otherwise.
// Complexity: O(1)
func (heap *BinaryHeap) IsEmpty() bool {
	return heap.Size() == 0
}
// Offer adds the given value to the heap with the given priority: the new
// node is appended as the last leaf and sifted up, swapping with its parent
// while the parent's priority is larger.
// Complexity: O(log(n))
func (heap *BinaryHeap) Offer(value interface{}, priority int) {
	heap.tree = append(heap.tree, &node{value: value, priority: priority})
	childIndex := heap.Size() - 1
	parentIndex := (childIndex - 1) / 2
	for parentIndex >= 0 && heap.tree[parentIndex].priority > heap.tree[childIndex].priority {
		// Idiomatic Go tuple swap instead of a temp variable.
		heap.tree[childIndex], heap.tree[parentIndex] = heap.tree[parentIndex], heap.tree[childIndex]
		childIndex = parentIndex
		parentIndex = (childIndex - 1) / 2
	}
}
// Peek returns the value with the highest priority (smallest priority
// number, at tree[0]) or an error if the heap is empty.
// Complexity: O(1)
func (heap *BinaryHeap) Peek() (interface{}, error) {
	if heap.IsEmpty() {
		return nil, errors.NewNoSuchValueError("cannot peek a value from an empty heap")
	}
	return heap.tree[0].value, nil
}
// Poll returns and removes the value with the highest priority (smallest
// priority number) or an error if the heap is empty. The last leaf replaces
// the root, which is then sifted down by swapping with its smaller child
// until the heap property is restored.
// Complexity: O(log(n))
func (heap *BinaryHeap) Poll() (interface{}, error) {
	if heap.IsEmpty() {
		return nil, errors.NewNoSuchValueError("cannot poll a value from an empty heap")
	}
	rootNode := heap.tree[0]
	// Move the last leaf to the root and shrink the slice.
	heap.tree[0] = heap.tree[heap.Size()-1]
	heap.tree = heap.tree[0 : heap.Size()-1]
	parentIndex := 0
	leftChildIndex := 2*parentIndex + 1
	rightChildIndex := 2*parentIndex + 2
	for leftChildIndex <= heap.Size()-1 {
		// Pick the child with the smaller priority.
		minChildIndex := leftChildIndex
		if rightChildIndex <= heap.Size()-1 && heap.tree[rightChildIndex].priority < heap.tree[leftChildIndex].priority {
			minChildIndex = rightChildIndex
		}
		// Stop once the parent is strictly smaller than its smallest child.
		if heap.tree[parentIndex].priority < heap.tree[minChildIndex].priority {
			break
		}
		// Idiomatic Go tuple swap instead of a temp variable.
		heap.tree[parentIndex], heap.tree[minChildIndex] = heap.tree[minChildIndex], heap.tree[parentIndex]
		parentIndex = minChildIndex
		leftChildIndex = 2*parentIndex + 1
		rightChildIndex = 2*parentIndex + 2
	}
	return rootNode.value, nil
}
// Size returns the number of values currently stored in the heap.
// Complexity: O(1)
func (heap *BinaryHeap) Size() int {
	return len(heap.tree)
}
// String returns a string representation of the tree.
// Complexity: O(n)
func (heap BinaryHeap) String() string {
str := "["
for index, node := range heap.tree {
str += fmt.Sprintf("(value: %v, priority: %d)", node.value, node.priority)
if index != heap.Size()-1 {
str += ","
}
}
return str + "]"
} | priorityqueue/binaryheap/binary_heap.go | 0.8398 | 0.442877 | binary_heap.go | starcoder |
package core
import (
"reflect"
)
type NormalizedValue struct {
Value interface{}
OriginalKind reflect.Kind
IsNil bool
}
// TODO: Normalize slices to arrays?
func normalizeInternal(value interface{}, isNil bool) (*NormalizedValue, error) {
reflectedValue := reflect.ValueOf(value)
kind := reflectedValue.Kind()
switch reflectedValue.Kind() {
// Dereference the pointer and normalize that value
case reflect.Ptr:
// If it's a nil pointer then flag the value as nil, obtain the inner element and create/return a zero value for the type
if reflectedValue.IsNil() {
isNil = true
innerElement := reflect.TypeOf(value).Elem()
kind = innerElement.Kind()
value = reflect.Zero(innerElement).Interface()
} else {
value = reflectedValue.Elem().Interface()
}
return normalizeInternal(value, isNil)
// Convert any number to its 64-bit counterpart. Also normalize according to kind. I.e. any string, int, float or bool kind will be normalized to its base type.
// This means that a type, lets say `type Id int64` would instead of having the type `main.Id` be normalized to `int64`.
// This is done in order to simplify work for the validators so that they don't have to validate by kind or have to care for custom types.
// It's also easier for them to validate if the expected type is always the same, i.e. int64 instead of int8, uint16...
case reflect.String:
value = reflectedValue.String()
case reflect.Bool:
value = reflectedValue.Bool()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
value = int64(reflectedValue.Uint())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
value = reflectedValue.Int()
case reflect.Float32, reflect.Float64:
value = reflectedValue.Float()
case reflect.Invalid:
if value == nil {
isNil = true
}
}
normalized := &NormalizedValue{
Value: value,
OriginalKind: kind,
IsNil: isNil,
}
return normalized, nil
}
func Normalize(value interface{}) (*NormalizedValue, error) {
return normalizeInternal(value, false)
} | core/normalization.go | 0.512937 | 0.447581 | normalization.go | starcoder |
package frm
// DatabaseType is the database type definition.
// DatabaseType is the engine of current table, see enum legacy_db_type
type DatabaseType int
const (
// DatabaseTypeUnknown is the unknown database type
DatabaseTypeUnknown DatabaseType = 0
// DatabaseTypeDiabIsam is the diab_isam database type
DatabaseTypeDiabIsam DatabaseType = 1
// DatabaseTypeHash is the hash database type
DatabaseTypeHash DatabaseType = 2
// DatabaseTypeMisam is the misam database type
DatabaseTypeMisam DatabaseType = 3
// DatabaseTypePisam is the pisam database type
DatabaseTypePisam DatabaseType = 4
// DatabaseTypeRmsIsam is the rms_isam database type
DatabaseTypeRmsIsam DatabaseType = 5
// DatabaseTypeHeap is the heap database type
DatabaseTypeHeap DatabaseType = 6
// DatabaseTypeIsam is the isam database type
DatabaseTypeIsam DatabaseType = 7
// DatabaseTypeMrgIsam is the mrg_isam database type
DatabaseTypeMrgIsam DatabaseType = 8
// DatabaseTypeMyIsam is the MyIsam database type
DatabaseTypeMyIsam DatabaseType = 9
// DatabaseTypeMrgMyIsam is the mrg_myisam database type
DatabaseTypeMrgMyIsam DatabaseType = 10
// DatabaseTypeBerkeleyDB is the berkeley_db database type
DatabaseTypeBerkeleyDB DatabaseType = 11
// DatabaseTypeInnoDB is the innodb database type
DatabaseTypeInnoDB DatabaseType = 12
// DatabaseTypeGemini is the gemini database type
DatabaseTypeGemini DatabaseType = 13
// DatabaseTypeNdbCluster is the ndb_cluster database type
DatabaseTypeNdbCluster DatabaseType = 14
// DatabaseTypeExampleDB is the example_db database type
DatabaseTypeExampleDB DatabaseType = 15
// DatabaseTypeArchiveDB is the archive_db database type
DatabaseTypeArchiveDB DatabaseType = 16
// DatabaseTypeCSVDB is the csv_db database type
DatabaseTypeCSVDB DatabaseType = 17
// DatabaseTypeFederatedDB is the federeated_db database type
DatabaseTypeFederatedDB DatabaseType = 18
// DatabaseTypeBlackholeDB is the blackhold_db database type
DatabaseTypeBlackholeDB DatabaseType = 19
// DatabaseTypePartitionDB is the partition_db database type
DatabaseTypePartitionDB DatabaseType = 20
// DatabaseTypeBinlog is the binlog database type
DatabaseTypeBinlog DatabaseType = 21
// DatabaseTypeSolid is the solid database type
DatabaseTypeSolid DatabaseType = 22
// DatabaseTypePbxt is the pbxt database type
DatabaseTypePbxt DatabaseType = 23
// DatabaseTypeTableFunction is the table_function database type
DatabaseTypeTableFunction DatabaseType = 24
// DatabaseTypeMemcache is the memcache database type
DatabaseTypeMemcache DatabaseType = 25
// DatabaseTypeFalcon is the falcon database type
DatabaseTypeFalcon DatabaseType = 26
// DatabaseTypeMaria is the maria database type
DatabaseTypeMaria DatabaseType = 27
// DatabaseTypePerformanceSchema is the performance_schema database type
DatabaseTypePerformanceSchema DatabaseType = 28
// DatabaseTypeFirstDynamic is the first_dynamic database type
DatabaseTypeFirstDynamic DatabaseType = 42
// DatabaseTypeDefault is the default database type
DatabaseTypeDefault DatabaseType = 127
)
// RowType is the row format definition.
// See enum row_type
type RowType int
const (
// RowTypeNotUsed is the not used row format.
RowTypeNotUsed RowType = -1
// RowTypeDefault is the default row format.
RowTypeDefault RowType = 0
// RowTypeFixed is the fixed row format.
RowTypeFixed RowType = 1
// RowTypeDynamic is the dynamic row format.
RowTypeDynamic RowType = 2
// RowTypeCompressed is the compressed row format.
RowTypeCompressed RowType = 3
// RowTypeRedundant is the redundant row format.
RowTypeRedundant RowType = 4
// RowTypeCompact is the compact row format.
RowTypeCompact RowType = 5
// RowTypePage is UNUSED, reserved for future versions.
RowTypePage RowType = 6
)
// DataType is the column DATA_TYPE value.
// See enum enum_field_types
// See https://dev.mysql.com/doc/refman/5.7/en/data-types.html
type DataType int
const (
// DataTypeDecimal types: DECIMAL
// NOTE: used in OLDER MySQL version.
DataTypeDecimal DataType = 0
// DataTypeTiny types: TINYINT, BOOL, BOOLEAN
// Length: 1 bytes.
DataTypeTiny DataType = 1
// DataTypeShort types: SMALLINT
// Length: 2 bytes.
DataTypeShort DataType = 2
// DataTypeLong types: INT, INTEGER
// Length: 4 bytes.
DataTypeLong DataType = 3
// DataTypeFloat types: FLOAT, FLOAT(p) when p < 25
// Length: 4 bytes.
DataTypeFloat DataType = 4
// DataTypeDouble types: DOUBLE, DOUBLE PRECISION, REAL, FLOAT(p) when p >= 25
// Length 8 bytes.
DataTypeDouble DataType = 5
// DataTypeNull is the null type
DataTypeNull DataType = 6
// DataTypeTimestamp is the TIMESTAMP type, without ms support.
DataTypeTimestamp = 7
// DataTypeLongLong types: BIGINT
// Length: 8 bytes.
DataTypeLongLong = 8
// DataTypeInt24 is the MEDIUMINT type, 3 bytes.
// TYPES: MEDIUMINT
DataTypeInt24 = 9
// DataTypeDate is the DATE type, used before MySQL 5.0
DataTypeDate = 10
// DataTypeTime is the TIME type, without ms support.
DataTypeTime = 11
// DataTypeDateTime is the DATETIME type
DataTypeDateTime = 12
// DataTypeYear types: YEAR
DataTypeYear = 13
// DataTypeNewDate types: DATE
// NOTE: used after MySQL 5.0
DataTypeNewDate = 14
// DataTypeVarchar types: VARCHAR, VARBINARY
DataTypeVarchar = 15
// DataTypeBit is the BIT type
// TYPES: BIT
DataTypeBit = 16
// DataTypeTimestamp2 types: TIMESTAMP
// NOTE: with ms support.
DataTypeTimestamp2 = 17
// DataTypeDatetime2 types: DATETIME
// NOTE: with ms support
DataTypeDatetime2 = 18
// DataTypeTime2 types: TIME
// NOTE: with ms support.
DataTypeTime2 = 19
// DataTypeJSON types: JSON
DataTypeJSON = 245
// DataTypeNewDecimal types: DECIMAL, DEC, FIXED, NUMERIC
DataTypeNewDecimal = 246
// DataTypeEnum types: ENUM
DataTypeEnum = 247
// DataTypeSet types: SET
DataTypeSet = 248
// DataTypeTinyBlob types: TINYBLOB, TINYTEXT
// Length: < (2^8) bytes
DataTypeTinyBlob = 249
// DataTypeMediumBlob types: MEDIUMBLOB, MEDIUMTEXT
// Length: < (2^24) bytes
DataTypeMediumBlob = 250
// DataTypeLongBlob types: LONGBLOB, LONGTEXT
// Length: < (2^32) bytes
DataTypeLongBlob = 251
// DataTypeBlob types: BLOB, TEXT
// Length: < (2^16) bytes
DataTypeBlob = 252
// DataTypeVarString is the VARCHAR type
DataTypeVarString = 253
// DataTypeString types: CHAR, BINARY
DataTypeString = 254
// DataTypeGeometry types: GEOMETRY, POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, GEOMETRYCOLLECTION
DataTypeGeometry = 255
)
// KeyType is the column TYPE value.
// See enum keytype
type KeyType int
const (
// KeyTypePrimary is the PRIMARY KEY
KeyTypePrimary KeyType = 0
// KeyTypeUnique is the UNIQUE INDEX
KeyTypeUnique KeyType = 1
// KeyTypeMultiple is the unknown index
KeyTypeMultiple KeyType = 2
// KeyTypeFulltext is the FULLTEXT
KeyTypeFulltext KeyType = 3
// KeyTypeSpatial is the SPATIAL
KeyTypeSpatial KeyType = 4
// KeyTypeForeign is the FOREIGN
KeyTypeForeign KeyType = 5
)
const (
// FileTypeMagicNumber is the magic number of frm file format.
FileTypeMagicNumber = 0x01fe
) | pkg/reader/frm/constants.go | 0.52342 | 0.49585 | constants.go | starcoder |
package types
import (
"github.com/attic-labs/noms/go/d"
"github.com/attic-labs/noms/go/hash"
)
type List struct {
seq indexedSequence
h *hash.Hash
}
func newList(seq indexedSequence) List {
return List{seq, &hash.Hash{}}
}
// NewList creates a new List where the type is computed from the elements in the list, populated with values, chunking if and when needed.
func NewList(values ...Value) List {
seq := newEmptySequenceChunker(nil, makeListLeafChunkFn(nil), newIndexedMetaSequenceChunkFn(ListKind, nil), hashValueBytes)
for _, v := range values {
seq.Append(v)
}
return newList(seq.Done(nil).(indexedSequence))
}
// NewStreamingList creates a new List with type t, populated with values, chunking if and when needed. As chunks are created, they're written to vrw -- including the root chunk of the list. Once the caller has closed values, she can read the completed List from the returned channel.
func NewStreamingList(vrw ValueReadWriter, values <-chan Value) <-chan List {
out := make(chan List)
go func() {
seq := newEmptySequenceChunker(vrw, makeListLeafChunkFn(vrw), newIndexedMetaSequenceChunkFn(ListKind, vrw), hashValueBytes)
for v := range values {
seq.Append(v)
}
out <- newList(seq.Done(vrw).(indexedSequence))
close(out)
}()
return out
}
// Collection interface
func (l List) Len() uint64 {
return l.seq.numLeaves()
}
func (l List) Empty() bool {
return l.Len() == 0
}
func (l List) sequence() sequence {
return l.seq
}
func (l List) hashPointer() *hash.Hash {
return l.h
}
// Value interface
func (l List) Equals(other Value) bool {
return l.Hash() == other.Hash()
}
func (l List) Less(other Value) bool {
return valueLess(l, other)
}
func (l List) Hash() hash.Hash {
if l.h.IsEmpty() {
*l.h = getHash(l)
}
return *l.h
}
func (l List) ChildValues() (values []Value) {
l.IterAll(func(v Value, idx uint64) {
values = append(values, v)
})
return
}
func (l List) Chunks() []Ref {
return l.seq.Chunks()
}
func (l List) Type() *Type {
return l.seq.Type()
}
func (l List) Get(idx uint64) Value {
d.Chk.True(idx < l.Len())
cur := newCursorAtIndex(l.seq, idx)
return cur.current().(Value)
}
// MapFunc transforms one list element (with its index) into an arbitrary
// result.
type MapFunc func(v Value, index uint64) interface{}

// Map applies mf to every element in order and returns the collected
// results. The result slice always has exactly Len() entries.
func (l List) Map(mf MapFunc) []interface{} {
	idx := uint64(0)
	cur := newCursorAtIndex(l.seq, idx)
	results := make([]interface{}, 0, l.Len())
	cur.iter(func(v interface{}) bool {
		// idx is already uint64; the former uint64(idx) conversion was a no-op.
		results = append(results, mf(v.(Value), idx))
		idx++
		return false // never stop early; visit every element
	})
	return results
}
// elemType returns the element type of this list's compound type.
func (l List) elemType() *Type {
	return l.seq.Type().Desc.(CompoundDesc).ElemTypes[0]
}

// Set returns a new list with the element at idx replaced by v.
// It panics (via d.Chk) when idx is out of bounds.
func (l List) Set(idx uint64, v Value) List {
	d.Chk.True(idx < l.Len())
	return l.Splice(idx, 1, v)
}

// Append returns a new list with vs appended at the end.
func (l List) Append(vs ...Value) List {
	return l.Splice(l.Len(), 0, vs...)
}
// Splice returns a new list with deleteCount elements removed at idx and
// vs inserted in their place; the receiver is left untouched. It panics
// (via d.Chk) when the affected range exceeds the list bounds.
func (l List) Splice(idx uint64, deleteCount uint64, vs ...Value) List {
	if deleteCount == 0 && len(vs) == 0 {
		// No-op splice: reuse the receiver.
		return l
	}
	d.Chk.True(idx <= l.Len())
	d.Chk.True(idx+deleteCount <= l.Len())
	// Re-chunk starting at idx: skip the run being deleted, then append
	// the replacement values.
	cur := newCursorAtIndex(l.seq, idx)
	ch := newSequenceChunker(cur, nil, makeListLeafChunkFn(l.seq.valueReader()), newIndexedMetaSequenceChunkFn(ListKind, l.seq.valueReader()), hashValueBytes)
	for deleteCount > 0 {
		ch.Skip()
		deleteCount--
	}
	for _, v := range vs {
		ch.Append(v)
	}
	return newList(ch.Done(nil).(indexedSequence))
}
// Insert returns a new list with vs inserted at idx.
func (l List) Insert(idx uint64, vs ...Value) List {
	return l.Splice(idx, 0, vs...)
}

// Remove returns a new list with the half-open range [start, end) removed.
func (l List) Remove(start uint64, end uint64) List {
	d.Chk.True(start <= end)
	return l.Splice(start, end-start)
}

// RemoveAt returns a new list with the single element at idx removed.
func (l List) RemoveAt(idx uint64) List {
	return l.Splice(idx, 1)
}
// listIterFunc receives one element and its index; returning true stops
// the iteration.
type listIterFunc func(v Value, index uint64) (stop bool)

// Iter calls f for each element in order, stopping early the first time
// f returns true.
func (l List) Iter(f listIterFunc) {
	idx := uint64(0)
	cur := newCursorAtIndex(l.seq, idx)
	cur.iter(func(v interface{}) bool {
		// idx is already uint64; the former uint64(idx) conversion was a no-op.
		if f(v.(Value), idx) {
			return true
		}
		idx++
		return false
	})
}
// listIterAllFunc receives one element and its index; it cannot stop the
// iteration early.
type listIterAllFunc func(v Value, index uint64)

// IterAll calls f for every element, in order.
func (l List) IterAll(f listIterAllFunc) {
	idx := uint64(0)
	cur := newCursorAtIndex(l.seq, idx)
	cur.iter(func(v interface{}) bool {
		// idx is already uint64; the former uint64(idx) conversion was a no-op.
		f(v.(Value), idx)
		idx++
		return false
	})
}
// Diff streams the splices that transform last into l over changes,
// using the default splice-matrix size limit. closeChan lets the caller
// abort the diff early.
func (l List) Diff(last List, changes chan<- Splice, closeChan <-chan struct{}) {
	l.DiffWithLimit(last, changes, closeChan, DEFAULT_MAX_SPLICE_MATRIX_SIZE)
}

// DiffWithLimit is Diff with an explicit bound on the splice matrix used
// by the underlying sequence diff.
func (l List) DiffWithLimit(last List, changes chan<- Splice, closeChan <-chan struct{}, maxSpliceMatrixSize uint64) {
	if l.Equals(last) {
		// Identical lists yield no splices.
		return
	}
	lLen, lastLen := l.Len(), last.Len()
	if lLen == 0 {
		changes <- Splice{0, lastLen, 0, 0} // everything removed
		return
	}
	if lastLen == 0 {
		changes <- Splice{0, 0, lLen, 0} // everything added
		return
	}
	// Both non-empty: diff the two sequences from their leaf cursors.
	lastCur := newCursorAtIndex(last.seq, 0)
	lCur := newCursorAtIndex(l.seq, 0)
	indexedSequenceDiff(last.seq, lastCur.depth(), 0, l.seq, lCur.depth(), 0, changes, closeChan, maxSpliceMatrixSize)
}
// If |sink| is not nil, chunks will be eagerly written as they're created. Otherwise they are
// written when the root is written.
func makeListLeafChunkFn(vr ValueReader) makeChunkFn {
return func(items []sequenceItem) (Collection, orderedKey, uint64) {
values := make([]Value, len(items))
for i, v := range items {
values[i] = v.(Value)
}
list := newList(newListLeafSequence(vr, values...))
return list, orderedKeyFromInt(len(values)), uint64(len(values))
}
} | go/types/list.go | 0.678753 | 0.492859 | list.go | starcoder |
package gaussian
// https://github.com/freethenation/gaussian
// free-gaussian
// MIT License
import (
"math"
)
// Gaussian models a normal distribution with mean (μ) and variance (σ²);
// the standard deviation (σ) is precomputed at construction time.
//
// Probability queries: Pdf, Cdf, Ppf.
// Combinators: Add/Sub (sum/difference of independent variables),
// Scale (scale by a constant), Mul/Div (product/quotient distribution,
// computed in precision parameters).
type Gaussian struct {
	mean              float64 // μ
	variance          float64 // σ², always > 0 (enforced by NewGaussian)
	standardDeviation float64 // σ = sqrt(variance)
}
// NewGaussian returns the normal distribution with the given mean and
// strictly positive variance. It panics when variance is not positive,
// because such a distribution is undefined.
func NewGaussian(mean, variance float64) *Gaussian {
	if variance <= 0.0 {
		// Previously panicked with the uninformative message "error".
		panic("gaussian: variance must be > 0")
	}
	return &Gaussian{
		mean:     mean,
		variance: variance,
		// variance is already float64; the old float64(variance)
		// conversion was a no-op.
		standardDeviation: math.Sqrt(variance),
	}
}
// Erfc computes the complementary error function using the Chebyshev
// fitting from Numerical Recipes in C, 2nd ed., p. 221 (accurate to
// roughly 1.2e-7 everywhere).
func Erfc(x float64) float64 {
	z := math.Abs(x)
	t := 1 / (1 + z/2)
	// Chebyshev polynomial in t, evaluated inside the exponent.
	poly := -z*z - 1.26551223 + t*(1.00002368+
		t*(0.37409196+t*(0.09678418+t*(-0.18628806+
			t*(0.27886807+t*(-1.13520398+t*(1.48851587+
				t*(-0.82215223+t*0.17087277))))))))
	r := t * math.Exp(poly)
	if x < 0 {
		// erfc(-x) = 2 - erfc(x)
		return 2 - r
	}
	return r
}
// Ierfc computes the inverse complementary error function, following
// Numerical Recipes, 3rd ed., p. 265: an initial rational approximation
// polished by two Newton-Raphson steps against Erfc. Out-of-domain
// inputs are clamped to the sentinels -100 (x >= 2) and 100 (x <= 0).
func Ierfc(x float64) float64 {
	if x >= 2 {
		return -100
	}
	if x <= 0 {
		return 100
	}
	// Fold the domain: work with xx in (0, 1], negating the result later
	// for x in [1, 2).
	xx := x
	if x >= 1 {
		xx = 2 - x
	}
	t := math.Sqrt(-2 * math.Log(xx/2))
	r := -0.70711 * ((2.30753+t*0.27061)/
		(1+t*(0.99229+t*0.04481)) - t)
	// Two Newton-Raphson corrections; 2/sqrt(pi) = 1.12837916709551257.
	for i := 0; i < 2; i++ {
		delta := Erfc(r) - xx
		r += delta / (1.12837916709551257*math.Exp(-(r*r)) - r*delta)
	}
	if x >= 1 {
		return -r
	}
	return r
}
// fromPrecisionMean constructs a distribution from its natural
// parameters: precision (1/variance) and precision-adjusted mean
// (mean/variance). Used by Mul and Div.
func fromPrecisionMean(precision, precisionmean float64) *Gaussian {
	return NewGaussian(precisionmean/precision, 1/precision)
}
// Pdf evaluates the probability density function at x: the relative
// likelihood of the random variable taking the value x.
// (Receiver renamed from the non-idiomatic "self".)
func (g *Gaussian) Pdf(x float64) float64 {
	norm := g.standardDeviation * math.Sqrt(2*math.Pi)
	e := math.Exp(-math.Pow(x-g.mean, 2) / (2 * g.variance))
	return e / norm
}

// Cdf evaluates the cumulative distribution function at x: the
// probability of the random variable falling in (-inf, x].
func (g *Gaussian) Cdf(x float64) float64 {
	return 0.5 * Erfc(-(x-g.mean)/(g.standardDeviation*math.Sqrt(2)))
}

// Ppf evaluates the percent point function (quantile function), the
// inverse of Cdf.
func (g *Gaussian) Ppf(x float64) float64 {
	return g.mean - g.standardDeviation*math.Sqrt(2)*Ierfc(2*x)
}
func (self *Gaussian) Add(d *Gaussian) *Gaussian {
return NewGaussian(self.mean+d.mean, self.variance+d.variance)
}
func (self *Gaussian) Sub(d *Gaussian) *Gaussian {
return NewGaussian(self.mean-d.mean, self.variance+d.variance)
}
func (self *Gaussian) Scale(c float64) *Gaussian {
return NewGaussian(self.mean*c, self.variance*c*c)
}
func (self *Gaussian) Mul(d *Gaussian) *Gaussian {
precision := 1 / self.variance
dprecision := 1 / d.variance
return fromPrecisionMean(precision+dprecision, precision*self.mean+dprecision*d.mean)
}
func (self *Gaussian) Div(d *Gaussian) *Gaussian {
precision := 1 / self.variance
dprecision := 1 / d.variance
return fromPrecisionMean(precision-dprecision, precision*self.mean-dprecision*d.mean)
} | vendor/github.com/chobie/go-gaussian/gaussian.go | 0.913874 | 0.673896 | gaussian.go | starcoder |
package parts
import (
"bytes"
"fmt"
"github.com/google/shenzhen-go/model"
"github.com/google/shenzhen-go/model/pin"
)
// init registers the Gather part type with the model's global registry
// under the "Flow" category, supplying the factory (defaulting to two
// inputs), the configuration panel and the help panel.
func init() {
	model.RegisterPartType("Gather", "Flow", &model.PartType{
		New: func() model.Part { return &Gather{InputNum: 2} },
		Panels: []model.PartPanel{
			{
				Name: "Gather",
				Editor: `<div class="form"><div class="formfield">
		<label>Number of inputs: <input id="gather-inputnum" type="number"></input></label>
	</div></div>`,
			},
			{
				Name: "Help",
				Editor: `<div>
	<p>
		A Gather part sends every value received from every input to the output.
		The number of inputs is configurable.
	</p><p>
		Gather is useful for combining multiple outputs. While a single channel
		can be attached to multiple outputs, it can cause a panic if both outputs
		try to close the channel.
	</p>
</div>`,
			},
		},
	})
}
// Gather is a part type which reads a configurable number of inputs
// and sends every value received from every input to the single output.
type Gather struct {
	// InputNum is the number of input pins to generate.
	InputNum uint `json:"input_num"`
}

// Clone returns a clone of this part. Gather carries only value state,
// so a copy of the receiver suffices.
func (g Gather) Clone() model.Part { return g }
// Impl returns a "straightforward" for-select implementation. Compared
// with the N-goroutine approach, this doesn't require a WaitGroup and
// has less hidden buffering (it won't read from inputs while blocked on
// the output).
func (g Gather) Impl(n *model.Node) model.PartImpl {
	// lb accumulates the loop + exit condition; sb accumulates the select
	// body, which is appended to lb once all cases are generated.
	// (bytes.Buffer's zero value is ready to use — no NewBuffer(nil) needed.)
	var lb, sb bytes.Buffer
	lb.WriteString(`for {
		if true `)
	sb.WriteString("select {\n")
	for i := uint(0); i < g.InputNum; i++ {
		name := fmt.Sprintf("input%d", i)
		if n.Connections[name] == "nil" {
			// Unconnected input: omit both its exit condition and its case.
			continue
		}
		fmt.Fprintf(&lb, " && %s == nil", name)
		// On close, the channel variable is set to nil so its case can
		// never fire again; the loop exits once every input is nil.
		fmt.Fprintf(&sb, `case in, open := <- %s:
			if !open { %s = nil; break }
			output <- in
		`, name, name)
	}
	lb.WriteString("{ break }\n")
	sb.WriteString("}\n")
	lb.Write(sb.Bytes())
	lb.WriteString("}\n")
	return model.PartImpl{
		Body: lb.String(),
		Tail: `close(output)`,
	}
}
// Pins returns the pin map: one "output" pin plus g.InputNum input pins
// named input0..input(N-1), all of type $Any.
func (g Gather) Pins() pin.Map {
	pins := pin.NewMap(&pin.Definition{
		Name:      "output",
		Direction: pin.Output,
		Type:      "$Any",
	})
	for i := uint(0); i < g.InputNum; i++ {
		name := fmt.Sprintf("input%d", i)
		pins[name] = &pin.Definition{
			Name:      name,
			Direction: pin.Input,
			Type:      "$Any",
		}
	}
	return pins
}
// TypeKey returns "Gather", the key under which this part type is registered.
func (g Gather) TypeKey() string { return "Gather" }
package inject
import "reflect"
// TypedInjector returns an Injector utilizing valueMaker, which should
// implement one or more of the *Maker interfaces (StringMaker, IntMaker,
// BoolMaker, ...); each implemented interface enables injection for the
// corresponding reflect.Kind.
func TypedInjector(valueMaker interface{}) Injector {
	return &typedInjector{valueMaker}
}

// A typedInjector adapts valueMaker to Injector.
type typedInjector struct {
	// valueMaker should implement one or more *Maker interfaces.
	valueMaker interface{}
}
// Inject injects value based on tagValue, provided the wrapped valueMaker
// supports value's Kind (i.e. implements the matching *Maker interface).
// It reports whether the value was set, and any error from the maker or
// an *UnsupportedKindError for unsupported kinds. Implements Injector.
func (tvs typedInjector) Inject(value reflect.Value, tagValue string) (bool, error) {
	kind := value.Kind()
	switch kind {
	case reflect.String:
		stringValueMaker, ok := tvs.valueMaker.(StringMaker)
		if !ok {
			return false, &UnsupportedKindError{reflect.String}
		}
		set, s, err := stringValueMaker.MakeString(tagValue)
		if err != nil {
			return false, err
		} else if set {
			value.SetString(s)
		}
		// err is always nil here; return nil for consistency with the
		// other cases (previously returned the err variable).
		return set, nil
	case reflect.Bool:
		boolValueMaker, ok := tvs.valueMaker.(BoolMaker)
		if !ok {
			return false, &UnsupportedKindError{reflect.Bool}
		}
		set, b, err := boolValueMaker.MakeBool(tagValue)
		if err != nil {
			return false, err
		} else if set {
			value.SetBool(b)
		}
		return set, nil
	case reflect.Int:
		return intSetter(0, tvs.valueMaker, value, tagValue)
	case reflect.Int8:
		return intSetter(8, tvs.valueMaker, value, tagValue)
	case reflect.Int16:
		return intSetter(16, tvs.valueMaker, value, tagValue)
	case reflect.Int32:
		return intSetter(32, tvs.valueMaker, value, tagValue)
	case reflect.Int64:
		return intSetter(64, tvs.valueMaker, value, tagValue)
	case reflect.Uint:
		return uintSetter(0, tvs.valueMaker, value, tagValue)
	case reflect.Uint8:
		return uintSetter(8, tvs.valueMaker, value, tagValue)
	case reflect.Uint16:
		return uintSetter(16, tvs.valueMaker, value, tagValue)
	case reflect.Uint32:
		return uintSetter(32, tvs.valueMaker, value, tagValue)
	case reflect.Uint64:
		return uintSetter(64, tvs.valueMaker, value, tagValue)
	case reflect.Float32:
		return floatSetter(32, tvs.valueMaker, value, tagValue)
	case reflect.Float64:
		return floatSetter(64, tvs.valueMaker, value, tagValue)
	case reflect.Complex64:
		return complexSetter(64, tvs.valueMaker, value, tagValue)
	case reflect.Complex128:
		return complexSetter(128, tvs.valueMaker, value, tagValue)
	case reflect.Slice:
		sliceValueMaker, ok := tvs.valueMaker.(SliceMaker)
		if !ok {
			return false, &UnsupportedKindError{reflect.Slice}
		}
		set, made, err := sliceValueMaker.MakeSlice(tagValue, value.Type())
		if err != nil {
			return false, err
		} else if set {
			value.Set(made)
		}
		return set, nil
	case reflect.Array:
		arrayValueMaker, ok := tvs.valueMaker.(ArrayMaker)
		if !ok {
			return false, &UnsupportedKindError{reflect.Array}
		}
		set, made, err := arrayValueMaker.MakeArray(tagValue, value.Type())
		if err != nil {
			return false, err
		} else if set {
			value.Set(made)
		}
		return set, nil
	case reflect.Chan:
		chanValueMaker, ok := tvs.valueMaker.(ChanMaker)
		if !ok {
			return false, &UnsupportedKindError{reflect.Chan}
		}
		// BUG FIX: this previously passed reflect.TypeOf(value), which is
		// the type descriptor of reflect.Value itself, not of the target
		// channel. Use value.Type(), as the Slice and Array cases do.
		set, made, err := chanValueMaker.MakeChan(tagValue, value.Type())
		if err != nil {
			return false, err
		} else if set {
			value.Set(made)
		}
		return set, nil
	default:
		return false, &UnsupportedKindError{kind}
	}
}
// intSetter sets value with the int returned from valueMaker, if it
// implements IntMaker. Otherwise it returns an UnsupportedKindError
// carrying the exact signed-int kind (previously it always reported
// reflect.Int, even for Int8/16/32/64, unlike floatSetter/complexSetter).
func intSetter(bits int, valueMaker interface{}, value reflect.Value, tagValue string) (bool, error) {
	intMaker, ok := valueMaker.(IntMaker)
	if !ok {
		// Map the bit size back to its kind; 0 means platform-sized int.
		kind := reflect.Int
		switch bits {
		case 8:
			kind = reflect.Int8
		case 16:
			kind = reflect.Int16
		case 32:
			kind = reflect.Int32
		case 64:
			kind = reflect.Int64
		}
		return false, &UnsupportedKindError{kind}
	}
	set, i64, err := intMaker.MakeInt(tagValue, bits)
	if err != nil {
		return false, err
	} else if set {
		value.SetInt(i64)
	}
	return set, nil
}
// uintSetter sets value with the uint returned from valueMaker, if it
// implements UintMaker. Otherwise it returns an UnsupportedKindError
// carrying the exact unsigned-int kind (previously it always reported
// reflect.Uint, even for Uint8/16/32/64).
func uintSetter(bits int, valueMaker interface{}, value reflect.Value, tagValue string) (bool, error) {
	uintMaker, ok := valueMaker.(UintMaker)
	if !ok {
		// Map the bit size back to its kind; 0 means platform-sized uint.
		kind := reflect.Uint
		switch bits {
		case 8:
			kind = reflect.Uint8
		case 16:
			kind = reflect.Uint16
		case 32:
			kind = reflect.Uint32
		case 64:
			kind = reflect.Uint64
		}
		return false, &UnsupportedKindError{kind}
	}
	set, u64, err := uintMaker.MakeUint(tagValue, bits)
	if err != nil {
		return false, err
	} else if set {
		value.SetUint(u64)
	}
	return set, nil
}
// floatSetter sets value with the float returned from valueMaker, if it
// implements FloatMaker. Otherwise it returns an UnsupportedKindError
// for the float kind matching bitSize (32 or 64).
func floatSetter(bitSize int, valueMaker interface{}, value reflect.Value, tagValue string) (bool, error) {
	floatMaker, ok := valueMaker.(FloatMaker)
	if !ok {
		var kind reflect.Kind
		if bitSize == 32 {
			kind = reflect.Float32
		} else {
			kind = reflect.Float64
		}
		return false, &UnsupportedKindError{kind}
	}
	set, f64, err := floatMaker.MakeFloat(tagValue, bitSize)
	if err != nil {
		return false, err
	} else if set {
		value.SetFloat(f64)
	}
	return set, nil
}

// complexSetter sets value with the complex returned from valueMaker, if
// it implements ComplexMaker. Otherwise it returns an
// UnsupportedKindError for the complex kind matching bitSize (64 or 128).
func complexSetter(bitSize int, valueMaker interface{}, value reflect.Value, tagValue string) (bool, error) {
	complexValueMaker, ok := valueMaker.(ComplexMaker)
	if !ok {
		var kind reflect.Kind
		if bitSize == 64 {
			kind = reflect.Complex64
		} else {
			kind = reflect.Complex128
		}
		return false, &UnsupportedKindError{kind}
	}
	set, c128, err := complexValueMaker.MakeComplex(tagValue, bitSize)
	if err != nil {
		return false, err
	} else if set {
		value.SetComplex(c128)
	}
	return set, nil
}
// An UnsupportedKindError indicates that a value maker does not support a
// certain reflect.Kind (it does not implement the matching *Maker
// interface).
type UnsupportedKindError struct {
	reflect.Kind
}

// Error implements the error interface.
func (e *UnsupportedKindError) Error() string {
	return "value maker does not support kind: " + e.Kind.String()
}
package optional
import (
"errors"
"reflect"
)
// optional holds a value that may be absent. Note the zero value of this
// struct reports IsPresent() == true with a nil payload; use OfEmpty to
// obtain a proper empty optional.
type optional struct {
	v     interface{} // the wrapped value; nil when empty
	empty bool        // true when no value is present
}
func isZeroed(in interface{}, t reflect.Type) bool {
return in == reflect.Zero(t).Interface()
}
// isNil reports whether in is nil, taking its dynamic type into account:
// an untyped nil, a nil pointer, a nil slice/map/func/chan, or a zero
// array value (historical behavior, kept for compatibility) all count as
// nil.
func isNil(in interface{}) bool {
	if in == nil {
		return true
	}
	t := reflect.TypeOf(in)
	switch t.Kind() {
	case reflect.Ptr, reflect.Array:
		// A typed nil pointer compares equal to its zero value. (Arrays
		// cannot actually be nil; treating the zero array as nil mirrors
		// the original behavior.)
		if isZeroed(in, t) {
			return true
		}
	case reflect.Slice, reflect.Func, reflect.Map, reflect.Chan:
		// These kinds cannot be compared with ==, so ask reflect.
		// BUG FIX: reflect.Chan was previously not handled at all, so a
		// nil channel slipped through as non-nil.
		if reflect.ValueOf(in).IsNil() {
			return true
		}
	case reflect.Struct:
		return false
	}
	return false
}
// Of creates an optional holding in. A nil input (per isNil) yields an
// empty optional together with a non-nil error.
func Of(in interface{}) (optional, error) {
	if isNil(in) {
		return optional{nil, true}, errors.New("input shall not be nil")
	}
	return optional{in, false}, nil
}

// OfEmpty creates an optional holding no value.
func OfEmpty() optional {
	o := optional{nil, true}
	return o
}
// Get returns the wrapped value, or an error when the optional is empty.
func (v *optional) Get() (interface{}, error) {
	if v.IsPresent() {
		return v.v, nil
	}
	return nil, errors.New("optional is empty")
}

// GetValue returns the wrapped value regardless of emptiness (nil when
// empty).
func (v *optional) GetValue() interface{} {
	return v.v
}

// IsPresent reports whether the optional holds a value.
func (v *optional) IsPresent() bool {
	return !v.empty
}
// IfPresent calls f with the wrapped value when the optional is not empty.
func (v *optional) IfPresent(f func(interface{})) {
	if v.IsPresent() {
		f(v.v)
	}
}

// IfNotPresent calls f when the optional is empty.
func (v *optional) IfNotPresent(f func()) {
	if !v.IsPresent() {
		f()
	}
}
// IfPresentError returns e when the optional holds a value, nil otherwise.
func (v *optional) IfPresentError(e error) error {
	if v.IsPresent() {
		return e
	}
	return nil
}

// IfNotPresentError returns e when the optional is empty, nil otherwise.
func (v *optional) IfNotPresentError(e error) error {
	if !v.IsPresent() {
		return e
	}
	return nil
}
// IfPresentPanic panics with i when the optional holds a value.
func (v *optional) IfPresentPanic(i interface{}) {
	if v.IsPresent() {
		panic(i)
	}
}

// IfNotPresentPanic panics with i when the optional is empty.
func (v *optional) IfNotPresentPanic(i interface{}) {
	if !v.IsPresent() {
		panic(i)
	}
}
package pkg
import (
"fmt"
"github.com/juju/errors"
"reflect"
)
// ValueAdd will try to do a mathematical addition between two values.
// It will return another value as the result, and an error if the two
// values are not compatible for addition.
//
// Operands are classified via GetBaseKind into int64/uint64/float64/
// string. Mixed signed/unsigned sums are computed in int64
// (NOTE(review): a uint above math.MaxInt64 would wrap in that
// conversion — confirm inputs stay in range). When either side is a
// string, the other side is formatted and the two are concatenated.
func ValueAdd(a, b reflect.Value) (reflect.Value, error) {
	aBkind := GetBaseKind(a)
	bBkind := GetBaseKind(b)
	switch aBkind {
	case reflect.Int64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(a.Int() + b.Int()), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Int() + int64(b.Uint())), nil
		case reflect.Float64:
			return reflect.ValueOf(float64(a.Int()) + b.Float()), nil
		case reflect.String:
			return reflect.ValueOf(fmt.Sprintf("%d%s", a.Int(), b.String())), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do addition math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	case reflect.Uint64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(int64(a.Uint()) + b.Int()), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Uint() + b.Uint()), nil
		case reflect.Float64:
			return reflect.ValueOf(float64(a.Uint()) + b.Float()), nil
		case reflect.String:
			return reflect.ValueOf(fmt.Sprintf("%d%s", a.Uint(), b.String())), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do addition math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	case reflect.Float64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(a.Float() + float64(b.Int())), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Float() + float64(b.Uint())), nil
		case reflect.Float64:
			return reflect.ValueOf(a.Float() + b.Float()), nil
		case reflect.String:
			return reflect.ValueOf(fmt.Sprintf("%f%s", a.Float(), b.String())), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do addition math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	case reflect.String:
		// String on the left: stringify whatever is on the right.
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(fmt.Sprintf("%s%d", a.String(), b.Int())), nil
		case reflect.Uint64:
			return reflect.ValueOf(fmt.Sprintf("%s%d", a.String(), b.Uint())), nil
		case reflect.Float64:
			return reflect.ValueOf(fmt.Sprintf("%s%f", a.String(), b.Float())), nil
		case reflect.String:
			return reflect.ValueOf(fmt.Sprintf("%s%s", a.String(), b.String())), nil
		case reflect.Bool:
			return reflect.ValueOf(fmt.Sprintf("%s%v", a.String(), b.Bool())), nil
		case reflect.Ptr:
			if b.CanInterface() {
				return reflect.ValueOf(fmt.Sprintf("%s%v", a.String(), b.Interface())), nil
			}
			return reflect.ValueOf(nil), errors.Errorf("Can not do addition math operator between %s and non interface-able %s", a.Kind().String(), b.Kind().String())
		default:
			return reflect.ValueOf(fmt.Sprintf("%s%v", a.String(), b.String())), nil
		}
	default:
		// String on the right with any interface-able left operand.
		if bBkind == reflect.String {
			if a.CanInterface() {
				return reflect.ValueOf(fmt.Sprintf("%v%s", a.Interface(), b.String())), nil
			}
			return reflect.ValueOf(nil), errors.Errorf("Can not do addition math operator between non interface-able %s and %s", b.Kind().String(), a.Kind().String())
		}
		return reflect.ValueOf(nil), errors.Errorf("Can not do math operator between %s and %s", a.Kind().String(), b.Kind().String())
	}
}
// ValueSub will try to do a mathematical subtraction between two values.
// It will return another value as the result, and an error if the two
// values are not compatible for subtraction. Only numeric base kinds are
// supported.
//
// NOTE(review): when both operands are uint64, the subtraction is done
// in unsigned arithmetic and wraps around for a < b — confirm callers
// guarantee a >= b or expect wrap semantics.
func ValueSub(a, b reflect.Value) (reflect.Value, error) {
	aBkind := GetBaseKind(a)
	bBkind := GetBaseKind(b)
	switch aBkind {
	case reflect.Int64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(a.Int() - b.Int()), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Int() - int64(b.Uint())), nil
		case reflect.Float64:
			return reflect.ValueOf(float64(a.Int()) - b.Float()), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do subtraction math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	case reflect.Uint64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(int64(a.Uint()) - b.Int()), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Uint() - b.Uint()), nil
		case reflect.Float64:
			return reflect.ValueOf(float64(a.Uint()) - b.Float()), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do subtraction math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	case reflect.Float64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(a.Float() - float64(b.Int())), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Float() - float64(b.Uint())), nil
		case reflect.Float64:
			return reflect.ValueOf(a.Float() - b.Float()), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do subtraction math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	default:
		return reflect.ValueOf(nil), errors.Errorf("Can not do subtraction math operator between %s and %s", a.Kind().String(), b.Kind().String())
	}
}
// ValueMul will try to do a mathematical multiplication between two
// values. It will return another value as the result, and an error if
// the two values are not compatible for multiplication. Only numeric
// base kinds are supported; overflow is not detected.
func ValueMul(a, b reflect.Value) (reflect.Value, error) {
	aBkind := GetBaseKind(a)
	bBkind := GetBaseKind(b)
	switch aBkind {
	case reflect.Int64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(a.Int() * b.Int()), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Int() * int64(b.Uint())), nil
		case reflect.Float64:
			return reflect.ValueOf(float64(a.Int()) * b.Float()), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do multiplication math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	case reflect.Uint64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(int64(a.Uint()) * b.Int()), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Uint() * b.Uint()), nil
		case reflect.Float64:
			return reflect.ValueOf(float64(a.Uint()) * b.Float()), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do multiplication math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	case reflect.Float64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(a.Float() * float64(b.Int())), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Float() * float64(b.Uint())), nil
		case reflect.Float64:
			return reflect.ValueOf(a.Float() * b.Float()), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do multiplication math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	default:
		return reflect.ValueOf(nil), errors.Errorf("Can not do multiplication math operator between %s and %s", a.Kind().String(), b.Kind().String())
	}
}
// ValueDiv will try to do a mathematical division between two values.
// It will return another value as the result, and an error if the two
// values are not compatible for division. Only numeric base kinds are
// supported.
//
// NOTE(review): integer/integer division truncates toward zero, and a
// zero integer divisor triggers a Go runtime panic here (it is not
// converted into an error) — confirm callers guard against zero.
func ValueDiv(a, b reflect.Value) (reflect.Value, error) {
	aBkind := GetBaseKind(a)
	bBkind := GetBaseKind(b)
	switch aBkind {
	case reflect.Int64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(a.Int() / b.Int()), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Int() / int64(b.Uint())), nil
		case reflect.Float64:
			return reflect.ValueOf(float64(a.Int()) / b.Float()), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do division math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	case reflect.Uint64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(int64(a.Uint()) / b.Int()), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Uint() / b.Uint()), nil
		case reflect.Float64:
			return reflect.ValueOf(float64(a.Uint()) / b.Float()), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do division math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	case reflect.Float64:
		switch bBkind {
		case reflect.Int64:
			return reflect.ValueOf(a.Float() / float64(b.Int())), nil
		case reflect.Uint64:
			return reflect.ValueOf(a.Float() / float64(b.Uint())), nil
		case reflect.Float64:
			return reflect.ValueOf(a.Float() / b.Float()), nil
		default:
			return reflect.ValueOf(nil), errors.Errorf("Can not do division math operator between %s and %s", a.Kind().String(), b.Kind().String())
		}
	default:
		return reflect.ValueOf(nil), errors.Errorf("Can not do division math operator between %s and %s", a.Kind().String(), b.Kind().String())
	}
}
package xcom
import (
"github.com/DomBlack/advent-of-code-2018/lib/vectors"
"log"
"sort"
)
// FloodMap records, for every cell reachable from a flood-fill origin,
// the number of steps needed to reach it (the origin itself has cost 0).
type FloodMap map[vectors.Vec2]int

// NewFloodMap runs a breadth-first flood fill over the map's empty cells
// starting from the given unit's position, and returns per-cell step
// costs. Cells that are absent from m.Cells or not empty are never
// entered, so unreachable cells simply do not appear in the result.
func (m *Map) NewFloodMap(starting *Unit) FloodMap {
	// FloodCell pairs a position with its BFS cost.
	type FloodCell struct {
		position vectors.Vec2
		cost int
	}
	toVisit := []FloodCell{{starting.Position, 0}}
	visited := make(map[vectors.Vec2]bool)
	floodMap := make(map[vectors.Vec2]int)
	// While we still have cells to visit
	for len(toVisit) > 0 {
		// Pop the oldest entry: FIFO order makes this breadth-first, so
		// the first (and only) cost recorded per cell is minimal.
		cell := toVisit[0]
		toVisit = toVisit[1:]
		floodMap[cell.position] = cell.cost
		// Check all adjacent cells
		for _, dir := range AdjacentCells {
			adjacentPos := cell.position.Add(dir)
			// Mark as seen regardless of whether it gets enqueued, so no
			// cell is considered twice.
			_, found := visited[adjacentPos]
			visited[adjacentPos] = true
			// If not visited already
			if !found {
				// Only walkable (existing and empty) cells are entered.
				adjacentCell, found := m.Cells[adjacentPos]
				if found && adjacentCell.IsEmpty() {
					// Enqueue at one step beyond the current cell.
					toVisit = append(toVisit, FloodCell{adjacentPos, cell.cost + 1})
				}
			}
		}
	}
	// All reachable cells have now been assigned a cost.
	return floodMap
}
// GetNearestReachableFrom returns the cell from inRangeCells that is
// reachable per this flood map with the lowest step cost, breaking cost
// ties by reading order. It returns nil when none are reachable.
// Note: inRangeCells is sorted in place.
func (m FloodMap) GetNearestReachableFrom(inRangeCells []vectors.Vec2) *vectors.Vec2 {
	// Sort by reading order first; the strict "<" comparison below then
	// makes the earliest cell win any cost tie.
	sort.Slice(inRangeCells, func(i, j int) bool {
		return inRangeCells[i].IsReadingOrderLess(inRangeCells[j])
	})
	var best *vectors.Vec2
	bestCost := 0
	for _, candidate := range inRangeCells {
		cost, reachable := m[candidate]
		if !reachable {
			continue
		}
		if best == nil || cost < bestCost {
			bestCost = cost
			chosen := candidate
			best = &chosen
		}
	}
	return best
}
func (m FloodMap) FindNextStepTowards(target *vectors.Vec2) vectors.Vec2 {
// "If multiple steps would put the unit equally closer to its destination, "
// "the unit chooses the step which is first in reading order. "
type Node struct { position vectors.Vec2; cost int }
cost, found := m[*target]
if !found {
log.Fatalf("Initial position %v was not in flood map", target)
}
if cost == 1 {
return *target
}
toVisit := []Node{ { *target, cost} }
visited := make(map[vectors.Vec2]bool)
var possibleStep *vectors.Vec2
for len(toVisit) > 0 {
node := toVisit[0]
toVisit = toVisit[1:]
visited[node.position] = true
for _, dir := range AdjacentCells {
adjacentPos := node.position.Add(dir)
if _, found := visited[adjacentPos]; found {
continue
}
cost, found := m[adjacentPos]
if cost == 1 && (possibleStep == nil || adjacentPos.IsReadingOrderLess(*possibleStep)) {
possibleStep = &adjacentPos
} else if found && cost < node.cost && cost > 0 {
toVisit = append(toVisit, Node { adjacentPos, cost })
}
}
}
if possibleStep == nil {
log.Fatalf("Unable to compute next step from %v", target)
}
return *possibleStep
} | day-15/xcom/FloodMap.go | 0.740268 | 0.485173 | FloodMap.go | starcoder |
package ids
// SoundEffectInfo describes one sound effect in the game.
type SoundEffectInfo struct {
	// Name is the unique identifier for the effect source.
	Name string
	// AudioIndex refers to the audio index. Multiple effects may use the
	// same audio; -1 indicates no audio mapped.
	AudioIndex int
}
// SoundEffectsForAudio returns every effect entry mapped to the given
// audio index, in table order. It returns nil when none match.
func SoundEffectsForAudio(index int) []SoundEffectInfo {
	var matches []SoundEffectInfo
	for _, candidate := range soundEffects {
		if candidate.AudioIndex != index {
			continue
		}
		matches = append(matches, candidate)
	}
	return matches
}
// The following table is based on the constants found in sfxlist.h (non-demo case).
var soundEffects = []SoundEffectInfo{
// Doors
{Name: "DoorMetal", AudioIndex: 3},
{Name: "DoorNormal", AudioIndex: 5},
{Name: "DoorIris", AudioIndex: 6},
{Name: "DoorBulkhead", AudioIndex: 67},
{Name: "DoorGrating", AudioIndex: 90},
// Ambient
{Name: "Bridge1", AudioIndex: -1},
{Name: "Bridge2", AudioIndex: -1},
{Name: "Bridge3", AudioIndex: -1},
{Name: "Bridge4", AudioIndex: -1},
{Name: "Maint1", AudioIndex: 9},
{Name: "Maint2", AudioIndex: -1},
{Name: "Maint3", AudioIndex: -1},
{Name: "Maint4", AudioIndex: -1},
{Name: "Grove1", AudioIndex: 43},
// Critters
{Name: "Death1", AudioIndex: 11},
{Name: "Death2", AudioIndex: 49},
{Name: "Death3", AudioIndex: 50},
{Name: "Death4", AudioIndex: 51},
{Name: "Death5", AudioIndex: 53},
{Name: "Death6", AudioIndex: 54},
{Name: "Death7", AudioIndex: 68},
{Name: "Death8", AudioIndex: 69},
{Name: "Death9", AudioIndex: 88},
{Name: "Death10", AudioIndex: 93},
{Name: "Death11", AudioIndex: 101},
{Name: "Attack1", AudioIndex: 12},
{Name: "Attack4", AudioIndex: 46},
{Name: "Attack5", AudioIndex: 48},
{Name: "Attack6", AudioIndex: 52},
{Name: "Attack7", AudioIndex: 55},
{Name: "Attack8", AudioIndex: 63},
{Name: "Attack9", AudioIndex: 16},
{Name: "Notice1", AudioIndex: 58},
{Name: "Notice2", AudioIndex: 59},
{Name: "Notice3", AudioIndex: 74},
{Name: "Notice4", AudioIndex: 75},
{Name: "Notice5", AudioIndex: 100},
{Name: "Near1", AudioIndex: 73},
{Name: "Near2", AudioIndex: 56},
{Name: "Near3", AudioIndex: 47},
{Name: "Near4", AudioIndex: 25},
// Wacky Objects
{Name: "Repulsor", AudioIndex: -1},
{Name: "ForceBridge", AudioIndex: 72},
{Name: "TerrainElevLoop", AudioIndex: 15},
{Name: "SparkingCable", AudioIndex: 87},
{Name: "SurgeryMachine", AudioIndex: 102},
// Combat
{Name: "GunMinipistol", AudioIndex: 39},
{Name: "GunDartpistol", AudioIndex: 86},
{Name: "GunMagnum", AudioIndex: 40},
{Name: "GunAssault", AudioIndex: 17},
{Name: "GunRiot", AudioIndex: 41},
{Name: "GunFlechette", AudioIndex: 38},
{Name: "GunSkorpion", AudioIndex: 65},
{Name: "GunMagpulse", AudioIndex: 45},
{Name: "GunRailgun", AudioIndex: 29},
{Name: "GunPipeHitMeat", AudioIndex: 4},
{Name: "GunPipeHitMetal", AudioIndex: 21},
{Name: "GunPipeMiss", AudioIndex: 24},
{Name: "GunLaserepeeHit", AudioIndex: 31},
{Name: "GunLaserepeeMiss", AudioIndex: 34},
{Name: "GunPhaser", AudioIndex: 18},
{Name: "GunBlaster", AudioIndex: 94},
{Name: "GunIonbeam", AudioIndex: 95},
{Name: "GunStungun", AudioIndex: 19},
{Name: "GunPlasma", AudioIndex: 97},
{Name: "PlayerHurt", AudioIndex: 64},
{Name: "Shield1", AudioIndex: 32},
{Name: "Shield2", AudioIndex: 20},
{Name: "ShieldUp", AudioIndex: 96},
{Name: "ShieldDown", AudioIndex: 42},
{Name: "MetalSpang", AudioIndex: 89},
{Name: "Radiation", AudioIndex: 2},
{Name: "Reload1", AudioIndex: 22},
{Name: "Reload2", AudioIndex: 23},
{Name: "GrenadeArm", AudioIndex: 7},
{Name: "BatteryUse", AudioIndex: 28},
{Name: "Explosion1", AudioIndex: 44},
{Name: "Rumble", AudioIndex: 106},
{Name: "Teleport", AudioIndex: 103},
{Name: "MonitorExplode", AudioIndex: 57},
{Name: "CameraExplode", AudioIndex: 8},
{Name: "CpuExplode", AudioIndex: 10},
{Name: "DestroyCrate", AudioIndex: 37},
{Name: "DestroyBarrel", AudioIndex: 109},
// Cspace
{Name: "Pulser", AudioIndex: -1},
{Name: "Drill", AudioIndex: -1},
{Name: "Disc", AudioIndex: -1},
{Name: "Datastorm", AudioIndex: -1},
{Name: "Recall", AudioIndex: -1},
{Name: "Turbo", AudioIndex: -1},
{Name: "Fakeid", AudioIndex: -1},
{Name: "Decoy", AudioIndex: -1},
{Name: "EnterCspace", AudioIndex: 27},
{Name: "OttoShodan", AudioIndex: 30},
{Name: "CyberDamage", AudioIndex: -1},
{Name: "Cyberheal", AudioIndex: -1},
{Name: "Cybertoggle", AudioIndex: -1},
{Name: "IceDefense", AudioIndex: -1},
{Name: "CyberAttack1", AudioIndex: -1},
{Name: "CyberAttack2", AudioIndex: -1},
{Name: "CyberAttack3", AudioIndex: -1},
// MFD and UI Wackiness
{Name: "VideoDown", AudioIndex: 33},
{Name: "MfdButton", AudioIndex: 35},
{Name: "InventButton", AudioIndex: 79},
{Name: "InventSelect", AudioIndex: 80},
{Name: "InventAdd", AudioIndex: 81},
{Name: "InventWare", AudioIndex: 82},
{Name: "PatchUse", AudioIndex: 83},
{Name: "ZoomBox", AudioIndex: 84},
{Name: "MapZoom", AudioIndex: 85},
{Name: "MfdKeypad", AudioIndex: 76},
{Name: "MfdBuzz", AudioIndex: 77},
{Name: "MfdSuccess", AudioIndex: 78},
{Name: "Goggle", AudioIndex: 0},
{Name: "Hudfrob", AudioIndex: 1},
{Name: "Static", AudioIndex: 2},
{Name: "Email", AudioIndex: 107},
// SHODAN
{Name: "ShodanBark", AudioIndex: 30},
{Name: "ShodanWeak", AudioIndex: 30},
{Name: "ShodanStrong", AudioIndex: 30},
// Other
{Name: "PanelSuccess", AudioIndex: 78},
{Name: "PowerOut", AudioIndex: 26},
{Name: "EnergyDrain", AudioIndex: 14},
{Name: "EnergyRecharge", AudioIndex: 98},
{Name: "Surge", AudioIndex: 60},
{Name: "Vmail", AudioIndex: 92},
{Name: "DropItem", AudioIndex: 99},
// 3d World
{Name: "Button", AudioIndex: 71},
{Name: "MechButton", AudioIndex: 36},
{Name: "Bigbutton", AudioIndex: 61},
{Name: "NormalLever", AudioIndex: 62},
{Name: "Biglever", AudioIndex: 62},
{Name: "Klaxon", AudioIndex: 70},
// Plot
{Name: "GroveJett", AudioIndex: 66},
// Extra
{Name: "PanelConfirm", AudioIndex: 13},
{Name: "Death12", AudioIndex: 104},
{Name: "Death13", AudioIndex: 105},
{Name: "Death14", AudioIndex: 108},
{Name: "Attack10", AudioIndex: 91},
{Name: "Attack11", AudioIndex: 110},
{Name: "Near5", AudioIndex: 111},
{Name: "Death15", AudioIndex: 112},
{Name: "FunPackAttackingUs", AudioIndex: 113},
} | ss1/world/ids/Sounds.go | 0.650578 | 0.604807 | Sounds.go | starcoder |
package gots
import "math"
// PTS constants
const (
	// PTS_DTS_INDICATOR_* mirror the 2-bit PTS_DTS_flags field of a PES header.
	PTS_DTS_INDICATOR_BOTH     = 3 // 11: both PTS and DTS are present
	PTS_DTS_INDICATOR_ONLY_PTS = 2 // 10: only PTS is present
	PTS_DTS_INDICATOR_NONE     = 0 // 00: neither PTS nor DTS is present
	// MaxPtsValue is the highest value the PTS can hold before it rolls over, since its a 33 bit timestamp.
	MaxPtsValue = (1 << 33) - 1 // 2^33 - 1 = 8589934591 = 0x1FFFFFFFF
	// MaxPtsTicks is the length of the complete PTS timeline.
	MaxPtsTicks = 1 << 33 // 2^33 = 8589934592 = 0x200000000
	// PtsNegativeInfinity and PtsPositiveInfinity are sentinel values for
	// algorithms working against PTS; both lie far outside the 33-bit range.
	PtsNegativeInfinity = PTS(math.MaxUint64 - 1) // 18446744073709551614
	PtsPositiveInfinity = PTS(math.MaxUint64)     // 18446744073709551615
	// PtsClockRate is the tick rate of the PTS clock (90 kHz).
	PtsClockRate = 90000
	// UpperPtsRolloverThreshold is the threshold for a rollover on the upper end, MaxPtsValue - 30 min
	UpperPtsRolloverThreshold = 8427934591
	// LowerPtsRolloverThreshold is the threshold for a rollover on the lower end, 30 min
	LowerPtsRolloverThreshold = 162000000
)
// PTS represents PTS time: a 33-bit presentation timestamp on a 90 kHz clock.
type PTS uint64
// After checks if this PTS is after the other PTS, taking the sentinel
// infinities and 33-bit rollover into account.
func (p PTS) After(other PTS) bool {
	if other == PtsPositiveInfinity {
		// Nothing is after positive infinity.
		return false
	}
	if other == PtsNegativeInfinity {
		// Everything is after negative infinity.
		return true
	}
	if p.RolledOver(other) {
		return true
	}
	if other.RolledOver(p) {
		return false
	}
	return p > other
}
// GreaterOrEqual returns true if the method receiver is >= the provided PTS.
func (p PTS) GreaterOrEqual(other PTS) bool {
	return p == other || p.After(other)
}
// RolledOver checks if this PTS just rolled over compared to the other PTS.
// Sentinel values never participate in rollover detection.
func (p PTS) RolledOver(other PTS) bool {
	if other == PtsNegativeInfinity || other == PtsPositiveInfinity {
		return false
	}
	// A rollover looks like: p is within 30 min of the start of the timeline
	// while other is within 30 min of the end.
	return p < LowerPtsRolloverThreshold && other > UpperPtsRolloverThreshold
}
// DurationFrom returns the difference between the two pts times. This number
// is always positive; a rollover between the two times is measured across the
// end of the 33-bit timeline.
func (p PTS) DurationFrom(from PTS) uint64 {
	if p.RolledOver(from) {
		return uint64((MaxPtsTicks - from) + p)
	}
	if from.RolledOver(p) {
		return uint64((MaxPtsTicks - p) + from)
	}
	if p < from {
		return uint64(from - p)
	}
	return uint64(p - from)
}
// Add adds the two PTS times together and returns a new PTS, wrapping
// around the 33-bit timeline as needed.
func (p PTS) Add(x PTS) PTS {
	sum := p + x
	return sum & MaxPtsValue
}
// ExtractTime extracts a 33-bit PTS/DTS value from the 5-byte field of a PES
// header, skipping the interleaved marker bits.
func ExtractTime(bytes []byte) uint64 {
	top := uint64(bytes[0]>>1) & 0x07    // bits 32..30
	high := uint64(bytes[1])             // bits 29..22
	mid := uint64(bytes[2]>>1) & 0x7f    // bits 21..15
	low := uint64(bytes[3])              // bits 14..7
	bottom := uint64(bytes[4]>>1) & 0x7f // bits 6..0
	return top<<30 | high<<22 | mid<<15 | low<<7 | bottom
}
// InsertPTS inserts a given pts time into a byte slice and sets the
// marker bits. len(b) >= 5
func InsertPTS(b []byte, pts uint64) {
	b[0] = byte(pts >> 29 & 0x0f) // PTS[32..30] in bits 3..1; bit 0 is forced to 1 below
	b[1] = byte(pts >> 22 & 0xff) // PTS[29..22]
	b[2] = byte(pts >> 14 & 0xff) // PTS[21..15] in bits 7..1; bit 0 is forced to 1 below
	b[3] = byte(pts >> 7 & 0xff)  // PTS[14..7]
	b[4] = byte(pts&0xff) << 1    // PTS[6..0] in bits 7..1 (bit 7 of the low byte lives in b[3])
	// Set the marker bits as appropriate
	b[0] |= 0x21 // 0b0010_0001: the '0010' PTS-only prefix plus the low marker bit
	b[2] |= 0x01
	b[4] |= 0x01
} | pts.go | 0.657868 | 0.52683 | pts.go | starcoder
package stats
import (
"fmt"
"regexp"
"github.com/turbinelabs/nonstdlib/arrays/indexof"
)
const (
	// transformTagsDesc is the help text describing the tag-transform flag syntax.
	transformTagsDesc = `
Defines one or more transformations for tags. A tag with a specific name whose value matches
a regular expression can be transformed into one or more tags with values extracted from
subexpressions of the regular expression. Transformations are specified as follows:
tag=/regex/,n1,n2...
where tag is the name of the tag to be transformed, regex is a regular expression with 1 or
more subexpressions, and n1,n2... is a sequence of names for the tags formed from the regular
expression's subexpressions (matching groups). Any character may be used in place of the
slashes (/) to delimit the regular expression. There must be at least one subexpression in the
regular expression. There must be exactly as many names as subexpressions. If one of the names
is the original tag name, the original tag is replaced with the transformed value. Otherwise,
the original tag is passed through unchanged. Multiple transformations may be separated by
semicolons (;). Any character may be escaped with a backslash (\).
Examples:
foo=/^(.+):.*x=([0-9]+)/,foo,bar
foo=@.*y=([A-Za-z_]+)@,yval
`
)
// newTagTransformer creates a new tagTransformer with the given underlying
// tagTransforms, indexed by tag name.
func newTagTransformer(config []tagTransform) *tagTransformer {
	if len(config) == 0 {
		return &tagTransformer{}
	}
	transforms := make(map[string]*tagTransform, len(config))
	for i := range config {
		// Copy before taking the address so each map entry has its own value.
		tt := config[i]
		transforms[tt.name] = &tt
	}
	return &tagTransformer{transforms: transforms}
}
// newTagTransform creates a new tagTransform from the given tag name, value
// pattern, and mapped tag names. The pattern must be a valid regular
// expression with at least one subexpression, and there must be exactly one
// mapped name per subexpression. If a mapped name equals the original tag
// name, the original tag will be replaced with the subexpression's value;
// otherwise the original tag is left in place.
func newTagTransform(name, pattern string, mappedNames []string) (tagTransform, error) {
	regex, err := regexp.Compile(pattern)
	if err != nil {
		return tagTransform{}, err
	}
	switch n := regex.NumSubexp(); {
	case n == 0:
		return tagTransform{}, fmt.Errorf("pattern %q contains no subexpressions", pattern)
	case n != len(mappedNames):
		return tagTransform{}, fmt.Errorf(
			"pattern %q contains %d subexpressions, but %d names were provided",
			pattern,
			n,
			len(mappedNames),
		)
	}
	replace := false
	for _, mapped := range mappedNames {
		if mapped == name {
			replace = true
			break
		}
	}
	return tagTransform{
		name:            name,
		regex:           regex,
		mappedNames:     mappedNames,
		replaceOriginal: replace,
	}, nil
}
// tagTransformer provides a mechanism to allow a single tag value to be broken up
// into multiple component tags. For example, ENCHILADA=chicken,corn,cotija can be
// transformed into FILLING=chicken, TORTILLA=corn, CHEESE=cotija.
type tagTransformer struct {
	// transforms maps a tag name to the transform applied to tags of that name.
	transforms map[string]*tagTransform
}

// tagTransform represents a single tag transform.
type tagTransform struct {
	name            string         // name of the tag this transform applies to
	regex           *regexp.Regexp // pattern matched against the tag's value; has >= 1 subexpressions
	mappedNames     []string       // one output tag name per subexpression
	replaceOriginal bool           // true if a mapped name equals name, replacing the original tag
}
// transform applies the transform to the given tag. It returns the resulting
// tags and true on a match, or nil and false if the tag's value did not match.
func (dt *tagTransform) transform(original Tag) ([]Tag, bool) {
	groups := dt.regex.FindStringSubmatch(original.V)
	if len(groups) < 2 {
		// No match (or no capture groups, which newTagTransform forbids).
		return nil, false
	}
	var result []Tag
	if dt.replaceOriginal {
		result = make([]Tag, 0, len(groups)-1)
	} else {
		// Keep the original tag, followed by the mapped tags.
		result = append(make([]Tag, 0, len(groups)), original)
	}
	for i, group := range groups[1:] {
		result = append(result, NewKVTag(dt.mappedNames[i], group))
	}
	return result, true
}
// transform applies any registered transforms to the given tags. Tags with no
// registered transform, or whose values do not match, pass through unchanged.
func (t *tagTransformer) transform(tags []Tag) []Tag {
	if len(t.transforms) == 0 {
		return tags
	}
	// ttags maps the index of an original tag to its replacement tags.
	ttags := map[int][]Tag{}
	numReplacements := 0
	for i, tag := range tags {
		if tt, ok := t.transforms[tag.K]; ok {
			if replacements, ok := tt.transform(tag); ok {
				ttags[i] = replacements
				numReplacements += len(replacements)
			}
		}
	}
	if numReplacements == 0 {
		// No matches
		return tags
	}
	// Capacity is number of original tags, minus those for which replacement will
	// occur, plus the number of replacement tags.
	rtags := make([]Tag, 0, len(tags)-len(ttags)+numReplacements)
	for i, tag := range tags {
		if replacements, ok := ttags[i]; ok {
			rtags = append(rtags, replacements...)
		} else {
			rtags = append(rtags, tag)
		}
	}
	return rtags
} | vendor/github.com/turbinelabs/stats/tag_transformer.go | 0.780077 | 0.636692 | tag_transformer.go | starcoder
package creator
import (
"github.com/pzduniak/unipdf/contentstream/draw"
"github.com/pzduniak/unipdf/model"
)
// Ellipse defines an ellipse with a center at (xc,yc) and a specified width and height. The ellipse can have a colored
// fill and/or border with a specified width.
// Implements the Drawable interface and can be drawn on PDF using the Creator.
type Ellipse struct {
	xc          float64                  // x coordinate of the center
	yc          float64                  // y coordinate of the center
	width       float64                  // total width of the ellipse
	height      float64                  // total height of the ellipse
	fillColor   *model.PdfColorDeviceRGB // nil means no fill
	borderColor *model.PdfColorDeviceRGB // nil means no border
	borderWidth float64
}
// newEllipse creates a new ellipse centered at (xc,yc) with a width and height
// specified. The default border is black with width 1.0, and there is no fill.
func newEllipse(xc, yc, width, height float64) *Ellipse {
	return &Ellipse{
		xc:          xc,
		yc:          yc,
		width:       width,
		height:      height,
		borderColor: model.NewPdfColorDeviceRGB(0, 0, 0),
		borderWidth: 1.0,
	}
}
// GetCoords returns the coordinates of the Ellipse's center (xc,yc).
func (ell *Ellipse) GetCoords() (float64, float64) {
	return ell.xc, ell.yc
}

// SetBorderWidth sets the border width.
func (ell *Ellipse) SetBorderWidth(bw float64) {
	ell.borderWidth = bw
}

// SetBorderColor sets the border color.
func (ell *Ellipse) SetBorderColor(col Color) {
	ell.borderColor = model.NewPdfColorDeviceRGB(col.ToRGB())
}

// SetFillColor sets the fill color. The fill is nil (disabled) until this is
// called.
func (ell *Ellipse) SetFillColor(col Color) {
	ell.fillColor = model.NewPdfColorDeviceRGB(col.ToRGB())
}
// GeneratePageBlocks draws the ellipse on a new block representing the page.
func (ell *Ellipse) GeneratePageBlocks(ctx DrawContext) ([]*Block, DrawContext, error) {
	block := NewBlock(ctx.PageWidth, ctx.PageHeight)
	// Translate the center point to the corner position used by draw.Circle,
	// flipping the y axis into PDF coordinates (origin at the bottom-left).
	drawell := draw.Circle{
		X:           ell.xc - ell.width/2,
		Y:           ctx.PageHeight - ell.yc - ell.height/2,
		Width:       ell.width,
		Height:      ell.height,
		Opacity:     1.0,
		BorderWidth: ell.borderWidth,
	}
	// Fill and border are each enabled only when a color was provided.
	if ell.fillColor != nil {
		drawell.FillEnabled = true
		drawell.FillColor = ell.fillColor
	}
	if ell.borderColor != nil {
		drawell.BorderEnabled = true
		drawell.BorderColor = ell.borderColor
		drawell.BorderWidth = ell.borderWidth
	}
	contents, _, err := drawell.Draw("")
	if err != nil {
		return nil, ctx, err
	}
	err = block.addContentsByString(string(contents))
	if err != nil {
		return nil, ctx, err
	}
	return []*Block{block}, ctx, nil
} | bot/vendor/github.com/pzduniak/unipdf/creator/ellipse.go | 0.825976 | 0.439687 | ellipse.go | starcoder
package texture
import (
"math"
)
// FixedNormal provides a fixed VectorField.
type FixedNormal struct {
	Val []float64
}

// DefaultNormal describes the unit normal point straight up from the XY plane.
var DefaultNormal = &FixedNormal{[]float64{0, 0, 1}}

// Eval2 implements the VectorField interface, returning the same fixed vector
// for every (x, y).
func (n *FixedNormal) Eval2(x, y float64) []float64 {
	return n.Val
}
// Normal provides a VectorField calculated from a Field using the finite difference method.
type Normal struct {
	Source   Field
	SDx, SDy float64 // precomputed s/(2*d) scale factors for the x and y differences
	Dx, Dy   float64 // half-step sizes for the central differences
}

// NewNormal returns a new instance of Normal. sx and sy scale the slope in x
// and y; dx and dy are the half-step sizes used for the central differences.
func NewNormal(src Field, sx, sy, dx, dy float64) *Normal {
	return &Normal{src, sx / (2 * dx), sy / (2 * dy), dx, dy}
}

// Eval2 implements the VectorField interface. It estimates the surface normal
// at (x, y) by central differences of the source field, returning a unit
// vector {nx, ny, nz} with nz > 0.
func (n *Normal) Eval2(x, y float64) []float64 {
	// Central differences of the source field, scaled by SDx/SDy.
	dx := n.Source.Eval2(x-n.Dx, y) - n.Source.Eval2(x+n.Dx, y)
	dy := n.Source.Eval2(x, y-n.Dy) - n.Source.Eval2(x, y+n.Dy)
	dx *= n.SDx
	dy *= n.SDy
	// Normalize (dx, dy, 1) to unit length.
	div := 1 / math.Sqrt(dx*dx+dy*dy+1)
	return []float64{dx * div, dy * div, div}
}
// UnitNormal provides a normalized version of a VectorField.
type UnitNormal struct {
	Source VectorField
}

// Eval2 implements the VectorField interface, scaling the source vector to
// unit length.
func (u *UnitNormal) Eval2(x, y float64) []float64 {
	v := u.Source.Eval2(x, y)
	s := v[0]*v[0] + v[1]*v[1] + v[2]*v[2]
	s = 1 / math.Sqrt(s)
	return []float64{v[0] * s, v[1] * s, v[2] * s}
}
const oneOverPi = 1 / math.Pi

// Direction converts a VectorField to a Field based on the vector's direction.
type Direction struct {
	Source VectorField
	FFunc  func(float64) float64 // optional post-mapping; nil means identity
}

// Eval2 implements the Field interface. It returns the angle of the vector's
// xy projection, mapped from (-pi, pi] to (-1, 1], optionally passed through
// FFunc.
func (d *Direction) Eval2(x, y float64) float64 {
	normal := d.Source.Eval2(x, y)
	theta := math.Atan2(normal[1], normal[0])
	if d.FFunc == nil {
		return theta * oneOverPi
	}
	return d.FFunc(theta * oneOverPi)
}
// Magnitude converts a normalized VectorField to a Field based on the vector's magnitude.
type Magnitude struct {
	Source VectorField
	FFunc  func(float64) float64 // optional post-mapping; nil means identity
}

// Eval2 implements the Field interface. It returns 1 - z (always >= 0 for
// unit-length source vectors with z <= 1), optionally passed through FFunc.
func (m *Magnitude) Eval2(x, y float64) float64 {
	normal := m.Source.Eval2(x, y)
	if m.FFunc == nil {
		return 1 - normal[2]
	}
	return m.FFunc(1 - normal[2])
}
// Select converts a VectorField to a field by selecting one of its components.
type Select struct {
	Src   VectorField
	Chan  int                   // index of the component to select
	FFunc func(float64) float64 // optional post-mapping; nil means identity
}

// Eval2 implements the Field interface, returning the Chan'th component of
// the source vector, optionally passed through FFunc.
func (s *Select) Eval2(x, y float64) float64 {
	v := s.Src.Eval2(x, y)[s.Chan]
	if s.FFunc == nil {
		return v
	}
	return s.FFunc(v)
}
// VectorCombine converts a VectorField to a Field using a combiner function.
type VectorCombine struct {
	Source VectorField
	CFunc  func(...float64) float64 // combines the vector's components into one value
	FFunc  func(float64) float64    // optional post-mapping; nil means identity
}

// Eval2 implements the Field interface, combining the source vector's
// components with CFunc and optionally mapping the result through FFunc.
func (vb *VectorCombine) Eval2(x, y float64) float64 {
	v := vb.Source.Eval2(x, y)
	res := vb.CFunc(v...)
	if vb.FFunc == nil {
		return res
	}
	return vb.FFunc(res)
} | vector.go | 0.909184 | 0.747524 | vector.go | starcoder
package value
import (
"strconv"
"strings"
)
// compareUIntFunc compares two uint64 values (e.g. >, >=, <, <=).
type compareUIntFunc func(a, b uint64) bool

// UIntSlice holds a slice of uint64 values
type UIntSlice struct {
	// valsPtr points at the backing slice so storage can be shared externally.
	valsPtr *[]uint64
}
// NewUIntSlice makes a new UIntSlice with the given uint64 values. The values
// are copied, so the caller's backing array is not shared.
func NewUIntSlice(vals ...uint64) *UIntSlice {
	vs := append(make([]uint64, 0, len(vals)), vals...)
	return &UIntSlice{valsPtr: &vs}
}
// NewUIntSliceFromPtr makes a new UIntSlice with the given pointer to uint64
// values. The slice is shared, not copied.
func NewUIntSliceFromPtr(valsPtr *[]uint64) *UIntSlice {
	return &UIntSlice{valsPtr: valsPtr}
}

// Set changes the uint64 values.
func (v *UIntSlice) Set(vals []uint64) { *v.valsPtr = vals }

// Type returns TypeUInt.
func (v *UIntSlice) Type() Type { return TypeUInt }

// IsSlice returns true.
func (v *UIntSlice) IsSlice() bool { return true }

// Clone produces a clone that is identical except for the backing pointer.
func (v *UIntSlice) Clone() Value { return NewUIntSlice(*v.valsPtr...) }
// Parse sets the values from the given comma-separated string. Each element
// is trimmed of surrounding whitespace and parsed as an unsigned decimal
// integer; on error the existing values are left unchanged.
func (v *UIntSlice) Parse(str string) error {
	parts := strings.Split(str, ",")
	parsed := make([]uint64, 0, len(parts))
	for _, part := range parts {
		val, err := strconv.ParseUint(strings.TrimSpace(part), 10, 64)
		if err != nil {
			return err
		}
		parsed = append(parsed, val)
	}
	*v.valsPtr = parsed
	return nil
}
// SlicePointer returns the pointer for storage of slice values.
func (v *UIntSlice) SlicePointer() interface{} { return v.valsPtr }

// Slice returns the uint64 slice values.
func (v *UIntSlice) Slice() interface{} { return *v.valsPtr }

// Len returns the number of slice elements.
func (v *UIntSlice) Len() int { return len(*v.valsPtr) }

// Equal checks if length and values of given slice equal the current.
// Returns a non-nil error if types do not match.
func (v *UIntSlice) Equal(v2 Slice) (bool, error) {
	if err := CheckType(TypeUInt, v2.Type()); err != nil {
		return false, err
	}
	vals1 := *v.valsPtr
	// Assertion assumes a TypeUInt Slice() always yields []uint64, which the
	// type check above is expected to guarantee.
	vals2 := v2.Slice().([]uint64)
	if len(vals1) != len(vals2) {
		return false, nil
	}
	// Element-wise, order-sensitive comparison.
	for i, val1 := range vals1 {
		if val1 != vals2[i] {
			return false, nil
		}
	}
	return true, nil
}
// Greater checks if all values of the current slice are greater than that of
// the given single. An empty slice compares false.
// Returns a non-nil error if types do not match.
func (v *UIntSlice) Greater(v2 Single) (bool, error) {
	return compareUInts(*v.valsPtr, v2, uintGreater)
}

// GreaterEqual checks if all values of the current slice are greater or equal
// to the given single. An empty slice compares false.
// Returns a non-nil error if types do not match.
func (v *UIntSlice) GreaterEqual(v2 Single) (bool, error) {
	return compareUInts(*v.valsPtr, v2, uintGreaterEqual)
}

// Less checks if all values of the current slice are less than that of
// the given single. An empty slice compares false.
// Returns a non-nil error if types do not match.
func (v *UIntSlice) Less(v2 Single) (bool, error) {
	return compareUInts(*v.valsPtr, v2, uintLess)
}

// LessEqual checks if all values of the current slice are less or equal
// to the given single. An empty slice compares false.
// Returns a non-nil error if types do not match.
func (v *UIntSlice) LessEqual(v2 Single) (bool, error) {
	return compareUInts(*v.valsPtr, v2, uintLessEqual)
}
// Contains checks if the given single value is equal to one of the current
// slice values.
// Returns a non-nil error if types do not match.
func (v *UIntSlice) Contains(v2 Single) (bool, error) {
	if err := CheckType(TypeUInt, v2.Type()); err != nil {
		return false, err
	}
	target := v2.Value().(uint64)
	for _, val := range *v.valsPtr {
		if val == target {
			return true, nil
		}
	}
	return false, nil
}
// compareUInts reports whether f(val, v2) holds for every value in vals.
// It returns false for an empty slice, and a non-nil error on type mismatch.
func compareUInts(vals []uint64, v2 Single, f compareUIntFunc) (bool, error) {
	if err := CheckType(TypeUInt, v2.Type()); err != nil {
		return false, err
	}
	if len(vals) == 0 {
		return false, nil
	}
	other := v2.Value().(uint64)
	for _, val := range vals {
		if !f(val, other) {
			return false, nil
		}
	}
	return true, nil
}
// uintGreater reports a > b.
func uintGreater(a, b uint64) bool {
	return a > b
}

// uintGreaterEqual reports a >= b.
func uintGreaterEqual(a, b uint64) bool {
	return a >= b
}

// uintLess reports a < b.
func uintLess(a, b uint64) bool {
	return a < b
}

// uintLessEqual reports a <= b.
func uintLessEqual(a, b uint64) bool {
	return a <= b
} | value/uintslice.go | 0.840292 | 0.644812 | uintslice.go | starcoder
package constraint
import (
"fmt"
"github.com/sineatos/deag/base"
"github.com/sineatos/deag/benchmarks"
)
// Float64Constraint defines the functions of constraint
type Float64Constraint interface {
	// AdjustAndEvolve checks individual and adjusts it if it is not feasible
	AdjustAndEvolve(individual *base.Float64Individual) []float64
}

// ClosestValidPenalty returns penalized fitness for invalid individuals and the original fitness value for valid individuals. The penalized fitness is made of the fitness of the closest valid individual added with a weighted (optional) distance penalty. The distance function, if provided, shall return a value growing as the individual moves away the valid zone.
type ClosestValidPenalty struct {
	// isFeasible checks individual if feasible
	isFeasible func(individual *base.Float64Individual) bool
	// adjust adjusts individual and returns the individual in constraint
	adjust func(individual *base.Float64Individual) *base.Float64Individual
	// alpha weights the distance penalty term.
	alpha float64
	// distance measures how far an individual is from its adjusted version;
	// may be nil, in which case no distance penalty is applied.
	distance func(ind1, ind2 *base.Float64Individual) float64
	// evaluator computes the raw fitness of an individual.
	evaluator benchmarks.Float64Evaluator
}
// NewClosestValidPenalty returns a *ClosestValidPenalty built from the given
// feasibility check, adjuster, penalty weight, distance function (may be nil)
// and fitness evaluator.
func NewClosestValidPenalty(isFeasible func(individual *base.Float64Individual) bool, adjust func(individual *base.Float64Individual) *base.Float64Individual, alpha float64, distance func(ind1, ind2 *base.Float64Individual) float64, evaluator benchmarks.Float64Evaluator) *ClosestValidPenalty {
	cvp := ClosestValidPenalty{}
	cvp.isFeasible = isFeasible
	cvp.adjust = adjust
	cvp.alpha = alpha
	cvp.distance = distance
	cvp.evaluator = evaluator
	return &cvp
}
// AdjustAndEvolve checks individual and adjusts it if it is not feasible.
// Feasible individuals are evaluated directly; infeasible ones receive the
// fitness of their adjusted (closest valid) counterpart, minus a distance
// penalty weighted by alpha in the direction of each objective's weight.
func (con *ClosestValidPenalty) AdjustAndEvolve(individual *base.Float64Individual) []float64 {
	if con.isFeasible(individual) {
		return con.evaluator(individual)
	}
	// Evaluate the closest valid individual in place of the infeasible one.
	fInd := con.adjust(individual)
	fAns := con.evaluator(fInd)
	// Reduce each objective weight to its sign: +1 (maximize) or -1 (minimize).
	fWeights := individual.GetFitness().GetWeights()
	weights := make([]float64, len(fWeights))
	for i := range fWeights {
		if fWeights[i] >= 0.0 {
			weights[i] = 1.0
		} else {
			weights[i] = -1.0
		}
	}
	if len(weights) != len(fAns) {
		panic(fmt.Sprintf("Fitness weights and computed fitness are of different size: %v,%v", weights, fAns))
	}
	// The distance penalty is optional; with a nil distance function it is zero.
	dists := make([]float64, len(weights))
	if con.distance != nil {
		d := con.distance(fInd, individual)
		for i := range dists {
			dists[i] = d
		}
	}
	// Penalize each objective in the direction that worsens it.
	ans := make([]float64, len(dists))
	for i := range ans {
		ans[i] = fAns[i] - weights[i]*con.alpha*dists[i]
	}
	return ans
} | tools/constraint/constraint.go | 0.733738 | 0.421552 | constraint.go | starcoder
package generator
import (
"flag"
"fmt"
"math/rand"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"time"
)
type (
	// Grid is the primary data structure for the generator. It contains the candidates for each cell in the 9 x 9 puzzle.
	Grid struct {
		// orig marks the cells that were given in the original puzzle.
		orig [rows][cols]bool
		// cells holds a candidate bitmask per cell: bit d set means digit d
		// is still a candidate for that cell.
		cells [rows][cols]cell
	}

	// pointCell pairs a grid coordinate with the cell value found there.
	pointCell struct {
		point
		cell
	}
)
const (
	cols = 9
	rows = 9
	zero = uint8(0)
	// all has bits 1 through 9 set: every digit is still a candidate.
	all = 0b1111111110
)
var (
	// attempts bounds how many random grids a Worker tries per requested level.
	attempts uint
	// colorized enables ANSI color escapes in Display output.
	colorized bool
)

func init() {
	// NOTE(review): rand.Seed is deprecated as of Go 1.20; harmless here but
	// could be dropped if the module targets a newer toolchain.
	rand.Seed(time.Now().Unix())
	flag.UintVar(&attempts, "a", 500, "maximum `attempts` to generate a puzzle")
	flag.BoolVar(&colorized, "c", false, "colorize the output for ANSI terminals")
}
// ParseEncoded parses an input string containing 81 characters — digits for
// given cells and dots ('.') or zeroes ('0') for empty cells — representing an
// initial puzzle layout.
//
// Accepting '0' as empty keeps ParseEncoded symmetric with Encode, which
// emits '0' for non-given cells; previously a '0' was misparsed as a given
// cell whose candidate mask was 1<<0, corrupting the grid.
func ParseEncoded(i string) (*Grid, error) {
	if len(i) != 81 {
		return nil, fmt.Errorf("encoded puzzle must contain 81 characters")
	}
	g := Grid{}
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			b := i[r*9+c]
			switch {
			case b == '.' || b == '0':
				// Empty cell: all digits remain candidates.
				g.cells[r][c] = all
			case b >= '1' && b <= '9':
				// Given cell: record it and set its single candidate bit.
				g.orig[r][c] = true
				g.cells[r][c] = 1 << (b - '0')
			default:
				return nil, fmt.Errorf("illegal character '%c' in encoded puzzle", b)
			}
		}
	}
	return &g, nil
}
// Randomize generates a random puzzle. There is no guarantee that the puzzle will be solvable or have just one solution.
func Randomize() *Grid {
	g := Grid{}
	// Start with every digit as a candidate in every cell.
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			g.cells[r][c] = all
		}
	}
	// Fill three boxes with a random permutation of 1-9 each. The chosen box
	// indices (i*3 + index, with indexes a permutation of 0..2) select one box
	// per band and per stack — presumably so the filled boxes share no row or
	// column and any permutations are mutually consistent (assumes box.unit
	// numbers boxes row-major; TODO confirm).
	indexes := []int{0, 1, 2}
	rand.Shuffle(len(indexes), func(i, j int) { indexes[i], indexes[j] = indexes[j], indexes[i] })
	for i, index := range indexes {
		u := i*3 + index
		d := []int{1, 2, 3, 4, 5, 6, 7, 8, 9}
		rand.Shuffle(len(d), func(i, j int) { d[i], d[j] = d[j], d[i] })
		for pi, p := range box.unit[u] {
			*g.pt(p) = 1 << d[pi]
		}
	}
	return &g
}
// allPoints returns every coordinate in the grid paired with its cell value,
// in row-major order.
func (g *Grid) allPoints() []pointCell {
	pcs := make([]pointCell, 0, rows*cols)
	for r := zero; r < rows; r++ {
		for c := zero; c < cols; c++ {
			pcs = append(pcs, pointCell{point{r, c}, g.cells[r][c]})
		}
	}
	return pcs
}
// cellChange is a convenience function that is called by strategy methods when a cell changes value.
// It records that a change occurred and, depending on verbosity, prints the
// supplied message (verbose >= 1) and redisplays the grid (verbose >= 2).
func (g *Grid) cellChange(res *bool, verbose uint, format string, a ...interface{}) {
	*res = true
	if verbose >= 1 {
		fmt.Printf(format, a...)
	}
	if verbose >= 2 {
		g.Display()
	}
}
// Display emits a grid to stdout in a framed format.
func (g *Grid) Display() {
	// Unicode box-drawing characters and ANSI color codes for the frame.
	const (
		botLeft  = "\u2514"
		botRight = "\u2518"
		botT     = "\u2534"
		horizBar = "\u2500"
		leftT    = "\u251c"
		plus     = "\u253c"
		rightT   = "\u2524"
		topLeft  = "\u250c"
		topRight = "\u2510"
		topT     = "\u252c"
		vertBar  = "\u2502"
		green    = "32"
		yellow   = "33"
	)
	// The cell width adapts to the widest candidate list in the grid.
	width := g.maxWidth() + 2 // Add 2 for margins.
	bars := strings.Repeat(horizBar, width*3)
	line := leftT + strings.Join([]string{bars, bars, bars}, plus) + rightT
	// Top line with column headers.
	fmt.Print("\t ")
	for d := 0; d < 9; d++ {
		fmt.Printf("%s", colorize(yellow, center(strconv.Itoa(d), width)))
		if d == 2 || d == 5 {
			fmt.Print(" ")
		}
	}
	fmt.Println()
	// First frame line.
	fmt.Printf("\t %s%s%s%s%s%s%s\n", topLeft, bars, topT, bars, topT, bars, topRight)
	// Grid rows.
	for r := 0; r < rows; r++ {
		fmt.Printf("\t%s %s", colorize(yellow, strconv.Itoa(r)), vertBar)
		for c := 0; c < cols; c++ {
			cell := g.cells[r][c]
			orig := g.orig[r][c]
			s := cell.String()
			if s == "123456789" {
				// A fully-open cell is shown as a single dot.
				fmt.Printf("%s", center(".", width))
			} else {
				if orig {
					// Given cells are highlighted in green.
					fmt.Printf("%s", colorize(green, center(s, width)))
				} else {
					fmt.Printf("%s", center(s, width))
				}
			}
			if c == 2 || c == 5 {
				fmt.Printf("%s", vertBar)
			}
		}
		fmt.Printf("%s\n", vertBar)
		if r == 2 || r == 5 {
			fmt.Printf("\t %s\n", line)
		}
	}
	// Bottom line.
	fmt.Printf("\t %s%s%s%s%s%s%s\n", botLeft, bars, botT, bars, botT, bars, botRight)
}
// digitPlaces returns an array indexed by digit (1-9) of position bitmasks:
// bit i of res[d] is set if points[i] still has digit d as a candidate.
func (g *Grid) digitPlaces(points [9]point) (res [10]positions) {
	for pi, p := range points {
		cell := *g.pt(p)
		for d := 1; d <= 9; d++ {
			if cell&(1<<d) != 0 {
				res[d] |= 1 << pi
			}
		}
	}
	return
}
// digitPoints builds a table, indexed by digit (1-9), of the points among ps
// that still have that digit as a candidate.
func (g *Grid) digitPoints(ps [9]point) (res [10][]point) {
	for _, p := range ps {
		cell := *g.pt(p)
		for d := 1; d <= 9; d++ {
			if cell&(1<<d) != 0 {
				res[d] = append(res[d], p)
			}
		}
	}
	return
}
// emptyCell returns true if the grid contains at least one empty cell (no
// digits set), i.e. a contradiction.
func (g *Grid) emptyCell() bool {
	for _, row := range g.cells {
		for _, c := range row {
			if c == 0 {
				return true
			}
		}
	}
	return false
}
// Encode returns a "standard" (digits and zeroes) string representation of a
// puzzle: the given digit for original cells, '0' for everything else.
func (g *Grid) Encode() string {
	var b strings.Builder
	for r := zero; r < rows; r++ {
		for c := zero; c < cols; c++ {
			if !g.orig[r][c] {
				b.WriteString("0")
				continue
			}
			cell := *g.pt(point{r, c})
			for d := 1; d <= 9; d++ {
				if cell&(1<<d) != 0 {
					fmt.Fprintf(&b, "%d", d)
					break
				}
			}
		}
	}
	return b.String()
}
// encodeInts returns the grid as 81 ints, one per cell, formed by parsing
// each cell's candidate string as a decimal number.
func (g *Grid) encodeInts() []int {
	var encoded []int
	for r := zero; r < rows; r++ {
		for c := zero; c < cols; c++ {
			// NOTE(review): the Atoi error is discarded; a cell whose String()
			// is not a valid number (presumably an empty candidate set)
			// silently encodes as 0 — TODO confirm cell.String() semantics.
			v, _ := strconv.Atoi(g.cells[r][c].String())
			encoded = append(encoded, v)
		}
	}
	return encoded
}
// maxWidth calculates the width in characters of the widest cell in the grid
// (maximum number of candidate digits). A fully-open cell (9 candidates) is
// displayed as a single dot, so it counts as width 1.
func (g *Grid) maxWidth() int {
	widest := 0
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			n := bitCount[g.cells[r][c]]
			if n == 9 {
				n = 1
			}
			if n > widest {
				widest = n
			}
		}
	}
	return widest
}
// minPoint find the non-solved point with the least number of candidates and returns that point and true if found, otherwise it returns false.
// Ties are broken uniformly at random among all minimal points.
func (g *Grid) minPoint() (p point, found bool) {
	min := 10 // larger than any possible candidate count
	minPoints := make([]point, 0)
	for r := zero; r < rows; r++ {
		for c := zero; c < cols; c++ {
			cell := g.cells[r][c]
			count := bitCount[cell]
			// Only unsolved cells (two or more candidates) are considered.
			if count > 1 {
				p = point{r, c}
				if count < min {
					// New minimum: restart the tie list.
					min = count
					minPoints = minPoints[:0]
					minPoints = append(minPoints, p)
					found = true
				} else if count == min {
					minPoints = append(minPoints, p)
					found = true
				}
			}
		}
	}
	if found {
		rand.Shuffle(len(minPoints), func(i, j int) { minPoints[i], minPoints[j] = minPoints[j], minPoints[i] })
		return minPoints[0], true
	}
	return
}
// pt returns a pointer to the cell at a given point.
func (g *Grid) pt(p point) *cell {
	return &g.cells[p.r][p.c]
}
// Reduce eliminates candidates from cells using logical methods. For example if a cell contains a single digit candidate, that digit can be removed from all other cells in the same box, row, and column.
// It returns the hardest strategy level that made progress and whether the
// grid was solved. When all is false, only the two cheapest strategies are
// used (the fast path used during brute-force search).
func (g *Grid) Reduce(all bool, strategies *map[string]bool, verbose uint) (Level, bool) {
	maxLevel := Easy
	if g.emptyCell() {
		return Easy, false
	}
	// Apply strategies cheapest-first, restarting from the top whenever any
	// strategy makes progress, until the grid is solved or nothing changes.
	for {
		if g.solved() {
			return maxLevel, true
		}
		if g.reduceLevel(&maxLevel, Easy, verbose, strategies, []func(uint) bool{
			g.nakedSingle,
			g.hiddenSingle,
		}) {
			continue
		}
		if all {
			if g.reduceLevel(&maxLevel, Easy, verbose, strategies, []func(uint) bool{
				g.nakedPair,
				g.nakedTriple,
				g.nakedQuad,
				g.hiddenPair,
				g.hiddenTriple,
				g.hiddenQuad,
				g.pointingLine,
				g.boxLine,
			}) {
				continue
			}
			if g.reduceLevel(&maxLevel, Standard, verbose, strategies, []func(uint) bool{
				g.xWing,
				g.yWing,
				g.singlesChains,
				g.swordfish,
				g.xyzWing,
			}) {
				continue
			}
			if g.reduceLevel(&maxLevel, Hard, verbose, strategies, []func(uint) bool{
				g.xCycles,
				g.xyChains,
				g.medusa,
				g.jellyfish,
				g.wxyzWing,
			}) {
				continue
			}
			if g.reduceLevel(&maxLevel, Expert, verbose, strategies, []func(uint) bool{
				g.skLoops,
				g.exocet,
			}) {
				continue
			}
			if g.reduceLevel(&maxLevel, Extreme, verbose, strategies, []func(uint) bool{}) {
				continue
			}
		}
		break
	}
	// No strategy made progress and the grid is not solved.
	return maxLevel, false
}
// reduceLevel runs the given strategies in order, stopping at the first one
// that makes progress. On progress it records the strategy's name (when
// strategies is non-nil), raises *maxLevel to level if needed, and returns
// true; it returns false when no strategy changed the grid.
func (g *Grid) reduceLevel(maxLevel *Level, level Level, verbose uint, strategies *map[string]bool, fs []func(uint) bool) bool {
	for _, f := range fs {
		if f(verbose) {
			if strategies != nil {
				name := nameOfFunc(f)
				(*strategies)[name] = true
			}
			if *maxLevel < level {
				*maxLevel = level
			}
			return true
		}
	}
	return false
}
// Search uses a brute-force descent to solve the grid and returns a slice of grids that may be empty if no solution was found, may contain a single grid if a unique solution was found, or may contain more than one solution.
// It stops as soon as two solutions are found, which is enough to prove
// non-uniqueness.
func (g *Grid) Search(solutions *[]*Grid) {
	if g.solved() {
		*solutions = append(*solutions, g)
		return
	}
	if g.emptyCell() {
		// A cell with no candidates means this branch is a dead end.
		return
	}
	point, found := g.minPoint()
	if !found {
		return
	}
	// Try each candidate of the most-constrained cell, in random order.
	digits := g.pt(point).digits()
	rand.Shuffle(len(digits), func(i, j int) { digits[i], digits[j] = digits[j], digits[i] })
	for _, d := range digits {
		// Grid holds only arrays, so plain assignment copies the whole grid.
		cp := *g
		*cp.pt(point) = 1 << d
		_, solved := cp.Reduce(false, nil, 0)
		if solved {
			*solutions = append(*solutions, &cp)
			if len(*solutions) > 1 {
				return
			}
			continue
		}
		cp.Search(solutions)
		if len(*solutions) > 1 {
			return
		}
	}
	return
}
// solved checks that a grid is completely solved (all boxes, rows, and columns have each digit appearing exactly once).
func (g *Grid) solved() bool {
	// Every cell must be down to exactly one candidate...
	for r := zero; r < rows; r++ {
		for c := zero; c < cols; c++ {
			if bitCount[g.cells[r][c]] != 1 {
				return false
			}
		}
	}
	// ...and every unit of every group must contain each digit exactly once.
	return g.solvedGroup(&box) && g.solvedGroup(&col) && g.solvedGroup(&row)
}

// solvedGroup reports whether every unit of the group contains each digit
// exactly once.
func (g *Grid) solvedGroup(gr *group) bool {
	for _, ps := range gr.unit {
		cells := [10]int{} // occurrence count per digit within this unit
		for _, p := range ps {
			cell := *g.pt(p)
			// Sanity check: solving must never overwrite a given cell.
			if g.orig[p.r][p.c] && bitCount[cell] != 1 {
				panic(fmt.Sprintf("changed original cell (%d, %d) to %#b", p.r, p.c, cell))
			}
			if cell == 0 {
				return false
			}
			for d := 1; d <= 9; d++ {
				if cell&(1<<d) != 0 {
					cells[d]++
				}
			}
		}
		for d := 1; d <= 9; d++ {
			if cells[d] != 1 {
				return false
			}
		}
	}
	return true
}
// Valid returns true if the grid contains at most one occurance of each digit in each unit.
// Only the original (given) cells are checked.
func (g *Grid) Valid() bool {
	return g.validGroup(&box) && g.validGroup(&col) && g.validGroup(&row)
}

// validGroup reports whether no unit of the group contains a duplicate given
// digit.
func (g *Grid) validGroup(gr *group) bool {
	for _, u := range gr.unit {
		var seen [10]bool
		for _, p := range u {
			if !g.orig[p.r][p.c] {
				continue
			}
			digit := g.pt(p).lowestSetBit()
			if seen[digit] {
				return false
			}
			seen[digit] = true
		}
	}
	return true
}
// Worker generates puzzles. It removes a requested puzzle level from the tasks channel and attempts to generate a puzzle at the level. If it succeeds, it pushes the puzzle to the results channel. If it cannot generate a puzzle, it pushes nil.
func Worker(tasks chan Level, results chan *Game) {
outer:
	for level := range tasks {
		maxAttempts := attempts
	inner:
		for {
			grid := Randomize()
			solutions := make([]*Grid, 0, 2)
			grid.Search(&solutions)
			if len(solutions) == 0 { // The grid has no solution.
				maxAttempts--
				if maxAttempts == 0 { // If too many attempts, push a nil and start again with a new level.
					results <- nil
					continue outer
				}
				continue inner
			}
			// Minimize the puzzle: clear cells one at a time (in random order)
			// and keep each clearing only if the solution remains unique.
			// From https://stackoverflow.com/a/7280517/96233.
			*grid = *solutions[0]      // Copy the first solution
			points := grid.allPoints() // Get all points from the first solution.
			rand.Shuffle(len(points), func(i, j int) { points[i], points[j] = points[j], points[i] }) // Shuffle them.
			for len(points) > 0 {
				curr := points[0]
				points = points[1:]
				*grid.pt(curr.point) = all // Clear the cell.
				solutions = solutions[:0]
				grid.Search(&solutions)
				if len(solutions) > 1 { // No longer unique.
					*grid.pt(curr.point) = curr.cell // Put the value back.
				}
			}
			// At this point, grid contains the smallest solution that is unique. Now we test the level.
			cp := *grid
			strategies := make(map[string]bool)
			l, solved := cp.Reduce(true, &strategies, 0)
			solutions = solutions[:0]
			cp.Search(&solutions)
			if solved && l == level && len(solutions) == 1 {
				solution := solutions[0]
				// Mark the remaining single-candidate cells as the puzzle's
				// givens and count the clues.
				var clues uint
				for r := 0; r < rows; r++ {
					for c := 0; c < cols; c++ {
						if bitCount[grid.cells[r][c]] == 1 {
							solution.orig[r][c] = true
							clues++
						}
					}
				}
				// Collect the sorted names of the strategies the solve needed.
				var s []string
				for n := range strategies {
					s = append(s, n)
				}
				sort.Slice(s, func(i, j int) bool { return s[i] < s[j] })
				grid.orig = solution.orig
				results <- &Game{level, clues, s, grid, solution}
				continue outer
			}
			// If we could not find a unique solution, try again.
			maxAttempts--
			if maxAttempts == 0 { // If too many attempts, push a nil and start again with a new level.
				results <- nil
				continue outer
			}
		}
	}
}
// center centers a string in the given width field, padding with spaces on
// both sides (the right side gets the extra space when the padding is odd).
// If s already fills or exceeds the field it is returned unchanged.
// Note: width is measured in bytes, so multi-byte runes may appear off-center.
func center(s string, w int) string {
	excess := w - len(s)
	if excess <= 0 {
		// The original formatting always appended at least one trailing
		// space here; return the string untouched instead.
		return s
	}
	lead := excess / 2
	return strings.Repeat(" ", lead) + s + strings.Repeat(" ", excess-lead)
}
// colorize adds ANSI escape sequences to display the string in color code c
// when color output is enabled; otherwise s is returned unchanged.
func colorize(c string, s string) string {
	if colorized {
		return fmt.Sprintf("\x1b[%sm%s\x1b[0m", c, s)
	}
	// fmt.Sprintf("%s", s) was a needless copy; return s directly.
	return s
}
// decodeInts builds a Grid from 81 encoded cells. Each integer's decimal
// digits are the candidate digits of the corresponding cell (row-major
// order); a non-digit character (e.g. a minus sign) causes a panic.
func decodeInts(encoded []int) *Grid {
	if len(encoded) != 81 {
		panic(fmt.Sprintf("encoding has bad length: %d (should be 81)", len(encoded)))
	}
	var g Grid
	for i, e := range encoded {
		digits := strconv.Itoa(e)
		var c cell
		for j := 0; j < len(digits); j++ {
			d, err := strconv.Atoi(digits[j : j+1])
			if err != nil {
				panic(fmt.Sprintf("encoding values must be digits -- found %s", digits[j:j+1]))
			}
			c |= 1 << d
		}
		g.cells[i/9][i%9] = c
	}
	return &g
}
// nameOfFunc returns the bare name of f: the runtime symbol with any
// package/receiver prefix and the method-value "-fm" suffix removed.
func nameOfFunc(f func(uint) bool) string {
	name := runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
	if i := strings.LastIndex(name, "."); i > 0 {
		name = name[i+1:]
	}
	// Method values are named "pkg.Type.Method-fm"; only strip the suffix
	// (LastIndex could truncate at an interior "-fm").
	return strings.TrimSuffix(name, "-fm")
}
package main
import (
"encoding/hex"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"unicode/utf16"
"unicode/utf8"
"github.com/google/der-ascii/internal"
)
// A position describes a location in the input stream.
// All fields count bytes, not runes.
type position struct {
	Offset int // offset, starting at 0
	Line   int // line number, starting at 1
	Column int // column number, starting at 1 (byte count)
}
// A tokenKind is a kind of token.
type tokenKind int

const (
	tokenBytes      tokenKind = iota // literal bytes to copy to the output
	tokenLeftCurly                   // '{'
	tokenRightCurly                  // '}'
	tokenIndefinite                  // the "indefinite" length modifier
	tokenLongForm                    // a long-form length override (see isLongFormOverride)
	tokenEOF                         // end of input
)
// A parseError is an error during parsing DER ASCII.
type parseError struct {
	Pos position
	Err error
}

// Error implements the error interface. Only the line number of Pos is
// included in the message.
func (t *parseError) Error() string {
	return fmt.Sprintf("line %d: %s", t.Pos.Line, t.Err)
}
// A token is a token in a DER ASCII file.
type token struct {
	// Kind is the kind of the token.
	Kind tokenKind
	// Value, for a tokenBytes token, is the decoded value of the token in
	// bytes.
	Value []byte
	// Pos is the position of the first byte of the token.
	Pos position
	// Length, for a tokenLongForm token, is the number of bytes to use to
	// encode the length, not including the initial one.
	Length int
}
var (
	// regexpInteger matches an optionally negated decimal integer symbol.
	regexpInteger = regexp.MustCompile(`^-?[0-9]+$`)
	// regexpOID matches a dotted OID symbol with at least two components.
	regexpOID = regexp.MustCompile(`^[0-9]+(\.[0-9]+)+$`)
)
// A scanner tokenizes DER ASCII input, tracking the current position for
// error reporting.
type scanner struct {
	text string
	pos  position
}

// newScanner returns a scanner over text, positioned at the start of line 1.
func newScanner(text string) *scanner {
	return &scanner{text: text, pos: position{Line: 1}}
}
// parseEscapeSequence decodes one backslash escape starting at the current
// position (\n, \", \\, \xNN, \uNNNN, or \UNNNNNNNN) and returns the rune it
// denotes. The scanner is left just past the escape.
func (s *scanner) parseEscapeSequence() (rune, error) {
	s.advance() // Skip the \. The caller is assumed to have validated it.
	if s.isEOF() {
		return 0, &parseError{s.pos, errors.New("expected escape character")}
	}
	switch c := s.text[s.pos.Offset]; c {
	case 'n':
		s.advance()
		return '\n', nil
	case '"', '\\':
		s.advance()
		return rune(c), nil
	case 'x':
		s.advance()
		return s.parseHexEscape(2)
	case 'u':
		s.advance()
		return s.parseHexEscape(4)
	case 'U':
		s.advance()
		return s.parseHexEscape(8)
	default:
		return 0, &parseError{s.pos, fmt.Errorf("unknown escape sequence \\%c", c)}
	}
}

// parseHexEscape reads exactly digits hex digits at the current position and
// returns their big-endian value as a rune, advancing past them.
func (s *scanner) parseHexEscape(digits int) (rune, error) {
	if s.pos.Offset+digits > len(s.text) {
		return 0, &parseError{s.pos, errors.New("unfinished escape sequence")}
	}
	b, err := hex.DecodeString(s.text[s.pos.Offset : s.pos.Offset+digits])
	if err != nil {
		return 0, &parseError{s.pos, err}
	}
	s.advanceBytes(digits)
	var r rune
	for _, by := range b {
		r = r<<8 | rune(by)
	}
	return r, nil
}
// parseQuotedString lexes a double-quoted byte-string literal. The opening
// quote has already been recognized by the caller. Escape sequences are
// decoded; escapes above 0xff are rejected because the result is a raw byte
// string. Pos of the returned token is the position just after the opening
// quote.
func (s *scanner) parseQuotedString() (token, error) {
	s.advance() // Skip the ". The caller is assumed to have validated it.
	start := s.pos
	var bytes []byte
	for {
		if s.isEOF() {
			return token{}, &parseError{start, errors.New("unmatched \"")}
		}
		switch c := s.text[s.pos.Offset]; c {
		case '"':
			s.advance()
			return token{Kind: tokenBytes, Value: bytes, Pos: start}, nil
		case '\\':
			escapeStart := s.pos
			r, err := s.parseEscapeSequence()
			if err != nil {
				return token{}, err
			}
			if r > 0xff {
				// TODO(davidben): Alternatively, should these encode as UTF-8?
				return token{}, &parseError{escapeStart, errors.New("illegal escape for quoted string")}
			}
			bytes = append(bytes, byte(r))
		default:
			// Any other byte, including non-ASCII ones, is taken literally.
			s.advance()
			bytes = append(bytes, c)
		}
	}
}
// appendUTF16 appends the UTF-16 big-endian encoding of r to b and returns
// the extended slice.
func appendUTF16(b []byte, r rune) []byte {
	if r <= 0xffff {
		// Note this logic intentionally tolerates unpaired surrogates.
		return append(b, byte(r>>8), byte(r))
	}
	hi, lo := utf16.EncodeRune(r)
	return append(b, byte(hi>>8), byte(hi), byte(lo>>8), byte(lo))
}
// parseUTF16String lexes a u"..." literal, producing UTF-16 big-endian
// bytes. The caller has already recognized the leading u and quote. Literal
// characters must be valid UTF-8; escape sequences may produce any rune,
// including unpaired surrogates.
func (s *scanner) parseUTF16String() (token, error) {
	s.advance() // Skip the u. The caller is assumed to have validated it.
	s.advance() // Skip the ". The caller is assumed to have validated it.
	start := s.pos
	var bytes []byte
	for {
		if s.isEOF() {
			return token{}, &parseError{start, errors.New("unmatched \"")}
		}
		switch c := s.text[s.pos.Offset]; c {
		case '"':
			s.advance()
			return token{Kind: tokenBytes, Value: bytes, Pos: start}, nil
		case '\\':
			r, err := s.parseEscapeSequence()
			if err != nil {
				return token{}, err
			}
			bytes = appendUTF16(bytes, r)
		default:
			r, n := utf8.DecodeRuneInString(s.text[s.pos.Offset:])
			// Note DecodeRuneInString may return utf8.RuneError if there is a
			// legitimate replacement charaacter in the input. The documentation
			// says errors return (RuneError, 0) or (RuneError, 1).
			if r == utf8.RuneError && n <= 1 {
				return token{}, &parseError{s.pos, errors.New("invalid UTF-8")}
			}
			s.advanceBytes(n)
			bytes = appendUTF16(bytes, r)
		}
	}
}
// appendUTF32 appends the UTF-32 big-endian encoding of r to b and returns
// the extended slice.
func appendUTF32(b []byte, r rune) []byte {
	u := uint32(r)
	return append(b, byte(u>>24), byte(u>>16), byte(u>>8), byte(u))
}
// parseUTF32String lexes a U"..." literal, producing UTF-32 big-endian
// bytes. The caller has already recognized the leading U and quote. Literal
// characters must be valid UTF-8; escape sequences may produce any rune.
func (s *scanner) parseUTF32String() (token, error) {
	s.advance() // Skip the U. The caller is assumed to have validated it.
	s.advance() // Skip the ". The caller is assumed to have validated it.
	start := s.pos
	var bytes []byte
	for {
		if s.isEOF() {
			return token{}, &parseError{start, errors.New("unmatched \"")}
		}
		switch c := s.text[s.pos.Offset]; c {
		case '"':
			s.advance()
			return token{Kind: tokenBytes, Value: bytes, Pos: start}, nil
		case '\\':
			r, err := s.parseEscapeSequence()
			if err != nil {
				return token{}, err
			}
			bytes = appendUTF32(bytes, r)
		default:
			r, n := utf8.DecodeRuneInString(s.text[s.pos.Offset:])
			// Note DecodeRuneInString may return utf8.RuneError if there is a
			// legitimate replacement charaacter in the input. The documentation
			// says errors return (RuneError, 0) or (RuneError, 1).
			if r == utf8.RuneError && n <= 1 {
				return token{}, &parseError{s.pos, errors.New("invalid UTF-8")}
			}
			s.advanceBytes(n)
			bytes = appendUTF32(bytes, r)
		}
	}
}
// Next scans and returns the next token, skipping whitespace and comments,
// or tokenEOF at end of input.
// NOTE(review): some returned tokens record Pos after their text has been
// consumed (e.g. '{', '}', hex/bit-string literals) while others record the
// start; and a couple of failure paths return plain errors without position
// information — confirm which conventions consumers rely on.
func (s *scanner) Next() (token, error) {
again:
	if s.isEOF() {
		return token{Kind: tokenEOF, Pos: s.pos}, nil
	}
	switch s.text[s.pos.Offset] {
	case ' ', '\t', '\n', '\r':
		// Skip whitespace.
		s.advance()
		goto again
	case '#':
		// Skip to the end of the comment.
		s.advance()
		for !s.isEOF() {
			wasNewline := s.text[s.pos.Offset] == '\n'
			s.advance()
			if wasNewline {
				break
			}
		}
		goto again
	case '{':
		s.advance()
		return token{Kind: tokenLeftCurly, Pos: s.pos}, nil
	case '}':
		s.advance()
		return token{Kind: tokenRightCurly, Pos: s.pos}, nil
	case '"':
		return s.parseQuotedString()
	case 'u':
		// u"..." is a UTF-16 string literal; a bare u falls through to the
		// generic symbol handling below.
		if s.pos.Offset+1 < len(s.text) && s.text[s.pos.Offset+1] == '"' {
			return s.parseUTF16String()
		}
	case 'U':
		// U"..." is a UTF-32 string literal.
		if s.pos.Offset+1 < len(s.text) && s.text[s.pos.Offset+1] == '"' {
			return s.parseUTF32String()
		}
	case 'b':
		// b`...` is a BIT STRING literal of '0'/'1' characters, with an
		// optional '|' marking where explicit padding bits begin.
		if s.pos.Offset+1 < len(s.text) && s.text[s.pos.Offset+1] == '`' {
			s.advance() // Skip the b.
			s.advance() // Skip the `.
			bitStr, ok := s.consumeUpTo('`')
			if !ok {
				return token{}, &parseError{s.pos, errors.New("unmatched `")}
			}
			// The leading byte is the number of "extra" bits at the end.
			var bitCount int
			var sawPipe bool
			value := []byte{0}
			for i, r := range bitStr {
				switch r {
				case '0', '1':
					if bitCount%8 == 0 {
						// Starting a new output byte.
						value = append(value, 0)
					}
					if r == '1' {
						value[bitCount/8+1] |= 1 << uint(7-bitCount%8)
					}
					bitCount++
				case '|':
					if sawPipe {
						return token{}, &parseError{s.pos, errors.New("duplicate |")}
					}
					// bitsRemaining is the number of bits remaining in the output that haven't
					// been used yet. There cannot be more than that many bits past the |.
					bitsRemaining := (len(value)-1)*8 - bitCount
					inputRemaining := len(bitStr) - i - 1
					if inputRemaining > bitsRemaining {
						return token{}, &parseError{s.pos, fmt.Errorf("expected at most %v explicit padding bits; found %v", bitsRemaining, inputRemaining)}
					}
					sawPipe = true
					value[0] = byte(bitsRemaining)
				default:
					return token{}, &parseError{s.pos, fmt.Errorf("unexpected rune %q", r)}
				}
			}
			if !sawPipe {
				// No explicit padding marker: everything after the last data
				// bit in the final byte counts as padding.
				value[0] = byte((len(value)-1)*8 - bitCount)
			}
			return token{Kind: tokenBytes, Value: value, Pos: s.pos}, nil
		}
	case '`':
		// `...` is a hex literal.
		s.advance()
		hexStr, ok := s.consumeUpTo('`')
		if !ok {
			return token{}, &parseError{s.pos, errors.New("unmatched `")}
		}
		bytes, err := hex.DecodeString(hexStr)
		if err != nil {
			return token{}, &parseError{s.pos, err}
		}
		return token{Kind: tokenBytes, Value: bytes, Pos: s.pos}, nil
	case '[':
		// [...] is a tag descriptor, encoded to DER immediately.
		s.advance()
		tagStr, ok := s.consumeUpTo(']')
		if !ok {
			return token{}, &parseError{s.pos, errors.New("unmatched [")}
		}
		tag, err := decodeTagString(tagStr)
		if err != nil {
			return token{}, &parseError{s.pos, err}
		}
		value, err := appendTag(nil, tag)
		if err != nil {
			return token{}, &parseError{s.pos, err}
		}
		return token{Kind: tokenBytes, Value: value, Pos: s.pos}, nil
	}
	// Normal token. Consume up to the next whitespace character, symbol, or
	// EOF.
	start := s.pos
	s.advance()
loop:
	for !s.isEOF() {
		switch s.text[s.pos.Offset] {
		case ' ', '\t', '\n', '\r', '{', '}', '[', ']', '`', '"', '#':
			break loop
		default:
			s.advance()
		}
	}
	symbol := s.text[start.Offset:s.pos.Offset]
	// See if it is a tag.
	tag, ok := internal.TagByName(symbol)
	if ok {
		value, err := appendTag(nil, tag)
		if err != nil {
			// This is impossible; built-in tags always encode.
			return token{}, &parseError{s.pos, err}
		}
		return token{Kind: tokenBytes, Value: value, Pos: start}, nil
	}
	// Decimal integers encode as DER INTEGERs.
	if regexpInteger.MatchString(symbol) {
		value, err := strconv.ParseInt(symbol, 10, 64)
		if err != nil {
			return token{}, &parseError{start, err}
		}
		return token{Kind: tokenBytes, Value: appendInteger(nil, value), Pos: s.pos}, nil
	}
	// Dotted digit sequences encode as DER OBJECT IDENTIFIERs.
	if regexpOID.MatchString(symbol) {
		oidStr := strings.Split(symbol, ".")
		var oid []uint32
		for _, s := range oidStr {
			u, err := strconv.ParseUint(s, 10, 32)
			if err != nil {
				return token{}, &parseError{start, err}
			}
			oid = append(oid, uint32(u))
		}
		der, ok := appendObjectIdentifier(nil, oid)
		if !ok {
			// NOTE(review): unlike other failures this returns a plain error
			// with no position — confirm that is intended.
			return token{}, errors.New("invalid OID")
		}
		return token{Kind: tokenBytes, Value: der, Pos: s.pos}, nil
	}
	if symbol == "TRUE" {
		return token{Kind: tokenBytes, Value: []byte{0xff}, Pos: s.pos}, nil
	}
	if symbol == "FALSE" {
		return token{Kind: tokenBytes, Value: []byte{0x00}, Pos: s.pos}, nil
	}
	if symbol == "indefinite" {
		return token{Kind: tokenIndefinite}, nil
	}
	if isLongFormOverride(symbol) {
		l, err := decodeLongFormOverride(symbol)
		if err != nil {
			return token{}, &parseError{start, err}
		}
		return token{Kind: tokenLongForm, Length: l}, nil
	}
	return token{}, fmt.Errorf("unrecognized symbol %q", symbol)
}
// isEOF reports whether the scanner has consumed all of its input.
func (s *scanner) isEOF() bool {
	return len(s.text) <= s.pos.Offset
}
// advance consumes one byte of input, keeping line/column bookkeeping in
// sync. Advancing at EOF is a no-op.
func (s *scanner) advance() {
	if s.isEOF() {
		return
	}
	if s.text[s.pos.Offset] == '\n' {
		s.pos.Line++
		s.pos.Column = 0
	} else {
		s.pos.Column++
	}
	s.pos.Offset++
}
// advanceBytes consumes n bytes of input (stopping early at EOF).
func (s *scanner) advanceBytes(n int) {
	for ; n > 0; n-- {
		s.advance()
	}
}
// consumeUpTo scans forward to the next occurrence of b and returns the text
// before it, consuming the delimiter as well. It returns false if b does not
// occur in the remaining input.
func (s *scanner) consumeUpTo(b byte) (string, bool) {
	start := s.pos.Offset
	for ; !s.isEOF(); s.advance() {
		if s.text[s.pos.Offset] == b {
			match := s.text[start:s.pos.Offset]
			s.advance() // consume the delimiter itself
			return match, true
		}
	}
	return "", false
}
// asciiToDERImpl encodes tokens from scanner until EOF or a '}' that closes
// the current nesting level. leftCurly is the token of the '{' that opened
// this level, or nil at top level; it determines whether '}' / EOF are legal
// and provides error positions.
func asciiToDERImpl(scanner *scanner, leftCurly *token) ([]byte, error) {
	var out []byte
	// lengthModifier holds a pending "indefinite" or long-form token; it
	// must be immediately followed by '{'.
	var lengthModifier *token
	for {
		token, err := scanner.Next()
		if err != nil {
			return nil, err
		}
		if lengthModifier != nil && token.Kind != tokenLeftCurly {
			return nil, &parseError{lengthModifier.Pos, errors.New("length modifier was not followed by '{'")}
		}
		switch token.Kind {
		case tokenBytes:
			out = append(out, token.Value...)
		case tokenLeftCurly:
			// Recursively encode the contents, then prepend the length.
			child, err := asciiToDERImpl(scanner, &token)
			if err != nil {
				return nil, err
			}
			var lengthOverride int
			if lengthModifier != nil {
				if lengthModifier.Kind == tokenIndefinite {
					// BER indefinite length: 0x80 prefix, 00 00 end-of-contents.
					out = append(out, 0x80)
					out = append(out, child...)
					out = append(out, 0x00, 0x00)
					lengthModifier = nil
					break
				}
				if lengthModifier.Kind == tokenLongForm {
					lengthOverride = lengthModifier.Length
				}
			}
			out, err = appendLength(out, len(child), lengthOverride)
			if err != nil {
				// appendLength may fail if the lengthModifier was incompatible.
				// NOTE(review): this assumes appendLength only fails when a
				// long-form override was given (lengthModifier != nil);
				// otherwise this dereference would panic — confirm.
				return nil, &parseError{lengthModifier.Pos, err}
			}
			out = append(out, child...)
			lengthModifier = nil
		case tokenRightCurly:
			if leftCurly != nil {
				return out, nil
			}
			return nil, &parseError{token.Pos, errors.New("unmatched '}'")}
		case tokenLongForm, tokenIndefinite:
			lengthModifier = &token
		case tokenEOF:
			if leftCurly == nil {
				return out, nil
			}
			return nil, &parseError{leftCurly.Pos, errors.New("unmatched '{'")}
		default:
			panic(token)
		}
	}
}
func asciiToDER(input string) ([]byte, error) {
scanner := newScanner(input)
return asciiToDERImpl(scanner, nil)
} | cmd/ascii2der/scanner.go | 0.53048 | 0.400046 | scanner.go | starcoder |
package bcnutil
import (
"github.com/bcndev/bytecoin-go"
)
// TreeHash folds a list of hashes into a single root by repeated pairwise
// hashing. When the count is not a power of two, trailing pairs are combined
// first so that a balanced power-of-two tree remains.
func TreeHash(hashes []bytecoin.Hash) bytecoin.Hash {
	count := len(hashes)
	switch count {
	case 0:
		// Empty input yields the zero hash.
		return bytecoin.Hash{}
	case 1:
		return hashes[0]
	case 2:
		return FastHash(hashes[0][:], hashes[1][:])
	}
	// cnt becomes the largest power of two strictly below count.
	cnt := 1
	for cnt*2 < count {
		cnt *= 2
	}
	// The first 2*cnt-count hashes carry over unchanged; the rest are folded
	// in pairs, leaving exactly cnt entries in tmp.
	tmp := make([]bytecoin.Hash, cnt)
	copy(tmp[:], hashes[:2*cnt-count])
	for i, j := 2*cnt-count, 2*cnt-count; j < cnt; i, j = i+2, j+1 {
		tmp[j] = FastHash(hashes[i][:], hashes[i+1][:])
	}
	// Standard Merkle reduction over the power-of-two sized tmp.
	for cnt > 2 {
		cnt /= 2
		for j := 0; j < cnt; j++ {
			tmp[j] = FastHash(tmp[2*j][:], tmp[2*j+1][:])
		}
	}
	return FastHash(tmp[0][:], tmp[1][:])
}
// CoinbaseTreeDepth returns the depth of the tree used for count leaves:
// the largest d such that 2^d <= count (0 for count < 2).
func CoinbaseTreeDepth(count int) int {
	depth := 0
	for uint64(2)<<uint(depth) <= uint64(count) {
		depth++
	}
	return depth
}
// CoinbaseTreeBranch returns the branch of sibling hashes (ordered leaf to
// root by index, with branch[depth-1] nearest the leaf) for the first
// element, hashes[0] — presumably the coinbase transaction, hence the name.
func CoinbaseTreeBranch(hashes []bytecoin.Hash) []bytecoin.Hash {
	count := len(hashes)
	depth := CoinbaseTreeDepth(count)
	cnt := 1 << uint(depth)
	// tmp holds everything except hashes[0]: the first 2*cnt-count-1 hashes
	// carried over, then trailing pairs folded, leaving cnt-1 entries.
	tmp := make([]bytecoin.Hash, cnt-1)
	copy(tmp[:], hashes[1:2*cnt-count])
	for i, j := 2*cnt-count, 2*cnt-count-1; j < cnt-1; i, j = i+2, j+1 {
		tmp[j] = FastHash(hashes[i][:], hashes[i+1][:])
	}
	branch := make([]bytecoin.Hash, depth)
	for depth > 0 {
		cnt >>= 1
		depth--
		// tmp[0] is the sibling of the leftmost node at this level.
		branch[depth] = tmp[0]
		for i, j := 1, 0; j < cnt-1; i, j = i+2, j+1 {
			tmp[j] = FastHash(tmp[i][:], tmp[i+1][:])
		}
	}
	return branch
}
// TreeHashFromBranch recomputes a tree root from a leaf hash and its branch
// of sibling hashes; branch must contain at least depth entries. Each path
// bit (tested from bit depth-1 down to 0) selects on which side the
// accumulated hash is combined with the branch hash; a zero path keeps it
// on the left at every level.
func TreeHashFromBranch(branch []bytecoin.Hash, depth int, leaf bytecoin.Hash, path bytecoin.Hash) bytecoin.Hash {
	if depth == 0 {
		return leaf
	}
	buf := [2]bytecoin.Hash{}
	fromLeaf := true
	for depth > 0 {
		var leafPath, branchPath *bytecoin.Hash
		depth--
		if !path.IsZero() && ltrIndex(path, depth) != 0 {
			// This level's path bit is set: accumulated hash goes right.
			leafPath = &buf[1]
			branchPath = &buf[0]
		} else {
			leafPath = &buf[0]
			branchPath = &buf[1]
		}
		if fromLeaf {
			*leafPath = leaf
			fromLeaf = false
		} else {
			*leafPath = FastHash(buf[0][:], buf[1][:])
		}
		*branchPath = branch[depth]
	}
	return FastHash(buf[0][:], buf[1][:])
}
// MergeMiningItem is one leaf of a merge-mining Merkle tree.
type MergeMiningItem struct {
	Leaf   bytecoin.Hash   // leaf hash placed at the position selected by Path
	Path   bytecoin.Hash   // bit string choosing left/right at each tree level
	Branch []bytecoin.Hash // sibling hashes, filled in by FillMergeMiningBranches
}
// FillMergeMiningBranches builds the merge-mining tree over items, fills in
// each item's Branch with its sibling hashes (leaf-to-root order), and
// returns the tree root.
func FillMergeMiningBranches(items []MergeMiningItem) bytecoin.Hash {
	maxDepth := mergeMiningDepth(items, 0)
	pItems := make([]*MergeMiningItem, 0, len(items))
	for i := range items {
		pItems = append(pItems, &items[i])
	}
	root := doFillMergeMiningBranches(pItems, 0, maxDepth)
	// Branches were accumulated root-to-leaf; reverse to leaf-to-root.
	for _, item := range pItems {
		br := item.Branch
		for l, r := 0, len(br)-1; l < r; l, r = l+1, r-1 {
			br[l], br[r] = br[r], br[l]
		}
	}
	return root
}
// mergeMiningDepth returns the tree depth needed to separate every item in
// items, partitioning recursively by the Path bit at each level.
func mergeMiningDepth(items []MergeMiningItem, depth int) int {
	if len(items) < 2 {
		return depth
	}
	var halves [2][]MergeMiningItem
	for _, item := range items {
		idx := ltrIndex(item.Path, depth)
		halves[idx] = append(halves[idx], item)
	}
	left := mergeMiningDepth(halves[0], depth+1)
	right := mergeMiningDepth(halves[1], depth+1)
	if right > left {
		return right
	}
	return left
}
// doFillMergeMiningBranches recursively computes the subtree hash for items,
// partitioning them by the Path bit at the current depth. Every item gets
// the opposite half's subtree hash appended to its Branch, so branches are
// accumulated root-to-leaf (the caller reverses them afterwards).
// NOTE(review): at maxDepth only items[0].Leaf is used, so items sharing a
// full path with another item are silently ignored — confirm inputs have
// distinct paths.
func doFillMergeMiningBranches(items []*MergeMiningItem, depth int, maxDepth int) bytecoin.Hash {
	if len(items) == 0 {
		// An empty subtree contributes the zero hash.
		return bytecoin.Hash{}
	}
	if depth == maxDepth {
		return items[0].Leaf
	}
	halves := [2][]*MergeMiningItem{}
	for _, item := range items {
		i := ltrIndex(item.Path, depth)
		halves[i] = append(halves[i], item)
	}
	hashes := [2]bytecoin.Hash{
		doFillMergeMiningBranches(halves[0], depth+1, maxDepth),
		doFillMergeMiningBranches(halves[1], depth+1, maxDepth),
	}
	for _, item := range items {
		i := ltrIndex(item.Path, depth)
		// Record the hash of the half this item is NOT in.
		item.Branch = append(item.Branch, hashes[1-i])
	}
	return FastHash(hashes[0][:], hashes[1][:])
}
func ltrIndex(h bytecoin.Hash, depth int) int {
if h[depth>>3]&(1<<uint(depth&7)) != 0 {
return 1
}
return 0
} | bcnutil/merkle.go | 0.51879 | 0.406214 | merkle.go | starcoder |
package basic
// DropLastTest is template to generate itself for different combination of data type.
// The returned text is the source of a Go test for DropLast<FTYPE>; the
// generator substitutes <TYPE> with a concrete element type and <FTYPE>
// with its exported name suffix. Edge cases covered: multi-element, two
// elements, one element, empty, and nil input (the last two both expect a
// non-nil empty slice back).
func DropLastTest() string {
	return `
func TestDropLast<FTYPE>(t *testing.T) {
	list := []<TYPE>{1, 2, 3, 4, 5}
	expectedList := []<TYPE>{1, 2, 3, 4}
	actualList := DropLast<FTYPE>(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []<TYPE>{1, 2}
	expectedList = []<TYPE>{1}
	actualList = DropLast<FTYPE>(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []<TYPE>{1}
	expectedList = []<TYPE>{}
	actualList = DropLast<FTYPE>(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []<TYPE>{}
	expectedList = []<TYPE>{}
	actualList = DropLast<FTYPE>(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = nil
	expectedList = []<TYPE>{}
	actualList = DropLast<FTYPE>(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}
// DropLastBoolTest is template to generate itself for different combination of data type.
// Boolean variant of DropLastTest: identical structure, but the fixture
// values are true/false instead of numbers since bool has no numeric
// literals.
func DropLastBoolTest() string {
	return `
func TestDropLast<FTYPE>(t *testing.T) {
	list := []<TYPE>{true, true, true, true, false}
	expectedList := []<TYPE>{true, true, true, true}
	actualList := DropLast<FTYPE>(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []<TYPE>{true, true}
	expectedList = []<TYPE>{true}
	actualList = DropLast<FTYPE>(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []<TYPE>{true}
	expectedList = []<TYPE>{}
	actualList = DropLast<FTYPE>(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []<TYPE>{}
	expectedList = []<TYPE>{}
	actualList = DropLast<FTYPE>(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = nil
	expectedList = []<TYPE>{}
	actualList = DropLast<FTYPE>(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}
// DropLastPtrTest is template to generate itself for different combination of data type.
// Pointer-slice variant of DropLastTest: fixtures are addresses of local
// variables so the generated test compares pointer identity via DeepEqual.
// NOTE(review): the second failure message in the generated code says
// "TestDropLast<FTYPE> failed" while the others say "...Ptr failed" —
// cosmetic inconsistency in generated output only.
func DropLastPtrTest() string {
	return `
func TestDropLast<FTYPE>Ptr(t *testing.T) {
	var v1 <TYPE> = 1
	var v2 <TYPE> = 2
	var v3 <TYPE> = 3
	var v4 <TYPE> = 4
	var v5 <TYPE> = 5
	list := []*<TYPE>{&v1, &v2, &v3, &v4, &v5}
	expectedList := []*<TYPE>{&v1, &v2, &v3, &v4}
	actualList := DropLast<FTYPE>Ptr(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE>Ptr failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []*<TYPE>{&v1, &v2}
	expectedList = []*<TYPE>{&v1}
	actualList = DropLast<FTYPE>Ptr(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE> failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []*<TYPE>{&v1}
	expectedList = []*<TYPE>{}
	actualList = DropLast<FTYPE>Ptr(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE>Ptr failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []*<TYPE>{}
	expectedList = []*<TYPE>{}
	actualList = DropLast<FTYPE>Ptr(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE>Ptr failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = nil
	expectedList = []*<TYPE>{}
	actualList = DropLast<FTYPE>Ptr(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE>Ptr failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}
// DropLastPtrBoolTest is template to generate itself for different combination of data type.
// Pointer-to-bool variant of DropLastPtrTest.
// NOTE(review): the generated code declares variables named `true` and
// `false`, shadowing the predeclared identifiers. This is legal Go (the
// initializers still refer to the predeclared values) but is confusing —
// consider renaming in the template, which would change generated output.
func DropLastPtrBoolTest() string {
	return `
func TestDropLast<FTYPE>Ptr(t *testing.T) {
	var true bool = true
	var false bool = false
	list := []*<TYPE>{&true, &true, &true, &true, &false}
	expectedList := []*<TYPE>{&true, &true, &true, &true}
	actualList := DropLast<FTYPE>Ptr(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE>Ptr failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []*<TYPE>{&true, &true}
	expectedList = []*<TYPE>{&true}
	actualList = DropLast<FTYPE>Ptr(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE>Ptr failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []*<TYPE>{&true}
	expectedList = []*<TYPE>{}
	actualList = DropLast<FTYPE>Ptr(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE>Ptr failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = []*<TYPE>{}
	expectedList = []*<TYPE>{}
	actualList = DropLast<FTYPE>Ptr(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE>Ptr failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
	list = nil
	expectedList = []*<TYPE>{}
	actualList = DropLast<FTYPE>Ptr(list)
	if !reflect.DeepEqual(expectedList, actualList) {
		t.Errorf("TestDropLast<FTYPE>Ptr failed. acutal_list=%v, expected_list=%v", actualList, expectedList)
	}
}
`
}
package cp
import "math"
// Transform is a 2D affine transform holding the six varying entries of the
// matrix
//
//	| a c tx |
//	| b d ty |
//	| 0 0  1 |
//
// (see Point/Vect for how the entries are applied).
type Transform struct {
	a, b, c, d, tx, ty float64
}
// NewTransformIdentity returns the identity transform.
func NewTransformIdentity() Transform {
	return Transform{a: 1, d: 1}
}
// NewTransform constructs a transform from its entries.
// NOTE(review): the parameter order (a, c, tx, b, d, ty) and body are
// identical to NewTransformTranspose, which looks unintentional — confirm
// whether callers expect column-order arguments here.
func NewTransform(a, c, tx, b, d, ty float64) Transform {
	return Transform{a, b, c, d, tx, ty}
}
// NewTransformTranspose constructs a transform with the entries given row
// by row:
//
//	(a, c, tx,
//	 b, d, ty)
func NewTransformTranspose(a, c, tx, b, d, ty float64) Transform {
	return Transform{a, b, c, d, tx, ty}
}
// NewTransformTranslate returns a transform that translates by translate.
func NewTransformTranslate(translate Vector) Transform {
	return Transform{a: 1, d: 1, tx: translate.X, ty: translate.Y}
}
// NewTransformScale returns a transform that scales the axes independently.
func NewTransformScale(scaleX, scaleY float64) Transform {
	return Transform{a: scaleX, d: scaleY}
}
// NewTransformRotate returns a transform that rotates by radians.
func NewTransformRotate(radians float64) Transform {
	rot := ForAngle(radians)
	return Transform{a: rot.X, b: rot.Y, c: -rot.Y, d: rot.X}
}
// NewTransformRigid returns a transform that rotates by radians and then
// translates by translate.
func NewTransformRigid(translate Vector, radians float64) Transform {
	rot := ForAngle(radians)
	return Transform{
		a: rot.X, b: rot.Y, c: -rot.Y, d: rot.X,
		tx: translate.X, ty: translate.Y,
	}
}
// NewTransformRigidInverse returns the inverse of a rigid transform t.
// No determinant division is performed, which is valid only when t is rigid
// (rotation plus translation).
func NewTransformRigidInverse(t Transform) Transform {
	return Transform{
		a: t.d, b: -t.b, c: -t.c, d: t.a,
		tx: t.c*t.ty - t.tx*t.d,
		ty: t.tx*t.b - t.a*t.ty,
	}
}
// Inverse returns the inverse of an arbitrary affine transform t.
func (t Transform) Inverse() Transform {
	invDet := 1.0 / (t.a*t.d - t.c*t.b)
	return Transform{
		a: t.d * invDet, b: -t.b * invDet,
		c: -t.c * invDet, d: t.a * invDet,
		tx: (t.c*t.ty - t.tx*t.d) * invDet,
		ty: (t.tx*t.b - t.a*t.ty) * invDet,
	}
}
// Mult returns the matrix product t·t2, i.e. the transform that applies t2
// first and then t.
func (t Transform) Mult(t2 Transform) Transform {
	return Transform{
		a:  t.a*t2.a + t.c*t2.b,
		b:  t.b*t2.a + t.d*t2.b,
		c:  t.a*t2.c + t.c*t2.d,
		d:  t.b*t2.c + t.d*t2.d,
		tx: t.a*t2.tx + t.c*t2.ty + t.tx,
		ty: t.b*t2.tx + t.d*t2.ty + t.ty,
	}
}
// Point applies the full affine transform, including translation, to p.
func (t Transform) Point(p Vector) Vector {
	return Vector{
		X: t.a*p.X + t.c*p.Y + t.tx,
		Y: t.b*p.X + t.d*p.Y + t.ty,
	}
}
// Vect applies only the linear part of the transform (no translation) to v.
func (t Transform) Vect(v Vector) Vector {
	return Vector{
		X: t.a*v.X + t.c*v.Y,
		Y: t.b*v.X + t.d*v.Y,
	}
}
// BB returns an axis-aligned bounding box that contains bb after it has
// been transformed by t.
func (t Transform) BB(bb BB) BB {
	hw := (bb.R - bb.L) * 0.5
	hh := (bb.T - bb.B) * 0.5
	// Components of the half-extent vectors under the linear part of t.
	a, b := t.a*hw, t.c*hh
	d, e := t.b*hw, t.d*hh
	hwMax := math.Max(math.Abs(a+b), math.Abs(a-b))
	hhMax := math.Max(math.Abs(d+e), math.Abs(d-e))
	return NewBBForExtents(t.Point(bb.Center()), hwMax, hhMax)
}
// miscellaneous transform matrices
// Wrap returns inner conjugated by outer: outer⁻¹ · inner · outer.
func (outer Transform) Wrap(inner Transform) Transform {
	inv := outer.Inverse()
	return inv.Mult(inner.Mult(outer))
}
// Ortho returns an orthographic projection transform mapping bb onto the
// [-1, 1] × [-1, 1] square. The receiver is not used; the method form only
// exists for call-site convenience, so the receiver is left unnamed.
func (Transform) Ortho(bb BB) Transform {
	return NewTransformTranspose(
		2.0/(bb.R-bb.L), 0.0, -(bb.R+bb.L)/(bb.R-bb.L),
		0.0, 2.0/(bb.T-bb.B), -(bb.T+bb.B)/(bb.T-bb.B),
	)
}
// BoneScale returns a transform that maps the unit x axis onto the segment
// from v0 to v1 (the origin maps to v0 and the point (1, 0) to v1). The
// receiver is not used, so it is left unnamed.
func (Transform) BoneScale(v0, v1 Vector) Transform {
	d := v1.Sub(v0)
	return NewTransformTranspose(
		d.X, -d.Y, v0.X,
		d.Y, d.X, v0.Y,
	)
}
func (outer Transform) AxialScale(axis, pivot Vector, scale float64) Transform {
A := axis.X * axis.Y * (scale - 1.0)
B := axis.Dot(pivot) * (1.0 - scale)
return NewTransformTranspose(
scale*axis.X*axis.X+axis.Y*axis.Y, A, axis.X*B,
A, axis.X*axis.X+scale*axis.Y*axis.Y, axis.Y*B,
)
} | transform.go | 0.81582 | 0.644631 | transform.go | starcoder |
package bps
import (
"fmt"
"math"
"math/big"
"strings"
)
// Denominators for each parts
//
// BPS stores values internally in parts per billion (PPB): each constant
// below is the PPB multiplier for the corresponding unit.
const (
	DenomPPM int64 = 1000                      // 1 ppm  = 1e3 ppb
	DenomDeciBasisPoint = DenomPPM * 10        // 1 dbp  = 1e4 ppb
	DenomHalfBasisPoint = DenomDeciBasisPoint * 5 // 1 hbp = 5e4 ppb
	DenomBasisPoint = DenomHalfBasisPoint * 2  // 1 bp   = 1e5 ppb
	DenomPercentage = DenomBasisPoint * 100    // 1 %    = 1e7 ppb
	DenomAmount = DenomPercentage * 100        // 1 unit = 1e9 ppb
)
// NewFromString returns a new BPS from a string representation such as
// "12", "-0.5", or ".25". Trailing zeros of the fractional part are
// insignificant and stripped before parsing.
func NewFromString(value string) (*BPS, error) {
	var intString string
	var mul int64 = 1
	parts := strings.Split(value, ".")
	switch len(parts) {
	case 1:
		// There is no decimal point, the original string can be just parsed with an int
		intString = value
	case 2:
		// strip the insignificant digits for more accurate comparisons.
		decimalPart := strings.TrimRight(parts[1], "0")
		intString = parts[0] + decimalPart
		if intString == "" && parts[1] != "" {
			intString = "0"
		}
		expInt := len(decimalPart)
		// math.Pow10 exceeds int64 range beyond 1e18; converting the
		// overflowing float64 to int64 would silently corrupt mul, so
		// reject such inputs explicitly.
		if expInt > 18 {
			return nil, fmt.Errorf("can't convert %s to BPS: too many decimal places", value)
		}
		mul = int64(math.Pow10(expInt))
	default:
		return nil, fmt.Errorf("can't convert %s to BPS: too many .s", value)
	}
	parsed, ok := new(big.Int).SetString(intString, 10)
	if !ok {
		return nil, fmt.Errorf("can't convert %s to BPS", value)
	}
	// Scale the integer digits up to PPB, then divide away the fractional
	// shift introduced by moving the decimal point.
	return newBPS(parsed).Mul(DenomAmount).Div(mul), nil
}
// MustFromString returns a new BPS from a string representation or panics if NewFromString would have returned an error.
func MustFromString(value string) *BPS {
	b, err := NewFromString(value)
	if err == nil {
		return b
	}
	panic(err)
}
// NewFromPPB makes new BPS instance from part per billion(ppb)
// PPB is the internal base resolution, so the value is copied as-is.
func NewFromPPB(ppb *big.Int) *BPS {
	return newBPS(ppb)
}
// NewFromPPM makes new BPS instance from part per million(ppm)
// One ppm equals DenomPPM (1000) ppb.
func NewFromPPM(ppm *big.Int) *BPS {
	return newBPS(ppm).Mul(DenomPPM)
}
// NewFromDeciBasisPoint makes new BPS instance from deci basis point
// One deci basis point equals DenomDeciBasisPoint ppb.
func NewFromDeciBasisPoint(deci int64) *BPS {
	return newBPS(big.NewInt(deci)).Mul(DenomDeciBasisPoint)
}
// NewFromHalfBasisPoint makes new BPS instance from half basis point
// One half basis point equals DenomHalfBasisPoint ppb.
func NewFromHalfBasisPoint(bp int64) *BPS {
	return newBPS(big.NewInt(bp)).Mul(DenomHalfBasisPoint)
}
// NewFromBasisPoint makes new BPS instance from basis point
// One basis point equals DenomBasisPoint ppb.
func NewFromBasisPoint(bp int64) *BPS {
	return newBPS(big.NewInt(bp)).Mul(DenomBasisPoint)
}
// NewFromPercentage makes new BPS instance from percentage
// One percent equals DenomPercentage ppb.
func NewFromPercentage(per int64) *BPS {
	return newBPS(big.NewInt(per)).Mul(DenomPercentage)
}
// NewFromAmount makes new BPS instance from real amount
// One whole unit equals DenomAmount ppb.
func NewFromAmount(amt int64) *BPS {
	return newBPS(big.NewInt(amt)).Mul(DenomAmount)
}
// NewFromBaseUnit makes new BPS instance from BaseUnit value.
// That means the effective digits is modifiable by BaseUnit.
func NewFromBaseUnit(v int64) *BPS {
	switch BaseUnit {
	case DeciBasisPoint:
		return NewFromDeciBasisPoint(v)
	case HalfBasisPoint:
		return NewFromHalfBasisPoint(v)
	case BasisPoint:
		return NewFromBasisPoint(v)
	case Percentage:
		return NewFromPercentage(v)
	case PPM:
		return NewFromPPM(big.NewInt(v))
	}
	// The default unit is PPB
	return NewFromPPB(big.NewInt(v))
}
func newBPS(value *big.Int) *BPS {
if value == nil {
value = big.NewInt(0)
}
return &BPS{
value: new(big.Int).Set(value),
}
} | bps/construct.go | 0.733833 | 0.409457 | construct.go | starcoder |
package geo
import (
"errors"
"fmt"
"math"
)
// BBox describes a simple bounding box.
// (Xmin, Ymin) is the bottom-left corner and (Xmax, Ymax) the top-right one
// (see String).
type BBox interface {
	Xcenter() float64 // x coordinate of the center
	Ycenter() float64 // y coordinate of the center
	Width() float64   // Xmax - Xmin
	Height() float64  // Ymax - Ymin
	Xmin() float64
	Xmax() float64
	Ymin() float64
	Ymax() float64
	// Contains reports whether the point (x, y) lies inside the box.
	Contains(x, y float64) bool
	fmt.Stringer
}
// bbox is the concrete BBox implementation. The invariants xmin < xmax and
// ymin < ymax are enforced by NewBBox (but not by NewBBoxUnchecked).
type bbox struct {
	xmin, xmax, ymin, ymax float64
}
var (
	// NOTE(review): NewBBox also rejects equality (xmin == xmax), so the
	// "less than" wording in these messages is slightly imprecise.
	errRightLowerThanLeft = errors.New("xmax coordinate is less than xmin one")
	errTopLowerThanBottom = errors.New("ymax coordinate is less than ymin one")
)
// NewBBox creates a bounding box that is guaranteed
// to be valid if error is nil
//
// Degenerate (zero width or zero height) boxes are rejected too.
// NOTE(review): NaN inputs pass both checks and produce an invalid box —
// confirm callers never pass NaN.
func NewBBox(xmin, ymin, xmax, ymax float64) (BBox, error) {
	if xmin >= xmax {
		return nil, errRightLowerThanLeft
	}
	if ymin >= ymax {
		return nil, errTopLowerThanBottom
	}
	return &bbox{xmin, xmax, ymin, ymax}, nil
}
// NewBBoxUnchecked return a bbox that might contain
// garbage (mostly used in test to ease declaration of multiple
// bounding boxes)
//
// On invalid input the returned interface is nil, so calling any method on
// it will panic.
func NewBBoxUnchecked(xmin, ymin, xmax, ymax float64) BBox {
	b, _ := NewBBox(xmin, ymin, xmax, ymax)
	return b
}
// Xcenter returns the x coordinate of the box center.
func (b bbox) Xcenter() float64 {
	return 0.5 * (b.xmin + b.xmax)
}
// Ycenter returns the y coordinate of the box center.
func (b bbox) Ycenter() float64 {
	return 0.5 * (b.ymin + b.ymax)
}
// Width returns the horizontal extent of the box.
func (b bbox) Width() float64 {
	return b.xmax - b.xmin
}
// Height returns the vertical extent of the box.
func (b bbox) Height() float64 {
	return b.ymax - b.ymin
}
// Xmin returns the left edge of the box.
func (b bbox) Xmin() float64 {
	return b.xmin
}

// Xmax returns the right edge of the box.
func (b bbox) Xmax() float64 {
	return b.xmax
}

// Ymin returns the bottom edge of the box.
func (b bbox) Ymin() float64 {
	return b.ymin
}

// Ymax returns the top edge of the box.
func (b bbox) Ymax() float64 {
	return b.ymax
}
// Intersect returns the common part of boxes a and b. If the boxes do not
// overlap (or only touch), the intersection is degenerate and an error is
// returned by NewBBox.
func Intersect(a, b BBox) (BBox, error) {
	xmin := math.Max(a.Xmin(), b.Xmin())
	ymin := math.Max(a.Ymin(), b.Ymin())
	xmax := math.Min(a.Xmax(), b.Xmax())
	ymax := math.Min(a.Ymax(), b.Ymax())
	return NewBBox(xmin, ymin, xmax, ymax)
}
// Contains returns true if (x,y) is inside the box.
// Whether boundary points count as inside is decided by IsInRangeFloat64
// (defined elsewhere in this package) — presumably bounds are inclusive;
// TODO(review): confirm.
func (b bbox) Contains(x, y float64) bool {
	return IsInRangeFloat64(x, b.xmin, b.xmax) && IsInRangeFloat64(y, b.ymin, b.ymax)
}
// String renders the box as its bottom-left and top-right corners,
// each coordinate formatted with two decimal places.
func (b bbox) String() string {
	return fmt.Sprintf("bottomLeft: %7.2f,%7.2f topRight: %7.2f,%7.2f",
		b.xmin, b.ymin, b.xmax, b.ymax)
}
// EqualBBox checks if two boxes are equal.
// For the precision of the comparison see EqualFloat function.
func EqualBBox(a, b BBox) bool {
return EqualFloat(a.Xmin(), b.Xmin()) &&
EqualFloat(a.Xmax(), b.Xmax()) &&
EqualFloat(a.Ymin(), b.Ymin()) &&
EqualFloat(a.Ymax(), b.Ymax())
} | geo/bbox.go | 0.824991 | 0.449091 | bbox.go | starcoder |
package onshape
import (
"encoding/json"
)
// BTFeatureTypeFilter962AllOf struct for BTFeatureTypeFilter962AllOf.
// Both fields are optional pointers: nil means "not set", which the
// generated accessors distinguish from an explicit empty string.
type BTFeatureTypeFilter962AllOf struct {
	// BtType — presumably the Onshape API type discriminator; confirm against the API docs.
	BtType *string `json:"btType,omitempty"`
	// FeatureType — the feature type string this filter matches on.
	FeatureType *string `json:"featureType,omitempty"`
}
// NewBTFeatureTypeFilter962AllOf instantiates a new BTFeatureTypeFilter962AllOf object.
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed.
func NewBTFeatureTypeFilter962AllOf() *BTFeatureTypeFilter962AllOf {
	return &BTFeatureTypeFilter962AllOf{}
}

// NewBTFeatureTypeFilter962AllOfWithDefaults instantiates a new BTFeatureTypeFilter962AllOf object.
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set.
func NewBTFeatureTypeFilter962AllOfWithDefaults() *BTFeatureTypeFilter962AllOf {
	return &BTFeatureTypeFilter962AllOf{}
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTFeatureTypeFilter962AllOf) GetBtType() string {
	if o == nil || o.BtType == nil {
		return ""
	}
	return *o.BtType
}

// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTFeatureTypeFilter962AllOf) GetBtTypeOk() (*string, bool) {
	if o == nil || o.BtType == nil {
		return nil, false
	}
	return o.BtType, true
}

// HasBtType returns a boolean if a field has been set.
func (o *BTFeatureTypeFilter962AllOf) HasBtType() bool {
	return o != nil && o.BtType != nil
}

// SetBtType gets a reference to the given string and assigns it to the BtType field.
func (o *BTFeatureTypeFilter962AllOf) SetBtType(v string) {
	o.BtType = &v
}
// GetFeatureType returns the FeatureType field value if set, zero value otherwise.
func (o *BTFeatureTypeFilter962AllOf) GetFeatureType() string {
	if o == nil || o.FeatureType == nil {
		return ""
	}
	return *o.FeatureType
}

// GetFeatureTypeOk returns a tuple with the FeatureType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTFeatureTypeFilter962AllOf) GetFeatureTypeOk() (*string, bool) {
	if o == nil || o.FeatureType == nil {
		return nil, false
	}
	return o.FeatureType, true
}

// HasFeatureType returns a boolean if a field has been set.
func (o *BTFeatureTypeFilter962AllOf) HasFeatureType() bool {
	return o != nil && o.FeatureType != nil
}

// SetFeatureType gets a reference to the given string and assigns it to the FeatureType field.
func (o *BTFeatureTypeFilter962AllOf) SetFeatureType(v string) {
	o.FeatureType = &v
}
// MarshalJSON serializes only the fields that have been set; unset (nil)
// fields are omitted from the resulting JSON object entirely.
func (o BTFeatureTypeFilter962AllOf) MarshalJSON() ([]byte, error) {
	out := make(map[string]interface{}, 2)
	if o.BtType != nil {
		out["btType"] = o.BtType
	}
	if o.FeatureType != nil {
		out["featureType"] = o.FeatureType
	}
	return json.Marshal(out)
}
// NullableBTFeatureTypeFilter962AllOf wraps a pointer value together with an
// explicit "set" flag so JSON null can be round-tripped: unset, set-to-nil,
// and set-to-value states are all distinguishable.
type NullableBTFeatureTypeFilter962AllOf struct {
	value *BTFeatureTypeFilter962AllOf
	isSet bool
}

// Get returns the wrapped value (possibly nil).
func (v NullableBTFeatureTypeFilter962AllOf) Get() *BTFeatureTypeFilter962AllOf {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableBTFeatureTypeFilter962AllOf) Set(val *BTFeatureTypeFilter962AllOf) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set or UnmarshalJSON has been called.
func (v NullableBTFeatureTypeFilter962AllOf) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableBTFeatureTypeFilter962AllOf) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableBTFeatureTypeFilter962AllOf returns a wrapper already marked as set.
func NewNullableBTFeatureTypeFilter962AllOf(val *BTFeatureTypeFilter962AllOf) *NullableBTFeatureTypeFilter962AllOf {
	return &NullableBTFeatureTypeFilter962AllOf{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; an unset/nil value encodes as JSON null.
func (v NullableBTFeatureTypeFilter962AllOf) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}

// UnmarshalJSON decodes src into the wrapped value and marks the wrapper as
// set — even when src is the JSON literal null.
func (v *NullableBTFeatureTypeFilter962AllOf) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package nn
import (
"fmt"
"reflect"
"time"
)
// Model is a neural network model.
//
// NOTE(review): *Sequential does not currently satisfy this interface — its
// Build method takes an extra OptimizerFactory parameter
// (Build(Loss, OptimizerFactory) error vs Build(Loss) error). Confirm which
// signature is intended and align the two.
type Model interface {
	// Layers returns the layers composing the model.
	Layers() []Layer
	// Fit trains the model on inputs x with targets y.
	Fit(x, y []*Tensor, epochs, batchSize int)
	// Predict runs a forward pass and returns the model outputs.
	Predict([]*Tensor) []*Tensor
	// Build finalizes the model with the given loss function.
	Build(Loss) error
}
// Sequential is a model composed of a linear stack of layers.
type Sequential struct {
	// inputShape is the shape of the model input, fixed at construction.
	inputShape Shape
	// outputShape is initialized to inputShape in NewSequential.
	outputShape Shape
	// layers always starts with an inputLayer; user layers are appended after it.
	layers []Layer
	// loss and optimizerFactory are assigned by Build.
	loss Loss
	optimizerFactory OptimizerFactory
}

// NewSequential creates an instance of sequential model.
// The layer stack is seeded with an inputLayer placeholder.
func NewSequential(inputShape Shape) *Sequential {
	return &Sequential{
		inputShape: inputShape,
		outputShape: inputShape,
		layers: []Layer{&inputLayer{}},
	}
}

// Layers returns layers that model has (including the leading inputLayer).
func (s *Sequential) Layers() []Layer {
	return s.layers
}
// Fit fits the model to the given dataset, running `epochs` passes of
// mini-batch gradient updates and printing per-step progress (loss and
// accuracy) to stdout.
//
// Samples beyond the last complete batch are skipped during the per-step
// updates; the end-of-epoch metrics are nevertheless computed over the
// full dataset. A non-positive batchSize is a no-op (previously it caused
// an integer division-by-zero panic).
func (s *Sequential) Fit(x, t []*Tensor, epochs, batchSize int) {
	if batchSize <= 0 {
		return
	}
	totalStart := time.Now()
	for epoch := 0; epoch < epochs; epoch++ {
		fmt.Printf("epoch %v/%v\n", epoch+1, epochs)
		steps := len(x) / batchSize
		start := time.Now()
		for step := 0; step < steps; step++ {
			startIndex := step * batchSize
			endIndex := (step + 1) * batchSize
			y := s.Predict(x[startIndex:endIndex])
			loss := s.Loss(y, t[startIndex:endIndex])
			acc := s.Accuracy(y, t[startIndex:endIndex])
			// \r\033[K rewinds the cursor and clears the line so the
			// progress indicator updates in place.
			fmt.Printf("\r\033[K%v/%v\t%v%%\t%.1fs\tloss: %.4f\tacc: %.4f", step*batchSize, steps*batchSize, 100*step/steps, time.Since(start).Seconds(), loss, acc)
			s.update(x[startIndex:endIndex], t[startIndex:endIndex])
		}
		y := s.Predict(x)
		loss := s.Loss(y, t)
		acc := s.Accuracy(y, t)
		fmt.Printf("\r\033[K%v/%v\t100%%\t%.1fs\tloss: %.4f\tacc: %.4f\n", steps*batchSize, steps*batchSize, time.Since(start).Seconds(), loss, acc)
	}
	fmt.Printf("%.1fs\n", time.Since(totalStart).Seconds())
}
// update runs one optimization step on the mini-batch (x, t): a forward
// pass through every layer, the loss forward pass, then backpropagation in
// reverse layer order.
func (s *Sequential) update(x, t []*Tensor) {
	// Forward pass uses Forward (Predict uses Call) — presumably the
	// training-mode pass; confirm in the Layer interface docs.
	for _, layer := range s.layers {
		x = layer.Forward(x)
	}
	s.loss.Forward(x, t)
	dout := s.loss.Backward()
	// Statement order matters: Update must run after Backward has produced
	// this layer's gradients.
	for i := len(s.layers) - 1; i >= 0; i-- {
		dout = s.layers[i].Backward(dout)
		s.layers[i].Update()
	}
}
// Predict predicts output for the given data by threading it through
// every layer's inference-mode Call.
func (s *Sequential) Predict(inputs []*Tensor) []*Tensor {
	out := inputs
	for _, l := range s.layers {
		out = l.Call(out)
	}
	return out
}
// Loss is the loss of the predicted values y against the targets t,
// delegating to the loss function assigned in Build.
func (s *Sequential) Loss(y, t []*Tensor) float64 {
	return s.loss.Call(y, t)
}
// Accuracy returns the fraction of predictions in y whose arg-max index
// matches the arg-max of the corresponding target in t.
// An empty target set now yields 0 instead of NaN (0/0).
// NOTE(review): y is assumed to be at least as long as t — confirm callers.
func (s *Sequential) Accuracy(y, t []*Tensor) float64 {
	if len(t) == 0 {
		return 0
	}
	sum := 0.0
	for i := 0; i < len(t); i++ {
		if y[i].MaxIndex() == t[i].MaxIndex() {
			sum++
		}
	}
	return sum / float64(len(t))
}
// Build builds a model by connecting the given layers: every layer is
// initialized in order, with each layer's output shape threaded into the
// next layer's Init, then the loss and optimizer factory are stored.
// NOTE(review): this signature (Loss, OptimizerFactory) differs from the
// Model interface's Build(Loss).
func (s *Sequential) Build(loss Loss, factory OptimizerFactory) error {
	if err := s.layers[0].Init(s.inputShape, factory); err != nil {
		return err
	}
	shape := s.layers[0].OutputShape()
	for i, layer := range s.layers[1:] {
		if err := layer.Init(shape, factory); err != nil {
			// i+1 because the ranged slice starts at layers[1].
			return fmt.Errorf("build error layer %v %v %v", i+1, reflect.TypeOf(layer), err)
		}
		shape = layer.OutputShape()
	}
	s.loss = loss
	s.optimizerFactory = factory
	return nil
}
// AddLayer appends layer to the end of the model's layer stack.
// Call it before Build; layers added afterwards are never initialized.
func (s *Sequential) AddLayer(layer Layer) {
	s.layers = append(s.layers, layer)
}
// Summary is summary of model.
func (s *Sequential) Summary() string {
res := "Layer Type\tOutput Shape\tParams\n=======================================\n"
sum := 0
for _, layer := range s.layers {
params := layer.Params()
param := 0
for _, p := range params {
param += p.Shape().Elements()
}
res += fmt.Sprintf("%v\t\t%v\t\t%v\n", reflect.TypeOf(layer).String()[4:], layer.OutputShape(), param)
sum += param
}
res += fmt.Sprintf("\nTotal params:\t%v", sum)
return res
} | nn/model.go | 0.88462 | 0.485295 | model.go | starcoder |
package common
import (
"github.com/BoltApp/sleet"
)
// CURRENCIES maps the precision to the currency symbol used for lookups for some PsP providers that rely on these values for amount calculation
// One example is Braintree which uses its own amount structure requiring the precision of a currency
// Precision is the number of decimal digits of the minor unit — presumably
// following ISO 4217 exponents (e.g. JPY 0, BHD 3); confirm against the standard.
var CURRENCIES = map[Code]sleet.Currency{
	AED: {Precision: 2, Symbol: "AED"},
	AFN: {Precision: 2, Symbol: "؋"},
	ALL: {Precision: 2, Symbol: "Lek"},
	AMD: {Precision: 2, Symbol: "AMD"},
	ANG: {Precision: 2, Symbol: "ƒ"},
	AOA: {Precision: 2, Symbol: "AOA"},
	ARS: {Precision: 2, Symbol: "N$"},
	AUD: {Precision: 2, Symbol: "AU$"},
	AWG: {Precision: 2, Symbol: "ƒ"},
	AZN: {Precision: 2, Symbol: "ман"},
	BAM: {Precision: 2, Symbol: "KM"},
	BBD: {Precision: 2, Symbol: "Bds$"},
	BDT: {Precision: 2, Symbol: "BDT"},
	BGN: {Precision: 2, Symbol: "лв"},
	BHD: {Precision: 3, Symbol: "BHD"},
	BIF: {Precision: 0, Symbol: "BIF"},
	BMD: {Precision: 2, Symbol: "BD$"},
	BND: {Precision: 2, Symbol: "BN$"},
	BOB: {Precision: 2, Symbol: "$b"},
	BOV: {Precision: 2, Symbol: "BOV"},
	BRL: {Precision: 2, Symbol: "R$"},
	BSD: {Precision: 2, Symbol: "B$"},
	BTN: {Precision: 2, Symbol: "BTN"},
	BWP: {Precision: 2, Symbol: "P"},
	BYN: {Precision: 2, Symbol: "BYN"},
	BZD: {Precision: 2, Symbol: "BZ$"},
	CAD: {Precision: 2, Symbol: "CA$"},
	CDF: {Precision: 2, Symbol: "CDF"},
	CHE: {Precision: 2, Symbol: "CHE"},
	CHF: {Precision: 2, Symbol: "CHF"},
	CHW: {Precision: 2, Symbol: "CHW"},
	CLF: {Precision: 4, Symbol: "CLF"},
	CLP: {Precision: 0, Symbol: "CLP$"},
	CNY: {Precision: 2, Symbol: "¥"},
	COP: {Precision: 2, Symbol: "COL$"},
	COU: {Precision: 2, Symbol: "COU"},
	CRC: {Precision: 2, Symbol: "₡"},
	CUC: {Precision: 2, Symbol: "CUC"},
	CUP: {Precision: 2, Symbol: "₱"},
	CVE: {Precision: 2, Symbol: "CVE"},
	CZK: {Precision: 2, Symbol: "Kč"},
	DJF: {Precision: 0, Symbol: "DJF"},
	DKK: {Precision: 2, Symbol: "kr"},
	DOP: {Precision: 2, Symbol: "RD$"},
	DZD: {Precision: 2, Symbol: "DZD"},
	EGP: {Precision: 2, Symbol: "£"},
	ERN: {Precision: 2, Symbol: "ERN"},
	ETB: {Precision: 2, Symbol: "ETB"},
	EUR: {Precision: 2, Symbol: "€"},
	FJD: {Precision: 2, Symbol: "FJ$"},
	FKP: {Precision: 2, Symbol: "£"},
	GBP: {Precision: 2, Symbol: "£"},
	GEL: {Precision: 2, Symbol: "GEL"},
	GHS: {Precision: 2, Symbol: "GHS"},
	GIP: {Precision: 2, Symbol: "£"},
	GMD: {Precision: 2, Symbol: "D"},
	GNF: {Precision: 0, Symbol: "GNF"},
	GTQ: {Precision: 2, Symbol: "Q"},
	GYD: {Precision: 2, Symbol: "GY$"},
	HKD: {Precision: 2, Symbol: "HK$"},
	HNL: {Precision: 2, Symbol: "L"},
	HRK: {Precision: 2, Symbol: "kn"},
	HTG: {Precision: 2, Symbol: "HTG"},
	HUF: {Precision: 2, Symbol: "Ft"},
	IDR: {Precision: 2, Symbol: "Rp"},
	ILS: {Precision: 2, Symbol: "₪"},
	INR: {Precision: 2, Symbol: "INR"},
	IQD: {Precision: 3, Symbol: "IQD"},
	IRR: {Precision: 2, Symbol: "﷼"},
	ISK: {Precision: 0, Symbol: "kr"},
	JMD: {Precision: 2, Symbol: "J$"},
	JOD: {Precision: 3, Symbol: "JOD"},
	JPY: {Precision: 0, Symbol: "¥"},
	KES: {Precision: 2, Symbol: "KES"},
	KGS: {Precision: 2, Symbol: "лв"},
	KHR: {Precision: 2, Symbol: "៛"},
	KMF: {Precision: 0, Symbol: "KMF"},
	KPW: {Precision: 2, Symbol: "₩"},
	KRW: {Precision: 0, Symbol: "₩"},
	KWD: {Precision: 3, Symbol: "KWD"},
	KYD: {Precision: 2, Symbol: "CI$"},
	KZT: {Precision: 2, Symbol: "лв"},
	LAK: {Precision: 2, Symbol: "₭"},
	LBP: {Precision: 2, Symbol: "£"},
	LKR: {Precision: 2, Symbol: "₨"},
	LRD: {Precision: 2, Symbol: "L$"},
	LSL: {Precision: 2, Symbol: "LSL"},
	LYD: {Precision: 3, Symbol: "LYD"},
	MAD: {Precision: 2, Symbol: "MAD"},
	MDL: {Precision: 2, Symbol: "MDL"},
	MGA: {Precision: 2, Symbol: "MGA"},
	MKD: {Precision: 2, Symbol: "ден"},
	MMK: {Precision: 2, Symbol: "MMK"},
	MNT: {Precision: 2, Symbol: "₮"},
	MOP: {Precision: 2, Symbol: "MOP"},
	MRU: {Precision: 2, Symbol: "MRU"},
	MUR: {Precision: 2, Symbol: "₨"},
	MVR: {Precision: 2, Symbol: "MVR"},
	MWK: {Precision: 2, Symbol: "MWK"},
	MXN: {Precision: 2, Symbol: "Mex$"},
	MXV: {Precision: 2, Symbol: "MXV"},
	MYR: {Precision: 2, Symbol: "RM"},
	MZN: {Precision: 2, Symbol: "MT"},
	NAD: {Precision: 2, Symbol: "NA$"},
	NGN: {Precision: 2, Symbol: "₦"},
	NIO: {Precision: 2, Symbol: "C$"},
	NOK: {Precision: 2, Symbol: "kr"},
	NPR: {Precision: 2, Symbol: "₨"},
	NZD: {Precision: 2, Symbol: "NZ$"},
	OMR: {Precision: 3, Symbol: "﷼"},
	PAB: {Precision: 2, Symbol: "B/."},
	PEN: {Precision: 2, Symbol: "S/."},
	PGK: {Precision: 2, Symbol: "PGK"},
	PHP: {Precision: 2, Symbol: "₱"},
	PKR: {Precision: 2, Symbol: "₨"},
	PLN: {Precision: 2, Symbol: "zł"},
	PYG: {Precision: 0, Symbol: "Gs"},
	QAR: {Precision: 2, Symbol: "﷼"},
	RON: {Precision: 2, Symbol: "lei"},
	RSD: {Precision: 2, Symbol: "Дин."},
	RUB: {Precision: 2, Symbol: "руб"},
	RWF: {Precision: 0, Symbol: "RWF"},
	SAR: {Precision: 2, Symbol: "﷼"},
	SBD: {Precision: 2, Symbol: "SI$"},
	SCR: {Precision: 2, Symbol: "₨"},
	SDG: {Precision: 2, Symbol: "SDG"},
	SEK: {Precision: 2, Symbol: "kr"},
	SGD: {Precision: 2, Symbol: "S$"},
	SHP: {Precision: 2, Symbol: "£"},
	SLL: {Precision: 2, Symbol: "SLL"},
	SOS: {Precision: 2, Symbol: "S"},
	SRD: {Precision: 2, Symbol: "SR$"},
	SSP: {Precision: 2, Symbol: "SSP"},
	STN: {Precision: 2, Symbol: "STN"},
	SVC: {Precision: 2, Symbol: "₡"},
	SYP: {Precision: 2, Symbol: "£"},
	SZL: {Precision: 2, Symbol: "SZL"},
	THB: {Precision: 2, Symbol: "฿"},
	TJS: {Precision: 2, Symbol: "TJS"},
	TMT: {Precision: 2, Symbol: "TMT"},
	TND: {Precision: 3, Symbol: "TND"},
	TOP: {Precision: 2, Symbol: "TOP"},
	TRY: {Precision: 2, Symbol: "TRY"},
	TTD: {Precision: 2, Symbol: "TT$"},
	TWD: {Precision: 2, Symbol: "NT$"},
	TZS: {Precision: 2, Symbol: "TZS"},
	UAH: {Precision: 2, Symbol: "₴"},
	UGX: {Precision: 0, Symbol: "UGX"},
	USD: {Precision: 2, Symbol: "$"},
	USN: {Precision: 2, Symbol: "USN"},
	UYI: {Precision: 0, Symbol: "UYI"},
	UYU: {Precision: 2, Symbol: "$U"},
	UYW: {Precision: 4, Symbol: "UYW"},
	UZS: {Precision: 2, Symbol: "лв"},
	VES: {Precision: 2, Symbol: "VES"},
	VND: {Precision: 0, Symbol: "₫"},
	VUV: {Precision: 0, Symbol: "VUV"},
	WST: {Precision: 2, Symbol: "WST"},
	XAF: {Precision: 0, Symbol: "XAF"},
	XCD: {Precision: 2, Symbol: "EC$"},
	XOF: {Precision: 0, Symbol: "XOF"},
	XPF: {Precision: 0, Symbol: "XPF"},
	YER: {Precision: 2, Symbol: "﷼"},
	ZAR: {Precision: 2, Symbol: "R"},
	ZMW: {Precision: 2, Symbol: "ZMW"},
	ZWL: {Precision: 2, Symbol: "ZWL"},
}
package oliviere_v6
/** oliviere_v6 is a helper to identify the shard and server on which a document exists.
It can be used to group together large BulkRequests in Bulks that hit only a specific shard.
This should be faster because the coordinator role will be simplified, as each bulk only hits one server and one shard.
This is the implementation that uses https://godoc.org/github.com/olivere/elastic v6 structures
The algorithm can be replicated for any other ES driver.
https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-routing-field.html
shard_num = hash(_routing) % NumPrimaryShards
shard_num = (hash(_routing) + hash(_id) % RoutingPartitionSize) % NumPrimaryShards
*/
import (
	"context"
	"errors"
	"strconv"

	"github.com/bgadrian/es-bulk-shards/routing"
	"github.com/olivere/elastic/v6"
)
// IndexSettings holds the subset of Elasticsearch index settings needed for
// shard routing. The JSON field names match the ES settings API keys.
type IndexSettings struct {
	RoutingPartitionSize int `json:"routing_partition_size"`
	NumPrimaryShards int `json:"number_of_shards"`
}
// Router identifies the shard number on which a specific routing key should exist.
// The settings cache is a plain map, so a Router is not safe for concurrent
// use — NOTE(review): add a mutex if shared across goroutines.
type Router struct {
	cache map[string]IndexSettings
	// client is stored by value — NOTE(review): olivere clients are usually
	// passed as *elastic.Client; confirm the copy is intentional.
	client elastic.Client
}

// NewRouter builds a Router around the given Elasticsearch client with an
// empty per-index settings cache.
func NewRouter(client elastic.Client) *Router {
	return &Router{
		cache: make(map[string]IndexSettings),
		client: client,
	}
}
// ShardNum returns the shard index for a specific document, using the
// Elasticsearch routing formulas:
//
//	shard_num = hash(_routing) % NumPrimaryShards
//	shard_num = (hash(_routing) + hash(_id) % RoutingPartitionSize) % NumPrimaryShards
//
// Index settings are fetched once per index and then served from the cache.
// NOTE(review): the `id` parameter is unused — kept for interface stability;
// confirm whether docID was meant to replace it.
func (r *Router) ShardNum(ctx context.Context, indexName, id, docID, docRouting string) (int, error) {
	sett, isCached := r.cache[indexName]
	if !isCached {
		s, err := r.fetchIndexSettings(ctx, indexName)
		if err != nil {
			return 0, err
		}
		// Bug fix: the fetched settings were never stored, so every call
		// for the same index hit Elasticsearch again. Populate the cache.
		r.cache[indexName] = s
		sett = s
	}
	hashRouting, err := routing.Murmur3HashFunction(docRouting)
	if err != nil {
		return 0, err
	}
	if sett.RoutingPartitionSize > 1 {
		// shard_num = (hash(_routing) + hash(_id) % RoutingPartitionSize) % NumPrimaryShards
		hashID, err := routing.Murmur3HashFunction(docID)
		if err != nil {
			return 0, err
		}
		return (hashRouting + hashID%sett.RoutingPartitionSize) % sett.NumPrimaryShards, nil
	}
	// shard_num = hash(_routing) % NumPrimaryShards
	return hashRouting % sett.NumPrimaryShards, nil
}
func (r *Router) fetchIndexSettings(ctx context.Context, indexName string) (IndexSettings, error) {
data := IndexSettings{
//default values from docs
RoutingPartitionSize: 1,
NumPrimaryShards: 5,
}
// https://www.elastic.co/guide/en/elasticsearch/reference/6.8/index-modules.html
all, err := r.client.IndexGetSettings(indexName).
Name("number_of_shards", "routing_partition_size").
Do(ctx)
if err != nil {
return data, err
}
sett, haveResponse := all[indexName]
if !haveResponse || sett == nil || sett.Settings == nil {
return data, errors.New("missing IndexSettings")
}
if val, hasData := sett.Settings["number_of_shards"]; hasData && val != nil {
asInt, ok := val.(int)
if ok {
data.NumPrimaryShards = asInt
}
}
if val, hasData := sett.Settings["routing_partition_size"]; hasData && val != nil {
asInt, ok := val.(int)
if ok {
data.RoutingPartitionSize = asInt
}
}
return data, nil
} | oliviere-v6/oliviere.go | 0.765944 | 0.416085 | oliviere.go | starcoder |
package di
import (
"fmt"
"reflect"
)
// injector is the default IInjector implementation: a registry mapping a
// reflect.Type to the value bound for that type.
type injector struct {
	values map[reflect.Type]reflect.Value
}
// MARK: Struct's constructors
// Injector creates an empty injector with no bindings.
func Injector() IInjector {
	injector := injector{values: make(map[reflect.Type]reflect.Value)}
	return &injector
}
// MARK: IInjector's members

// Invoke calls function, resolving each parameter from the injector's
// registered values, and returns the function's results. It returns an
// error when the argument is not a function or a dependency cannot be
// resolved (previously both cases made reflect panic).
func (i *injector) Invoke(function interface{}) ([]reflect.Value, error) {
	fnType := reflect.TypeOf(function)
	// Condition validation: input must be a function type. The nil check
	// guards Invoke(nil), which would otherwise panic on Kind().
	if fnType == nil || fnType.Kind() != reflect.Func {
		return nil, fmt.Errorf("input is not a function type")
	}
	numIn := fnType.NumIn()
	input := make([]reflect.Value, numIn)
	for idx := 0; idx < numIn; idx++ {
		argType := fnType.In(idx)
		value := i.Get(argType)
		// reflect.Value.Call panics on an invalid Value; surface a clear
		// error instead when a dependency was never mapped.
		if !value.IsValid() {
			return nil, fmt.Errorf("no value mapped for argument %d (%v)", idx, argType)
		}
		input[idx] = value
	}
	return reflect.ValueOf(function).Call(input), nil
}
// InterfaceOf dereferences a (possibly multi-level) pointer down to an
// interface type and returns that type.
// It panics if value is not a pointer to an interface; call it as
// InterfaceOf((*MyInterface)(nil)).
func InterfaceOf(value interface{}) reflect.Type {
	typ := reflect.TypeOf(value)
	for typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}
	if typ.Kind() != reflect.Interface {
		panic("Called inject.InterfaceOf with a value that is not a pointer to an interface. (*MyInterface)(nil)")
	}
	return typ
}
// Apply maps dependencies in the Type map to each field in the struct
// that is tagged with 'inject'.
// Non-struct values (after pointer dereferencing) are silently ignored.
// Returns an error if a tagged field's type has no resolvable value.
func (inj *injector) Apply(val interface{}) error {
	v := reflect.ValueOf(val)
	for v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil // Should not panic here ?
	}
	t := v.Type()
	for i := 0; i < v.NumField(); i++ {
		f := v.Field(i)
		structField := t.Field(i)
		// Accepts both the legacy bare tag form (`inject`) and the
		// conventional key:value form (`inject:"..."`).
		if f.CanSet() && (structField.Tag == "inject" || structField.Tag.Get("inject") != "") {
			ft := f.Type()
			// NOTE(review): this v shadows the outer struct value — works,
			// but renaming it would be clearer.
			v := inj.Get(ft)
			if !v.IsValid() {
				return fmt.Errorf("Value not found for type %v", ft)
			}
			f.Set(v)
		}
	}
	return nil
}
// Map binds val under its concrete dynamic type (reflect.TypeOf) and
// returns the TypeMapper it was registered in.
func (i *injector) Map(val interface{}) TypeMapper {
	i.values[reflect.TypeOf(val)] = reflect.ValueOf(val)
	return i
}

// MapTo binds val under the interface type pointed to by ifacePtr
// (e.g. MapTo(impl, (*MyInterface)(nil))); panics via InterfaceOf if
// ifacePtr is not a pointer to an interface.
func (i *injector) MapTo(val interface{}, ifacePtr interface{}) TypeMapper {
	i.values[InterfaceOf(ifacePtr)] = reflect.ValueOf(val)
	return i
}

// Set binds the given reflect.Type directly to the given reflect.Value and
// returns the TypeMapper the mapping has been registered in.
func (i *injector) Set(typ reflect.Type, val reflect.Value) TypeMapper {
	i.values[typ] = val
	return i
}
func (i *injector) Get(t reflect.Type) reflect.Value {
val := i.values[t]
// reflect.Array
// reflect.Chan // Channel
// reflect.Interface
// reflect.Map
// reflect.Ptr
// reflect.Slice
// reflect.String
// reflect.Struct
if val.IsValid() {
return val
} else {
// switch t.Kind() {
// case reflect.Complex64, reflect.Complex128, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
// val = reflect.ValueOf(0)
// break
// case reflect.Bool:
// val = reflect.ValueOf(false)
// break
// case reflect.String:
// val = reflect.ValueOf("")
// break
// }
if t.Kind() == reflect.Bool {
val = reflect.ValueOf(false)
} else if t.Kind() == reflect.Complex64 || t.Kind() == reflect.Complex128 || t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64 || t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64 || t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64 {
val = reflect.ValueOf(0)
} else if t.Kind() == reflect.String {
val = reflect.ValueOf("")
} else if t.Kind() == reflect.Array {
val = reflect.MakeSlice(t, 0, 0)
} else if t.Kind() == reflect.Map {
val = reflect.MakeMap(t)
} else if t.Kind() == reflect.Interface {
for k, v := range i.values {
if k.Implements(t) {
val = v
break
}
}
} else {
val = reflect.ValueOf(nil)
}
}
return val
} | inject.go | 0.527073 | 0.496399 | inject.go | starcoder |
package deepcopy
// Copyable is implemented by types that can produce a deep copy of
// themselves; DeepCopy delegates to this method when the value (or a
// pointer to it) implements the interface.
type Copyable interface {
	DeepCopy() interface{}
}
// DeepCopy will create a deep copy of the source object.
// Maps and slices will be taken into account when copying.
func DeepCopy(object interface{}) interface{} {
switch t := object.(type) {
case Copyable, *Copyable:
var value Copyable
if val, ok := t.(*Copyable); ok {
value = *val
} else {
value = t.(Copyable)
}
return value.DeepCopy()
// Deep copy for types map[string]T
case map[string]bool, *map[string]bool:
var value map[string]bool
if val, ok := t.(*map[string]bool); ok {
value = *val
} else {
value = t.(map[string]bool)
}
clone := make(map[string]bool, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]int, *map[string]int:
var value map[string]int
if val, ok := t.(*map[string]int); ok {
value = *val
} else {
value = t.(map[string]int)
}
clone := make(map[string]int, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]int8, *map[string]int8:
var value map[string]int8
if val, ok := t.(*map[string]int8); ok {
value = *val
} else {
value = t.(map[string]int8)
}
clone := make(map[string]int8, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]int16, *map[string]int16:
var value map[string]int16
if val, ok := t.(*map[string]int16); ok {
value = *val
} else {
value = t.(map[string]int16)
}
clone := make(map[string]int16, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]int64, *map[string]int64:
var value map[string]int64
if val, ok := t.(*map[string]int64); ok {
value = *val
} else {
value = t.(map[string]int64)
}
clone := make(map[string]int64, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]uint, *map[string]uint:
var value map[string]uint
if val, ok := t.(*map[string]uint); ok {
value = *val
} else {
value = t.(map[string]uint)
}
clone := make(map[string]uint, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]uint8, *map[string]uint8:
var value map[string]uint8
if val, ok := t.(*map[string]uint8); ok {
value = *val
} else {
value = t.(map[string]uint8)
}
clone := make(map[string]uint8, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]uint16, *map[string]uint16:
var value map[string]uint16
if val, ok := t.(*map[string]uint16); ok {
value = *val
} else {
value = t.(map[string]uint16)
}
clone := make(map[string]uint16, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]uint64, *map[string]uint64:
var value map[string]uint64
if val, ok := t.(*map[string]uint64); ok {
value = *val
} else {
value = t.(map[string]uint64)
}
clone := make(map[string]uint64, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]uintptr, *map[string]uintptr:
var value map[string]uintptr
if val, ok := t.(*map[string]uintptr); ok {
value = *val
} else {
value = t.(map[string]uintptr)
}
clone := make(map[string]uintptr, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]float32, *map[string]float32:
var value map[string]float32
if val, ok := t.(*map[string]float32); ok {
value = *val
} else {
value = t.(map[string]float32)
}
clone := make(map[string]float32, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]float64, *map[string]float64:
var value map[string]float64
if val, ok := t.(*map[string]float64); ok {
value = *val
} else {
value = t.(map[string]float64)
}
clone := make(map[string]float64, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]complex64, *map[string]complex64:
var value map[string]complex64
if val, ok := t.(*map[string]complex64); ok {
value = *val
} else {
value = t.(map[string]complex64)
}
clone := make(map[string]complex64, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]complex128, *map[string]complex128:
var value map[string]complex128
if val, ok := t.(*map[string]complex128); ok {
value = *val
} else {
value = t.(map[string]complex128)
}
clone := make(map[string]complex128, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]string, *map[string]string:
var value map[string]string
if val, ok := t.(*map[string]string); ok {
value = *val
} else {
value = t.(map[string]string)
}
clone := make(map[string]string, len(value))
for k, v := range value {
clone[k] = v
}
return clone
case map[string]interface{}, *map[string]interface{}:
var value map[string]interface{}
if val, ok := t.(*map[string]interface{}); ok {
value = *val
} else {
value = t.(map[string]interface{})
}
clone := make(map[string]interface{}, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).(interface{})
}
return clone
// Deep copy for types map[string][]T
case map[string][]bool, *map[string][]bool:
var value map[string][]bool
if val, ok := t.(*map[string][]bool); ok {
value = *val
} else {
value = t.(map[string][]bool)
}
clone := make(map[string][]bool, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]bool)
}
return clone
case map[string][]int, *map[string][]int:
var value map[string][]int
if val, ok := t.(*map[string][]int); ok {
value = *val
} else {
value = t.(map[string][]int)
}
clone := make(map[string][]int, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]int)
}
return clone
case map[string][]int8, *map[string][]int8:
var value map[string][]int8
if val, ok := t.(*map[string][]int8); ok {
value = *val
} else {
value = t.(map[string][]int8)
}
clone := make(map[string][]int8, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]int8)
}
return clone
case map[string][]int16, *map[string][]int16:
var value map[string][]int16
if val, ok := t.(*map[string][]int16); ok {
value = *val
} else {
value = t.(map[string][]int16)
}
clone := make(map[string][]int16, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]int16)
}
return clone
case map[string][]int64, *map[string][]int64:
var value map[string][]int64
if val, ok := t.(*map[string][]int64); ok {
value = *val
} else {
value = t.(map[string][]int64)
}
clone := make(map[string][]int64, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]int64)
}
return clone
case map[string][]uint, *map[string][]uint:
var value map[string][]uint
if val, ok := t.(*map[string][]uint); ok {
value = *val
} else {
value = t.(map[string][]uint)
}
clone := make(map[string][]uint, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]uint)
}
return clone
case map[string][]uint8, *map[string][]uint8:
var value map[string][]uint8
if val, ok := t.(*map[string][]uint8); ok {
value = *val
} else {
value = t.(map[string][]uint8)
}
clone := make(map[string][]uint8, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]uint8)
}
return clone
case map[string][]uint16, *map[string][]uint16:
var value map[string][]uint16
if val, ok := t.(*map[string][]uint16); ok {
value = *val
} else {
value = t.(map[string][]uint16)
}
clone := make(map[string][]uint16, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]uint16)
}
return clone
case map[string][]uint64, *map[string][]uint64:
var value map[string][]uint64
if val, ok := t.(*map[string][]uint64); ok {
value = *val
} else {
value = t.(map[string][]uint64)
}
clone := make(map[string][]uint64, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]uint64)
}
return clone
case map[string][]uintptr, *map[string][]uintptr:
var value map[string][]uintptr
if val, ok := t.(*map[string][]uintptr); ok {
value = *val
} else {
value = t.(map[string][]uintptr)
}
clone := make(map[string][]uintptr, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]uintptr)
}
return clone
case map[string][]float32, *map[string][]float32:
var value map[string][]float32
if val, ok := t.(*map[string][]float32); ok {
value = *val
} else {
value = t.(map[string][]float32)
}
clone := make(map[string][]float32, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]float32)
}
return clone
case map[string][]float64, *map[string][]float64:
var value map[string][]float64
if val, ok := t.(*map[string][]float64); ok {
value = *val
} else {
value = t.(map[string][]float64)
}
clone := make(map[string][]float64, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]float64)
}
return clone
case map[string][]complex64, *map[string][]complex64:
var value map[string][]complex64
if val, ok := t.(*map[string][]complex64); ok {
value = *val
} else {
value = t.(map[string][]complex64)
}
clone := make(map[string][]complex64, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]complex64)
}
return clone
case map[string][]complex128, *map[string][]complex128:
var value map[string][]complex128
if val, ok := t.(*map[string][]complex128); ok {
value = *val
} else {
value = t.(map[string][]complex128)
}
clone := make(map[string][]complex128, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]complex128)
}
return clone
case map[string][]string, *map[string][]string:
var value map[string][]string
if val, ok := t.(*map[string][]string); ok {
value = *val
} else {
value = t.(map[string][]string)
}
clone := make(map[string][]string, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]string)
}
return clone
case map[string][]interface{}, *map[string][]interface{}:
var value map[string][]interface{}
if val, ok := t.(*map[string][]interface{}); ok {
value = *val
} else {
value = t.(map[string][]interface{})
}
clone := make(map[string][]interface{}, len(value))
for k, v := range value {
clone[k] = DeepCopy(v).([]interface{})
}
return clone
// Deep copy for types []T
case []bool, *[]bool:
var value []bool
if val, ok := t.(*[]bool); ok {
value = *val
} else {
value = t.([]bool)
}
clone := make([]bool, len(value))
copy(clone, value)
return clone
case []int, *[]int:
var value []int
if val, ok := t.(*[]int); ok {
value = *val
} else {
value = t.([]int)
}
clone := make([]int, len(value))
copy(clone, value)
return clone
case []int8, *[]int8:
var value []int8
if val, ok := t.(*[]int8); ok {
value = *val
} else {
value = t.([]int8)
}
clone := make([]int8, len(value))
copy(clone, value)
return clone
case []int16, *[]int16:
var value []int16
if val, ok := t.(*[]int16); ok {
value = *val
} else {
value = t.([]int16)
}
clone := make([]int16, len(value))
copy(clone, value)
return clone
case []int64, *[]int64:
var value []int64
if val, ok := t.(*[]int64); ok {
value = *val
} else {
value = t.([]int64)
}
clone := make([]int64, len(value))
copy(clone, value)
return clone
case []uint, *[]uint:
var value []uint
if val, ok := t.(*[]uint); ok {
value = *val
} else {
value = t.([]uint)
}
clone := make([]uint, len(value))
copy(clone, value)
return clone
case []uint8, *[]uint8:
var value []uint8
if val, ok := t.(*[]uint8); ok {
value = *val
} else {
value = t.([]uint8)
}
clone := make([]uint8, len(value))
copy(clone, value)
return clone
case []uint16, *[]uint16:
var value []uint16
if val, ok := t.(*[]uint16); ok {
value = *val
} else {
value = t.([]uint16)
}
clone := make([]uint16, len(value))
copy(clone, value)
return clone
case []uint64, *[]uint64:
var value []uint64
if val, ok := t.(*[]uint64); ok {
value = *val
} else {
value = t.([]uint64)
}
clone := make([]uint64, len(value))
copy(clone, value)
return clone
case []uintptr, *[]uintptr:
var value []uintptr
if val, ok := t.(*[]uintptr); ok {
value = *val
} else {
value = t.([]uintptr)
}
clone := make([]uintptr, len(value))
copy(clone, value)
return clone
case []float32, *[]float32:
var value []float32
if val, ok := t.(*[]float32); ok {
value = *val
} else {
value = t.([]float32)
}
clone := make([]float32, len(value))
copy(clone, value)
return clone
case []float64, *[]float64:
var value []float64
if val, ok := t.(*[]float64); ok {
value = *val
} else {
value = t.([]float64)
}
clone := make([]float64, len(value))
copy(clone, value)
return clone
case []complex64, *[]complex64:
var value []complex64
if val, ok := t.(*[]complex64); ok {
value = *val
} else {
value = t.([]complex64)
}
clone := make([]complex64, len(value))
copy(clone, value)
return clone
case []complex128, *[]complex128:
var value []complex128
if val, ok := t.(*[]complex128); ok {
value = *val
} else {
value = t.([]complex128)
}
clone := make([]complex128, len(value))
copy(clone, value)
return clone
case []string, *[]string:
var value []string
if val, ok := t.(*[]string); ok {
value = *val
} else {
value = t.([]string)
}
clone := make([]string, len(value))
copy(clone, value)
return clone
case []interface{}, *[]interface{}:
var value []interface{}
if val, ok := t.(*[]interface{}); ok {
value = *val
} else {
value = t.([]interface{})
}
clone := make([]interface{}, len(value))
copy(clone, value)
for k, v := range clone {
clone[k] = DeepCopy(v)
}
return clone
}
return object
} | deepcopy.go | 0.591605 | 0.4831 | deepcopy.go | starcoder |
package fsm
import (
"fmt"
"sort"
)
// VisualizeType selects the output format produced by VisualizeWithType.
type VisualizeType string

const (
	// GRAPHVIZ the type for graphviz output (http://www.webgraphviz.com/)
	GRAPHVIZ VisualizeType = "graphviz"
	// MERMAID the type for mermaid output (https://mermaid-js.github.io/mermaid/#/stateDiagram) in the stateDiagram form
	MERMAID VisualizeType = "mermaid"
	// MermaidStateDiagram the type for mermaid output (https://mermaid-js.github.io/mermaid/#/stateDiagram) in the stateDiagram form
	MermaidStateDiagram VisualizeType = "mermaid-state-diagram"
	// MermaidFlowChart the type for mermaid output (https://mermaid-js.github.io/mermaid/#/flowchart) in the flow chart form
	MermaidFlowChart VisualizeType = "mermaid-flow-chart"
)
// VisualizeWithType outputs a visualization of a FSM in the desired format.
// MERMAID and MermaidStateDiagram are synonyms; an unrecognized type yields
// an error.
func (fsm *FSM[STATE, EVENT, FSM_IMPL, ARG]) VisualizeWithType(visualizeType VisualizeType, current STATE) (string, error) {
	if visualizeType == GRAPHVIZ {
		return fsm.Visualize(current), nil
	}
	if visualizeType == MERMAID || visualizeType == MermaidStateDiagram {
		return fsm.VisualizeForMermaidWithGraphType(StateDiagram, current)
	}
	if visualizeType == MermaidFlowChart {
		return fsm.VisualizeForMermaidWithGraphType(FlowChart, current)
	}
	return "", fmt.Errorf("unknown VisualizeType: %s", visualizeType)
}
// VisualizeWithType renders the instance's FSM in the requested format,
// using the instance's current state as the highlighted state.
func (fsm *Instance[STATE, EVENT, FSM_IMPL, ARG]) VisualizeWithType(visualizeType VisualizeType) (string, error) {
	return fsm.FSM.VisualizeWithType(visualizeType, fsm.Current())
}
// getSortedTransitionKeys returns the transition keys ordered by the string
// form of their source state, then by the string form of their event, so the
// generated graph output is reproducible across runs.
func (fsm *FSM[STATE, EVENT, FSM_IMPL, ARG]) getSortedTransitionKeys() []eKey[STATE, EVENT] {
	keys := make([]eKey[STATE, EVENT], 0, len(fsm.transitions))
	for k := range fsm.transitions {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool {
		a, b := keys[i], keys[j]
		if a.src != b.src {
			return fmt.Sprint(a.src) < fmt.Sprint(b.src)
		}
		return fmt.Sprint(a.event) < fmt.Sprint(b.event)
	})
	return keys
}
func (fsm *FSM[STATE, EVENT, FSM_IMPL, ARG]) getSortedStates() ([]STATE, map[STATE]string) {
statesToIDMap := make(map[STATE]string)
for transition, target := range fsm.transitions {
if _, ok := statesToIDMap[transition.src]; !ok {
statesToIDMap[transition.src] = ""
}
if _, ok := statesToIDMap[target]; !ok {
statesToIDMap[target] = ""
}
}
sortedStates := make([]STATE, 0, len(statesToIDMap))
for state := range statesToIDMap {
sortedStates = append(sortedStates, state)
}
sort.Slice(sortedStates, func(i, j int) bool {
return fmt.Sprint(sortedStates[i]) < fmt.Sprint(sortedStates[j])
})
for i, state := range sortedStates {
statesToIDMap[state] = fmt.Sprintf("id%d", i)
}
return sortedStates, statesToIDMap
} | visualizer.go | 0.610105 | 0.650509 | visualizer.go | starcoder |
package gjp
//--------------------
// IMPORTS
//--------------------
import (
"encoding/json"
"github.com/tideland/golib/errors"
"github.com/tideland/golib/stringex"
)
//--------------------
// DOCUMENT
//--------------------
// PathValue is the combination of path and value.
type PathValue struct {
	Path  string
	Value Value
}

// PathValues contains a number of path/value combinations.
type PathValues []PathValue

// ValueProcessor describes a function for the processing of
// values while iterating over a document.
type ValueProcessor func(path string, value Value) error

// Document represents one parsed JSON document with path-based access.
type Document interface {
	json.Marshaler

	// Length returns the number of elements for the given path:
	// map/array size for containers, 1 for scalars, -1 on lookup failure.
	Length(path string) int

	// SetValueAt sets the value at the given path.
	SetValueAt(path string, value interface{}) error

	// ValueAt returns the addressed value.
	ValueAt(path string) Value

	// Clear removes the so far built document data.
	Clear()

	// Query allows to find paths matching a given pattern.
	Query(pattern string) (PathValues, error)

	// Process iterates over a document and processes its values.
	// There's no order, so nesting into an embedded document or
	// list may come earlier than higher level paths.
	Process(processor ValueProcessor) error
}

// document is the unexported Document implementation: a raw unmarshalled
// JSON tree plus the separator used to split path strings.
type document struct {
	separator string
	root      interface{}
}
// Parse reads a raw document and returns it as
// accessible document. The separator is used to split paths
// into their individual parts.
func Parse(data []byte, separator string) (Document, error) {
	var root interface{}
	err := json.Unmarshal(data, &root)
	if err != nil {
		// Wrap the unmarshalling error with the package's error code.
		return nil, errors.Annotate(err, ErrUnmarshalling, errorMessages)
	}
	return &document{
		separator: separator,
		root:      root,
	}, nil
}

// NewDocument creates a new empty document with the given path separator.
func NewDocument(separator string) Document {
	return &document{
		separator: separator,
	}
}
// Length implements Document. It returns the map or array size for
// containers, 1 for scalar values, and -1 if the path cannot be resolved.
func (d *document) Length(path string) int {
	node, err := valueAt(d.root, splitPath(path, d.separator))
	if err != nil {
		return -1
	}
	if obj, ok := isObject(node); ok {
		return len(obj)
	}
	if arr, ok := isArray(node); ok {
		return len(arr)
	}
	return 1
}
// SetValueAt implements Document. It delegates to setValueAt and, on
// success, replaces the root with the possibly rebuilt tree.
func (d *document) SetValueAt(path string, value interface{}) error {
	parts := splitPath(path, d.separator)
	root, err := setValueAt(d.root, value, parts)
	if err != nil {
		return err
	}
	d.root = root
	return nil
}

// ValueAt implements Document. Lookup errors are not returned directly;
// they are carried inside the returned value.
func (d *document) ValueAt(path string) Value {
	n, err := valueAt(d.root, splitPath(path, d.separator))
	return &value{n, err}
}

// Clear implements Document by resetting the root to nil.
func (d *document) Clear() {
	d.root = nil
}
// Query implements Document. It walks the whole document and collects
// every path matching the given pattern together with its value.
func (d *document) Query(pattern string) (PathValues, error) {
	matches := PathValues{}
	err := d.Process(func(path string, value Value) error {
		if stringex.Matches(pattern, path, false) {
			matches = append(matches, PathValue{Path: path, Value: value})
		}
		return nil
	})
	return matches, err
}
// Process implements Document. Iteration order is unspecified (see the
// interface documentation).
func (d *document) Process(processor ValueProcessor) error {
	return process(d.root, []string{}, d.separator, processor)
}

// MarshalJSON implements json.Marshaler by encoding the current root.
func (d *document) MarshalJSON() ([]byte, error) {
	return json.Marshal(d.root)
}

// EOF
package find_minimum_in_rotated_sorted_array
/*
33. 搜索旋转排序数组 https://leetcode-cn.com/problems/search-in-rotated-sorted-array/
假设按照升序排序的数组在预先未知的某个点上进行了旋转。
( 例如,数组 [0,1,2,4,5,6,7] 可能变为 [4,5,6,7,0,1,2] )。
搜索一个给定的目标值,如果数组中存在这个目标值,则返回它的索引,否则返回 -1 。
你可以假设数组中不存在重复的元素。
你的算法时间复杂度必须是 O(log n) 级别。
示例 1:
输入: nums = [4,5,6,7,0,1,2], target = 0
输出: 4
示例 2:
输入: nums = [4,5,6,7,0,1,2], target = 3
输出: -1
*/
/*
二分法, 对于mid,根据nums[0]和mid的大小关系,能方便获知mid左半侧有序还是右半侧有序
再根据target是否在有序一侧决定left和right的移动
*/
// search finds target in a rotated ascending array without duplicates and
// returns its index, or -1 if absent. At every step one half of the window
// is guaranteed to be sorted; we check whether target lies in that half.
// O(log n) time.
func search(nums []int, target int) int {
	lo, hi := 0, len(nums)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		v := nums[mid]
		switch {
		case v == target:
			return mid
		case nums[0] <= v:
			// nums[0..mid] is sorted.
			if nums[0] <= target && target < v {
				hi = mid - 1
			} else {
				lo = mid + 1
			}
		default:
			// nums[mid..len-1] is sorted.
			if v < target && target <= nums[len(nums)-1] {
				lo = mid + 1
			} else {
				hi = mid - 1
			}
		}
	}
	return -1
}
/*
153. 寻找旋转排序数组中的最小值 https://leetcode-cn.com/problems/find-minimum-in-rotated-sorted-array
假设按照升序排序的数组在预先未知的某个点上进行了旋转。
( 例如,数组 [0,1,2,4,5,6,7] 可能变为 [4,5,6,7,0,1,2] )。
请找出其中最小的元素。
你可以假设数组中不存在重复元素。
示例 1:
输入: [3,4,5,1,2]
输出: 1
示例 2:
输入: [4,5,6,7,0,1,2]
输出: 0
*/
// findMin00 is the naive O(n) solution: scan for the first descent, which
// marks the rotation point; if there is none the array is not rotated and
// the first element is the minimum. Returns -1 for an empty slice.
func findMin00(nums []int) int {
	if len(nums) == 0 {
		return -1
	}
	prev := nums[0]
	for _, v := range nums[1:] {
		if v < prev {
			return v
		}
		prev = v
	}
	return nums[0]
}
/*
Binary-search variant 1: compare nums[mid] against nums[right] to decide on
which side of the rotation point mid lies, with two sentinel checks that
detect the rotation point directly. Assumes distinct elements; returns -1
for an empty slice.
*/
func findMin0(nums []int) int {
	if len(nums) == 0 {
		return -1
	}
	left, right := 0, len(nums)-1
	// Not rotated at all: the first element is already the minimum.
	if nums[0] <= nums[right] {
		return nums[0]
	}
	for left <= right {
		mid := left + (right-left)/2
		// Rotation point is immediately after mid.
		if nums[mid] > nums[mid+1] {
			return nums[mid+1]
		}
		// Rotation point is exactly at mid.
		if nums[mid-1] > nums[mid] {
			return nums[mid]
		}
		if nums[mid] > nums[right] { // mid is left of the rotation point (nums[mid] > nums[0] works too)
			left = mid + 1
		} else {
			right = mid - 1
		}
	}
	return -1
}
// findMin returns the smallest element of a rotated ascending array without
// duplicates (binary-search variant 2), or -1 for an empty slice.
// When nums[mid] > nums[hi] the rotation point lies strictly right of mid;
// otherwise mid and hi are on the same (right) side and hi can shrink to mid.
func findMin(nums []int) int {
	if len(nums) == 0 {
		return -1
	}
	lo, hi := 0, len(nums)-1
	for lo < hi {
		mid := lo + (hi-lo)/2
		if nums[mid] > nums[hi] {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	return nums[lo]
}
// findMin01 finds the minimum of a rotated ascending array with distinct
// elements by divide and conquer, halving the slice on each step.
// Returns -1 for an empty slice.
//
// Fixed: the recursive calls previously went to findMin instead of
// findMin01, which turned the divide-and-conquer demonstration into a
// single delegation to the iterative binary search.
func findMin01(nums []int) int {
	if len(nums) == 0 {
		return -1
	}
	if len(nums) == 1 {
		return nums[0]
	}
	end := len(nums) - 1
	mid := end / 2
	if nums[mid] > nums[end] {
		// Rotation point is in the right half.
		return findMin01(nums[mid+1:])
	}
	if nums[mid] < nums[end] {
		// Right half is sorted; the minimum is in the left half (inclusive).
		return findMin01(nums[:mid+1])
	}
	return -1 // unreachable while elements are distinct (mid < end always holds here)
}
/*
题目变体:
154. 寻找旋转排序数组中的最小值 II https://leetcode-cn.com/problems/find-minimum-in-rotated-sorted-array-ii
如果nums里有重复元素呢?如:
[3, 1, 2, 3]
[3, 1, 2, 2, 3]
*/
// findMin1 returns the smallest element of a rotated sorted array that may
// contain duplicates. When nums[mid] equals nums[hi] we cannot tell which
// side holds the minimum, so the window shrinks by one element to stay safe.
func findMin1(nums []int) int {
	lo, hi := 0, len(nums)-1
	for lo < hi {
		mid := lo + (hi-lo)/2
		if nums[mid] > nums[hi] {
			lo = mid + 1
		} else if nums[mid] < nums[hi] {
			hi = mid
		} else {
			// Equal: conservative shrink so the minimum is never skipped.
			hi--
		}
	}
	return nums[lo]
}
// minArray is the divide-and-conquer equivalent of findMin1 for rotated
// arrays that may contain duplicates. Returns 0 for an empty slice.
func minArray(numbers []int) int {
	switch len(numbers) {
	case 0:
		return 0
	case 1:
		return numbers[0]
	}
	end := len(numbers) - 1
	mid := end / 2
	switch {
	case numbers[mid] > numbers[end]:
		return minArray(numbers[mid+1:])
	case numbers[mid] < numbers[end]:
		return minArray(numbers[:mid+1])
	default:
		// Equal values: drop one element so the minimum is never skipped.
		return minArray(numbers[:end])
	}
}
// Build Instructions:
// go get "github.com/alexflint/go-arg"
// go get "robpike.io/filter"
// go build csvslim.go
// Usage:
// ./csvslim -c [COLUMN1,COLUMN2,...] < input.csv
// Where COLUMN is the number corresponding to that column (starting at 0).
// It can also include a comparison operator (> and <).
// Examples:
// Show second and fourth columns:
// ./csvslim -c "1,3" < slim.csv
// Show the first 3 columns and the fifth (0, 1, 2, 4)
// ./csvslim -c "<3,4" < input.csv
// Ignore colums:
// ./csvslim -i [COLUMN1,COLUMN2,...] < input.csv
// Rename columns (rename columns by index):
// ./csvslim -r "COLUMN1:RENAME1,COLUMN2:RENAME2,..." < input.csv
// Ignore header (skip first line):
// ./csvslim --noheader < input.csv
// Filter by value (filter file must contain sorted values):
// ./csvslim --filter filter.csv < input.csv
// Filter by value specifying the column to watch:
// ./csvslim --filter filter.csv --filtercol 1 < input.csv
// Inverse filter:
// ./csvslim --filter filter.csv --filtercol 1 --inverse < input.csv
package main
import (
"bufio"
"encoding/csv"
"errors"
"fmt"
"github.com/alexflint/go-arg"
"io"
"log"
"os"
"regexp"
"robpike.io/filter"
"strconv"
"strings"
)
// The Operator type identifies a comparison operator to be used against a column number
type Operator string

const (
	Equal       Operator = "="
	LessThan    Operator = "<"
	GreaterThan Operator = ">"
)

// A ColumnOperator stores a comparison operator along with the column number it needs to be compared with
type ColumnOperator struct {
	Column     int
	Comparison Operator
}
// The Evaluate method compares a column number against the column within the
// ColumnOperator, applying the stored operator. Unknown operators yield false.
func (ce *ColumnOperator) Evaluate(c int) bool {
	if ce.Comparison == Equal {
		return c == ce.Column
	}
	if ce.Comparison == LessThan {
		return c < ce.Column
	}
	if ce.Comparison == GreaterThan {
		return c > ce.Column
	}
	return false
}
// The FilterColumns type holds all ColumnOperators values a column number must be compared with
type FilterColumns struct {
	Values []ColumnOperator
}

// IsValid reports whether the given column number satisfies at least one of
// the stored operators. A plain loop with early exit replaces the previous
// reflection-based filter.Choose call, which built a throwaway slice and
// required a type assertion on every invocation.
func (fc *FilterColumns) IsValid(column int) bool {
	for _, co := range fc.Values {
		if co.Evaluate(column) {
			return true
		}
	}
	return false
}
//The UnmarshalText method allow FilterColumns to be used as an argument type.
// The input is a comma-separated list of column expressions: a bare number
// means Equal, a leading "<" means LessThan, a trailing ">" means GreaterThan.
// Tokens that do not match the pattern are skipped.
func (fc *FilterColumns) UnmarshalText(b []byte) error {
	re := regexp.MustCompile(`(<)?(\d+)(>)?`)
	for _, token := range strings.Split(string(b), ",") {
		if !re.MatchString(token) {
			continue
		}
		groups := re.FindStringSubmatch(token)
		num, _ := strconv.Atoi(groups[2]) // groups[2] is all digits, so Atoi cannot fail
		op := Equal
		switch {
		case groups[1] != "":
			op = LessThan
		case groups[3] != "":
			op = GreaterThan
		}
		fc.Values = append(fc.Values, ColumnOperator{
			Column:     num,
			Comparison: op,
		})
	}
	return nil
}
//The RenameColumns type stores which columns should be renamed, keyed by
// zero-based column index.
type RenameColumns struct {
	Values map[int]string
}

// UnmarshalText allows RenameColumns to be used as an argument type. It
// parses "INDEX:NAME,INDEX:NAME,...". Entries without a colon are skipped;
// a non-numeric index is an error. SplitN with a limit of 2 keeps any
// further colons as part of the new name (the previous Split-based code
// silently truncated the name at a second colon).
func (rc *RenameColumns) UnmarshalText(b []byte) error {
	rc.Values = make(map[int]string)
	for _, pair := range strings.Split(string(b), ",") {
		parts := strings.SplitN(pair, ":", 2)
		if len(parts) < 2 {
			continue
		}
		idx, err := strconv.Atoi(parts[0])
		if err != nil {
			return fmt.Errorf("invalid index in %s", pair)
		}
		rc.Values[idx] = parts[1]
	}
	return nil
}
// The args struct holds all argument types supported; the struct tags are
// consumed by the go-arg parser (flag name and help text).
var args struct {
	Columns   FilterColumns `arg:"-c" help:"Columns to show"`
	Ignore    FilterColumns `arg:"-i" help:"Columns to ignore"`
	Rename    RenameColumns `arg:"-r" help:"Columns to rename"`
	NoHeader  bool          `help:"Skip first line"`
	Filter    string        `help:"Filename containing the id to filter with"`
	FilterCol int           `help:"Column holding the value to filter for"`
	Inverse   bool          `help:"Inverts filter condition"`
}
// newRange returns the slice [0, 1, ..., size-1]. A plain indexed loop
// replaces the previous filter.ApplyInPlace call, whose closure relied on a
// mutable captured counter and an assumed left-to-right application order.
func newRange(size int) []int {
	cols := make([]int, size)
	for i := range cols {
		cols[i] = i
	}
	return cols
}
// unique returns values with duplicates removed, keeping the first
// occurrence of each string in its original order.
func unique(values []string) []string {
	seen := make(map[string]bool, len(values))
	out := []string{}
	for _, v := range values {
		if seen[v] {
			continue
		}
		seen[v] = true
		out = append(out, v)
	}
	return out
}
// find reports whether needle occurs in haystack.
func find(needle string, haystack []string) bool {
	for _, s := range haystack {
		if needle == s {
			return true
		}
	}
	return false
}
func main() {
arg.MustParse(&args)
// Check if a filter is provided
filterValues := []string{}
if args.Filter != "" {
// Read filter values into a slice
filterFilename := args.Filter
filterFile, err := os.Open(filterFilename)
if err != nil {
log.Fatal(err)
}
filterReader := csv.NewReader(bufio.NewReader(filterFile))
defer filterFile.Close()
for {
line, error := filterReader.Read()
if error == io.EOF {
break
} else if error != nil {
continue
}
filterValues = append(filterValues, line[0])
}
filterValues = unique(filterValues)
}
reader := csv.NewReader(os.Stdin)
writer := csv.NewWriter(os.Stdout)
var cols []int
row := 0
for {
line, error := reader.Read()
if error == io.EOF || error == errors.New("wrong number of fields") {
break
} else if error != nil {
log.Fatal(error)
}
if row == 0 {
// Build the column list
cols = newRange(len(line))
if len(args.Ignore.Values) > 0 {
filter.DropInPlace(&cols, func(c int) bool {
return args.Ignore.IsValid(c)
})
} else if len(args.Columns.Values) > 0 {
filter.ChooseInPlace(&cols, func(c int) bool {
return args.Columns.IsValid(c)
})
}
// Skip first row
if args.NoHeader {
row++
continue
} else if len(args.Rename.Values) > 0 {
// Rename if first line
for idx, col := range args.Rename.Values {
line[idx] = col
}
}
}
// Filter by id
if args.Filter != "" && len(filterValues) > 0 {
found := find(line[args.FilterCol], filterValues)
// If the value is not found, read the next one
if (!args.Inverse && !found) || (args.Inverse && found) {
continue
}
}
// Build line
var out []string
for _, column := range cols {
out = append(out, line[column])
}
writer.Write(out)
row++
}
writer.Flush()
} | csvslim.go | 0.706697 | 0.409988 | csvslim.go | starcoder |
package schema
import (
"go/ast"
"go/token"
"go/types"
"github.com/bflad/tfproviderlint/helper/astutils"
)
const (
	// TypeNameStateUpgradeFunc is the type name matched by
	// IsTypeStateUpgradeFunc.
	TypeNameStateUpgradeFunc = `StateUpgradeFunc`
)
// IsFuncTypeStateUpgradeFunc reports whether the node is a function type
// with the StateUpgradeFunc shape:
//
//	func(map[string]interface{}, interface{}) (map[string]interface{}, error)
func IsFuncTypeStateUpgradeFunc(node ast.Node, info *types.Info) bool {
	funcType := astutils.FuncTypeFromNode(node)
	if funcType == nil {
		return false
	}
	paramsOK := astutils.HasFieldListLength(funcType.Params, 2) &&
		astutils.IsFieldListType(funcType.Params, 0, astutils.IsExprTypeMapStringInterface) &&
		astutils.IsFieldListType(funcType.Params, 1, astutils.IsExprTypeInterface)
	if !paramsOK {
		return false
	}
	return astutils.HasFieldListLength(funcType.Results, 2) &&
		astutils.IsFieldListType(funcType.Results, 0, astutils.IsExprTypeMapStringInterface) &&
		astutils.IsFieldListType(funcType.Results, 1, astutils.IsExprTypeError)
}
// IsTypeStateUpgradeFunc reports whether t is (possibly a pointer to) the
// schema package's StateUpgradeFunc named type.
func IsTypeStateUpgradeFunc(t types.Type) bool {
	if named, ok := t.(*types.Named); ok {
		return IsNamedType(named, TypeNameStateUpgradeFunc)
	}
	if ptr, ok := t.(*types.Pointer); ok {
		return IsTypeStateUpgradeFunc(ptr.Elem())
	}
	return false
}
// StateUpgradeFuncInfo represents all gathered StateUpgradeFunc data for easier access
type StateUpgradeFuncInfo struct {
AstFuncDecl *ast.FuncDecl
AstFuncLit *ast.FuncLit
Body *ast.BlockStmt
Node ast.Node
Pos token.Pos
Type *ast.FuncType
TypesInfo *types.Info
}
// NewStateUpgradeFuncInfo instantiates a StateUpgradeFuncInfo
func NewStateUpgradeFuncInfo(node ast.Node, info *types.Info) *StateUpgradeFuncInfo {
result := &StateUpgradeFuncInfo{
TypesInfo: info,
}
switch node := node.(type) {
case *ast.FuncDecl:
result.AstFuncDecl = node
result.Body = node.Body
result.Node = node
result.Pos = node.Pos()
result.Type = node.Type
case *ast.FuncLit:
result.AstFuncLit = node
result.Body = node.Body
result.Node = node
result.Pos = node.Pos()
result.Type = node.Type
}
return result
} | vendor/github.com/bflad/tfproviderlint/helper/terraformtype/helper/schema/type_stateupgradefunc.go | 0.622689 | 0.417925 | type_stateupgradefunc.go | starcoder |
package sketchy
import (
_ "container/heap"
"fmt"
"math"
"math/rand"
"time"
)
var (
	Pi    = math.Pi
	Tau   = 2 * math.Pi // one full turn (2π)
	Sqrt2 = math.Sqrt2
	Sqrt3 = math.Sqrt(3)
	Smol  = 1e-9 // comparison tolerance (see Equalf, NoTinyVals)
)
// Gcd returns the greatest common divisor of a and b (Euclidean algorithm,
// iterative form).
func Gcd(a int, b int) int {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}
// Lerpf linearly interpolates between from and to; t=0 yields from,
// t=1 yields to. t is not clamped.
func Lerpf(from, to float64, t float64) float64 {
	delta := to - from
	return from + t*delta
}
// Map linearly remaps i from the range [a, b] into the range [c, d].
// (The interpolation formerly delegated to Lerp is inlined here; the
// arithmetic is identical.)
func Map(a float64, b float64, c float64, d float64, i float64) float64 {
	t := (i - a) / (b - a)
	return c + t*(d-c)
}
// Clamp restricts c to the range [a, b].
func Clamp(a float64, b float64, c float64) float64 {
	switch {
	case c <= a:
		return a
	case c >= b:
		return b
	default:
		return c
	}
}
// NoTinyVals flushes values within Smol of zero to exactly zero and leaves
// everything else untouched.
func NoTinyVals(a float64) float64 {
	if math.Abs(a) >= Smol {
		return a
	}
	return 0
}
// Linspace creates a slice of n linearly spaced values starting at i.
// If b is true the endpoint j is included (n points spanning [i, j]);
// otherwise the step is (j-i)/n and j itself is excluded.
// Returns nil when n <= 0. Note: n == 1 with b == true divides by zero,
// yielding ±Inf/NaN values, matching the previous implementation.
//
// The result slice is now pre-sized (its length is known up front) instead
// of being grown with repeated appends.
func Linspace(i float64, j float64, n int, b bool) []float64 {
	if n <= 0 {
		return nil
	}
	steps := float64(n)
	if b {
		steps-- // endpoint included: n-1 intervals
	}
	d := (j - i) / steps
	result := make([]float64, n)
	for k := range result {
		result[k] = i + float64(k)*d
	}
	return result
}
// Deg2Rad converts an angle from degrees to radians.
func Deg2Rad(f float64) float64 {
	radians := math.Pi * f / 180
	return radians
}
// Rad2Deg converts an angle from radians to degrees.
func Rad2Deg(f float64) float64 {
	degrees := 180 * f / math.Pi
	return degrees
}
// Shuffle randomly permutes a slice of points in place.
//
// rand.Shuffle performs an unbiased Fisher-Yates permutation; the previous
// implementation made 3n random pair swaps, which does not produce uniformly
// distributed permutations. Seeding moved from UnixMicro to UnixNano to
// reduce the chance of identical seeds on rapid successive calls.
func Shuffle(p *[]Point) {
	rand.Seed(time.Now().UnixNano())
	rand.Shuffle(len(*p), func(i, j int) {
		(*p)[i], (*p)[j] = (*p)[j], (*p)[i]
	})
}
// GetTimestampString returns the current local time formatted as
// "YYYYMMDD_hhmmss" for use in filenames.
//
// time.Format with the reference layout replaces the previous hand-written
// Sprintf over the individual date components; the output is identical.
func GetTimestampString() string {
	return time.Now().Format("20060102_150405")
}
func Equalf(a, b float64) bool {
return math.Abs(b-a) <= Smol
}
func (p Point) ToIndexPoint(index int) IndexPoint {
return IndexPoint{
Index: index,
Point: p,
}
}
func (p Point) ToMetricPoint(index int, metric float64) MetricPoint {
return MetricPoint{
Metric: metric,
Index: index,
Point: p,
}
}
// An IndexPoint is a wrapper around a point with an extra int identifier, useful when used with trees and heaps
type IndexPoint struct {
Index int
Point
}
func (p IndexPoint) ToPoint() Point {
return p.Point
}
// A MetricPoint is a wrapper around a point with to extra identifiers, useful when used with trees and heaps
type MetricPoint struct {
Metric float64
Index int
Point
}
func (p MetricPoint) ToIndexPoint() IndexPoint {
return IndexPoint{
Index: p.Index,
Point: p.Point,
}
}
func (p MetricPoint) ToPoint() Point {
return p.Point
} | util.go | 0.828315 | 0.520862 | util.go | starcoder |
package fpeUtils
import (
"fmt"
"math/big"
)
// Num constructs a big.Int from an array of uint16, where each element represents
// one digit in the given radix. The array is arranged with the most significant digit in element 0,
// down to the least significant digit in element len-1. The maximum supported
// radix is 65536; any digit >= radix is rejected.
func Num(s []uint16, radix uint64) (big.Int, error) {
	var result big.Int
	if radix > 65536 {
		return result, fmt.Errorf("Radix (%d) too big: max supported radix is 65536", radix)
	}
	base := new(big.Int).SetUint64(radix)
	digit := new(big.Int)
	limit := uint16(radix - 1)
	for i, d := range s {
		if d > limit {
			return result, fmt.Errorf("Value at %d out of range: got %d - expected 0..%d", i, d, limit)
		}
		result.Mul(&result, base)
		result.Add(&result, digit.SetUint64(uint64(d)))
	}
	return result, nil
}
// NumRev constructs a big.Int from an array of uint16, where each element represents
// one digit in the given radix. The array is arranged with the least significant digit in element 0,
// up to the most significant digit in element len-1. The maximum supported
// radix is 65536; any digit >= radix is rejected.
func NumRev(s []uint16, radix uint64) (big.Int, error) {
	var result big.Int
	if radix > 65536 {
		return result, fmt.Errorf("Radix (%d) too big: max supported radix is 65536", radix)
	}
	base := new(big.Int).SetUint64(radix)
	digit := new(big.Int)
	limit := uint16(radix - 1)
	// Walk from the most significant digit (last element) downwards.
	for i := len(s) - 1; i >= 0; i-- {
		if s[i] > limit {
			return result, fmt.Errorf("Value at %d out of range: got %d - expected 0..%d", i, s[i], limit)
		}
		result.Mul(&result, base)
		result.Add(&result, digit.SetUint64(uint64(s[i])))
	}
	return result, nil
}
// Str populates an array of uint16 with digits representing big.Int x in the specified radix.
// The array is arranged with the most significant digit in element 0.
// The array is built from big.Int x from the least significant digit upwards. If the supplied
// array is too short, the most significant digits of x are quietly lost.
func Str(x *big.Int, r []uint16, radix uint64) ([]uint16, error) {
var bigRadix, mod, v big.Int
if radix > 65536 {
return r, fmt.Errorf("Radix (%d) too big: max supported radix os 65536", radix)
}
m := len(r)
v.Set(x)
bigRadix.SetUint64(radix)
for i := range r {
v.DivMod(&v, &bigRadix, &mod)
r[m-i-1] = uint16(mod.Uint64())
}
if v.Sign() != 0 {
return r, fmt.Errorf("destination array too small: %s remains after conversion", &v)
}
return r, nil
}
// StrRev populates an array of uint16 with digits representing big.Int x in the specified radix.
// The array is arranged with the least significant digit in element 0.
// The array is built from big.Int x from the least significant digit upwards. If the supplied
// array is too short, the most significant digits of x are quietly lost.
func StrRev(x *big.Int, r []uint16, radix uint64) ([]uint16, error) {
var bigRadix, mod, v big.Int
if radix > 65536 {
return r, fmt.Errorf("Radix (%d) too big: max supported radix os 65536", radix)
}
v.Set(x)
bigRadix.SetUint64(radix)
for i := range r {
v.DivMod(&v, &bigRadix, &mod)
r[i] = uint16(mod.Uint64())
}
if v.Sign() != 0 {
return r, fmt.Errorf("destination array too small: %s remains after conversion", &v)
}
return r, nil
}
// DecodeNum constructs a string from indices into the alphabet embedded in the Codec. The indices
// are encoded in the big Ints a and b.
// lenA and lenB are the number of characters that should be built from the corresponding big Ints.
func DecodeNum(a *big.Int, lenA int, b *big.Int, lenB int, c Codec) (string, error) {
	// The first lenA digits come from a, the remaining lenB digits from b,
	// all in the codec's radix; the combined digit slice is then decoded
	// into a string by the codec.
	ret := make([]uint16, lenA+lenB)
	_, err := Str(a, ret[:lenA], uint64(c.Radix()))
	if err != nil {
		return "", err
	}
	_, err = Str(b, ret[lenA:], uint64(c.Radix()))
	if err != nil {
		return "", err
	}
	return c.Decode(ret)
}
package yasup
import (
	"bytes"
	crypto "crypto/rand"
	"math/big"
	"math/rand"
)
// zeroValueByte is the zero value returned alongside errors from the pop helpers.
var zeroValueByte byte

//ByteInsert will append elem at the position i. Might return ErrIndexOutOfBounds.
func ByteInsert(sl *[]byte, elem byte, i int) error {
	if i < 0 || i > len(*sl) {
		return ErrIndexOutOfBounds
	}
	grown := append(*sl, 0) // grow by one; the tail slot is overwritten below
	copy(grown[i+1:], grown[i:])
	grown[i] = elem
	*sl = grown
	return nil
}
//ByteDelete delete the element at the position i, shifting later elements
//left. Might return ErrIndexOutOfBounds.
func ByteDelete(sl *[]byte, i int) error {
	if i < 0 || i >= len(*sl) {
		return ErrIndexOutOfBounds
	}
	s := *sl
	copy(s[i:], s[i+1:])
	*sl = s[:len(s)-1]
	return nil
}
//ByteContains will return true if elem is present in the slice and false otherwise.
//bytes.IndexByte is the optimized stdlib scan, replacing the manual loop.
func ByteContains(sl []byte, elem byte) bool {
	return bytes.IndexByte(sl, elem) >= 0
}
//ByteIndex returns the index of the first instance of elem, or -1 if elem is not present.
//Delegates to the optimized stdlib bytes.IndexByte instead of a manual loop.
func ByteIndex(sl []byte, elem byte) int {
	return bytes.IndexByte(sl, elem)
}
//ByteLastIndex returns the index of the last instance of elem in the slice, or -1 if elem is not present.
//Delegates to the stdlib bytes.LastIndexByte instead of a manual reverse loop.
func ByteLastIndex(sl []byte, elem byte) int {
	return bytes.LastIndexByte(sl, elem)
}
//ByteCount will return an int representing the amount of times that elem is present in the slice.
//Delegates to the optimized stdlib bytes.Count instead of a manual loop.
func ByteCount(sl []byte, elem byte) int {
	return bytes.Count(sl, []byte{elem})
}
//BytePush is equivalent to ByteInsert with index len(*sl). The error is
//safe to ignore: an index equal to the length is always in bounds.
func BytePush(sl *[]byte, elem byte) {
	ByteInsert(sl, elem, len(*sl))
}

//ByteFrontPush is equivalent to ByteInsert with index 0. The error is
//safe to ignore: index 0 is always in bounds.
func ByteFrontPush(sl *[]byte, elem byte) {
	ByteInsert(sl, elem, 0)
}
//BytePop removes and returns the last element of the slice. Might return ErrEmptySlice.
func BytePop(sl *[]byte) (byte, error) {
	n := len(*sl)
	if n == 0 {
		return zeroValueByte, ErrEmptySlice
	}
	elem := (*sl)[n-1]
	ByteDelete(sl, n-1)
	return elem, nil
}
//ByteFrontPop removes and returns the first element of the slice.
//Might return ErrEmptySlice.
func ByteFrontPop(sl *[]byte) (byte, error) {
	if len(*sl) == 0 {
		return zeroValueByte, ErrEmptySlice
	}
	first := (*sl)[0]
	ByteDelete(sl, 0)
	return first, nil
}
//ByteReplace modifies the slice in place, replacing the first n
//non-overlapping instances of old with new and returning the number of
//replacements made. If n equals -1, there is no limit.
func ByteReplace(sl []byte, old, new byte, n int) (replacements int) {
	remaining := n
	for i := range sl {
		if remaining == 0 {
			break // replacement budget exhausted
		}
		if sl[i] == old {
			sl[i] = new
			remaining--
		}
	}
	return n - remaining
}
//ByteReplaceAll is equivalent to ByteReplace with n = -1 (no limit).
func ByteReplaceAll(sl []byte, old, new byte) (replacements int) {
	return ByteReplace(sl, old, new, -1)
}
//ByteEquals compares two byte slices. Returns true if their lengths and
//contents are equal (nil and empty compare equal, as before).
//Delegates to the optimized stdlib bytes.Equal instead of a manual loop.
func ByteEquals(a, b []byte) bool {
	return bytes.Equal(a, b)
}
//ByteFastShuffle will randomly swap the byte elements of a slice using math/rand
//(fast but not cryptographically secure). It uses the global math/rand source,
//so results depend on how that source was seeded.
func ByteFastShuffle(sp []byte) {
	rand.Shuffle(len(sp), func(i, j int) {
		sp[i], sp[j] = sp[j], sp[i]
	})
}
//ByteSecureShuffle will randomly swap the byte elements of a slice using crypto/rand
//(resource intensive but cryptographically secure).
//
//Fixed two defects of the previous implementation: it panicked for slices of
//length 1 (crypto.Int was called with max 0), and because crypto.Int's upper
//bound is exclusive the last index could never be drawn, biasing the shuffle.
//This version is a standard Fisher-Yates: for i from len-1 down to 1, swap
//sp[i] with a uniform index in [0, i].
func ByteSecureShuffle(sp []byte) error {
	for i := len(sp) - 1; i > 0; i-- {
		bigJ, err := crypto.Int(crypto.Reader, big.NewInt(int64(i)+1))
		if err != nil {
			return err
		}
		j := bigJ.Int64()
		sp[i], sp[j] = sp[j], sp[i]
	}
	return nil
}
// Package oracle handles schema and data migrations from oracle.
package oracle
import (
"regexp"
"github.com/cloudspannerecosystem/harbourbridge/common/constants"
"github.com/cloudspannerecosystem/harbourbridge/internal"
"github.com/cloudspannerecosystem/harbourbridge/schema"
"github.com/cloudspannerecosystem/harbourbridge/spanner/ddl"
)
var (
TimestampReg = regexp.MustCompile(`TIMESTAMP`)
IntervalReg = regexp.MustCompile(`INTERVAL`)
)
// ToDdlImpl is the Oracle-specific implementation of the ToDdl interface.
type ToDdlImpl struct {
}
// ToSpannerType maps a scalar Oracle schema type (defined by its name and
// mods) into a Spanner type. This is the core source-to-Spanner type
// mapping. It returns the mapped Spanner type and any type-conversion
// issues encountered.
func (tdi ToDdlImpl) ToSpannerType(conv *internal.Conv, columnType schema.Type) (ddl.Type, []internal.SchemaIssue) {
	// Passing an empty spType selects the default mapping; a non-default
	// spType is only supplied through the web package (ToSpannerTypeWeb).
	ty, issues := toSpannerTypeInternal(conv, "", columnType.Name, columnType.Mods)
	if conv.TargetDb == constants.TargetExperimentalPostgres {
		ty = overrideExperimentalType(columnType, ty)
	} else {
		// Spanner has no multi-dimensional arrays: flatten to STRING(MAX)
		// and record the issue. A single array bound maps to a Spanner array.
		if len(columnType.ArrayBounds) > 1 {
			ty = ddl.Type{Name: ddl.String, Len: ddl.MaxLength}
			issues = append(issues, internal.MultiDimensionalArray)
		}
		ty.IsArray = len(columnType.ArrayBounds) == 1
	}
	return ty, issues
}
// ToSpannerTypeWeb maps an Oracle type to a Spanner type on behalf of the
// web package, allowing the caller to request a specific target Spanner
// type (spType) instead of the default mapping.
func ToSpannerTypeWeb(conv *internal.Conv, spType string, srcType string, mods []int64) (ddl.Type, []internal.SchemaIssue) {
	return toSpannerTypeInternal(conv, spType, srcType, mods)
}
// toSpannerTypeInternal performs the actual Oracle-to-Spanner type mapping.
// spType, when non-empty, requests a specific Spanner target type (currently
// only STRING is honoured as an override); otherwise each Oracle type gets
// its default mapping. Unrecognised types fall back to STRING(MAX) with a
// NoGoodType issue.
func toSpannerTypeInternal(conv *internal.Conv, spType string, srcType string, mods []int64) (ddl.Type, []internal.SchemaIssue) {
	// Oracle returns some datatypes with the precision,
	// so we get TIMESTAMP as TIMESTAMP(6), TIMESTAMP(6) WITH TIME ZONE,
	// TIMESTAMP(6) WITH LOCAL TIME ZONE.
	// The TimestampReg regex matches all of these variants.
	if TimestampReg.MatchString(srcType) {
		switch spType {
		case ddl.String:
			return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
		default:
			return ddl.Type{Name: ddl.Timestamp}, nil
		}
	}
	// Matching cases like INTERVAL YEAR(2) TO MONTH, INTERVAL DAY(2) TO SECOND(6), etc.
	// Spanner has no interval type, so intervals are stored as strings.
	if IntervalReg.MatchString(srcType) {
		switch spType {
		case ddl.String:
			return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
		default:
			if len(mods) > 0 {
				return ddl.Type{Name: ddl.String, Len: 30}, nil
			}
			return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
		}
	}
	switch srcType {
	case "NUMBER":
		switch spType {
		case ddl.String:
			return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
		default:
			// A single mod is the precision (no scale): precisions below 19
			// fit in int64; anything else maps to NUMERIC.
			if len(mods) == 1 && mods[0] >= 1 && mods[0] < 19 {
				return ddl.Type{Name: ddl.Int64}, nil
			} else {
				return ddl.Type{Name: ddl.Numeric}, nil
			}
		}
	case "BFILE", "BLOB":
		switch spType {
		case ddl.String:
			return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
		default:
			return ddl.Type{Name: ddl.Bytes, Len: ddl.MaxLength}, nil
		}
	case "CHAR":
		// mods[0], when present, is the declared character length.
		if len(mods) > 0 {
			return ddl.Type{Name: ddl.String, Len: mods[0]}, nil
		}
		return ddl.Type{Name: ddl.String}, nil
	case "CLOB":
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
	case "DATE":
		switch spType {
		case ddl.String:
			return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
		default:
			return ddl.Type{Name: ddl.Date}, nil
		}
	case "BINARY_DOUBLE", "BINARY_FLOAT", "FLOAT":
		switch spType {
		case ddl.String:
			return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
		default:
			return ddl.Type{Name: ddl.Float64}, nil
		}
	case "LONG":
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
	case "RAW", "LONG RAW":
		switch spType {
		case ddl.String:
			return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
		default:
			return ddl.Type{Name: ddl.Bytes, Len: ddl.MaxLength}, nil
		}
	case "NCHAR", "NVARCHAR2", "VARCHAR", "VARCHAR2":
		if len(mods) > 0 {
			return ddl.Type{Name: ddl.String, Len: mods[0]}, nil
		}
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
	case "NCLOB":
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
	case "ROWID":
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
	case "UROWID":
		if len(mods) > 0 {
			return ddl.Type{Name: ddl.String, Len: mods[0]}, nil
		}
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
	case "XMLTYPE":
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, nil
	case "JSON", "OBJECT":
		return ddl.Type{Name: ddl.JSON}, nil
	default:
		return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}, []internal.SchemaIssue{internal.NoGoodType}
	}
}
// Override the types to map to experimental postgres types.
func overrideExperimentalType(columnType schema.Type, originalType ddl.Type) ddl.Type {
if columnType.Name == "DATE" {
return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}
} else if columnType.Name == "JSON" {
return ddl.Type{Name: ddl.String, Len: ddl.MaxLength}
}
return originalType
} | sources/oracle/toddl.go | 0.640523 | 0.429489 | toddl.go | starcoder |
package goforjj
//***************************************
// JSON data structure of plugin input.
// See plugin-actions.go about how those structs are managed.
// PluginReqData define the API data request to send to forjj plugins
type PluginReqData struct {
// Collection of Forjj flags requested by the plugin or given by default by Forjj
Forj map[string]string
ForjExtent map[string]string `json:",omitempty"` // Extended Forjj flags
// Define the list of Forjj objects data transmitted. object_type, instance, action.
Objects map[string]ObjectInstances
Creds map[string]string `json:",omitempty"` // Contains credentials requested by the plugin for a specific action.
}
// ObjectInstances is a collection of instanceKeys
type ObjectInstances map[string]InstanceKeys
// InstanceKeys is a collection of key/values or under key "extent" a collection of key/values (intanceExtentKeys)
type InstanceKeys map[string]interface{}
// InstanceExtentKeys is the collection of key/values which is stored as "extent" in InstanceKeys.
type InstanceExtentKeys map[string]*ValueStruct
// NewReqData returns an empty plugin API request, with the Forj and Objects
// maps pre-allocated and ready to be filled.
func NewReqData() *PluginReqData {
	return &PluginReqData{
		Forj:    make(map[string]string),
		Objects: make(map[string]ObjectInstances),
	}
}
// SetForjFlag records one Forjj flag key/value in the request.
//
// cred routes the value to the Creds section (credential data requested by
// the plugin); extent routes it to the extended-flags section. When both are
// set, the value is stored only under Creds (the "new way"); a cred flag
// without extent is additionally kept in the normal Forj section for
// backward compatibility. A nil receiver is a no-op.
func (r *PluginReqData) SetForjFlag(key, value string, cred, extent bool) {
	if r == nil {
		return
	}
	if cred {
		if r.Creds == nil {
			r.Creds = make(map[string]string)
		}
		r.Creds[key] = value
		if extent {
			return // Extent creds live only in Creds; legacy (non-extent) creds also fall through to the normal structure for compatibility.
		}
	}
	if !extent {
		if r.Forj == nil {
			r.Forj = make(map[string]string)
		}
		r.Forj[key] = value
	} else {
		if r.ForjExtent == nil {
			r.ForjExtent = make(map[string]string)
		}
		r.ForjExtent[key] = value
	}
}
// AddObjectActions add in the request, the collection of keys/values or extent/keys/values for each objects/instances
func (r *PluginReqData) AddObjectActions(objectType, objectName string, keys InstanceKeys, extent InstanceExtentKeys, creds map[string]string) {
if r == nil {
return
}
if r.Objects == nil {
r.Objects = make(map[string]ObjectInstances)
}
if _, found := r.Objects[objectType]; !found {
r.Objects[objectType] = make(map[string]InstanceKeys)
}
keys["extent"] = extent
r.Objects[objectType][objectName] = keys
for cname, cdata := range creds {
if r.Creds == nil {
r.Creds = make(map[string]string)
}
r.Creds[objectType+"-"+objectName+"-"+cname] = cdata
}
return
} | plugin-req-data.go | 0.61115 | 0.426381 | plugin-req-data.go | starcoder |
package data
import (
"encoding/binary"
"math"
"math/big"
)
// DecimalTraits describes the fixed-width layout of a decimal value type:
// how many 32-bit digit groups it stores, its total byte width, whether it
// uses the sparse layout, and its maximum decimal precision.
type DecimalTraits interface {
	NumDigits() int
	ByteWidth() int
	IsSparse() bool
	MaxPrecision() int
}
// Singleton trait instances for the four supported decimal layouts.
var (
	Decimal28DenseTraits decimal28DenseTraits
	Decimal38DenseTraits decimal38DenseTraits
	Decimal28SparseTraits decimal28SparseTraits
	Decimal38SparseTraits decimal38SparseTraits
)
// decimal28DenseTraits: dense layout, 3 digit groups, 12 bytes, precision 28.
type decimal28DenseTraits struct{}
func (decimal28DenseTraits) NumDigits() int { return 3 }
func (decimal28DenseTraits) ByteWidth() int { return 12 }
func (decimal28DenseTraits) IsSparse() bool { return false }
func (decimal28DenseTraits) MaxPrecision() int { return 28 }
// decimal38DenseTraits: dense layout, 4 digit groups, 16 bytes, precision 38.
type decimal38DenseTraits struct{}
func (decimal38DenseTraits) NumDigits() int { return 4 }
func (decimal38DenseTraits) ByteWidth() int { return 16 }
func (decimal38DenseTraits) IsSparse() bool { return false }
func (decimal38DenseTraits) MaxPrecision() int { return 38 }
// decimal28SparseTraits: sparse layout, 5 digit groups, 20 bytes, precision 28.
type decimal28SparseTraits struct{}
func (decimal28SparseTraits) NumDigits() int { return 5 }
func (decimal28SparseTraits) ByteWidth() int { return 20 }
func (decimal28SparseTraits) IsSparse() bool { return true }
func (decimal28SparseTraits) MaxPrecision() int { return 28 }
// decimal38SparseTraits: sparse layout, 6 digit groups, 24 bytes, precision 38.
type decimal38SparseTraits struct{}
func (decimal38SparseTraits) NumDigits() int { return 6 }
func (decimal38SparseTraits) ByteWidth() int { return 24 }
func (decimal38SparseTraits) IsSparse() bool { return true }
func (decimal38SparseTraits) MaxPrecision() int { return 38 }
const (
maxdigits = 9
digBase = 1000000000
)
var base = big.NewFloat(digBase)
// getFloatFromBytes decodes a fixed-width decimal value into a big.Float.
//
// valbytes holds `digits` 32-bit groups, each representing up to 9 decimal
// digits (base 1e9). Sparse layouts (truncate == true) are little endian;
// dense layouts are big endian. The top bit of the first group is the sign
// bit and is masked out of the magnitude. The result is finally divided by
// 10^scale to place the decimal point.
func getFloatFromBytes(valbytes []byte, digits, scale int, truncate bool) *big.Float {
	// sparse types (truncate == true) are little endian, otherwise we're big endian
	var order binary.ByteOrder
	if truncate {
		order = binary.LittleEndian
	} else {
		order = binary.BigEndian
	}
	// Accumulate the magnitude group by group: val = val*1e9 + group.
	val := big.NewFloat(float64(order.Uint32(valbytes) & 0x7FFFFFFF))
	for i := 1; i < digits; i++ {
		tmp := big.NewFloat(float64(order.Uint32(valbytes[i*Uint32SizeBytes:])))
		val.Mul(val, base)
		val.Add(val, tmp)
	}
	// For sparse layouts, the last group is left-justified when the scale is
	// not a multiple of 9: shift out the unused low digits.
	actualDigits := int32(scale % maxdigits)
	if truncate && scale > 0 && (actualDigits != 0) {
		val.Quo(val, big.NewFloat(math.Pow10(int(maxdigits-actualDigits))))
	}
	if order.Uint32(valbytes)&0x80000000 != 0 {
		val.Neg(val)
	}
	// scale it and return it
	return val.Quo(val, big.NewFloat(math.Pow10(int(scale))))
}
package raster
import "math"
// KernelFilter3 applies the 3x3 convolution kernel k (9 row-major weights)
// to the grey map and returns the filtered result as a new Grmap of the same
// size. Each output value is clamped to [0, math.MaxUint16]. Out-of-bounds
// neighbours of edge pixels are substituted with the centre pixel's value.
// Returns nil if k does not contain exactly 9 elements.
func (g *Grmap) KernelFilter3(k []float64) *Grmap {
	if len(k) != 9 {
		return nil
	}
	r := NewGrmap(g.cols, g.rows)
	r.Comments = append([]string{}, g.Comments...)
	// Filter edge pixels with minimal code.
	// Execution time per pixel is high but there are few edge pixels
	// relative to the interior.
	// o3 lists the 3x3 neighbourhood offsets in the same row-major order as k.
	o3 := [][]int{
		{-1, -1}, {0, -1}, {1, -1},
		{-1, 0}, {0, 0}, {1, 0},
		{-1, 1}, {0, 1}, {1, 1}}
	edge := func(x, y int) uint16 {
		var sum float64
		for i, o := range o3 {
			c, ok := g.GetPx(x+o[0], y+o[1])
			if !ok {
				c = g.pxRow[y][x]
			}
			sum += float64(c) * k[i]
		}
		return uint16(math.Min(math.MaxUint16, math.Max(0,sum)))
	}
	// Top and bottom rows, then left and right columns (corners done once).
	for x := 0; x < r.cols; x++ {
		r.pxRow[0][x] = edge(x, 0)
		r.pxRow[r.rows-1][x] = edge(x, r.rows-1)
	}
	for y := 1; y < r.rows-1; y++ {
		r.pxRow[y][0] = edge(0, y)
		r.pxRow[y][r.cols-1] = edge(r.cols-1, y)
	}
	if r.rows < 3 || r.cols < 3 {
		return r
	}
	// Interior pixels can be filtered much more efficiently.
	// z walks the flat pixel buffer; otr/obr are the flat-index offsets of
	// the pixel one row up / one row down, one column right.
	otr := -g.cols + 1
	obr := g.cols + 1
	z := g.cols + 1
	c2 := g.cols - 2
	for y := 1; y < r.rows-1; y++ {
		// Load the 3x3 window for the first interior pixel of the row, then
		// slide it rightward one column at a time.
		tl := float64(g.pxRow[y-1][0])
		tc := float64(g.pxRow[y-1][1])
		tr := float64(g.pxRow[y-1][2])
		ml := float64(g.pxRow[y][0])
		mc := float64(g.pxRow[y][1])
		mr := float64(g.pxRow[y][2])
		bl := float64(g.pxRow[y+1][0])
		bc := float64(g.pxRow[y+1][1])
		br := float64(g.pxRow[y+1][2])
		for x := 1; ; x++ {
			r.px[z] = uint16(math.Min(math.MaxUint16, math.Max(0,
				tl*k[0] + tc*k[1] + tr*k[2] +
				ml*k[3] + mc*k[4] + mr*k[5] +
				bl*k[6] + bc*k[7] + br*k[8])))
			if x == c2 {
				break
			}
			z++
			tl, tc, tr = tc, tr, float64(g.px[z+otr])
			ml, mc, mr = mc, mr, float64(g.px[z+1])
			bl, bc, br = bc, br, float64(g.px[z+obr])
		}
		z += 3
	}
	return r
}
package business
import "context"
// BusinessContract declares the service that can create new edge cluster, read, update
// and delete existing edge clusters.
// BusinessContract declares the service that can create new edge clusters and
// read, update and delete existing edge clusters.
type BusinessContract interface {
	// CreateEdgeCluster creates a new edge cluster.
	// ctx: Mandatory. The reference to the context
	// request: Mandatory. The request to create a new edge cluster
	// Returns either the result of creating a new edge cluster or an error if something goes wrong.
	CreateEdgeCluster(
		ctx context.Context,
		request *CreateEdgeClusterRequest) (*CreateEdgeClusterResponse, error)
	// ReadEdgeCluster reads an existing edge cluster.
	// ctx: Mandatory. The reference to the context
	// request: Mandatory. The request to read an existing edge cluster
	// Returns either the result of reading an existing edge cluster or an error if something goes wrong.
	ReadEdgeCluster(
		ctx context.Context,
		request *ReadEdgeClusterRequest) (*ReadEdgeClusterResponse, error)
	// UpdateEdgeCluster updates an existing edge cluster.
	// ctx: Mandatory. The reference to the context
	// request: Mandatory. The request to update an existing edge cluster
	// Returns either the result of updating an existing edge cluster or an error if something goes wrong.
	UpdateEdgeCluster(
		ctx context.Context,
		request *UpdateEdgeClusterRequest) (*UpdateEdgeClusterResponse, error)
	// DeleteEdgeCluster deletes an existing edge cluster.
	// ctx: Mandatory. The reference to the context
	// request: Mandatory. The request to delete an existing edge cluster
	// Returns either the result of deleting an existing edge cluster or an error if something goes wrong.
	DeleteEdgeCluster(
		ctx context.Context,
		request *DeleteEdgeClusterRequest) (*DeleteEdgeClusterResponse, error)
	// ListEdgeClusters returns the list of edge clusters that matched the criteria.
	// ctx: Mandatory. The reference to the context
	// request: Mandatory. The request contains the search criteria
	// Returns the list of edge clusters that matched the criteria
	ListEdgeClusters(
		ctx context.Context,
		request *ListEdgeClustersRequest) (*ListEdgeClustersResponse, error)
	// ListEdgeClusterNodes lists an existing edge cluster's node details.
	// ctx: Mandatory. The reference to the context
	// request: Mandatory. The request to list an existing edge cluster's node details
	// Returns the existing edge cluster's node details or an error if something goes wrong.
	ListEdgeClusterNodes(
		ctx context.Context,
		request *ListEdgeClusterNodesRequest) (*ListEdgeClusterNodesResponse, error)
	// ListEdgeClusterPods lists an existing edge cluster's pod details.
	// ctx: Mandatory. The reference to the context
	// request: Mandatory. The request to list an existing edge cluster's pod details
	// Returns the existing edge cluster's pod details or an error if something goes wrong.
	ListEdgeClusterPods(
		ctx context.Context,
		request *ListEdgeClusterPodsRequest) (*ListEdgeClusterPodsResponse, error)
	// ListEdgeClusterServices lists an existing edge cluster's service details.
	// ctx: Mandatory. The reference to the context
	// request: Mandatory. The request to list an existing edge cluster's service details
	// Returns the existing edge cluster's service details or an error if something goes wrong.
	ListEdgeClusterServices(
		ctx context.Context,
		request *ListEdgeClusterServicesRequest) (*ListEdgeClusterServicesResponse, error)
}
package porousmedia
import "math"
// PorousMedium holds a set of parameters describing the transport properties
// of a porous medium, following the Campbell (1974) parameterization.
// ref: Campbell, G.S., 1974. A simple method for determining unsaturated
// conductivity from moisture retention data. Soil Science, 117: 311-387.
//
// Ts: saturated volumetric water content; Tr: residual water content (not
// part of the Campbell model itself); Ks: saturated hydraulic conductivity;
// He: air-entry potential [J/kg]; B: shape parameter.
// Typical silt-loam values: Ts 0.43, Tr 0.05, Ks 0.003, He -2.08, B 4.74.
type PorousMedium struct {
	Ts, Tr, Ks, He, B float64
}
// GetK returns the hydraulic conductivity for a given
// volumetric water content (Campbell, 1974).
func (p *PorousMedium) GetK(theta float64) float64 {
	if theta < p.Ts {
		return p.Ks * math.Pow(theta/p.Ts, 2.0*p.B+3.0)
	}
	return p.Ks // saturated: conductivity caps at Ks
}
// GetKfromPsi returns the hydraulic conductivity for a given
// matric potential (Campbell, 1974).
func (p *PorousMedium) GetKfromPsi(psi float64) float64 {
	if psi < p.He {
		return p.Ks * math.Pow(p.He/psi, 2.0+3.0/p.B)
	}
	return p.Ks // at or above air entry: saturated conductivity
}
// GetPsi returns the matric potential for a given
// volumetric water content (Campbell, 1974).
func (p *PorousMedium) GetPsi(theta float64) float64 {
	if theta < p.Ts {
		return p.He * math.Pow(theta/p.Ts, -p.B)
	}
	return p.He // saturated: potential caps at air entry
}
// GetTheta returns the volumetric water content for a
// given matric potential (Campbell, 1974).
func (p *PorousMedium) GetTheta(psi float64) float64 {
	if psi < p.He {
		return p.Ts * math.Pow(psi/p.He, -1.0/p.B)
	}
	return p.Ts // at or above air entry: fully saturated
}
// GetThetaSe returns the volumetric water content for a
// given degree of saturation Se=(t-tr)/(ts-tr)~t/ts.
func (p *PorousMedium) GetThetaSe(se float64) float64 {
	return se * p.Ts
}
// GetSe returns the degree of saturation Se=t/ts for a
// given volumetric water content.
func (p *PorousMedium) GetSe(theta float64) float64 {
	return theta / p.Ts
}
// GetSePsi returns the degree of saturation Se=t/ts for a
// given matric potential (Campbell, 1974).
func (p *PorousMedium) GetSePsi(psi float64) float64 {
	// Non-negative potentials and potentials at/above air entry both mean
	// full saturation.
	if psi >= 0.0 || psi >= p.He {
		return 1.0
	}
	return math.Pow(psi/p.He, -1.0/p.B)
}
package tempfuncs
// Parsers is the mapping between parsing name and its functions.
var Parsers = map[string]StringParserFunc{
ParserFloat64: ParseFloat64,
ParserFloat32: ParseFloat32,
ParserInt64: ParseInt64,
ParserInt: ParseInt,
ParserInt32: ParseInt32,
ParserInt16: ParseInt16,
ParserInt8: ParseInt8,
ParserUint64: ParseUint64,
ParserUint: ParseUint,
ParserUint32: ParseUint32,
ParserUint16: ParseUint16,
ParserUint8: ParseUint8,
ParserBoolean: ParseBoolean,
DummyStringParser: ParseDummyString,
}
// Parser executes the parser function registered under funcName with the
// given selector name. It panics if funcName is not a key of Parsers (the
// missing map entry yields a nil function value, which is then called).
func Parser(name, funcName string) string {
	return Parsers[funcName](name)
}
// ParserWrapper is a template function reporting whether a value of
// fieldType must be wrapped (type-converted) when used with the parser
// identified by funcName: the strconv parsers always produce
// float64/int64/uint64, so any narrower field type needs a conversion.
func ParserWrapper(funcName, fieldType string) bool {
	switch funcName {
	case ParserFloat64:
		return fieldType != "float64"
	case ParserFloat32:
		// float32 is always parsed through ParseFloat(..., 64).
		return true
	case ParserInt64, ParserInt, ParserInt32, ParserInt16, ParserInt8:
		return fieldType != "int64"
	case ParserUint64, ParserUint, ParserUint32, ParserUint16, ParserUint8:
		return fieldType != "uint64"
	}
	return false
}
// StringParserFunc is a function that converts provided selector and parses with a possible error into given value.
type StringParserFunc func(name string) string
const (
ParserFloat64 = "float64"
ParserFloat32 = "float32"
)
var (
_ StringerFunc = ParseFloat32
_ StringerFunc = ParseFloat64
)
// ParseFloat64 renders a strconv.ParseFloat call (bit size 64) for the selector.
func ParseFloat64(name string) string {
	return "strconv.ParseFloat(" + name + ", 64)"
}
// ParseFloat32 also renders a ParseFloat call with bit size 64; the result
// is narrowed to float32 by the caller (ParserWrapper always reports that
// float32 fields need wrapping).
func ParseFloat32(name string) string {
	return "strconv.ParseFloat(" + name + ", 64)"
}
const (
ParserInt64 = "int64"
ParserInt = "int"
ParserInt32 = "int32"
ParserInt16 = "int16"
ParserInt8 = "int8"
)
// ParseInt gets a string parses for int.
func ParseInt(name string) string {
return parseIntFunction(name, "mapping.IntegerBitSize", false)
}
// ParseInt64 gets a string parses for int64.
func ParseInt64(name string) string {
return parseIntFunction(name, "64", false)
}
// ParseInt32 gets a string parses for int32.
func ParseInt32(name string) string {
return parseIntFunction(name, "32", false)
}
// ParseInt16 gets a string parses for int16.
func ParseInt16(name string) string {
return parseIntFunction(name, "16", false)
}
// ParseInt8 gets a string parses for int8.
func ParseInt8(name string) string {
return parseIntFunction(name, "8", false)
}
// parseIntFunction renders a strconv.ParseInt call for the given selector
// with the given bit size, optionally wrapping the selector in an int64
// conversion first.
func parseIntFunction(name, bitSize string, wrap bool) string {
	arg := name
	if wrap {
		arg = "int64(" + name + ")"
	}
	return "strconv.ParseInt(" + arg + ", 10," + bitSize + ")"
}
const (
ParserUint64 = "uint64"
ParserUint = "uint"
ParserUint32 = "uint32"
ParserUint16 = "uint16"
ParserUint8 = "uint8"
)
// ParseUint is uint stringer function.
func ParseUint(name string) string {
return parseUintFunction(name, "mapping.IntegerBitSize", false)
}
// ParseUint64 is uint64 stringer function.
func ParseUint64(name string) string {
return parseUintFunction(name, "64", false)
}
// ParseUint32 is uint32 stringer function.
func ParseUint32(name string) string {
return parseUintFunction(name, "32", false)
}
// ParseUint16 is uint64 stringer function.
func ParseUint16(name string) string {
return parseUintFunction(name, "16", false)
}
// ParseUint8 is uint8 stringer function.
func ParseUint8(name string) string {
return parseUintFunction(name, "8", false)
}
// parseUintFunction renders a strconv.ParseUint call for the given selector
// with the given bit size, optionally wrapping the selector in a uint64
// conversion first.
func parseUintFunction(name, bitSize string, wrap bool) string {
	arg := name
	if wrap {
		arg = "uint64(" + name + ")"
	}
	return "strconv.ParseUint(" + arg + ", 10," + bitSize + ")"
}
// ParseBoolean renders a strconv.ParseBool call for the given selector.
func ParseBoolean(name string) string {
	return "strconv.ParseBool(" + name + ")"
}
// DummyStringParser is the identifier of the no-op string "parser".
const DummyStringParser = "string"
// ParseDummyString renders a pass-through expression: the selector itself
// paired with a nil error, matching the (value, error) shape produced by
// the real strconv parsers.
func ParseDummyString(name string) string {
	return name + ", nil"
}
package contracts
import (
"context"
"sync"
"testing"
"github.com/adamluzsi/frameless"
"github.com/adamluzsi/frameless/contracts/assert"
"github.com/adamluzsi/frameless/doubles"
"github.com/adamluzsi/frameless/extid"
"github.com/adamluzsi/testcase"
"github.com/stretchr/testify/require"
)
// MetaAccessor is a testcase contract verifying a frameless.MetaAccessor
// implementation: both the basic SetMeta/LookupMeta behaviour and that meta
// values set on a context propagate into publisher events.
type MetaAccessor struct {
	// T is the entity type; V is the meta value type.
	T, V T
	// Subject returns the subject under test.
	Subject func(testing.TB) MetaAccessorSubject
	// Context returns the base context for a test run.
	Context func(testing.TB) context.Context
	// FixtureFactory builds fixture values for T and V.
	FixtureFactory func(testing.TB) frameless.FixtureFactory
}
// accessor is the testcase variable holding the MetaAccessor under test.
var accessor = testcase.Var{Name: `frameless.MetaAccessor`}
// accessorGet retrieves the MetaAccessor under test from the testcase variables.
func accessorGet(t *testcase.T) frameless.MetaAccessor {
	return accessor.Get(t).(frameless.MetaAccessor)
}
// MetaAccessorSubject bundles everything the MetaAccessor contract needs:
// the accessor itself, a CRD resource, and a publisher of creator, updater
// and deleter events.
type MetaAccessorSubject struct {
	frameless.MetaAccessor
	Resource CRD
	Publisher interface {
		frameless.CreatorPublisher
		frameless.UpdaterPublisher
		frameless.DeleterPublisher
	}
}
// metaAccessorSubject is the testcase variable holding the MetaAccessorSubject.
var metaAccessorSubject = testcase.Var{Name: `MetaAccessorSubject`}
// metaAccessorSubjectGet retrieves the MetaAccessorSubject from the testcase variables.
func metaAccessorSubjectGet(t *testcase.T) MetaAccessorSubject {
	return metaAccessorSubject.Get(t).(MetaAccessorSubject)
}
// Test runs the contract as a test.
func (c MetaAccessor) Test(t *testing.T) {
	c.Spec(testcase.NewSpec(t))
}
// Benchmark runs the contract as a benchmark.
func (c MetaAccessor) Benchmark(b *testing.B) {
	c.Spec(testcase.NewSpec(b))
}
// Spec composes the contract from its two sub-contracts: basic accessor
// behaviour and publisher propagation.
func (c MetaAccessor) Spec(s *testcase.Spec) {
	testcase.RunContract(s,
		MetaAccessorBasic{V: c.V,
			Subject: func(tb testing.TB) frameless.MetaAccessor {
				return c.Subject(tb).MetaAccessor
			},
			FixtureFactory: c.FixtureFactory,
		},
		MetaAccessorPublisher{T: c.T, V: c.V,
			Subject: func(tb testing.TB) MetaAccessorSubject {
				return c.Subject(tb)
			},
			FixtureFactory: c.FixtureFactory,
			Context: c.Context,
		},
	)
}
// MetaAccessorBasic is a testcase contract covering only the SetMeta and
// LookupMeta behaviour of a frameless.MetaAccessor.
type MetaAccessorBasic struct {
	// V is the value T type that can be set and looked up with frameless.MetaAccessor.
	V T
	// Subject returns the MetaAccessor under test.
	Subject func(testing.TB) frameless.MetaAccessor
	// FixtureFactory builds fixture values for V.
	FixtureFactory func(testing.TB) frameless.FixtureFactory
}
// Test runs the contract as a test.
func (c MetaAccessorBasic) Test(t *testing.T) {
	c.Spec(testcase.NewSpec(t))
}
// Benchmark runs the contract as a benchmark.
func (c MetaAccessorBasic) Benchmark(b *testing.B) {
	c.Spec(testcase.NewSpec(b))
}
// Spec declares the SetMeta/LookupMeta round-trip expectations.
func (c MetaAccessorBasic) Spec(s *testcase.Spec) {
	factoryLet(s, c.FixtureFactory)
	accessor.Let(s, func(t *testcase.T) interface{} {
		return c.Subject(t)
	})
	// SetMeta(ctx context.Context, key string, value interface{}) (context.Context, error)
	// LookupMeta(ctx context.Context, key string, ptr interface{}) (_found bool, _err error)
	s.Describe(`.SetMeta+.LookupMeta`, func(s *testcase.Spec) {
		var (
			ctx = ctx.Let(s, nil)
			key = s.Let(`key`, func(t *testcase.T) interface{} { return t.Random.String() })
			keyGet = func(t *testcase.T) string { return key.Get(t).(string) }
			value = s.Let(`value`, func(t *testcase.T) interface{} { return factoryGet(t).Fixture(c.V, nil) })
		)
		subjectSetMeta := func(t *testcase.T) (context.Context, error) {
			return accessorGet(t).SetMeta(ctxGet(t), keyGet(t), value.Get(t))
		}
		subjectLookupMeta := func(t *testcase.T, ptr interface{} /*[V]*/) (bool, error) {
			return accessorGet(t).LookupMeta(ctxGet(t), keyGet(t), ptr)
		}
		s.Test(`on an empty context the lookup will yield no result without an issue`, func(t *testcase.T) {
			found, err := subjectLookupMeta(t, newT(c.V))
			require.NoError(t, err)
			require.False(t, found)
		})
		s.When(`value is set in a context`, func(s *testcase.Spec) {
			s.Before(func(t *testcase.T) {
				// SetMeta returns a derived context; store it so the lookup
				// below runs against it.
				newContext, err := subjectSetMeta(t)
				require.NoError(t, err)
				ctx.Set(t, newContext)
			})
			s.Then(`value can be found with lookup`, func(t *testcase.T) {
				ptr := newT(c.V)
				found, err := subjectLookupMeta(t, ptr)
				require.NoError(t, err)
				require.True(t, found)
				require.Equal(t, base(ptr), value.Get(t))
			})
		})
	})
}
// MetaAccessorPublisher is a testcase contract verifying that meta values set
// on a context with SetMeta are observable (via LookupMeta) from within the
// contexts delivered to create/update/delete event subscribers.
type MetaAccessorPublisher struct {
	// T is the entity type; V is the meta value type.
	T, V T
	// Subject returns the subject under test.
	Subject func(testing.TB) MetaAccessorSubject
	// Context returns the base context for a test run.
	Context func(testing.TB) context.Context
	// FixtureFactory builds fixture values for T and V.
	FixtureFactory func(testing.TB) frameless.FixtureFactory
}
// Test runs the contract as a test.
func (c MetaAccessorPublisher) Test(t *testing.T) {
	c.Spec(testcase.NewSpec(t))
}
// Benchmark runs the contract as a benchmark.
func (c MetaAccessorPublisher) Benchmark(b *testing.B) {
	c.Spec(testcase.NewSpec(b))
}
// Spec declares one scenario per event kind (create, delete-by-id,
// delete-all, update). Each scenario subscribes, sets a meta value on the
// context, performs the operation, and asserts the subscriber could look the
// value up from the event's context. The mutex-guarded `actual` is needed
// because the subscriber handler runs on another goroutine.
func (c MetaAccessorPublisher) Spec(s *testcase.Spec) {
	factoryLet(s, c.FixtureFactory)
	metaAccessorSubject.Let(s, func(t *testcase.T) interface{} {
		return c.Subject(t)
	})
	accessor.Let(s, func(t *testcase.T) interface{} {
		return metaAccessorSubjectGet(t).MetaAccessor
	})
	s.Test(".SetMeta -> .Create -> .Subscribe -> .LookupMeta", func(t *testcase.T) {
		ctx := c.Context(t)
		key := t.Random.String()
		expected := base(factoryGet(t).Fixture(c.V, nil))
		var (
			actual interface{}
			mutex sync.RWMutex
		)
		sub, err := metaAccessorSubjectGet(t).Publisher.SubscribeToCreatorEvents(ctx, doubles.StubSubscriber{
			HandleFunc: func(ctx context.Context, event interface{}) error {
				_ = event.(frameless.CreateEvent)
				v := newT(c.V)
				found, err := metaAccessorSubjectGet(t).LookupMeta(ctx, key, v)
				require.NoError(t, err)
				require.True(t, found)
				mutex.Lock()
				defer mutex.Unlock()
				actual = base(v)
				return nil
			},
		})
		require.NoError(t, err)
		t.Defer(sub.Close)
		ctx, err = accessorGet(t).SetMeta(ctx, key, expected)
		require.NoError(t, err)
		assert.CreateEntity(t, metaAccessorSubjectGet(t).Resource, ctx, CreatePTR(factoryGet(t), c.T))
		assert.Eventually.Assert(t, func(t testing.TB) {
			mutex.RLock()
			defer mutex.RUnlock()
			require.Equal(t, expected, actual)
		})
	})
	s.Test(".SetMeta -> .DeleteByID -> .Subscribe -> .LookupMeta", func(t *testcase.T) {
		ctx := c.Context(t)
		key := t.Random.String()
		expected := base(factoryGet(t).Fixture(c.V, nil))
		ptr := CreatePTR(factoryGet(t), c.T)
		assert.CreateEntity(t, metaAccessorSubjectGet(t).Resource, ctx, ptr)
		id := assert.HasID(t, ptr)
		var (
			actual interface{}
			mutex sync.RWMutex
		)
		sub, err := metaAccessorSubjectGet(t).Publisher.SubscribeToDeleterEvents(ctx, doubles.StubSubscriber{
			HandleFunc: func(ctx context.Context, event interface{}) error {
				// Only DeleteByID events are of interest here.
				if _, ok := event.(frameless.DeleteByIDEvent); !ok {
					return nil
				}
				v := newT(c.V)
				found, err := metaAccessorSubjectGet(t).LookupMeta(ctx, key, v)
				require.NoError(t, err)
				require.True(t, found)
				mutex.Lock()
				defer mutex.Unlock()
				actual = base(v)
				return nil
			},
		})
		require.NoError(t, err)
		t.Defer(sub.Close)
		ctx, err = accessorGet(t).SetMeta(ctx, key, expected)
		require.NoError(t, err)
		require.Nil(t, metaAccessorSubjectGet(t).Resource.DeleteByID(ctx, id))
		assert.Eventually.Assert(t, func(t testing.TB) {
			mutex.RLock()
			defer mutex.RUnlock()
			require.Equal(t, expected, actual)
		})
	})
	s.Test(".SetMeta -> .DeleteAll -> .Subscribe -> .LookupMeta", func(t *testcase.T) {
		ctx := c.Context(t)
		key := t.Random.String()
		expected := base(factoryGet(t).Fixture(c.V, nil))
		ptr := CreatePTR(factoryGet(t), c.T)
		assert.CreateEntity(t, metaAccessorSubjectGet(t).Resource, ctx, ptr)
		var (
			actual interface{}
			mutex sync.RWMutex
		)
		sub, err := metaAccessorSubjectGet(t).Publisher.SubscribeToDeleterEvents(ctx, doubles.StubSubscriber{
			HandleFunc: func(ctx context.Context, event interface{}) error {
				// Only DeleteAll events are of interest here.
				if _, ok := event.(frameless.DeleteAllEvent); !ok {
					return nil
				}
				v := newT(c.V)
				found, err := metaAccessorSubjectGet(t).LookupMeta(ctx, key, v)
				require.NoError(t, err)
				require.True(t, found)
				mutex.Lock()
				defer mutex.Unlock()
				actual = base(v)
				return nil
			},
		})
		require.NoError(t, err)
		t.Defer(sub.Close)
		ctx, err = accessorGet(t).SetMeta(ctx, key, expected)
		require.NoError(t, err)
		require.Nil(t, metaAccessorSubjectGet(t).Resource.DeleteAll(ctx))
		assert.Eventually.Assert(t, func(t testing.TB) {
			mutex.RLock()
			defer mutex.RUnlock()
			require.Equal(t, expected, actual)
		})
	})
	s.Test(".SetMeta -> .Update -> .Subscribe -> .LookupMeta", func(t *testcase.T) {
		// Updating is optional for the resource; skip if unsupported.
		crud, ok := metaAccessorSubjectGet(t).Resource.(UpdaterSubject)
		if !ok {
			t.Skipf(`frameless.Updater is not implemented by %T`, metaAccessorSubjectGet(t).Resource)
		}
		ctx := c.Context(t)
		key := t.Random.String()
		expected := base(factoryGet(t).Fixture(c.V, nil))
		ptr := CreatePTR(factoryGet(t), c.T)
		assert.CreateEntity(t, metaAccessorSubjectGet(t).Resource, ctx, ptr)
		id := assert.HasID(t, ptr)
		var (
			actual interface{}
			mutex sync.RWMutex
		)
		sub, err := metaAccessorSubjectGet(t).Publisher.SubscribeToUpdaterEvents(ctx, doubles.StubSubscriber{
			HandleFunc: func(ctx context.Context, event interface{}) error {
				if _, ok := event.(frameless.UpdateEvent); !ok {
					return nil
				}
				v := newT(c.V)
				found, err := metaAccessorSubjectGet(t).LookupMeta(ctx, key, v)
				require.NoError(t, err)
				require.True(t, found)
				mutex.Lock()
				defer mutex.Unlock()
				actual = base(v)
				return nil
			},
		})
		require.NoError(t, err)
		t.Defer(sub.Close)
		// Update the created entity with a fresh fixture carrying the same id.
		updPTR := CreatePTR(factoryGet(t), c.T)
		require.NoError(t, extid.Set(updPTR, id))
		ctx, err = accessorGet(t).SetMeta(ctx, key, expected)
		require.NoError(t, err)
		require.Nil(t, crud.Update(ctx, updPTR))
		assert.Eventually.Assert(t, func(t testing.TB) {
			mutex.RLock()
			defer mutex.RUnlock()
			require.Equal(t, expected, actual)
		})
	})
}
package statmodel
import (
"fmt"
"math"
)
// Focuser is a regression model that can be restricted ("focused") to a
// single coefficient: Focus(j, coeff, offset) returns a one-parameter
// sub-model used by the coordinate descent loop in FitL1Reg.
type Focuser interface {
	NumParams() int
	NumObs() int
	Focus(int, []float64, []float64) RegFitter
	LogLike(Parameter, bool) float64
	Score(Parameter, []float64)
	Hessian(Parameter, HessType, []float64)
}
// FitL1Reg fits the model under L1 (lasso) regularization by cyclic
// coordinate descent and returns the fitted parameter. l1wgt holds one
// penalty weight per coefficient; offset is passed through to the focused
// sub-models. checkstep, when true, verifies each coordinate step actually
// improves the penalized objective (see opt1d). The coefficients inside
// param are updated in place.
func FitL1Reg(model Focuser, param Parameter, l1wgt, offset []float64, checkstep bool) Parameter {
	maxiter := 400
	// A parameter for the 1-d focused model.
	param1d := param.Clone()
	param1d.SetCoeff([]float64{0})
	nvar := model.NumParams()
	nobs := model.NumObs()
	// Since we are using non-normalized log-likelihood, the
	// tolerance can scale with the sample size.
	tol := 1e-7 * float64(nobs)
	if tol > 0.1 {
		tol = 0.1
	}
	coeff := param.GetCoeff()
	// Outer coordinate descent loop.
	for iter := 0; iter < maxiter; iter++ {
		// L-inf of the increment in the parameter vector
		px := 0.0
		// Loop over covariates
		for j := 0; j < nvar; j++ {
			// Get the new point
			fmodel := model.Focus(j, coeff, offset)
			np := opt1d(fmodel, coeff[j], param1d, float64(nobs)*l1wgt[j], checkstep)
			// Update the change measure
			d := math.Abs(np - coeff[j])
			if d > px {
				px = d
			}
			coeff[j] = np
		}
		// Converged once no coordinate moved more than tol in this sweep.
		if px < tol {
			break
		}
	}
	return param
}
// opt1d minimizes the one-dimensional penalized objective
// -loglike(x) + l1wgt*|x| for the focused model m1, starting from coeff.
// It uses a local quadratic approximation (soft thresholding), then falls
// back to a line search if the step does not improve the objective.
func opt1d(m1 RegFitter, coeff float64, par Parameter, l1wgt float64, checkstep bool) float64 {
	// Quadratic approximation coefficients: b is the negative score
	// (gradient) and c the negative Hessian at the current point.
	bv := make([]float64, 1)
	par.SetCoeff([]float64{coeff})
	m1.Score(par, bv)
	b := -bv[0]
	cv := make([]float64, 1)
	m1.Hessian(par, ObsHess, cv)
	c := -cv[0]
	// The optimum point of the quadratic approximation
	d := b - c*coeff
	if l1wgt > math.Abs(d) {
		// The optimum is achieved by hard thresholding to zero
		return 0
	}
	// pj + h is the minimizer of Q(x) + L1_wt*abs(x)
	var h float64
	if d >= 0 {
		h = (l1wgt - b) / c
	} else if d < 0 {
		h = -(l1wgt + b) / c
	} else {
		// Unreachable unless d is NaN (it fails both comparisons above).
		panic(fmt.Sprintf("d=%f\n", d))
	}
	if !checkstep {
		return coeff + h
	}
	// Check whether the new point improves the target function.
	// This check is a bit expensive and not necessary for OLS
	par.SetCoeff([]float64{coeff})
	f0 := -m1.LogLike(par, false) + l1wgt*math.Abs(coeff)
	par.SetCoeff([]float64{coeff + h})
	f1 := -m1.LogLike(par, false) + l1wgt*math.Abs(coeff+h)
	if f1 <= f0+1e-10 {
		return coeff + h
	}
	// Wrap the log-likelihood so it takes a scalar argument.
	fw := func(z float64) float64 {
		par.SetCoeff([]float64{z})
		f := -m1.LogLike(par, false) + l1wgt*math.Abs(z)
		return f
	}
	// Fallback for models where the loss is not quadratic
	w := 1.0
	btol := 1e-7
	np := bisection(fw, coeff-w, coeff+w, btol)
	return np
}
// Standard bisection to minimize f.
//
// bisection first tries to find a bracket (x0, x1, x2) with
// f(x1) < f(x0) and f(x1) < f(x2), starting from the interval [xl, xu]
// and sliding/expanding it as needed. If no bracket is found within 100
// attempts, the best of the three current points is returned. Otherwise
// the bracket is narrowed until its width is at most tol and the interior
// point x1 is returned.
//
// Fixes relative to the previous version:
//   - the "slide left" branch updated x1 and x2 before computing the
//     shift, so x2-x1 was always zero and the bracket collapsed and
//     never moved left; the updates are now ordered to mirror the
//     "slide right" branch.
//   - function values are cached across iterations instead of being
//     recomputed for all three points on every pass.
func bisection(f func(float64) float64, xl, xu, tol float64) float64 {
	x0, x2 := xl, xu
	x1 := (x0 + x2) / 2
	f0, f1, f2 := f(x0), f(x1), f(x2)

	// Try to find a bracket.
	success := false
	for k := 0; k < 100; k++ {
		if f1 < f0 && f1 < f2 {
			success = true
			break
		}
		switch {
		case f0 > f1 && f1 > f2:
			// Values are decreasing: slide the window right.
			x0, f0 = x1, f1
			x1, f1 = x2, f2
			x2 += 1.5 * (x1 - x0)
			f2 = f(x2)
		case f0 < f1 && f1 < f2:
			// Values are increasing: slide the window left.
			x2, f2 = x1, f1
			x1, f1 = x0, f0
			x0 -= 1.5 * (x2 - x1)
			f0 = f(x0)
		default:
			// No monotone trend: widen the window around x1.
			x0 = x1 - 2*(x1-x0)
			x2 = x1 + 2*(x2-x1)
			f0 = f(x0)
			f2 = f(x2)
		}
	}
	if !success {
		fmt.Printf("Did not find bracket...\n")
		// Return the best of the three points seen last.
		if f0 < f1 && f0 < f2 {
			return x0
		} else if f1 < f0 && f1 < f2 {
			return x1
		}
		return x2
	}

	// Narrow the bracket, always bisecting the larger half.
	for x2-x0 > tol {
		if x1-x0 > x2-x1 {
			xx := (x0 + x1) / 2
			if ff := f(xx); ff < f1 {
				x2 = x1
				x1, f1 = xx, ff
			} else {
				x0 = xx
			}
		} else {
			xx := (x1 + x2) / 2
			if ff := f(xx); ff < f1 {
				x0 = x1
				x1, f1 = xx, ff
			} else {
				x2 = xx
			}
		}
	}
	return x1
}
package streams
// Node represents a topology node.
type Node interface {
	// Name gets the node name.
	Name() string
	// AddChild adds a child node to the node.
	AddChild(n Node)
	// Children gets the node's children.
	Children() []Node
	// Processor gets the node's processor.
	// It may be nil for nodes that have no processor (e.g. source nodes).
	Processor() Processor
}
// Compile-time check that *SourceNode satisfies Node.
var _ = (Node)(&SourceNode{})

// SourceNode represents a node between the source
// and the rest of the node tree.
type SourceNode struct {
	name     string
	children []Node
}

// NewSourceNode creates a new SourceNode.
func NewSourceNode(name string) *SourceNode {
	return &SourceNode{
		name: name,
	}
}

// Name gets the node name.
func (n *SourceNode) Name() string {
	return n.name
}

// AddChild adds a child node to the node.
func (n *SourceNode) AddChild(node Node) {
	n.children = append(n.children, node)
}

// Children gets the node's children.
func (n *SourceNode) Children() []Node {
	return n.children
}

// Processor gets the node's processor.
// A SourceNode has no processor, so this always returns nil.
func (n *SourceNode) Processor() Processor {
	return nil
}
// Compile-time check that *ProcessorNode satisfies Node.
var _ = (Node)(&ProcessorNode{})

// ProcessorNode represents the topology node for a processor.
type ProcessorNode struct {
	name      string
	processor Processor
	children  []Node
}

// NewProcessorNode creates a new ProcessorNode.
func NewProcessorNode(name string, p Processor) *ProcessorNode {
	return &ProcessorNode{
		name:      name,
		processor: p,
	}
}

// Name gets the node name.
func (n *ProcessorNode) Name() string {
	return n.name
}

// AddChild adds a child node to the node.
func (n *ProcessorNode) AddChild(node Node) {
	n.children = append(n.children, node)
}

// Children gets the node's children.
func (n *ProcessorNode) Children() []Node {
	return n.children
}

// Processor gets the node's processor.
func (n *ProcessorNode) Processor() Processor {
	return n.processor
}
// Topology represents the streams topology.
// It is the immutable result of TopologyBuilder.Build.
type Topology struct {
	sources    map[Source]Node
	processors []Node
}

// Sources gets the topology's source nodes, keyed by their Source.
func (t Topology) Sources() map[Source]Node {
	return t.sources
}

// Processors gets the topology's processor nodes.
func (t Topology) Processors() []Node {
	return t.processors
}
// TopologyBuilder represents a topology builder.
// Nodes are accumulated via AddSource/AddProcessor and validated by the
// configured inspections when Build is called.
type TopologyBuilder struct {
	inspections []inspection
	sources     map[Source]Node
	processors  []Node
}

// NewTopologyBuilder creates a new TopologyBuilder with the default
// set of inspections.
func NewTopologyBuilder() *TopologyBuilder {
	inspections := []inspection{
		sourcesConnected,
		committersConnected,
		committerIsLeafNode,
	}
	return &TopologyBuilder{
		inspections: inspections,
		sources:     map[Source]Node{},
		processors:  []Node{},
	}
}

// AddSource adds a Source to the builder, returning the created Node.
func (tb *TopologyBuilder) AddSource(name string, source Source) Node {
	n := NewSourceNode(name)
	tb.sources[source] = n
	return n
}

// AddProcessor adds a Processor to the builder, returning the created Node.
// The new node is attached as a child of every given parent.
func (tb *TopologyBuilder) AddProcessor(name string, processor Processor, parents []Node) Node {
	n := NewProcessorNode(name, processor)
	for _, parent := range parents {
		parent.AddChild(n)
	}
	tb.processors = append(tb.processors, n)
	return n
}
// Build creates an immutable Topology.
// All configured inspections are run first; any problems found are
// returned as errors alongside the (possibly invalid) Topology, so
// callers must check the error slice before using the result.
func (tb *TopologyBuilder) Build() (*Topology, []error) {
	var errs []error
	for _, inspection := range tb.inspections {
		if err := inspection(tb.sources, tb.processors); err != nil {
			errs = append(errs, err)
		}
	}
	return &Topology{
		sources:    tb.sources,
		processors: tb.processors,
	}, errs
}
// nodesConnected reports whether the trees rooted at roots form a single
// connected graph. It walks all nodes breadth-first, counting how many
// times a child is reached that has already been seen (a cross-link
// between subtrees), and returns true when exactly len(roots)-1 such
// cross-links are found.
func nodesConnected(roots []Node) bool {
	// Zero or one root is trivially connected.
	if len(roots) <= 1 {
		return true
	}
	var nodes []Node // all nodes already visited
	var visit []Node // BFS queue
	connections := 0
	for _, node := range roots {
		visit = append(visit, node)
	}
	for len(visit) > 0 {
		var n Node
		n, visit = visit[0], visit[1:]
		nodes = append(nodes, n)
		for _, c := range n.Children() {
			// A child that is already queued or visited links two
			// otherwise separate subtrees together.
			if contains(c, visit) || contains(c, nodes) {
				connections++
				continue
			}
			visit = append(visit, c)
		}
	}
	return connections == len(roots)-1
}
// flattenNodeTree collects every processor node reachable from the given
// source roots into a flat slice, visiting breadth-first, and then
// reorders the slice so that no child appears before its parent.
func flattenNodeTree(roots map[Source]Node) []Node {
	var nodes []Node // collected processor nodes
	var visit []Node // BFS queue
	for _, node := range roots {
		visit = append(visit, node)
	}
	for len(visit) > 0 {
		var n Node
		n, visit = visit[0], visit[1:]
		// Only processor nodes are collected; source nodes have a nil
		// processor and are skipped.
		if n.Processor() != nil {
			nodes = append(nodes, n)
		}
		for _, c := range n.Children() {
			if contains(c, visit) || contains(c, nodes) {
				continue
			}
			visit = append(visit, c)
		}
	}
	// In asymmetric trees, our dependencies can be out of order,
	// which will cause errors. In order to rectify this, we check
	// that no dependency appears higher in the list than us; if a
	// child precedes its parent, the two are swapped and scanning
	// resumes from the swapped position.
	for i := 0; i < len(nodes); i++ {
		node := nodes[i]
		for _, child := range node.Children() {
			pos := indexOf(child, nodes)
			if pos < i {
				temp := nodes[pos]
				nodes[pos] = nodes[i]
				nodes[i] = temp
				i = pos
			}
		}
	}
	return nodes
}
// reverseNodes reverses the order of nodes in place.
func reverseNodes(nodes []Node) {
	for lo, hi := 0, len(nodes)-1; lo < hi; lo, hi = lo+1, hi-1 {
		nodes[lo], nodes[hi] = nodes[hi], nodes[lo]
	}
}
// contains reports whether n is present in nodes.
func contains(n Node, nodes []Node) bool {
	for i := range nodes {
		if nodes[i] == n {
			return true
		}
	}
	return false
}
func indexOf(n Node, nodes []Node) int {
for i, node := range nodes {
if node == n {
return i
}
}
return -1
} | topology.go | 0.758242 | 0.435781 | topology.go | starcoder |
package conversion
import (
"fmt"
"github.com/galaco/Lambda/internal/model/valve/world"
"github.com/galaco/gosigl"
"github.com/galaco/lambda-core/material"
lambdaMesh "github.com/galaco/lambda-core/mesh"
lambdaModel "github.com/galaco/lambda-core/model"
)
// SolidToModel converts a VMF solid into a renderable model,
// producing one mesh per side of the solid.
func SolidToModel(solid *world.Solid) *lambdaModel.Model {
	meshes := make([]lambdaMesh.IMesh, 0, len(solid.Sides))
	for i := range solid.Sides {
		meshes = append(meshes, SideToMesh(&solid.Sides[i]))
	}
	return lambdaModel.NewModel(fmt.Sprintf("solid_%d", solid.Id), meshes...)
}
// SideToMesh builds a renderable mesh for a single brush side.
// The side's 3 plane points are expanded into two triangles forming a
// quad (the fourth corner is derived from the other three).
// NOTE(review): texture dimensions are hard-coded to 512x512 below —
// the real material dimensions should be used once known (see TODO).
func SideToMesh(side *world.Side) lambdaMesh.IMesh {
	mesh := lambdaMesh.NewMesh()
	// Material
	mesh.SetMaterial(material.NewMaterial(side.Material))
	// Vertices
	verts := make([]float32, 0)
	{
		// a plane represents 3 vertices- bottom-left, top-left and top-right
		// Triangle 1
		verts = append(verts, float32(side.Plane[0].X()), float32(side.Plane[0].Y()), float32(side.Plane[0].Z()))
		verts = append(verts, float32(side.Plane[1].X()), float32(side.Plane[1].Y()), float32(side.Plane[1].Z()))
		verts = append(verts, float32(side.Plane[2].X()), float32(side.Plane[2].Y()), float32(side.Plane[2].Z()))
		// Triangle 2
		verts = append(verts, float32(side.Plane[0].X()), float32(side.Plane[0].Y()), float32(side.Plane[0].Z()))
		verts = append(verts, float32(side.Plane[2].X()), float32(side.Plane[2].Y()), float32(side.Plane[2].Z()))
		// Compute new vertex: the fourth corner of the quad, mirrored
		// from the second plane point across the 0->2 diagonal.
		vert4 := side.Plane[2].Sub(side.Plane[1].Sub(side.Plane[0]))
		verts = append(verts, float32(vert4.X()), float32(vert4.Y()), float32(vert4.Z()))
		mesh.AddVertex(verts...)
	}
	// Normals: one face normal from the plane's edge vectors, repeated
	// for all 6 vertices.
	// NOTE(review): the normal is not normalized here — confirm that
	// downstream consumers do not require unit normals.
	normals := make([]float32, 0)
	{
		normal := (side.Plane[1].Sub(side.Plane[0])).Cross(side.Plane[2].Sub(side.Plane[0]))
		normals = append(normals, float32(normal.X()), float32(normal.Y()), float32(normal.Z()))
		normals = append(normals, float32(normal.X()), float32(normal.Y()), float32(normal.Z()))
		normals = append(normals, float32(normal.X()), float32(normal.Y()), float32(normal.Z()))
		normals = append(normals, float32(normal.X()), float32(normal.Y()), float32(normal.Z()))
		normals = append(normals, float32(normal.X()), float32(normal.Y()), float32(normal.Z()))
		normals = append(normals, float32(normal.X()), float32(normal.Y()), float32(normal.Z()))
		mesh.AddNormal(normals...)
	}
	// Texture coordinates: one UV pair per vertex (3 floats each).
	{
		for i := 0; i < len(verts); i += 3 {
			// @TODO width & height must be known
			mesh.AddUV(uvForVertex(verts[i:i+3], &side.UAxis, &side.VAxis, 512, 512)...)
		}
	}
	// Tangents
	mesh.GenerateTangents()
	gosigl.FinishMesh()
	return mesh
}
// uvForVertex computes the texture coordinate (u, v) for a single vertex
// as the dot product of the vertex with each axis transform, offset by
// Scale divided by the texture dimension.
// NOTE(review): Valve's texture mapping conventionally divides the dot
// product by the scale and adds a shift before dividing by the texture
// size — confirm the intended semantics of UVTransform.Scale here.
func uvForVertex(vertex []float32, u *world.UVTransform, v *world.UVTransform, width int, height int) (uvs []float32) {
	cu := (float32(u.Transform[0]) * vertex[0]) +
		(float32(u.Transform[1]) * vertex[1]) +
		(float32(u.Transform[2]) * vertex[2]) +
		float32(u.Scale)/float32(width)
	cv := (float32(v.Transform[0]) * vertex[0]) +
		(float32(v.Transform[1]) * vertex[1]) +
		(float32(v.Transform[2]) * vertex[2]) +
		float32(v.Scale)/float32(height)
	return []float32{cu, cv}
}
package btree
// Index is the interface for the data container of a Node.
// Implementations provide the ordering used to place nodes in the tree.
type Index interface {
	LessThan(Index) bool
	EqualsTo(Index) bool
}

// Node is an AVL tree element.
// Score is the balance factor of the subtree rooted here, and
// Edges holds the left (0) and right (1) children.
type Node struct {
	Data  Index
	Score int
	Edges [2]*Node
}
// Public

// Insert a node into the AVL tree.
// *tree is updated in place with the new (possibly rotated) root.
func Insert(tree **Node, data Index) {
	*tree, _ = insertInRightOf(*tree, data)
}

// Remove a single item from an AVL tree.
// *tree is updated in place with the new (possibly rotated) root.
func Remove(tree **Node, data Index) {
	*tree, _ = removeFromRightOf(*tree, data)
}
// Private

// opp returns the sibling direction: 0 becomes 1 and 1 becomes 0.
func opp(d int) int {
	return -(d - 1)
}
// singleRotation rotates parent toward direction, promoting its child on
// the opposite side to the root of the subtree, and returns that child.
func singleRotation(parent *Node, direction int) (b *Node) {
	b = parent.Edges[opp(direction)]
	parent.Edges[opp(direction)] = b.Edges[direction]
	b.Edges[direction] = parent
	return
}
// doubleRotation performs a double rotation around parent in direction d:
// first the grandchild on the inner side is rotated up under parent, then
// parent itself is rotated, and the new subtree root is returned.
func doubleRotation(parent *Node, d int) (b *Node) {
	// First rotation: lift the inner grandchild.
	b = parent.Edges[opp(d)].Edges[d]
	parent.Edges[opp(d)].Edges[d] = b.Edges[opp(d)]
	b.Edges[opp(d)] = parent.Edges[opp(d)]
	parent.Edges[opp(d)] = b
	// Second rotation: rotate parent down.
	b = parent.Edges[opp(d)]
	parent.Edges[opp(d)] = b.Edges[d]
	b.Edges[d] = parent
	return
}
// adjust fixes up the balance scores of parent, its child in direction
// dir, and that child's inner grandchild after a double rotation. The new
// scores depend on which side the grandchild (the future subtree root)
// was leaning; the grandchild's own score is always reset to 0.
func adjust(parent *Node, dir, bal int) {
	n := parent.Edges[dir]
	nn := n.Edges[opp(dir)]
	switch nn.Score {
	case 0:
		parent.Score = 0
		n.Score = 0
	case bal:
		parent.Score = bal
		n.Score = 0
	default:
		parent.Score = 0
		n.Score = bal
	}
	nn.Score = 0
}
// insertScoreBalance rebalances node after an insertion made its subtree
// in direction dir too heavy, choosing a single or double rotation based
// on which way the heavy child leans, and returns the new subtree root.
func insertScoreBalance(node *Node, dir int) *Node {
	n := node.Edges[dir]
	// bal is +1 for dir==1 (right) and -1 for dir==0 (left).
	bal := 2*dir - 1
	if n.Score == bal {
		// Child leans the same way: a single rotation suffices.
		node.Score = 0
		n.Score = 0
		return singleRotation(node, opp(dir))
	}
	// Child leans the other way: adjust scores and double-rotate.
	adjust(node, dir, bal)
	return doubleRotation(node, opp(dir))
}
// insertInRightOf recursively inserts data into the subtree rooted at
// node, descending right when node.Data is less than data and left
// otherwise. It returns the (possibly rotated) subtree root and a flag
// that is true once no further rebalancing is needed above this level.
func insertInRightOf(node *Node, data Index) (*Node, bool) {
	if node == nil {
		return &Node{Data: data}, false
	}
	dir := 0
	if node.Data.LessThan(data) {
		dir = 1
	}
	var done bool
	node.Edges[dir], done = insertInRightOf(node.Edges[dir], data)
	if done {
		return node, true
	}
	// The subtree in direction dir grew by one level.
	node.Score += 2*dir - 1
	switch node.Score {
	case 0:
		// Height unchanged overall: propagation can stop.
		return node, true
	case 1, -1:
		// Slightly unbalanced but within limits: keep propagating.
		return node, false
	}
	// |Score| == 2: rotate to restore balance.
	return insertScoreBalance(node, dir), true
}
// removeScoreBalance rebalances root after a removal made its subtree in
// direction dir too short. It returns the new subtree root and a flag
// that is true when the subtree height did not change (so propagation
// of the removal can stop).
func removeScoreBalance(root *Node, dir int) (*Node, bool) {
	n := root.Edges[opp(dir)]
	bal := 2*dir - 1
	switch n.Score {
	case -bal:
		// Heavy child leans away from dir: single rotation, height shrinks.
		root.Score = 0
		n.Score = 0
		return singleRotation(root, dir), false
	case bal:
		// Heavy child leans toward dir: double rotation, height shrinks.
		adjust(root, opp(dir), -bal)
		return doubleRotation(root, dir), false
	}
	// Heavy child is balanced: rotate but overall height is unchanged.
	root.Score = -bal
	n.Score = bal
	return singleRotation(root, dir), true
}
// removeFromRightOf recursively removes data from the subtree rooted at
// node. A node with two children is replaced by its in-order predecessor
// (the rightmost node of its left subtree), whose value is then removed
// from that subtree. It returns the (possibly rotated) subtree root and
// a flag that is true once no further rebalancing is needed.
func removeFromRightOf(node *Node, data Index) (*Node, bool) {
	if node == nil {
		return nil, false
	}
	if node.Data.EqualsTo(data) {
		// Zero or one child: splice the node out directly.
		switch {
		case node.Edges[0] == nil:
			return node.Edges[1], false
		case node.Edges[1] == nil:
			return node.Edges[0], false
		}
		// Two children: copy the in-order predecessor's value here and
		// fall through to delete that predecessor instead.
		heir := node.Edges[0]
		for heir.Edges[1] != nil {
			heir = heir.Edges[1]
		}
		node.Data = heir.Data
		data = heir.Data
	}
	dir := 0
	if node.Data.LessThan(data) {
		dir = 1
	}
	var done bool
	node.Edges[dir], done = removeFromRightOf(node.Edges[dir], data)
	if done {
		return node, true
	}
	// The subtree in direction dir shrank by one level.
	node.Score += 1 - 2*dir
	switch node.Score {
	case 1, -1:
		// Now slightly unbalanced but within limits: stop propagating.
		return node, true
	case 0:
		// Height shrank overall: keep propagating.
		return node, false
	}
	// |Score| == 2: rotate to restore balance.
	return removeScoreBalance(node, dir)
}
package lit
import (
"github.com/mb0/xelf/bfr"
"github.com/mb0/xelf/cor"
"github.com/mb0/xelf/typ"
)
// BreakIter is a special error value that can be returned from iterators.
// It indicates that the iteration should be stopped even though no actual failure occurred.
var BreakIter = cor.StrError("break iter")
// Deopt unwraps an optional literal: when l implements Opter the wrapped
// literal is returned, otherwise l itself is returned unchanged.
func Deopt(l Lit) Lit {
	o, ok := l.(Opter)
	if !ok {
		return l
	}
	return o.Some()
}
// Lit is the common interface for all literal adapters.
// A nil Lit represents an absent value.
type Lit interface {
	// Typ returns the defined type of the literal.
	Typ() typ.Type
	// IsZero returns whether the literal value is the zero value.
	IsZero() bool
	// WriteBfr writes to a bfr ctx either as strict JSON or xelf representation.
	WriteBfr(*bfr.Ctx) error
	// String returns the xelf representation as string.
	String() string
	// MarshalJSON returns the JSON representation as bytes.
	MarshalJSON() ([]byte, error)
}

// Opter is the interface for literals with an optional type.
type Opter interface {
	Lit
	// Some returns the wrapped literal, or nil when the value is absent.
	Some() Lit
}
// Proxr encapsulates the extra method-set of proxy literals.
// It is used only for easier interface composition before Go 1.13.
type Proxr interface {
	// New returns a zero instance of the proxy literal.
	New() Proxy
	// Ptr returns a pointer to the underlying go value as interface.
	Ptr() interface{}
	// Assign assigns the value of the given literal or returns an error.
	// The literal must be a valid literal of the same type.
	Assign(Lit) error
}

// Idxr encapsulates the extra method-set of indexer literals.
// It is used only for easier interface composition before Go 1.13.
type Idxr interface {
	// Idx returns the literal of the element at idx or an error.
	Idx(idx int) (Lit, error)
	// SetIdx sets the element value at idx to l and returns the indexer or an error.
	SetIdx(idx int, l Lit) (Indexer, error)
	// IterIdx iterates over elements, calling iter with the element's index and literal value.
	// If iter returns an error the iteration is aborted.
	IterIdx(iter func(int, Lit) error) error
}

// Keyr encapsulates the extra method-set of keyer literals.
// It is only used for easier interface composition before Go 1.13.
type Keyr interface {
	// Keys returns a string slice of all keys.
	Keys() []string
	// Key returns the literal of the element with key key or an error.
	Key(key string) (Lit, error)
	// SetKey sets the element's value with key to l and returns the keyer or an error.
	SetKey(key string, l Lit) (Keyer, error)
	// IterKey iterates over elements, calling iter with the element's key and literal value.
	// If iter returns an error the iteration is aborted.
	IterKey(iter func(string, Lit) error) error
}
// Indexer is the common interface for container literals with elements accessible by index.
type Indexer interface {
	Lit
	Idxr
	// Len returns the number of contained elements.
	Len() int
}

// Keyer is the common interface for container literals with elements accessible by key.
type Keyer interface {
	Lit
	Keyr
	// Len returns the number of contained elements.
	Len() int
}

// Proxy is the common interface for proxies and some adapter pointers that can be assigned to.
type Proxy interface {
	Lit
	Proxr
}

// Numeric is the common interface for numeric literals.
type Numeric interface {
	Lit
	// Num returns the numeric value of the literal as float64.
	Num() float64
	// Val returns the simple go value representing this literal.
	// The type is either bool, int64, float64, time.Time or time.Duration
	Val() interface{}
}

// Character is the common interface for character literals.
type Character interface {
	Lit
	// Char returns the character format of the literal as string.
	Char() string
	// Val returns the simple go value representing this literal.
	// The type is either string, []byte, [16]byte, time.Time or time.Duration.
	Val() interface{}
}
// Appender is the common interface for list literals.
type Appender interface {
	Indexer
	// Append appends the given literals and returns a new appender or an error.
	Append(...Lit) (Appender, error)
	// Element returns a newly created proxy of the element type or an error.
	Element() (Proxy, error)
}

// Dictionary is the interface for dict literals.
type Dictionary interface {
	Keyer
	// Element returns a newly created proxy of the element type or an error.
	Element() (Proxy, error)
}

// Record is the interface for record literals.
type Record interface {
	Lit
	Idxr
	Keyr
	// Len returns the number of fields.
	Len() int
}

// MarkSpan is a marker interface. When implemented on an int64 it indicates a span type.
type MarkSpan interface{ Seconds() float64 }

// MarkBits is a marker interface. When implemented on an unsigned integer it indicates a bits type.
type MarkBits interface{ Bits() map[string]int64 }

// MarkEnum is a marker interface. When implemented on a string or integer it indicates an enum type.
type MarkEnum interface{ Enums() map[string]int64 }
package iso20022
// SecuritiesFinancingTransactionDetails3 carries the details of the closing
// of a securities financing transaction (ISO 20022 component). All fields
// are optional pointers, following the generated-code conventions of this
// package.
type SecuritiesFinancingTransactionDetails3 struct {

	// Unambiguous identification of the underlying securities financing trade as assigned by the instructing party. The identification is common to all collateral pieces (one or many).
	SecuritiesFinancingTradeIdentification *Max35Text `xml:"SctiesFincgTradId,omitempty"`

	// Unambiguous identification of the second leg of the transaction as known by the account owner (or the instructing party acting on its behalf).
	ClosingLegIdentification *Max35Text `xml:"ClsgLegId,omitempty"`

	// Closing date/time or maturity date/time of the transaction.
	TerminationDate *TerminationDate2Choice `xml:"TermntnDt,omitempty"`

	// Date/Time at which rate change has taken place.
	RateChangeDate *DateAndDateTimeChoice `xml:"RateChngDt,omitempty"`

	// Specifies whether the rate is fixed or variable.
	RateType *RateType5Choice `xml:"RateTp,omitempty"`

	// Specifies whether the collateral position should be subject to automatic revaluation by the account servicer.
	Revaluation *RevaluationIndicator1Choice `xml:"Rvaltn,omitempty"`

	// Legal framework of the transaction.
	LegalFramework *LegalFramework1Choice `xml:"LglFrmwk,omitempty"`

	// Identifies the computation method of accrued interest of the related financial instrument.
	InterestComputationMethod *InterestComputationMethodFormat1Choice `xml:"IntrstCmptnMtd,omitempty"`

	// Specifies whether the interest is to be paid to the collateral taker. If set to no, the interest is paid to the collateral giver.
	InterestPayment *YesNoIndicator `xml:"IntrstPmt,omitempty"`

	// Index or support rate used together with the spread to calculate the
	// repurchase rate.
	VariableRateSupport *RateName1 `xml:"VarblRateSpprt,omitempty"`

	// Rate to be used to recalculate the repurchase amount.
	RepurchaseRate *Rate2 `xml:"RpRate,omitempty"`

	// Percentage mark-up on a loan consideration used to reflect the lender's risk.
	StockLoanMargin *Rate2 `xml:"StockLnMrgn,omitempty"`

	// Haircut or valuation factor on the security expressed as a percentage.
	SecuritiesHaircut *Rate2 `xml:"SctiesHrcut,omitempty"`

	// Interest rate to be paid on the transaction amount, as agreed between the counterparties.
	PricingRate *RateOrName1Choice `xml:"PricgRate,omitempty"`

	// Repurchase spread expressed as a rate; margin over or under an index that determines the repurchase rate.
	Spread *Rate2 `xml:"Sprd,omitempty"`

	// Minimum number of days' notice a counterparty needs for terminating the transaction.
	TransactionCallDelay *Exact3NumericText `xml:"TxCallDely,omitempty"`

	// Total number of collateral instructions involved in the transaction.
	TotalNumberOfCollateralInstructions *Exact3NumericText `xml:"TtlNbOfCollInstrs,omitempty"`

	// Principal amount of a trade (for second leg).
	DealAmount *AmountAndDirection4 `xml:"DealAmt,omitempty"`

	// Interest amount that has accrued in between coupon payment periods.
	AccruedInterestAmount *AmountAndDirection4 `xml:"AcrdIntrstAmt,omitempty"`

	// Fixed amount of money that has to be paid (instead of interest) in the case of a recall or at the closing date.
	ForfeitAmount *AmountAndDirection4 `xml:"FrftAmt,omitempty"`

	// Difference between the amount of money of the first leg and the amount of the second leg of the transaction.
	PremiumAmount *AmountAndDirection4 `xml:"PrmAmt,omitempty"`

	// Amount of money to be settled per piece of collateral to terminate the transaction.
	TerminationAmountPerPieceOfCollateral *AmountAndDirection4 `xml:"TermntnAmtPerPcOfColl,omitempty"`

	// Total amount of money to be settled to terminate the transaction.
	TerminationTransactionAmount *AmountAndDirection4 `xml:"TermntnTxAmt,omitempty"`

	// Provides additional information about the second leg in narrative form.
	SecondLegNarrative *Max140Text `xml:"ScndLegNrrtv,omitempty"`
}
// The accessors below follow the generated-code conventions of this
// package: Set* methods store a plain Go value by converting it to the
// corresponding restricted ISO 20022 type (taking the address of the
// parameter copy), and Add* methods allocate the nested component,
// attach it to the struct, and return it so callers can populate it.

func (s *SecuritiesFinancingTransactionDetails3) SetSecuritiesFinancingTradeIdentification(value string) {
	s.SecuritiesFinancingTradeIdentification = (*Max35Text)(&value)
}

func (s *SecuritiesFinancingTransactionDetails3) SetClosingLegIdentification(value string) {
	s.ClosingLegIdentification = (*Max35Text)(&value)
}

func (s *SecuritiesFinancingTransactionDetails3) AddTerminationDate() *TerminationDate2Choice {
	s.TerminationDate = new(TerminationDate2Choice)
	return s.TerminationDate
}

func (s *SecuritiesFinancingTransactionDetails3) AddRateChangeDate() *DateAndDateTimeChoice {
	s.RateChangeDate = new(DateAndDateTimeChoice)
	return s.RateChangeDate
}

func (s *SecuritiesFinancingTransactionDetails3) AddRateType() *RateType5Choice {
	s.RateType = new(RateType5Choice)
	return s.RateType
}

func (s *SecuritiesFinancingTransactionDetails3) AddRevaluation() *RevaluationIndicator1Choice {
	s.Revaluation = new(RevaluationIndicator1Choice)
	return s.Revaluation
}

func (s *SecuritiesFinancingTransactionDetails3) AddLegalFramework() *LegalFramework1Choice {
	s.LegalFramework = new(LegalFramework1Choice)
	return s.LegalFramework
}

func (s *SecuritiesFinancingTransactionDetails3) AddInterestComputationMethod() *InterestComputationMethodFormat1Choice {
	s.InterestComputationMethod = new(InterestComputationMethodFormat1Choice)
	return s.InterestComputationMethod
}

func (s *SecuritiesFinancingTransactionDetails3) SetInterestPayment(value string) {
	s.InterestPayment = (*YesNoIndicator)(&value)
}

func (s *SecuritiesFinancingTransactionDetails3) AddVariableRateSupport() *RateName1 {
	s.VariableRateSupport = new(RateName1)
	return s.VariableRateSupport
}

func (s *SecuritiesFinancingTransactionDetails3) AddRepurchaseRate() *Rate2 {
	s.RepurchaseRate = new(Rate2)
	return s.RepurchaseRate
}

func (s *SecuritiesFinancingTransactionDetails3) AddStockLoanMargin() *Rate2 {
	s.StockLoanMargin = new(Rate2)
	return s.StockLoanMargin
}

func (s *SecuritiesFinancingTransactionDetails3) AddSecuritiesHaircut() *Rate2 {
	s.SecuritiesHaircut = new(Rate2)
	return s.SecuritiesHaircut
}

func (s *SecuritiesFinancingTransactionDetails3) AddPricingRate() *RateOrName1Choice {
	s.PricingRate = new(RateOrName1Choice)
	return s.PricingRate
}

func (s *SecuritiesFinancingTransactionDetails3) AddSpread() *Rate2 {
	s.Spread = new(Rate2)
	return s.Spread
}

func (s *SecuritiesFinancingTransactionDetails3) SetTransactionCallDelay(value string) {
	s.TransactionCallDelay = (*Exact3NumericText)(&value)
}

func (s *SecuritiesFinancingTransactionDetails3) SetTotalNumberOfCollateralInstructions(value string) {
	s.TotalNumberOfCollateralInstructions = (*Exact3NumericText)(&value)
}

func (s *SecuritiesFinancingTransactionDetails3) AddDealAmount() *AmountAndDirection4 {
	s.DealAmount = new(AmountAndDirection4)
	return s.DealAmount
}

func (s *SecuritiesFinancingTransactionDetails3) AddAccruedInterestAmount() *AmountAndDirection4 {
	s.AccruedInterestAmount = new(AmountAndDirection4)
	return s.AccruedInterestAmount
}

func (s *SecuritiesFinancingTransactionDetails3) AddForfeitAmount() *AmountAndDirection4 {
	s.ForfeitAmount = new(AmountAndDirection4)
	return s.ForfeitAmount
}

func (s *SecuritiesFinancingTransactionDetails3) AddPremiumAmount() *AmountAndDirection4 {
	s.PremiumAmount = new(AmountAndDirection4)
	return s.PremiumAmount
}

func (s *SecuritiesFinancingTransactionDetails3) AddTerminationAmountPerPieceOfCollateral() *AmountAndDirection4 {
	s.TerminationAmountPerPieceOfCollateral = new(AmountAndDirection4)
	return s.TerminationAmountPerPieceOfCollateral
}

func (s *SecuritiesFinancingTransactionDetails3) AddTerminationTransactionAmount() *AmountAndDirection4 {
	s.TerminationTransactionAmount = new(AmountAndDirection4)
	return s.TerminationTransactionAmount
}

func (s *SecuritiesFinancingTransactionDetails3) SetSecondLegNarrative(value string) {
	s.SecondLegNarrative = (*Max140Text)(&value)
}
// Sample program that takes a stream of bytes and looks for the bytes
// “elvis” and when they are found, replace them with “Elvis”. The code
// cannot assume that there are any line feeds or other delimiters in the
// stream and the code must assume that the stream is of any arbitrary length.
// The solution cannot meaningfully buffer to the end of the stream and
// then process the replacement.
package main
import (
"bufio"
"bytes"
"fmt"
"io"
)
// data represents a table of input and expected output.
// The cases cover non-matches, exact matches, partial and overlapping
// prefixes, and near-misses with separators inside the word.
var data = []struct {
	input  []byte
	output []byte
}{
	{[]byte("abc"), []byte("abc")},
	{[]byte("elvis"), []byte("Elvis")},
	{[]byte("aElvis"), []byte("aElvis")},
	{[]byte("abcelvis"), []byte("abcElvis")},
	{[]byte("eelvis"), []byte("eElvis")},
	{[]byte("aelvis"), []byte("aElvis")},
	{[]byte("aabeeeelvis"), []byte("aabeeeElvis")},
	{[]byte("e l v i s"), []byte("e l v i s")},
	{[]byte("aa bb e l v i saa"), []byte("aa bb e l v i saa")},
	{[]byte(" elvi s"), []byte(" elvi s")},
	{[]byte("elvielvis"), []byte("elviElvis")},
	{[]byte("elvielvielviselvi1"), []byte("elvielviElviselvi1")},
	{[]byte("elvielviselvis"), []byte("elviElvisElvis")},
}
// Declare what needs to be found and its replacement.
var find = []byte("elvis")
var repl = []byte("Elvis")

// Calculate the number of bytes we need to locate.
// size is the sliding-window/match length used by the algorithms below.
var size = len(find)
// main runs every test case through each algorithm and prints whether the
// produced output matches the expected output. Note that "Algorithm Four"
// is exercised through NewReplaceReader rather than by calling algFour.
func main() {
	var output bytes.Buffer

	fmt.Println("=======================================\nRunning Algorithm One")
	for _, d := range data {
		input := bytes.NewReader(d.input)
		output.Reset()
		algOne(input, &output)
		matched := bytes.Compare(d.output, output.Bytes())
		fmt.Printf("Matched: %v  Inp: [%s] Exp: [%s] Got: [%s]\n", matched == 0, d.input, d.output, output.Bytes())
	}

	fmt.Println("=======================================\nRunning Algorithm Two")
	for _, d := range data {
		input := bytes.NewReader(d.input)
		output.Reset()
		algTwo(input, &output)
		matched := bytes.Compare(d.output, output.Bytes())
		fmt.Printf("Matched: %v  Inp: [%s] Exp: [%s] Got: [%s]\n", matched == 0, d.input, d.output, output.Bytes())
	}

	fmt.Println("=======================================\nRunning Algorithm Three")
	for _, d := range data {
		input := bytes.NewReader(d.input)
		output.Reset()
		algThree(input, &output)
		matched := bytes.Compare(d.output, output.Bytes())
		fmt.Printf("Matched: %v  Inp: [%s] Exp: [%s] Got: [%s]\n", matched == 0, d.input, d.output, output.Bytes())
	}

	fmt.Println("=======================================\nRunning Algorithm Four")
	for _, d := range data {
		output.Reset()
		output.ReadFrom(NewReplaceReader(bytes.NewReader(d.input), find, repl))
		matched := bytes.Compare(d.output, output.Bytes())
		fmt.Printf("Matched: %v  Inp: [%s] Exp: [%s] Got: [%s]\n", matched == 0, d.input, d.output, output.Bytes())
	}
}
// algOne solves the problem with a fixed-size sliding window. It first
// fills all but the last byte of the window, then consumes the stream one
// byte at a time: each new byte completes the window, which is compared
// against the search pattern (replacing it in place on a match) before
// the oldest byte is emitted and the window shifts left by one.
func algOne(r io.Reader, w *bytes.Buffer) {
	// window holds the bytes currently under comparison; next receives
	// one byte per read; keep is the number of bytes carried between reads.
	window := make([]byte, size)
	next := make([]byte, 1)
	keep := size - 1

	// Prime the window. A stream shorter than the window can never match,
	// so whatever was read is flushed verbatim.
	if n, err := io.ReadFull(r, window[:keep]); err != nil {
		w.Write(window[:n])
		return
	}

	for {
		n, err := io.ReadFull(r, next)

		if n == 1 {
			// Complete the window with the incoming byte.
			window[keep] = next[0]

			// Swap in the replacement when the window matches.
			if bytes.Equal(window, find) {
				copy(window, repl)
			}

			// Emit the oldest byte and slide the window left.
			w.WriteByte(window[0])
			copy(window, window[1:])
		}

		// End of stream: flush the bytes still held in the window.
		if err != nil {
			w.Write(window[:keep])
			return
		}
	}
}
// algTwo is a second way to solve the problem. This approach takes an
// io.Reader to represent an infinite stream. This allows for the algorithm to
// accept input from just about anywhere, thanks to the beauty of Go
// interfaces.
// Provided by <NAME> https://twitter.com/TylerJBunnell
//
// NOTE(review): b[0] == 0 is used as the "byte consumed" sentinel. After a
// partial-match failure the unmatched byte is kept in b for re-use, but if
// that byte is itself 0x00 the next iteration reads a fresh byte and the
// NUL is silently dropped. Fine for text input; confirm before using on
// binary streams.
func algTwo(r io.Reader, w *bytes.Buffer) {
	// Create a byte slice of length 1 into which our byte will be read.
	b := make([]byte, 1)

	// Create an index variable to match bytes.
	idx := 0

	for {
		// Are we re-using the byte from a previous call?
		if b[0] == 0 {
			// Read a single byte from our input.
			n, err := r.Read(b)
			if n == 0 || err != nil {
				break
			}
		}

		// Does this byte match the byte at this offset?
		if b[0] == find[idx] {
			// It matches so increment the index position.
			idx++

			// If every byte has been matched, write
			// out the replacement.
			if idx == size {
				w.Write(repl)
				idx = 0
			}

			// Reset the reader byte to 0 so another byte will be read.
			b[0] = 0
			continue
		}

		// Did we have any sort of match on any given byte?
		if idx != 0 {
			// Write what we've matched up to this point.
			w.Write(find[:idx])

			// NOTE: we are NOT resetting the reader byte to 0 here because we need
			// to re-use it on the next call. This is equivalent to the UnreadByte()
			// call in the other functions.

			// Reset the offset to start matching from the beginning.
			idx = 0
			continue
		}

		// There was no previous match. Write byte and reset.
		w.WriteByte(b[0])

		// Reset the reader byte to 0 so another byte will be read.
		b[0] = 0
	}
}
// algThree is a third way to solve the problem.
// It tracks how far into the search pattern the stream currently matches
// (idx) and flushes the partially-matched prefix whenever the match fails.
// Provided by <NAME> https://twitter.com/billhathaway
func algThree(r io.Reader, w *bytes.Buffer) {
	// This identifies where we are in the match.
	var idx int

	var buf = make([]byte, 1)

	for {
		n, err := r.Read(buf)
		if err != nil || n == 0 {
			break
		}

		// Does newest byte match next byte of find pattern?
		if buf[0] == find[idx] {
			idx++

			// If a full match, write out the replacement pattern.
			if idx == len(find) {
				w.Write(repl)
				idx = 0
			}
			continue
		}

		// If we have matched anything earlier, write it.
		if idx > 0 {
			w.Write(find[:idx])
			idx = 0
		}

		// Match start of pattern? The failing byte may itself begin a
		// new match attempt.
		if buf[0] == find[0] {
			idx = 1
			continue
		}

		// Write out what we read since no match.
		w.Write(buf)
	}

	// Write out any partial match before returning.
	if idx > 0 {
		w.Write(find[:idx])
	}
}
// algFour is a fourth way to solve the problem, using bufio.Reader's
// Peek/Discard to look ahead in the stream.
// Provided by <NAME> https://twitter.com/acw5
//
// NOTE(review): main exercises this approach through NewReplaceReader
// rather than calling algFour directly; a Discard failure ends the copy
// silently.
func algFour(r io.Reader, w *bytes.Buffer) {
	buf := bufio.NewReaderSize(r, len(find))
	for {
		peek, err := buf.Peek(len(find))
		if err == nil && bytes.Equal(find, peek) {
			// A match was found. Advance the bufio reader past the match.
			if _, err := buf.Discard(len(find)); err != nil {
				return
			}
			w.Write(repl)
			continue
		}

		// Ignore any peek errors because we may not be able to peek
		// but still be able to read a byte.
		c, err := buf.ReadByte()
		if err != nil {
			return
		}
		w.WriteByte(c)
	}
}
// NewReplaceReader returns an io.Reader that reads from r, replacing
// any occurrence of old with new. Used by algFour.
func NewReplaceReader(r io.Reader, old, new []byte) io.Reader {
	rr := replaceReader{
		br:  bufio.NewReaderSize(r, len(old)),
		old: old,
		new: new,
	}
	return &rr
}
// replaceReader streams bytes from an underlying reader, substituting
// every occurrence of old with new on the fly (see Read).
type replaceReader struct {
	br *bufio.Reader // buffered source; sized so Peek(len(old)) is possible
	old, new []byte
}
// Read reads into p the translated bytes: every occurrence of r.old in the
// underlying stream is replaced by r.new in the output.
//
// A match expands to len(r.new) bytes, so capacity in p is checked BEFORE a
// replacement is copied. The previous version checked after writing, which
// could silently truncate a replacement and even report n > len(p) to the
// caller (an io.Reader contract violation).
func (r *replaceReader) Read(p []byte) (int, error) {
	var n int
	for {
		if n == len(p) {
			return n, nil
		}
		peek, err := r.br.Peek(len(r.old))
		if err == nil && bytes.Equal(r.old, peek) {
			// A match is pending. Only consume it if the replacement
			// fits; otherwise hand back what we have — the next Read
			// re-peeks and picks the match up again.
			if n+len(r.new) > len(p) {
				if n == 0 {
					// p can never hold even one replacement.
					return 0, io.ErrShortBuffer
				}
				return n, nil
			}
			if _, err := r.br.Discard(len(r.old)); err != nil {
				return n, err
			}
			n += copy(p[n:], r.new)
			continue
		}
		// Ignore any peek errors because we may not be able to peek
		// but still be able to read a byte.
		p[n], err = r.br.ReadByte()
		if err != nil {
			return n, err
		}
		n++
	}
} | topics/go/packages/io/example4/example4.go | 0.682468 | 0.581927 | example4.go | starcoder |
package apitest
import (
"fmt"
"net/http"
"github.com/stretchr/testify/assert"
)
// TestingT is an interface to wrap the native *testing.T interface, this allows integration with GinkgoT() interface
// GinkgoT interface defined in https://github.com/onsi/ginkgo/blob/55c858784e51c26077949c81b6defb6b97b76944/ginkgo_dsl.go#L91
type TestingT interface {
	// Errorf reports a formatted failure and continues execution.
	Errorf(format string, args ...interface{})
	// Fatal reports a failure and stops execution.
	Fatal(args ...interface{})
	// Fatalf reports a formatted failure and stops execution.
	Fatalf(format string, args ...interface{})
}
// Verifier is the assertion interface allowing consumers to inject a custom assertion implementation.
// It also allows failure scenarios to be tested within apitest
type Verifier interface {
	// Equal asserts that expected and actual are equal.
	Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool
	// JSONEq asserts that two JSON strings are equivalent.
	JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool
	// Fail reports an unconditional failure with the given message.
	Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool
	// NoError asserts that err is nil.
	NoError(t TestingT, err error, msgAndArgs ...interface{}) bool
}
// testifyVerifier is a Verifier that delegates every assertion to
// https://github.com/stretchr/testify.
type testifyVerifier struct{}

// Equal asserts that two objects are equal.
func (testifyVerifier) Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
	return assert.Equal(t, expected, actual, msgAndArgs...)
}

// JSONEq asserts that two JSON strings are equivalent.
func (testifyVerifier) JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
	return assert.JSONEq(t, expected, actual, msgAndArgs...)
}

// Fail reports a failure.
func (testifyVerifier) Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
	return assert.Fail(t, failureMessage, msgAndArgs...)
}

// NoError asserts that a function returned no error.
func (testifyVerifier) NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
	return assert.NoError(t, err, msgAndArgs...)
}
// newTestifyVerifier creates the default, testify-backed Verifier.
func newTestifyVerifier() Verifier {
	var v testifyVerifier
	return v
}
// NoopVerifier is a verifier that does not perform verification
type NoopVerifier struct{}

var _ Verifier = NoopVerifier{}

// Equal does not perform any assertion and always returns true
func (NoopVerifier) Equal(_ TestingT, _, _ interface{}, _ ...interface{}) bool {
	return true
}

// JSONEq does not perform any assertion and always returns true
func (NoopVerifier) JSONEq(_ TestingT, _ string, _ string, _ ...interface{}) bool {
	return true
}

// Fail does not perform any assertion and always returns true
func (NoopVerifier) Fail(_ TestingT, _ string, _ ...interface{}) bool {
	return true
}

// NoError does not perform any assertion and always returns true
func (NoopVerifier) NoError(_ TestingT, _ error, _ ...interface{}) bool {
	return true
}
// IsSuccess is a convenience function to assert on a range of happy path
// status codes (2xx and 3xx).
var IsSuccess Assert = func(response *http.Response, request *http.Request) error {
	if response.StatusCode < 200 || response.StatusCode >= 400 {
		return fmt.Errorf("not success. Status code=%d", response.StatusCode)
	}
	return nil
}
// IsClientError is a convenience function to assert on a range of client
// error status codes (4xx).
var IsClientError Assert = func(response *http.Response, request *http.Request) error {
	if response.StatusCode < 400 || response.StatusCode >= 500 {
		return fmt.Errorf("not a client error. Status code=%d", response.StatusCode)
	}
	return nil
}
// IsServerError is a convenience function to assert on a range of server
// error status codes (500 and above).
var IsServerError Assert = func(response *http.Response, request *http.Request) error {
	if response.StatusCode < 500 {
		return fmt.Errorf("not a server error. Status code=%d", response.StatusCode)
	}
	return nil
} | assert.go | 0.823683 | 0.446314 | assert.go | starcoder |
package cryptoapis
import (
"encoding/json"
)
// BlockMinedDataItem Defines an `item` as one result.
// All fields are required (see NewBlockMinedDataItem).
type BlockMinedDataItem struct {
	// Represents the specific blockchain protocol name, e.g. Ethereum, Bitcoin, etc.
	Blockchain string `json:"blockchain"`
	// Represents the name of the blockchain network used; blockchain networks are usually identical as technology and software, but they differ in data, e.g. - \"mainnet\" is the live network with actual data while networks like \"testnet\", \"ropsten\", \"rinkeby\" are test networks.
	Network string `json:"network"`
	// Defines the number of blocks in the blockchain preceding this specific block.
	Height int32 `json:"height"`
	// Represents the hash of the block's header, i.e. an output that has a fixed length.
	Hash string `json:"hash"`
	// Defines the exact date/time when this block was mined in seconds since Unix Epoch time.
	Timestamp int32 `json:"timestamp"`
}
// NewBlockMinedDataItem instantiates a new BlockMinedDataItem object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBlockMinedDataItem(blockchain string, network string, height int32, hash string, timestamp int32) *BlockMinedDataItem {
	return &BlockMinedDataItem{
		Blockchain: blockchain,
		Network:    network,
		Height:     height,
		Hash:       hash,
		Timestamp:  timestamp,
	}
}
// NewBlockMinedDataItemWithDefaults instantiates a new BlockMinedDataItem object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBlockMinedDataItemWithDefaults() *BlockMinedDataItem {
	return &BlockMinedDataItem{}
}
// GetBlockchain returns the Blockchain field value; a nil receiver yields "".
func (o *BlockMinedDataItem) GetBlockchain() string {
	if o == nil {
		return ""
	}
	return o.Blockchain
}

// GetBlockchainOk returns a pointer to the Blockchain field value and a
// boolean reporting whether the receiver was usable.
func (o *BlockMinedDataItem) GetBlockchainOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Blockchain, true
}

// SetBlockchain sets the Blockchain field value.
func (o *BlockMinedDataItem) SetBlockchain(v string) {
	o.Blockchain = v
}
// GetNetwork returns the Network field value; a nil receiver yields "".
func (o *BlockMinedDataItem) GetNetwork() string {
	if o == nil {
		return ""
	}
	return o.Network
}

// GetNetworkOk returns a pointer to the Network field value and a boolean
// reporting whether the receiver was usable.
func (o *BlockMinedDataItem) GetNetworkOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Network, true
}

// SetNetwork sets the Network field value.
func (o *BlockMinedDataItem) SetNetwork(v string) {
	o.Network = v
}
// GetHeight returns the Height field value; a nil receiver yields 0.
func (o *BlockMinedDataItem) GetHeight() int32 {
	if o == nil {
		return 0
	}
	return o.Height
}

// GetHeightOk returns a pointer to the Height field value and a boolean
// reporting whether the receiver was usable.
func (o *BlockMinedDataItem) GetHeightOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Height, true
}

// SetHeight sets the Height field value.
func (o *BlockMinedDataItem) SetHeight(v int32) {
	o.Height = v
}
// GetHash returns the Hash field value; a nil receiver yields "".
func (o *BlockMinedDataItem) GetHash() string {
	if o == nil {
		return ""
	}
	return o.Hash
}

// GetHashOk returns a pointer to the Hash field value and a boolean
// reporting whether the receiver was usable.
func (o *BlockMinedDataItem) GetHashOk() (*string, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Hash, true
}

// SetHash sets the Hash field value.
func (o *BlockMinedDataItem) SetHash(v string) {
	o.Hash = v
}
// GetTimestamp returns the Timestamp field value; a nil receiver yields 0.
func (o *BlockMinedDataItem) GetTimestamp() int32 {
	if o == nil {
		return 0
	}
	return o.Timestamp
}

// GetTimestampOk returns a pointer to the Timestamp field value and a
// boolean reporting whether the receiver was usable.
func (o *BlockMinedDataItem) GetTimestampOk() (*int32, bool) {
	if o == nil {
		return nil, false
	}
	return &o.Timestamp, true
}

// SetTimestamp sets the Timestamp field value.
func (o *BlockMinedDataItem) SetTimestamp(v int32) {
	o.Timestamp = v
}
// MarshalJSON implements json.Marshaler. All fields are required, so each
// one is emitted unconditionally; the generated `if true { ... }` guards
// were dead code and have been removed.
func (o BlockMinedDataItem) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{
		"blockchain": o.Blockchain,
		"network":    o.Network,
		"height":     o.Height,
		"hash":       o.Hash,
		"timestamp":  o.Timestamp,
	}
	return json.Marshal(toSerialize)
}
// NullableBlockMinedDataItem wraps a BlockMinedDataItem pointer together
// with a flag recording whether the value has been explicitly set.
type NullableBlockMinedDataItem struct {
	value *BlockMinedDataItem
	isSet bool
}

// Get returns the wrapped value; it may be nil.
func (v NullableBlockMinedDataItem) Get() *BlockMinedDataItem {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableBlockMinedDataItem) Set(val *BlockMinedDataItem) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether Set (or UnmarshalJSON) has been called since the
// last Unset.
func (v NullableBlockMinedDataItem) IsSet() bool {
	return v.isSet
}

// Unset clears both the value and the set flag.
func (v *NullableBlockMinedDataItem) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableBlockMinedDataItem returns a wrapper holding val, already
// marked as set.
func NewNullableBlockMinedDataItem(val *BlockMinedDataItem) *NullableBlockMinedDataItem {
	return &NullableBlockMinedDataItem{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; a nil value encodes as JSON null.
func (v NullableBlockMinedDataItem) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes src into the wrapped value and marks the wrapper
// as set — even when src is the literal "null", which leaves value nil.
func (v *NullableBlockMinedDataItem) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | model_block_mined_data_item.go | 0.811825 | 0.452959 | model_block_mined_data_item.go | starcoder |
package geom
// NewBoundingBox builds a box from two corner points, returning nil if
// either corner is nil. The corners do not have to be ordered: the result
// always has the per-coordinate minimum in LeftBottom and maximum in
// RightTop.
func NewBoundingBox(leftBottom *LngLat, rightTop *LngLat) *BoundingBox {
	if leftBottom == nil || rightTop == nil {
		return nil
	}
	// Start from a degenerate box at leftBottom and extend it with a
	// degenerate box at rightTop; Extend takes per-coordinate min/max,
	// which normalizes any corner ordering.
	return BoundingBox{LeftBottom: leftBottom, RightTop: leftBottom}.Extend(&BoundingBox{
		LeftBottom: rightTop,
		RightTop: rightTop,
	})
}
// Vertexes returns the four corners of the box in the order:
// LeftBottom, bottom-right, RightTop, top-left.
func (x BoundingBox) Vertexes() []*LngLat {
	lb, rt := x.LeftBottom, x.RightTop
	return []*LngLat{
		lb,
		{Longitude: rt.Longitude, Latitude: lb.Latitude},
		rt,
		{Longitude: lb.Longitude, Latitude: rt.Latitude},
	}
}
// Vertex returns the index-th corner of the box, using the same ordering
// as Vertexes (0=LeftBottom, 1=bottom-right, 2=RightTop, 3=top-left).
// Any other index yields nil.
func (x BoundingBox) Vertex(index int) *LngLat {
	switch index {
	case 0:
		return x.LeftBottom
	case 2:
		return x.RightTop
	case 1:
		return &LngLat{Longitude: x.RightTop.Longitude, Latitude: x.LeftBottom.Latitude}
	case 3:
		return &LngLat{Longitude: x.LeftBottom.Longitude, Latitude: x.RightTop.Latitude}
	}
	return nil
}
// Extend returns the smallest box containing both x and box: the
// per-coordinate minimum for LeftBottom and maximum for RightTop.
// A nil argument returns a copy of x unchanged.
func (x BoundingBox) Extend(box *BoundingBox) *BoundingBox {
	if box == nil {
		return &x
	}
	min := func(a, b float64) float64 {
		if a <= b {
			return a
		}
		return b
	}
	max := func(a, b float64) float64 {
		if a >= b {
			return a
		}
		return b
	}
	return &BoundingBox{
		LeftBottom: &LngLat{
			Longitude: min(x.LeftBottom.Longitude, box.LeftBottom.Longitude),
			Latitude:  min(x.LeftBottom.Latitude, box.LeftBottom.Latitude),
		},
		RightTop: &LngLat{
			Longitude: max(x.RightTop.Longitude, box.RightTop.Longitude),
			Latitude:  max(x.RightTop.Latitude, box.RightTop.Latitude),
		},
	}
}
// ExtendDegree returns a copy of the box grown outward by lng degrees of
// longitude and lat degrees of latitude on every side. No wrap-around
// handling is applied at the antimeridian or poles.
func (x BoundingBox) ExtendDegree(lng float64, lat float64) *BoundingBox {
	minLng := x.LeftBottom.Longitude - lng
	minLat := x.LeftBottom.Latitude - lat
	maxLng := x.RightTop.Longitude + lng
	maxLat := x.RightTop.Latitude + lat
	return &BoundingBox{LeftBottom: &LngLat{Longitude: minLng, Latitude: minLat},
		RightTop: &LngLat{Longitude: maxLng, Latitude: maxLat}}
} | go/pkg/mojo/geom/bounding_box.go | 0.875401 | 0.553324 | bounding_box.go | starcoder |
// x2j_valuesAt.go: Extract values from an arbitrary XML doc that are at same level as "key".
// Tag path can include wildcard characters.
package x2j
import (
"strings"
)
// ------------------- sweep up everything for some point in the node tree ---------------------
// ValuesAtTagPath - deliver all values at the same level of the document as the specified key.
// See ValuesAtKeyPath().
// If there are no values for the path 'nil' is returned.
// A return value of (nil, nil) means that there were no values and no errors parsing the doc.
// 'doc' is the XML document
// 'path' is a dot-separated path of tag nodes
// 'getAttrs' can be set 'true' to return attribute values for "*"-terminated path
// If a node is '*', then everything beyond is scanned for values.
// E.g., "doc.books' might return a single value 'book' of type []interface{}, but
// "doc.books.*" could return all the 'book' entries as []map[string]interface{}.
// "doc.books.*.author" might return all the 'author' tag values as []string - or
// "doc.books.*.author.lastname" might be required, depending on the schema.
func ValuesAtTagPath(doc, path string, getAttrs ...bool) ([]interface{}, error) {
	// Exactly one optional flag is honored; anything else leaves it false.
	getA := len(getAttrs) == 1 && getAttrs[0]
	m, err := DocToMap(doc)
	if err != nil {
		return nil, err
	}
	return ValuesAtKeyPath(m, path, getA), nil
}
// ValuesAtKeyPath - deliver all values at the same depth in a map[string]interface{} value
// If v := ValuesAtKeyPath(m,"x.y.z")
// then there exists a _,vv := range v
// such that v.(map[string]interface{})[z] == ValuesFromKeyPath(m,"x.y.z")
// If there are no values for the path 'nil' is returned.
// 'm' is the map to be walked
// 'path' is a dot-separated path of key values
// 'getAttrs' can be set 'true' to return attribute values for "*"-terminated path
// If a node is '*', then everything beyond is walked.
// E.g., see ValuesFromTagPath documentation.
func ValuesAtKeyPath(m map[string]interface{}, path string, getAttrs ...bool) []interface{} {
	var a bool
	if len(getAttrs) == 1 {
		a = getAttrs[0]
	}
	keys := strings.Split(path, ".")
	lenKeys := len(keys)
	ret := make([]interface{}, 0)
	if lenKeys > 1 {
		// use function in x2j_valuesFrom.go to collect the values at the
		// parent level of the final key
		valuesFromKeyPath(&ret, m, keys[:lenKeys-1], a)
		if len(ret) == 0 {
			return nil
		}
	} else {
		// single-segment path: the whole map is the only candidate
		ret = append(ret,interface{}(m))
	}
	// scan the value set and see if key occurs
	key := keys[lenKeys-1]
	// wildcard is special
	if key == "*" {
		return ret
	}
	for _, v := range ret {
		switch v.(type) {
		case map[string]interface{}:
			if _, ok := v.(map[string]interface{})[key]; ok {
				return ret
			}
		}
	}
	// no instance of key in penultimate value set
	return nil
} | vendor/github.com/clbanning/x2j/x2j_valuesAt.go | 0.509032 | 0.418162 | x2j_valuesAt.go | starcoder |
package grpc
import (
"fmt"
"reflect"
"github.com/opencontainers/runtime-spec/specs-go"
)
// copyValue recursively copies from into to, allocating destinations for
// pointers, dispatching on kind for composites, and converting scalars
// when the types differ but are convertible.
func copyValue(to, from reflect.Value) error {
	// A zero reflect.Value (e.g. an unset field) copies as a no-op.
	if !from.IsValid() {
		return nil
	}
	if to.Kind() == reflect.Ptr {
		// Destination is a pointer: allocate a fresh element and copy
		// into it, dereferencing the source if it is a pointer too.
		to.Set(reflect.New(to.Type().Elem()))
		if from.Kind() == reflect.Ptr {
			from = from.Elem()
		}
		return copyValue(to.Elem(), from)
	}
	if from.Kind() == reflect.Ptr {
		return copyValue(to, from.Elem())
	}
	switch to.Kind() {
	case reflect.Struct:
		return copyStructValue(to, from)
	case reflect.Slice:
		return copySliceValue(to, from)
	case reflect.Map:
		return copyMapValue(to, from)
	default:
		// Non-pointer scalar: the leaf of the recursion.
		if from.Type() == to.Type() {
			to.Set(from)
			return nil
		}
		if from.Type().ConvertibleTo(to.Type()) {
			to.Set(from.Convert(to.Type()))
			return nil
		}
		return fmt.Errorf("Can not convert %v to %v", from.Type(), to.Type())
	}
}
// copyMapValue deep-copies map from into map to, element by element.
// Both arguments must be maps. The previous guard used &&, so a single
// non-map argument slipped past it and caused a reflect panic further
// down; the || guard turns that into a clean error.
func copyMapValue(to, from reflect.Value) error {
	if to.Kind() != reflect.Map || from.Kind() != reflect.Map {
		return fmt.Errorf("Can only copy maps into maps")
	}
	to.Set(reflect.MakeMap(to.Type()))
	for _, k := range from.MapKeys() {
		newValue := reflect.New(to.Type().Elem())
		if err := copyValue(newValue.Elem(), from.MapIndex(k)); err != nil {
			return err
		}
		to.SetMapIndex(k, newValue.Elem())
	}
	return nil
}
// copySliceValue deep-copies slice from into slice to, element by element.
// Both arguments must be slices. The previous guard used &&, so a single
// non-slice argument slipped past it and caused a reflect panic further
// down; the || guard turns that into a clean error.
func copySliceValue(to, from reflect.Value) error {
	if to.Kind() != reflect.Slice || from.Kind() != reflect.Slice {
		return fmt.Errorf("Can only copy slices into slices")
	}
	sliceLen := from.Len()
	to.Set(reflect.MakeSlice(to.Type(), sliceLen, sliceLen))
	for j := 0; j < sliceLen; j++ {
		if err := copyValue(to.Index(j), from.Index(j)); err != nil {
			return err
		}
	}
	return nil
}
// copyStructSkipField reports whether the destination struct type is one
// that must not be copied: the Solaris and Windows sections, in both
// their gRPC and OCI flavors.
func copyStructSkipField(to, from reflect.Value) bool {
	skipped := []reflect.Type{
		reflect.TypeOf(Solaris{}),
		reflect.TypeOf(Windows{}),
		reflect.TypeOf(specs.Solaris{}),
		reflect.TypeOf(specs.Windows{}),
	}
	toType := to.Type()
	for _, t := range skipped {
		if toType == t {
			return true
		}
	}
	return false
}
// structFieldName returns the name of the index-th field of struct value v.
func structFieldName(v reflect.Value, index int) (string, error) {
	if v.Kind() == reflect.Struct {
		return v.Type().Field(index).Name, nil
	}
	return "", fmt.Errorf("Can only infer field name from structs")
}
// isEmbeddedStruct reports whether the index-th field of struct value v is
// an anonymous (embedded) field. Non-structs and out-of-range indexes
// yield false.
func isEmbeddedStruct(v reflect.Value, index int) bool {
	if v.Kind() == reflect.Struct && index <= v.Type().NumField()-1 {
		return v.Type().Field(index).Anonymous
	}
	return false
}
// findStructField returns the field of struct value v whose name matches
// name, or an error when v is not a struct or the field does not exist.
func findStructField(v reflect.Value, name string) (reflect.Value, error) {
	if v.Kind() != reflect.Struct {
		return reflect.Value{}, fmt.Errorf("Can only infer field name from structs")
	}
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		if t.Field(i).Name == name {
			return v.Field(i), nil
		}
	}
	return reflect.Value{}, fmt.Errorf("Could not find field %s", name)
}
// copyStructValue copies struct from into struct to field by field,
// matching fields by name. Embedded fields on either side are flattened
// by recursing with the embedded value against the full opposite struct,
// so fields of an embedded type can be matched against the parent.
// Fields that exist on only one side are silently skipped.
//
// NOTE(review): the guard rejects the copy only when BOTH values are
// non-structs; a struct/non-struct pair falls through and is effectively
// skipped field by field — confirm this is intentional before tightening.
func copyStructValue(to, from reflect.Value) error {
	if to.Kind() != reflect.Struct && from.Kind() != reflect.Struct {
		return fmt.Errorf("Can only copy structs into structs")
	}
	if copyStructSkipField(to, from) {
		return nil
	}
	for i := 0; i < to.NumField(); i++ {
		switch {
		case isEmbeddedStruct(to, i):
			// Copy the parent's fields into the embedded destination.
			if err := copyStructValue(to.Field(i), from); err != nil {
				return err
			}
		case isEmbeddedStruct(from, i):
			// Copy the embedded source into the full destination.
			if err := copyStructValue(to, from.Field(i)); err != nil {
				return err
			}
		default:
			name, err := structFieldName(to, i)
			if err != nil {
				return err
			}
			src, err := findStructField(from, name)
			if err != nil {
				// The field may legitimately be absent on the source
				// (optionally embedded layouts differ); skip it.
				continue
			}
			if err := copyValue(to.Field(i), src); err != nil {
				return err
			}
		}
	}
	return nil
}
// copyStruct reflectively copies *from into *to. Both arguments must be
// pointers to structs. Panics raised by the reflect package during the
// copy are recovered and surfaced as the returned error.
func copyStruct(to interface{}, from interface{}) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = r.(error)
		}
	}()
	dst := reflect.ValueOf(to)
	src := reflect.ValueOf(from)
	if dst.Kind() != reflect.Ptr || dst.Elem().Kind() != reflect.Struct ||
		src.Kind() != reflect.Ptr || src.Elem().Kind() != reflect.Struct {
		return fmt.Errorf("Arguments must be pointers to structures")
	}
	return copyStructValue(dst.Elem(), src.Elem())
}
// OCItoGRPC converts an OCI runtime spec into its gRPC representation via
// reflective field copy.
func OCItoGRPC(ociSpec *specs.Spec) (*Spec, error) {
	s := &Spec{}
	err := copyStruct(s, ociSpec)
	return s, err
}
// GRPCtoOCI converts a gRPC Spec into its OCI runtime-spec representation
// via reflective field copy.
func GRPCtoOCI(grpcSpec *Spec) (*specs.Spec, error) {
	s := &specs.Spec{}
	err := copyStruct(s, grpcSpec)
	return s, err
}
// ProcessOCItoGRPC converts an OCI runtime-spec Process into its gRPC
// representation via reflective field copy.
func ProcessOCItoGRPC(ociProcess *specs.Process) (*Process, error) {
	s := &Process{}
	err := copyStruct(s, ociProcess)
	return s, err
}
// ProcessGRPCtoOCI converts a gRPC Process into its OCI runtime-spec
// representation via reflective field copy.
func ProcessGRPCtoOCI(grpcProcess *Process) (*specs.Process, error) {
	s := &specs.Process{}
	err := copyStruct(s, grpcProcess)
	return s, err
} | vendor/github.com/kata-containers/agent/protocols/grpc/utils.go | 0.683842 | 0.452173 | utils.go | starcoder |
package cwe
var data = map[string]*Weakness{
"118": {
ID: "118",
Description: "The software does not restrict or incorrectly restricts operations within the boundaries of a resource that is accessed using an index or pointer, such as memory or files.",
Name: "Incorrect Access of Indexable Resource ('Range Error')",
},
"190": {
ID: "190",
Description: "The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control.",
Name: "Integer Overflow or Wraparound",
},
"200": {
ID: "200",
Description: "The product exposes sensitive information to an actor that is not explicitly authorized to have access to that information.",
Name: "Exposure of Sensitive Information to an Unauthorized Actor",
},
"22": {
ID: "22",
Description: "The software uses external input to construct a pathname that is intended to identify a file or directory that is located underneath a restricted parent directory, but the software does not properly neutralize special elements within the pathname that can cause the pathname to resolve to a location that is outside of the restricted directory.",
Name: "Improper Limitation of a Pathname to a Restricted Directory ('Path Traversal')",
},
"242": {
ID: "242",
Description: "The program calls a function that can never be guaranteed to work safely.",
Name: "Use of Inherently Dangerous Function",
},
"276": {
ID: "276",
Description: "During installation, installed file permissions are set to allow anyone to modify those files.",
Name: "Incorrect Default Permissions",
},
"295": {
ID: "295",
Description: "The software does not validate, or incorrectly validates, a certificate.",
Name: "Improper Certificate Validation",
},
"310": {
ID: "310",
Description: "Weaknesses in this category are related to the design and implementation of data confidentiality and integrity. Frequently these deal with the use of encoding techniques, encryption libraries, and hashing algorithms. The weaknesses in this category could lead to a degradation of the quality data if they are not addressed.",
Name: "Cryptographic Issues",
},
"322": {
ID: "322",
Description: "The software performs a key exchange with an actor without verifying the identity of that actor.",
Name: "Key Exchange without Entity Authentication",
},
"326": {
ID: "326",
Description: "The software stores or transmits sensitive data using an encryption scheme that is theoretically sound, but is not strong enough for the level of protection required.",
Name: "Inadequate Encryption Strength",
},
"327": {
ID: "327",
Description: "The use of a broken or risky cryptographic algorithm is an unnecessary risk that may result in the exposure of sensitive information.",
Name: "Use of a Broken or Risky Cryptographic Algorithm",
},
"338": {
ID: "338",
Description: "The product uses a Pseudo-Random Number Generator (PRNG) in a security context, but the PRNG's algorithm is not cryptographically strong.",
Name: "Use of Cryptographically Weak Pseudo-Random Number Generator (PRNG)",
},
"377": {
ID: "377",
Description: "Creating and using insecure temporary files can leave application and system data vulnerable to attack.",
Name: "Insecure Temporary File",
},
"409": {
ID: "409",
Description: "The software does not handle or incorrectly handles a compressed input with a very high compression ratio that produces a large output.",
Name: "Improper Handling of Highly Compressed Data (Data Amplification)",
},
"703": {
ID: "703",
Description: "The software does not properly anticipate or handle exceptional conditions that rarely occur during normal operation of the software.",
Name: "Improper Check or Handling of Exceptional Conditions",
},
"78": {
ID: "78",
Description: "The software constructs all or part of an OS command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended OS command when it is sent to a downstream component.",
Name: "Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')",
},
"79": {
ID: "79",
Description: "The software does not neutralize or incorrectly neutralizes user-controllable input before it is placed in output that is used as a web page that is served to other users.",
Name: "Improper Neutralization of Input During Web Page Generation ('Cross-site Scripting')",
},
"798": {
ID: "798",
Description: "The software contains hard-coded credentials, such as a password or cryptographic key, which it uses for its own inbound authentication, outbound communication to external components, or encryption of internal data.",
Name: "Use of Hard-coded Credentials",
},
"88": {
ID: "88",
Description: "The software constructs a string for a command to executed by a separate component\nin another control sphere, but it does not properly delimit the\nintended arguments, options, or switches within that command string.",
Name: "Improper Neutralization of Argument Delimiters in a Command ('Argument Injection')",
},
"89": {
ID: "89",
Description: "The software constructs all or part of an SQL command using externally-influenced input from an upstream component, but it does not neutralize or incorrectly neutralizes special elements that could modify the intended SQL command when it is sent to a downstream component.",
Name: "Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')",
},
}
// Get retrieves the CWE weakness registered under id.
// It returns nil when the id is unknown (or maps to a nil entry).
func Get(id string) *Weakness {
	weakness, ok := data[id]
	if ok && weakness != nil {
		return weakness
	}
	return nil
} | cwe/data.go | 0.663124 | 0.667415 | data.go | starcoder |
package tracetranslator
import (
"encoding/binary"
"go.opentelemetry.io/collector/consumer/pdata"
)
// UInt64ToTraceID takes a two uint64 representation of a TraceID and
// converts it to a pdata.TraceID representation.
// (The previous comment named the wrong function and return type.)
func UInt64ToTraceID(high, low uint64) pdata.TraceID {
	traceID := [16]byte{}
	binary.BigEndian.PutUint64(traceID[:8], high)
	binary.BigEndian.PutUint64(traceID[8:], low)
	return pdata.NewTraceID(traceID)
}
// UInt64ToByteTraceID takes a two uint64 representation of a TraceID and
// converts it to a big-endian [16]byte representation (high first).
func UInt64ToByteTraceID(high, low uint64) [16]byte {
	var traceID [16]byte
	binary.BigEndian.PutUint64(traceID[0:8], high)
	binary.BigEndian.PutUint64(traceID[8:16], low)
	return traceID
}
// Int64ToTraceID takes a two int64 representation of a TraceID and
// converts it to a pdata.TraceID representation.
// (The previous comment named the wrong function and return type.)
func Int64ToTraceID(high, low int64) pdata.TraceID {
	return UInt64ToTraceID(uint64(high), uint64(low))
}
// Int64ToByteTraceID takes a two int64 representation of a TraceID and
// converts it to a big-endian [16]byte representation.
func Int64ToByteTraceID(high, low int64) [16]byte {
	return UInt64ToByteTraceID(uint64(high), uint64(low))
}
// BytesToUInt64TraceID takes a [16]byte representation of a TraceID and
// converts it to a (high, low) uint64 pair, big-endian.
func BytesToUInt64TraceID(traceID [16]byte) (uint64, uint64) {
	high := binary.BigEndian.Uint64(traceID[0:8])
	low := binary.BigEndian.Uint64(traceID[8:16])
	return high, low
}
// BytesToInt64TraceID takes a [16]byte representation of a TraceID and
// converts it to a (high, low) int64 pair via an unchecked bit reinterpretation.
func BytesToInt64TraceID(traceID [16]byte) (int64, int64) {
	traceIDHigh, traceIDLow := BytesToUInt64TraceID(traceID)
	return int64(traceIDHigh), int64(traceIDLow)
}
// TraceIDToUInt64Pair takes a pdata.TraceID and converts it to a pair of
// uint64 representation (high, low).
func TraceIDToUInt64Pair(traceID pdata.TraceID) (uint64, uint64) {
	return BytesToUInt64TraceID(traceID.Bytes())
}
// UInt64ToByteSpanID takes a uint64 representation of a SpanID and
// converts it to a big-endian [8]byte representation.
func UInt64ToByteSpanID(id uint64) [8]byte {
	var spanID [8]byte
	binary.BigEndian.PutUint64(spanID[:], id)
	return spanID
}
// UInt64ToSpanID takes a uint64 representation of a SpanID and
// converts it to a pdata.SpanID representation (big-endian bytes).
func UInt64ToSpanID(id uint64) pdata.SpanID {
	return pdata.NewSpanID(UInt64ToByteSpanID(id))
}
// Int64ToByteSpanID takes a int64 representation of a SpanID and
// converts it to a big-endian [8]byte representation.
func Int64ToByteSpanID(id int64) [8]byte {
	return UInt64ToByteSpanID(uint64(id))
}
// Int64ToSpanID takes a int64 representation of a SpanID and
// converts it to a pdata.SpanID representation.
// (The previous comment named the wrong function and return type.)
func Int64ToSpanID(id int64) pdata.SpanID {
	return UInt64ToSpanID(uint64(id))
}
// BytesToUInt64SpanID takes a [8]byte representation of a SpanID and
// converts it to a uint64 representation (big-endian).
func BytesToUInt64SpanID(b [8]byte) uint64 {
	return binary.BigEndian.Uint64(b[:])
}
// BytesToInt64SpanID takes a [8]byte representation of a SpanID and
// converts it to a int64 representation via bit reinterpretation.
func BytesToInt64SpanID(b [8]byte) int64 {
	return int64(BytesToUInt64SpanID(b))
} | translator/trace/big_endian_converter.go | 0.741861 | 0.683938 | big_endian_converter.go | starcoder |
package internal
import (
"fmt"
"time"
)
// Validator is used for testing.
type Validator interface {
	// Error records a validation failure; it matches the signature of
	// (*testing.T).Error, so a *testing.T can be passed directly.
	Error(...interface{})
}
// validateStringField reports an error on v when the two string values of
// fieldName differ; equal values pass silently.
func validateStringField(v Validator, fieldName, v1, v2 string) {
	if v1 == v2 {
		return
	}
	v.Error(fieldName, v1, v2)
}
// WantMetric is a metric expectation. If Data is nil, then any data values are
// acceptable.
type WantMetric struct {
	Name string
	Scope string
	Forced bool
	// Data, when non-nil, holds the six metric values in order:
	// countSatisfied, totalTolerated, exclusiveFailed, min, max, sumSquares
	// (see ExpectMetrics).
	Data []float64
}

// WantCustomEvent is a custom event expectation.
type WantCustomEvent struct {
	Type string
	Params map[string]interface{}
}

// WantError is a traced error expectation.
type WantError struct {
	TxnName string
	Msg string
	Klass string
	Caller string
	URL string
	UserAttributes map[string]interface{}
	AgentAttributes map[string]interface{}
}

// WantErrorEvent is an error event expectation.
type WantErrorEvent struct {
	TxnName string
	Msg string
	Klass string
	Queuing bool
	ExternalCallCount uint64
	DatastoreCallCount uint64
	UserAttributes map[string]interface{}
	AgentAttributes map[string]interface{}
}

// WantTxnEvent is a transaction event expectation.
type WantTxnEvent struct {
	Name string
	Zone string
	Queuing bool
	ExternalCallCount uint64
	DatastoreCallCount uint64
	UserAttributes map[string]interface{}
	AgentAttributes map[string]interface{}
}

// WantTxnTrace is a transaction trace expectation.
type WantTxnTrace struct {
	MetricName string
	CleanURL string
	NumSegments int
	UserAttributes map[string]interface{}
	AgentAttributes map[string]interface{}
}
// Expect exposes methods that allow for testing whether the correct data was
// captured. Each method compares recorded data against the given
// expectations, reporting mismatches through the Validator.
type Expect interface {
	ExpectCustomEvents(t Validator, want []WantCustomEvent)
	ExpectErrors(t Validator, want []WantError)
	ExpectErrorEvents(t Validator, want []WantErrorEvent)
	ExpectTxnEvents(t Validator, want []WantTxnEvent)
	ExpectMetrics(t Validator, want []WantMetric)
	ExpectTxnTraces(t Validator, want []WantTxnTrace)
}
// expectMetricField reports an error on t when a single metric data field
// does not match its expected value.
func expectMetricField(t Validator, id metricID, v1, v2 float64, fieldName string) {
	if v1 == v2 {
		return
	}
	t.Error("metric fields do not match", id, v1, v2, fieldName)
}
// ExpectMetrics allows testing of metrics. It checks, in order: that the
// counts match, that every expected metric exists with the right forced
// flag and (when WantMetric.Data is non-nil) the right six data values,
// and finally that the table contains no metrics beyond those expected.
func ExpectMetrics(t Validator, mt *metricTable, expect []WantMetric) {
	if len(mt.metrics) != len(expect) {
		t.Error("metric counts do not match expectations", len(mt.metrics), len(expect))
	}
	expectedIds := make(map[metricID]struct{})
	for _, e := range expect {
		id := metricID{Name: e.Name, Scope: e.Scope}
		expectedIds[id] = struct{}{}
		m := mt.metrics[id]
		if nil == m {
			t.Error("unable to find metric", id)
			continue
		}
		if e.Forced != (forced == m.forced) {
			t.Error("metric forced incorrect", e.Forced, m.forced, id)
		}
		// Data order: countSatisfied, totalTolerated, exclusiveFailed,
		// min, max, sumSquares.
		if nil != e.Data {
			expectMetricField(t, id, e.Data[0], m.data.countSatisfied, "countSatisfied")
			expectMetricField(t, id, e.Data[1], m.data.totalTolerated, "totalTolerated")
			expectMetricField(t, id, e.Data[2], m.data.exclusiveFailed, "exclusiveFailed")
			expectMetricField(t, id, e.Data[3], m.data.min, "min")
			expectMetricField(t, id, e.Data[4], m.data.max, "max")
			expectMetricField(t, id, e.Data[5], m.data.sumSquares, "sumSquares")
		}
	}
	// Flag metrics that were recorded but not expected.
	for id := range mt.metrics {
		if _, ok := expectedIds[id]; !ok {
			t.Error("expected metrics does not contain", id.Name, id.Scope)
		}
	}
}
// expectAttributes compares the attributes in exists against expect,
// reporting length differences, missing keys, and value mismatches.
// Values are compared by their fmt.Sprint string form.
func expectAttributes(v Validator, exists map[string]interface{}, expect map[string]interface{}) {
	// TODO: This params comparison can be made smarter: Alert differences
	// based on sub/super set behavior.
	if len(expect) != len(exists) {
		v.Error("attributes length difference", exists, expect)
		return
	}
	for key, want := range expect {
		got, present := exists[key]
		if !present {
			v.Error("missing key", key)
			continue
		}
		gotStr := fmt.Sprint(got)
		wantStr := fmt.Sprint(want)
		if gotStr != wantStr {
			v.Error("value difference", fmt.Sprintf("key=%s", key),
				gotStr, wantStr)
		}
	}
}
// expectCustomEvent checks a single harvested custom event against expect:
// its type, a sane timestamp (within an hour of now), and its parameters.
func expectCustomEvent(v Validator, event *CustomEvent, expect WantCustomEvent) {
	if expect.Type != event.eventType {
		v.Error("type mismatch", event.eventType, expect.Type)
	}
	now := time.Now()
	// The event should have been created recently; allow generous slack.
	if diff := absTimeDiff(now, event.timestamp); diff > time.Hour {
		v.Error("large timestamp difference", event.eventType, now, event.timestamp)
	}
	expectAttributes(v, event.truncatedParams, expect.Params)
}
// ExpectCustomEvents allows testing of custom events.
func ExpectCustomEvents(v Validator, cs *customEvents, expect []WantCustomEvent) {
	actual := cs.events.events
	if len(actual) != len(expect) {
		v.Error("number of custom events does not match", len(actual),
			len(expect))
		return
	}
	for i, want := range expect {
		event, ok := actual[i].jsonWriter.(*CustomEvent)
		if !ok {
			v.Error("wrong custom event")
			continue
		}
		expectCustomEvent(v, event, want)
	}
}
// expectErrorEvent checks a single harvested error event against expect.
func expectErrorEvent(v Validator, err *ErrorEvent, expect WantErrorEvent) {
	validateStringField(v, "txnName", expect.TxnName, err.TxnName)
	validateStringField(v, "klass", expect.Klass, err.Klass)
	validateStringField(v, "msg", expect.Msg, err.Msg)
	// Queuing is expected as a bool: any non-zero queuing value counts.
	if (err.Queuing != 0) != expect.Queuing {
		v.Error("queuing", err.Queuing)
	}
	if expect.UserAttributes != nil {
		expectAttributes(v, getUserAttributes(err.Attrs, destError), expect.UserAttributes)
	}
	if expect.AgentAttributes != nil {
		expectAttributes(v, getAgentAttributes(err.Attrs, destError), expect.AgentAttributes)
	}
	if err.externalCallCount != expect.ExternalCallCount {
		v.Error("external call count", expect.ExternalCallCount, err.externalCallCount)
	}
	// A zero call count must coincide with a zero duration, and vice versa.
	if (expect.ExternalCallCount == 0) != (err.externalDuration == 0) {
		v.Error("external duration", err.externalDuration)
	}
	if err.datastoreCallCount != expect.DatastoreCallCount {
		v.Error("datastore call count", expect.DatastoreCallCount, err.datastoreCallCount)
	}
	if (expect.DatastoreCallCount == 0) != (err.datastoreDuration == 0) {
		v.Error("datastore duration", err.datastoreDuration)
	}
}
// ExpectErrorEvents allows testing of error events.
func ExpectErrorEvents(v Validator, events *errorEvents, expect []WantErrorEvent) {
	if len(events.events.events) != len(expect) {
		// Fixed copy-paste bug: the message previously said "custom
		// events" although this function validates error events.
		v.Error("number of error events does not match",
			len(events.events.events), len(expect))
		return
	}
	for i, e := range expect {
		event, ok := events.events.events[i].jsonWriter.(*ErrorEvent)
		if !ok {
			v.Error("wrong error event")
		} else {
			expectErrorEvent(v, event, e)
		}
	}
}
// expectTxnEvent checks a single harvested transaction event against
// expect: name, apdex zone, duration, queuing, attributes, and external /
// datastore call counts and durations.
func expectTxnEvent(v Validator, e *TxnEvent, expect WantTxnEvent) {
	validateStringField(v, "apdex zone", expect.Zone, e.Zone.label())
	validateStringField(v, "name", expect.Name, e.Name)
	// Every real transaction should have taken some time.
	if e.Duration == 0 {
		v.Error("zero duration", e.Duration)
	}
	// Queuing is expected as a bool: any non-zero queuing value counts.
	if (e.Queuing != 0) != expect.Queuing {
		v.Error("queuing", e.Queuing)
	}
	if expect.UserAttributes != nil {
		expectAttributes(v, getUserAttributes(e.Attrs, destTxnEvent), expect.UserAttributes)
	}
	if expect.AgentAttributes != nil {
		expectAttributes(v, getAgentAttributes(e.Attrs, destTxnEvent), expect.AgentAttributes)
	}
	if e.externalCallCount != expect.ExternalCallCount {
		v.Error("external call count", expect.ExternalCallCount, e.externalCallCount)
	}
	// A zero call count must coincide with a zero duration, and vice versa.
	if (expect.ExternalCallCount == 0) != (e.externalDuration == 0) {
		v.Error("external duration", e.externalDuration)
	}
	if e.datastoreCallCount != expect.DatastoreCallCount {
		v.Error("datastore call count", expect.DatastoreCallCount, e.datastoreCallCount)
	}
	if (expect.DatastoreCallCount == 0) != (e.datastoreDuration == 0) {
		v.Error("datastore duration", e.datastoreDuration)
	}
}
// ExpectTxnEvents allows testing of txn events.
func ExpectTxnEvents(v Validator, events *txnEvents, expect []WantTxnEvent) {
	actual := events.events.events
	if len(actual) != len(expect) {
		v.Error("number of txn events does not match",
			len(actual), len(expect))
		return
	}
	for i, want := range expect {
		event, ok := actual[i].jsonWriter.(*TxnEvent)
		if !ok {
			v.Error("wrong txn event")
			continue
		}
		expectTxnEvent(v, event, want)
	}
}
// expectError checks a single traced error against expect: caller,
// transaction name, class, message, request URI, and (when non-nil in
// expect) user and agent attributes.
func expectError(v Validator, err *harvestError, expect WantError) {
	validateStringField(v, "caller", expect.Caller, topCallerNameBase(err.TxnError.Stack))
	validateStringField(v, "txnName", expect.TxnName, err.txnName)
	validateStringField(v, "klass", expect.Klass, err.TxnError.Klass)
	validateStringField(v, "msg", expect.Msg, err.TxnError.Msg)
	validateStringField(v, "URL", expect.URL, err.requestURI)
	if expect.UserAttributes != nil {
		expectAttributes(v, getUserAttributes(err.attrs, destError), expect.UserAttributes)
	}
	if expect.AgentAttributes != nil {
		expectAttributes(v, getAgentAttributes(err.attrs, destError), expect.AgentAttributes)
	}
}
// ExpectErrors allows testing of errors.
func ExpectErrors(v Validator, errors *harvestErrors, expect []WantError) {
	if len(errors.errors) != len(expect) {
		v.Error("number of errors mismatch", len(errors.errors), len(expect))
		return
	}
	for i := range expect {
		expectError(v, errors.errors[i], expect[i])
	}
}
// expectTxnTrace checks a single harvested transaction trace against
// expect: duration, metric name, cleaned URL, attributes, and segment
// count.
func expectTxnTrace(v Validator, trace *HarvestTrace, expect WantTxnTrace) {
	if trace.Duration == 0 {
		v.Error("zero trace duration")
	}
	validateStringField(v, "metric name", expect.MetricName, trace.MetricName)
	validateStringField(v, "request url", expect.CleanURL, trace.CleanURL)
	if expect.UserAttributes != nil {
		expectAttributes(v, getUserAttributes(trace.Attrs, destTxnTrace), expect.UserAttributes)
	}
	if expect.AgentAttributes != nil {
		expectAttributes(v, getAgentAttributes(trace.Attrs, destTxnTrace), expect.AgentAttributes)
	}
	if got := len(trace.Trace.nodes); got != expect.NumSegments {
		v.Error("wrong number of segments", expect.NumSegments, got)
	}
}
// ExpectTxnTraces allows testing of transaction traces.
func ExpectTxnTraces(v Validator, traces *harvestTraces, want []WantTxnTrace) {
if len(want) == 0 {
if nil != traces.trace {
v.Error("trace exists when not expected")
}
} else if len(want) > 1 {
v.Error("too many traces expected")
} else {
if nil == traces.trace {
v.Error("missing expected trace")
} else {
expectTxnTrace(v, traces.trace, want[0])
}
}
} | vendor/github.com/newrelic/go-agent/internal/expect.go | 0.531939 | 0.400163 | expect.go | starcoder |