code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package main import ( "fmt" "math" "github.com/jakubDoka/mlok/ggl" "github.com/jakubDoka/mlok/logic/frame" "github.com/jakubDoka/mlok/logic/spatial" "github.com/jakubDoka/mlok/mat" "github.com/jakubDoka/mlok/mat/angle" "github.com/jakubDoka/mlok/mat/rgba" _ "image/png" ) const ( RepelCof = 7.5 AlignCof = 0.045 CohesionCof = 0.03 MaxSpeed, MinSpeed = 25.0, 150.0 Sight = 10 ) var Scale = mat.V(.5, .5) func main() { win, err := ggl.NWindow(nil) if err != nil { panic(err) } t, err := ggl.LoadTexture("fish.png") if err != nil { panic(err) } b := ggl.Batch{ Texture: t, } // calculating how many tiles MinHash needs to spam whole screen size := win.Frame().Size().Divided(Sight).Point() e := BoidEngine{ Sp: ggl.NSprite(t.Frame()), Mh: spatial.NMinHash(size.X+1, size.Y+1, mat.V(Sight, Sight)), } d := frame.Delta{}.Init() // We have to shift the camera as hasher has origin in V(0 0) and grows positively. // Thats just how hashing is designed. win.SetCamera(mat.IM.Move(mat.V(-500, -300))) // everithing about camera is inverted e.Spawn(10000, win.Rect()) // rect returns viewport rect in world coordinates useless := 0 for !win.ShouldClose() { useless += e.Update(d.Tick(), win.Rect()) d.CustomLog(1, func() { fmt.Println("we made", useless, "iterations in last second") useless = 0 }) win.Clear(rgba.DarkBlue) e.Draw(&b) b.Draw(win) b.Clear() win.Update() } } // Draw all boids func (b *BoidEngine) Draw(t ggl.Target) { for _, boid := range b.Bs { b.Sp.Draw(t, mat.M(boid.Position, Scale, boid.Velocity.Angle()), rgba.White) } } type Boid struct { Position, Velocity mat.Vec Adders mat.Point } type BoidEngine struct { Bs []Boid Sp ggl.Sprite Buff []int Mh spatial.MinHash } func (b *BoidEngine) Spawn(amount int, bounds mat.AABB) { // This just showcases how to remove objects for i, boid := range b.Bs { // Removing of object assumes addres is correct and object with id i and group 0 do exist if !b.Mh.Remove(boid.Adders, i, 0) { panic("removal of object with incorrect addres or removal of 
nonexistant object") } } b.Bs = make([]Boid, amount) for i := range b.Bs { boid := &b.Bs[i] boid.Position = mat.V( mat.Range(bounds.Min.X, bounds.Max.X), mat.Range(bounds.Min.Y, bounds.Max.Y), ) boid.Velocity = mat.Rad(angle.Random(), 100) // We are inserting new object to hasher. All that really gets stored is id though. // Addres will be modified, last argument is group, in case you need to detect collisions // between multiple groups, hasher has optimized solution for this. b.Mh.Insert(&boid.Adders, boid.Position, i, 0) } } // Update velocity and position, it also retruns amount of useles iterations func (b *BoidEngine) Update(delta float64, bounds mat.AABB) int { var useless int for i := range b.Bs { boid := &b.Bs[i] count := 1.0 // tree rules var repel mat.Vec cohesion := boid.Position alignmant := boid.Velocity // Now we are querying for ids that are nearby the boid (all tiles that intersect rectangle) b.Buff = b.Mh.Query(mat.Square(boid.Position, Sight), b.Buff[:0], 0, true) for _, id := range b.Buff { if i == id { continue } other := &b.Bs[id] dif := other.Position.To(boid.Position) len2 := dif.Len2() if math.Sqrt(len2) > Sight { useless++ continue } count++ repel.AddE(dif.Divided(len2)) alignmant.AddE(other.Velocity) cohesion.AddE(other.Position) } cohesion = boid.Position.To(cohesion.Divided(count)).Scaled(CohesionCof) alignmant = alignmant.Scaled(AlignCof / count) if !bounds.Contains(boid.Position) { // so that boids will come back repel.AddE(boid.Position.To(bounds.Center()).Normalized().Scaled(.1)) } repel = repel.Scaled(RepelCof) boid.Velocity.AddE(cohesion.Add(alignmant).Add(repel)) boid.Velocity = mat.Rad(boid.Velocity.Angle(), mat.Clamp(boid.Velocity.Len(), 0, MaxSpeed)) boid.Position.AddE(boid.Velocity.Scaled(delta)) // Lastly we are updating the boid addres, this is no-op if addres does not change, // group and id has to be provided b.Mh.Update(&boid.Adders, boid.Position, i, 0) } return useless } /*import ( "fmt" "math" 
"github.com/jakubDoka/mlok/ggl" "github.com/jakubDoka/mlok/logic/frame" "github.com/jakubDoka/mlok/logic/spatial" "github.com/jakubDoka/mlok/mat" "github.com/jakubDoka/mlok/mat/angle" "github.com/jakubDoka/mlok/mat/rgba" _ "image/png" ) const ( RepelCof = 7.5 AlignCof = 0.045 CohesionCof = 0.03 MaxSpeed, MinSpeed = 25.0, 1000.0 Sight = 10 ) var Scale = mat.V(.5, .5) func main() { win, err := ggl.NWindow(nil) if err != nil { panic(err) } t, err := ggl.LoadTexture("fish.png") if err != nil { panic(err) } b := ggl.Batch{ Texture: t, } // calculating how many tiles MinHash needs to spam whole screen size := win.Frame().Size().Divided(Sight).Point() e := BoidEngine{ Sp: ggl.NSprite(t.Frame()), Mh: spatial.NMinHash(size.X+1, size.Y+1, mat.V(Sight, Sight)), } d := frame.Delta{}.Init() // We have to shift the camera as hasher has origin in V(0 0) and grows positively. // Thats just how hashing is designed. win.SetCamera(mat.IM.Move(mat.V(-500, -300))) // everithing about camera is inverted e.Spawn(10000, win.Rect()) // rect returns viewport rect in world coordinates fmt.Println(win.Rect()) useless := 0 for !win.ShouldClose() { useless += e.Update(d.Tick(), win.Rect()) d.CustomLog(1, func() { fmt.Println("we made", useless, "iterations in last second") useless = 0 }) win.Clear(rgba.DarkBlue) e.Draw(&b) b.Draw(win) b.Clear() win.Update() } } type BoidEngine struct { Bs []Boid Sp ggl.Sprite Buff []int Mh spatial.MinHash } // Spawn boids on random positions func (b *BoidEngine) Spawn(amount int, bounds mat.AABB) { for i, boid := range b.Bs { // Removing of object assumes addres is correct and object with id i and group 0 do exist if !b.Mh.Remove(boid.Adders, i, 0) { panic("removal of object with incorrect addres or removal of nonexistant object") } } b.Bs = make([]Boid, amount) for i := range b.Bs { boid := &b.Bs[i] boid.Position = mat.V( mat.Range(bounds.Min.X, bounds.Max.X), mat.Range(bounds.Min.Y, bounds.Max.Y), ) boid.Velocity = mat.Rad(angle.Random(), 100) // We are 
inserting new object to hasher. All that really gets stored is id though. // Addres will be modified, last argument is group, in case you need to detect collisions // between multiple groups, hasher has optimized solution for this. b.Mh.Insert(&boid.Adders, boid.Position, i, 0) } } // Draw all boids func (b *BoidEngine) Draw(t ggl.Target) { for _, boid := range b.Bs { b.Sp.Draw(t, mat.M(boid.Position, Scale, boid.Velocity.Angle()), rgba.White) } } // Update velocity and position, it also retruns amount of useles iterations func (b *BoidEngine) Update(delta float64, bounds mat.AABB) int { var useless int for i := range b.Bs { boid := &b.Bs[i] // tree rules var repel mat.Vec count := 1.0 cohesion := boid.Position alignmant := boid.Velocity // Now we are querying for ids that are nearby the boid b.Buff = b.Mh.Query(mat.Square(boid.Position, Sight), b.Buff[:0], 0, true) for _, id := range b.Buff { if i == id { continue } other := &b.Bs[id] dif := other.Position.To(boid.Position) len2 := dif.Len2() if math.Sqrt(len2) > Sight { useless++ continue } count++ repel.AddE(dif.Divided(len2)) alignmant.AddE(other.Velocity) cohesion.AddE(other.Position) } cohesion = boid.Position.To(cohesion.Divided(count)).Scaled(CohesionCof) alignmant = alignmant.Scaled(AlignCof / count) if !bounds.Contains(boid.Position) { // so that boids will come back repel.AddE(boid.Position.To(bounds.Center()).Normalized().Scaled(.1)) } repel = repel.Scaled(RepelCof) boid.Velocity.AddE(cohesion.Add(alignmant).Add(repel)) boid.Velocity = mat.Rad(boid.Velocity.Angle(), mat.Clamp(boid.Velocity.Len(), 0, MaxSpeed)) boid.Position.AddE(boid.Velocity.Scaled(delta)) // Lastly we are updating the boid addres, this is no-op if addres does not change b.Mh.Update(&boid.Adders, boid.Position, i, 0) } return useless } type Boid struct { Position, Velocity mat.Vec Adders mat.Point } */
examples/basics/hasher/main.go
0.568416
0.411495
main.go
starcoder
package quicksort import "math/rand" func quicksort(array []int) []int { if len(array) == 0 { return array } pivot := array[len(array)/2] less, equal, greater := []int{}, []int{}, []int{} for ind := range array { if array[ind] < pivot { less = append(less, array[ind]) } if array[ind] == pivot { equal = append(equal, array[ind]) } if array[ind] > pivot { greater = append(greater, array[ind]) } } // Joining three arrays together; in other languages, this is the same as // `quicksort(less) + equal + quicksort(greater)` return append(append(quicksort(less), equal...), quicksort(greater)...) } func partitionLomuto(array []int, low int, high int) int { if len(array) == 0 { panic("Error finding partition index: length of array is 0") } pivot := array[high] i := low for j := low; j < high; j++ { if array[j] <= pivot { array[i], array[j] = array[j], array[i] i++ } } array[i], array[high] = array[high], array[i] return i } func quicksortLomuto(array []int, low int, high int) { if low < high { p := partitionLomuto(array, low, high) quicksortLomuto(array, low, p-1) quicksortLomuto(array, p+1, high) } } func partitionHoare(array []int, low int, high int) int { if len(array) == 0 { panic("Error finding partition index: length of array is 0") } pivot := array[low] i, j := low-1, high+1 for { for { j-- if array[j] <= pivot { break } } for { i++ if array[i] >= pivot { break } } if i < j { array[i], array[j] = array[j], array[i] } else { return j } } } func quicksortHoare(array []int, low int, high int) { if low < high { p := partitionHoare(array, low, high) quicksortHoare(array, low, p) quicksortHoare(array, p+1, high) } } func quicksortRandom(array []int, low int, high int) { if low < high { pivotIndex := rand.Intn(high-low) + low array[pivotIndex], array[high] = array[high], array[pivotIndex] p := partitionLomuto(array, low, high) quicksortRandom(array, low, p-1) quicksortRandom(array, p+1, high) } } func partitionDutchFlag(array []int, low int, high int, pivotIndex int) (int, 
int) { if len(array) == 0 { panic("Error finding partition indices: length of array is 0") } pivot := array[pivotIndex] smaller, equal, larger := low, low, high for equal <= larger { if array[equal] < pivot { array[smaller], array[equal] = array[equal], array[smaller] smaller++ equal++ } else if array[equal] == pivot { equal++ } else { array[equal], array[larger] = array[larger], array[equal] larger-- } } return smaller, larger } func quicksortDutchFlag(array []int, low int, high int) { if low < high { pivotIndex := rand.Intn(high-low) + low p, q := partitionDutchFlag(array, low, high, pivotIndex) quicksortDutchFlag(array, low, p-1) quicksortDutchFlag(array, q+1, high) } }
Algorithms/Sorting/Quicksort/quicksort.go
0.662578
0.553928
quicksort.go
starcoder
package cursors // FieldType represents the primitive field data types available in tsm. type FieldType int const ( Float FieldType = iota // means the data type is a float Integer // means the data type is an integer Unsigned // means the data type is an unsigned integer Boolean // means the data type is a boolean String // means the data type is a string of text Undefined // means the data type in unknown or undefined ) type MeasurementField struct { Key string Type FieldType } type MeasurementFields struct { Fields []MeasurementField } type MeasurementFieldsIterator interface { // Next advances the iterator to the next value. It returns false // when there are no more values. Next() bool // Value returns the current value. Value() MeasurementFields Stats() CursorStats } // EmptyMeasurementFieldsIterator is an implementation of MeasurementFieldsIterator that returns // no values. var EmptyMeasurementFieldsIterator = &measurementFieldsIterator{} type measurementFieldsIterator struct{} func (m *measurementFieldsIterator) Next() bool { return false } func (m *measurementFieldsIterator) Value() MeasurementFields { return MeasurementFields{} } func (m *measurementFieldsIterator) Stats() CursorStats { return CursorStats{} } type MeasurementFieldsSliceIterator struct { f []MeasurementFields v MeasurementFields i int stats CursorStats } func NewMeasurementFieldsSliceIteratorWithStats(f []MeasurementFields, stats CursorStats) *MeasurementFieldsSliceIterator { return &MeasurementFieldsSliceIterator{f: f, stats: stats} } func (s *MeasurementFieldsSliceIterator) Next() bool { if s.i < len(s.f) { s.v = s.f[s.i] s.i++ return true } s.v = MeasurementFields{} return false } func (s *MeasurementFieldsSliceIterator) Value() MeasurementFields { return s.v } func (s *MeasurementFieldsSliceIterator) Stats() CursorStats { return s.stats } func (s *MeasurementFieldsSliceIterator) toSlice() []MeasurementFields { if s.i < len(s.f) { return s.f[s.i:] } return nil } // 
MeasurementFieldsIteratorFlatMap reads the remainder of i, flattening the results // to a single slice. func MeasurementFieldsIteratorFlatMap(i MeasurementFieldsIterator) []MeasurementField { if i == nil { return nil } var res []MeasurementField if si, ok := i.(*MeasurementFieldsSliceIterator); ok { s := si.toSlice() sz := 0 for i := range s { sz += len(s[i].Fields) } res = make([]MeasurementField, 0, sz) for i := range s { res = append(res, s[i].Fields...) } } else { for i.Next() { res = append(res, i.Value().Fields...) } } return res }
tsdb/cursors/schema.go
0.774328
0.49469
schema.go
starcoder
package bccsp const ( SM4 = "SM4" SM3 = "SM3" SM2 = "SM2" SM2ReRand="SM2ReRand" // HMACTruncated256 HMAC truncated at 256 bits. HMACTruncated256 = "HMAC_TRUNCATED_256" // HMAC keyed-hash message authentication code HMAC = "HMAC" // X509Certificate Label for X509 certificate related operation X509Certificate = "X509Certificate" ) // HMACTruncated256SM4DeriveKeyOpts contains options for HMAC truncated // at 256 bits key derivation. type HMACTruncated256SM4DeriveKeyOpts struct { Temporary bool Arg []byte } // Algorithm returns the key derivation algorithm identifier (to be used). func (opts *HMACTruncated256SM4DeriveKeyOpts) Algorithm() string { return HMACTruncated256 } // Ephemeral returns true if the key to generate has to be ephemeral, // false otherwise. func (opts *HMACTruncated256SM4DeriveKeyOpts) Ephemeral() bool { return opts.Temporary } // Argument returns the argument to be passed to the HMAC func (opts *HMACTruncated256SM4DeriveKeyOpts) Argument() []byte { return opts.Arg } // HMACDeriveKeyOpts contains options for HMAC key derivation. type HMACDeriveKeyOpts struct { Temporary bool Arg []byte } // Algorithm returns the key derivation algorithm identifier (to be used). func (opts *HMACDeriveKeyOpts) Algorithm() string { return HMAC } // Ephemeral returns true if the key to generate has to be ephemeral, // false otherwise. func (opts *HMACDeriveKeyOpts) Ephemeral() bool { return opts.Temporary } // Argument returns the argument to be passed to the HMAC func (opts *HMACDeriveKeyOpts) Argument() []byte { return opts.Arg } // X509PublicKeyImportOpts contains options for importing public keys from an x509 certificate type X509PublicKeyImportOpts struct { Temporary bool } // Algorithm returns the key importation algorithm identifier (to be used). func (opts *X509PublicKeyImportOpts) Algorithm() string { return X509Certificate } // Ephemeral returns true if the key to generate has to be ephemeral, // false otherwise. 
func (opts *X509PublicKeyImportOpts) Ephemeral() bool { return opts.Temporary }
bccsp/opts.go
0.755186
0.411406
opts.go
starcoder
package character import "github.com/genshinsim/gcsim/pkg/core" func (c *Tmpl) QueueParticle(src string, num int, ele core.EleType, delay int) { p := core.Particle{ Source: src, Num: num, Ele: ele, } c.AddTask(func() { c.Core.Energy.DistributeParticle(p) }, "particle", delay) } func (c *Tmpl) ConsumeEnergy(delay int) { if delay == 0 { c.Energy = 0 return } c.AddTask(func() { c.Energy = 0 }, "consume-energy", delay) } func (c *Tmpl) CurrentEnergy() float64 { return c.Energy } func (c *Tmpl) MaxEnergy() float64 { return c.EnergyMax } func (c *Tmpl) AddEnergy(src string, e float64) { preEnergy := c.Energy c.Energy += e if c.Energy > c.EnergyMax { c.Energy = c.EnergyMax } if c.Energy < 0 { c.Energy = 0 } c.Core.Events.Emit(core.OnEnergyChange, c, preEnergy, e, src) c.Core.Log.NewEvent("adding energy", core.LogEnergyEvent, c.Index, "rec'd", e, "post_recovery", c.Energy, "source", src, "max_energy", c.EnergyMax, ) } func (c *Tmpl) ReceiveParticle(p core.Particle, isActive bool, partyCount int) { var amt, er, r float64 r = 1.0 if !isActive { r = 1.0 - 0.1*float64(partyCount) } //recharge amount - particles: same = 3, non-ele = 2, diff = 1 //recharge amount - orbs: same = 9, non-ele = 6, diff = 3 (3x particles) switch { case p.Ele == c.Base.Element: amt = 3 case p.Ele == core.NoElement: amt = 2 default: amt = 1 } amt = amt * r //apply off field reduction //apply energy regen stat energyCalcModeBit := int8(0) if c.Core.Flags.EnergyCalcMode { energyCalcModeBit = 1 } er = c.Stat(core.ER) amt = amt * (1 + er*(1-float64(energyCalcModeBit))) * float64(p.Num) pre := c.Energy c.Energy += amt if c.Energy > c.EnergyMax { c.Energy = c.EnergyMax } c.Core.Events.Emit(core.OnEnergyChange, c, pre, amt, p.Source) c.Core.Log.NewEvent( "particle", core.LogEnergyEvent, c.Index, "source", p.Source, "count", p.Num, "ele", p.Ele, "ER", er, "is_active", isActive, "party_count", partyCount, "pre_recovery", pre, "amt", amt, "post_recovery", c.Energy, "max_energy", c.EnergyMax, ) }
internal/tmpl/character/energy.go
0.593491
0.45048
energy.go
starcoder
package eval import ( "reflect" "go/token" "errors" ) func evalBinaryExpr(binary *BinaryExpr, env Env) (r reflect.Value, err error) { if binary.IsConst() { return binary.Const(), nil } xexpr := binary.X yexpr := binary.Y op := binary.Op() var zt []reflect.Type if xexpr.IsConst() && xexpr.KnownType()[0].Kind() != reflect.Interface { zt = yexpr.KnownType() } else { zt = xexpr.KnownType() } var xs, ys []reflect.Value if xs, err = evalTypedExpr(xexpr, zt, env); err != nil { return reflect.Value{}, err } if op == token.SHL || op == token.SHR { if ys, err = evalTypedExpr(yexpr, knownType{uintType}, env); err != nil { return reflect.Value{}, err } } else { if ys, err = evalTypedExpr(yexpr, zt, env); err != nil { return reflect.Value{}, err } } x, y := xs[0], ys[0] var b bool switch zt[0].Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: r, err = evalBinaryIntExpr(x, op, y) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: r, err = evalBinaryUintExpr(x, op, y) case reflect.Float32, reflect.Float64: r, err = evalBinaryFloatExpr(x, op, y) case reflect.Complex64, reflect.Complex128: r, err = evalBinaryComplexExpr(x, op, y) case reflect.String: r, err = evalBinaryStringExpr(x, op, y) case reflect.Bool: r, err = evalBinaryBoolExpr(x, op, y) case reflect.Interface, reflect.Ptr: if xexpr.KnownType()[0] == ConstNil { b = y.IsNil() } else if yexpr.KnownType()[0] == ConstNil { b = x.IsNil() } else if t := areDynamicTypesComparable(x, y); t != nil { return reflect.Value{}, PanicUncomparableType{t} } else { b = x.Interface() == y.Interface() } if op == token.NEQ { b = !b } r = reflect.ValueOf(b) case reflect.Struct, reflect.Array: if t := areDynamicTypesComparable(x, y); t != nil { return reflect.Value{}, PanicUncomparableType{t} } b = x.Interface() == y.Interface() if op == token.NEQ { b = !b } r = reflect.ValueOf(b) case reflect.Map, reflect.Slice, reflect.Func: if xexpr.KnownType()[0] == ConstNil { 
b = y.IsNil() } else { b = x.IsNil() } if op == token.NEQ { b = !b } r = reflect.ValueOf(b) default: return reflect.Value{}, errors.New("eval: unimplemented binary ops :(") } return r, err } func evalBinaryIntExpr(x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) { var r int64 var err error var b bool is_bool := false switch op { case token.ADD: r = x.Int() + y.Int() case token.SUB: r = x.Int() - y.Int() case token.MUL: r = x.Int() * y.Int() case token.QUO: if y.Int() == 0 { return reflect.Value{}, PanicDivideByZero{} } r = x.Int() / y.Int() case token.REM: if y.Int() == 0 { return reflect.Value{}, PanicDivideByZero{} } r = x.Int() % y.Int() case token.SHL: r = x.Int() << y.Uint() case token.SHR: r = x.Int() >> y.Uint() case token.AND: r = x.Int() & y.Int() case token.OR: r = x.Int() | y.Int() case token.XOR: r = x.Int() ^ y.Int() case token.AND_NOT: r = x.Int() &^ y.Int() case token.EQL: b = x.Int() == y.Int(); is_bool = true case token.NEQ: b = x.Int() != y.Int(); is_bool = true case token.LEQ: b = x.Int() <= y.Int(); is_bool = true case token.GEQ: b = x.Int() >= y.Int(); is_bool = true case token.LSS: b = x.Int() < y.Int(); is_bool = true case token.GTR: b = x.Int() > y.Int(); is_bool = true default: panic(dytc("bad binary op")) } if is_bool { return reflect.ValueOf(b), err } else { return reflect.ValueOf(r).Convert(x.Type()), err } } func evalBinaryUintExpr(x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) { var err error var r uint64 var b bool is_bool := false xx, yy := x.Uint(), y.Uint() switch op { case token.ADD: r = xx + yy case token.SUB: r = xx - yy case token.MUL: r = xx * yy case token.QUO: if yy == 0 { return reflect.Value{}, PanicDivideByZero{} } r = xx / yy case token.REM: if yy == 0 { return reflect.Value{}, PanicDivideByZero{} } r = xx % yy case token.SHL: r = xx << yy case token.SHR: r = xx >> yy case token.AND: r = xx & yy case token.OR: r = xx | yy case token.XOR: r = xx ^ yy case token.AND_NOT: r 
= xx &^ yy case token.EQL: b = xx == yy; is_bool = true case token.NEQ: b = xx != yy; is_bool = true case token.LEQ: b = xx <= yy; is_bool = true case token.GEQ: b = xx >= yy; is_bool = true case token.LSS: b = xx < yy; is_bool = true case token.GTR: b = xx > yy; is_bool = true default: panic(dytc("bad binary op")) } if is_bool { return reflect.ValueOf(b), err } else { return reflect.ValueOf(r).Convert(x.Type()), err } } func evalBinaryFloatExpr(x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) { var r float64 var is_bool bool var b bool xx, yy := x.Float(), y.Float() switch op { case token.ADD: r = xx + yy case token.SUB: r = xx - yy case token.MUL: r = xx * yy case token.QUO: if yy == 0 { return reflect.Value{}, PanicDivideByZero{} } r = xx / yy case token.EQL: b = xx == yy; is_bool = true case token.NEQ: b = xx != yy; is_bool = true case token.LEQ: b = xx <= yy; is_bool = true case token.GEQ: b = xx >= yy; is_bool = true case token.LSS: b = xx < yy; is_bool = true case token.GTR: b = xx > yy; is_bool = true default: panic(dytc("bad binary op")) } if is_bool { return reflect.ValueOf(b), nil } else { return reflect.ValueOf(r).Convert(x.Type()), nil } } func evalBinaryComplexExpr(x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) { var r complex128 var is_bool bool var b bool xx, yy := x.Complex(), y.Complex() switch op { case token.ADD: r = xx + yy case token.SUB: r = xx - yy case token.MUL: r = xx * yy case token.QUO: if yy == 0 { return reflect.Value{}, PanicDivideByZero{} } r = xx / yy case token.EQL: b = xx == yy; is_bool = true case token.NEQ: b = xx != yy; is_bool = true default: panic(dytc("bad binary op")) } if is_bool { return reflect.ValueOf(b), nil } else { return reflect.ValueOf(r).Convert(x.Type()), nil } } func evalBinaryStringExpr(x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) { var r string var b bool is_bool := false xx, yy := x.String(), y.String() switch op { case token.ADD: 
r = xx + yy case token.EQL: b = xx == yy; is_bool = true case token.NEQ: b = xx != yy; is_bool = true case token.LEQ: b = xx <= yy; is_bool = true case token.GEQ: b = xx >= yy; is_bool = true case token.LSS: b = xx < yy; is_bool = true case token.GTR: b = xx > yy; is_bool = true default: panic(dytc("bad binary op")) } if is_bool { return reflect.ValueOf(b), nil } else { return reflect.ValueOf(r).Convert(x.Type()), nil } } func evalBinaryBoolExpr(x reflect.Value, op token.Token, y reflect.Value) (reflect.Value, error) { xx, yy := x.Bool(), y.Bool() var r bool switch op { case token.LAND: r = xx && yy case token.LOR: r = xx || yy case token.EQL: r = xx == yy case token.NEQ: r = xx != yy default: panic(dytc("bad binary op")) } return reflect.ValueOf(r), nil } func areDynamicTypesComparable(x, y reflect.Value) reflect.Type { if x.Type() != y.Type() { return nil } switch x.Type().Kind() { case reflect.Interface: return areDynamicTypesComparable(x.Elem(), y.Elem()) case reflect.Struct: numField := x.NumField() for i := 0; i < numField; i += 1 { if t := areDynamicTypesComparable(x.Field(i), y.Field(i)); t != nil { if t.Kind() == reflect.Struct { return t } else { return x.Type() } } } case reflect.Array: length := x.Len() for i := 0; i < length; i += 1 { if t := areDynamicTypesComparable(x.Index(i), y.Index(i)); t != nil { return t } } case reflect.Map, reflect.Func, reflect.Slice: return x.Type() } return nil }
Godeps/_workspace/src/github.com/0xfaded/eval/evalbinaryexpr.go
0.552057
0.515864
evalbinaryexpr.go
starcoder
package dst // Geometric distribution (type 1). // The probability distribution of the number Y = X − 1 of failures before the first success, supported on the set {1, 2, 3, ... } // Parameters: // ρ ∈ (0, 1] probability of success in each trial // Support: // k ∈ {1, ... , n} // Geometric1PMF returns the PMF of the Geometric1 distribution. func Geometric1PMF(ρ float64) func(k int64) float64 { return func(k int64) float64 { return ρ * pow(1-ρ, float64(k-1)) } } // Geometric1LnPMF returns the natural logarithm of the PMF of the Geometric distribution (type 1). func Geometric1LnPMF(ρ float64) func(k int64) float64 { return func(k int64) float64 { return log(1-ρ) + float64(k-1)*log(ρ) } } // Geometric1PMFAt returns the value of PMF of Geometric distribution (type 1) at k. func Geometric1PMFAt(ρ float64, k int64) float64 { pmf := Geometric1PMF(ρ) return pmf(k) } // Geometric1CDF returns the value of CDF of the Geometric distribution (type 1). func Geometric1CDF(ρ float64) func(k int64) float64 { return func(k int64) float64 { if k < 0 { return NaN } return 1 - pow(1-ρ, float64(k)) } } // Geometric1CDFAt returns the value of CDF of the Geometric distribution (type 1) at k. func Geometric1CDFAt(ρ float64, k int64) float64 { cdf := Geometric1CDF(ρ) return cdf(k) } /* Not tested, looking strange, commented out, waiting for revision // Geometric1Next returns random number drawn from the Geometric distribution (type 1). //Geometric1Next(ρ) => # of Geometric1Next(ρ) failures before one success func Geometric1Next(ρ float64) int64 { if Geometric1Next(ρ) == 1 { return 1 + Geometric1Next(ρ) } return 0 } // Geometric1 returns the random number generator with Geometric distribution (type 1). func Geometric1(ρ float64) func() int64 { return func() int64 { return Geometric1Next(ρ) } } */ // Geometric1Mean returns the mean of the Geometric distribution (type 1). 
func Geometric1Mean(ρ float64) float64 { return 1 / ρ } /* to be implemented // Geometric1Median returns the median of the Geometric distribution (type 1). func Geometric1Median(ρ float64) float64 { return floor(float64(n)*p) } */ // Geometric1Mode returns the mode of the Geometric distribution (type 1). func Geometric1Mode(ρ float64) float64 { return 1 } // Geometric1Var returns the variance of the Geometric distribution (type 1). func Geometric1Var(ρ float64) float64 { return (1 - ρ) / (ρ * ρ) } // Geometric1Std returns the standard deviation of the Geometric distribution (type 1). func Geometric1Std(ρ float64) float64 { return sqrt(1-ρ) / ρ } // Geometric1Skew returns the skewness of the Geometric distribution (type 1). func Geometric1Skew(ρ float64) float64 { return (2 - ρ) / sqrt(1-ρ) } // Geometric1ExKurt returns the excess kurtosis of the Geometric distribution (type 1). func Geometric1ExKurt(ρ float64) float64 { return 6 + (ρ*ρ)/(1-ρ) } // Geometric1MGF returns the moment-generating function of the Geometric distribution (type 1). func Geometric1MGF(ρ, t float64) float64 { if t >= -log(1-ρ) { return NaN } return ρ * exp(t) / (1 - (1-ρ)*exp(t)) }
dst/geom1.go
0.882288
0.828523
geom1.go
starcoder
package datautils import ( "bytes" "fmt" ) // FileType is the type URI for files const FileType = "http://sdr.sul.stanford.edu/models/sdr3-file.jsonld" // FilesetType is the type URI for filesets const FilesetType = "http://sdr.sul.stanford.edu/models/sdr3-fileset.jsonld" // ObjectTypes is the list of object subtype URIs var ObjectTypes = []string{ "http://sdr.sul.stanford.edu/models/sdr3-object.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-3d.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-agreement.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-book.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-document.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-geo.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-image.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-page.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-photograph.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-manuscript.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-map.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-media.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-track.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-webarchive-binary.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-webarchive-seed.jsonld", } // CollectionTypes is the list of object subtype URIs var CollectionTypes = []string{ "http://sdr.sul.stanford.edu/models/sdr3-collection.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-curated-collection.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-user-collection.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-exhibit.jsonld", "http://sdr.sul.stanford.edu/models/sdr3-series.jsonld", } // Resource represents the resource as it exists in the persistence layer // this is very similar to models.Resource, but COULD vary, so we should // keep them separated type Resource struct { JSON JSONObject } // NewResource creates a new resource instance func NewResource(data map[string]interface{}) *Resource { if data == nil { data = map[string]interface{}{} } return 
&Resource{JSON: JSONObject(data)} } // ID returns the document's identifier func (d *Resource) ID() string { return d.JSON.GetS("tacoIdentifier") } // ExternalIdentifier returns the document's external identifier (DRUID or UUID) func (d *Resource) ExternalIdentifier() string { return d.JSON.GetS("externalIdentifier") } // Version returns the document's version func (d *Resource) Version() int { return int(d.JSON.GetF("version")) } // Type returns the document's type func (d *Resource) Type() string { return d.JSON.GetS("@type") } // MimeType returns the document's MIME type func (d *Resource) MimeType() string { return d.JSON.GetS("hasMimeType") } // WithFileLocation sets the location of the binary. func (d *Resource) FileLocation() string { return d.JSON.GetS("file-location") } // Label returns the document's Label func (d *Resource) Label() string { return d.JSON.GetS("label") } // IsFile returns true if the resource has the file type assertion func (d *Resource) IsFile() bool { return d.Type() == FileType } // IsFileset returns true if the resource has the fileset type assertion func (d *Resource) IsFileset() bool { return d.Type() == FilesetType } // IsObject returns true if the resource has an object type assertion func (d *Resource) IsObject() bool { return contains(ObjectTypes, d.Type()) } // IsCollection returns true if the resource has an object type assertion func (d *Resource) IsCollection() bool { return contains(CollectionTypes, d.Type()) } // WithID sets the document's primary key func (d *Resource) WithID(id string) *Resource { d.JSON["tacoIdentifier"] = id return d } // WithType sets the document's type func (d *Resource) WithType(atType string) *Resource { d.JSON["@type"] = atType return d } // WithExternalIdentifier sets the document's external identifier (DRUID or UUID) func (d *Resource) WithExternalIdentifier(id string) *Resource { d.JSON["externalIdentifier"] = id return d } // WithMimeType sets the mime type. 
This should only be used on File resources func (d *Resource) WithMimeType(mimeType string) *Resource { d.JSON["hasMimeType"] = mimeType return d } // WithFileLocation sets the location of the binary. This should only be used on File resources func (d *Resource) WithFileLocation(fileLocation string) *Resource { d.JSON["file-location"] = fileLocation return d } // WithLabel sets the label. func (d *Resource) WithLabel(label string) *Resource { d.JSON["label"] = label return d } // WithCurrentVersion sets the currentVersion flag func (d *Resource) WithCurrentVersion(flag bool) *Resource { d.JSON["currentVersion"] = flag return d } // WithVersion sets the version func (d *Resource) WithVersion(version int) *Resource { d.JSON["version"] = float64(version) return d } // WithPrecedingVersion sets the precedingVersion to // the id passed (of the old version) func (d *Resource) WithPrecedingVersion(id string) *Resource { d.JSON["precedingVersion"] = id return d } // WithFollowingVersion sets the followingVersion to // the id passed (of the new version) func (d *Resource) WithFollowingVersion(id string) *Resource { d.JSON["followingVersion"] = id return d } func contains(s []string, e string) bool { for _, a := range s { if a == e { return true } } return false } // Structural returns structural.isContainedBy func (d *Resource) Structural() *JSONObject { return d.JSON.GetObj("structural") } // Identification returns the identification subschema func (d *Resource) Identification() *JSONObject { return d.JSON.GetObj("identification") } // Administrative returns the administrative subschema func (d *Resource) Administrative() *JSONObject { return d.JSON.GetObj("administrative") } func (d *Resource) String() string { buf := bytes.NewBufferString("<Resource") if d.JSON.HasKey("tacoIdentifier") { buf.WriteString(fmt.Sprintf(" id:'%s'", d.ID())) } if d.JSON.HasKey("@type") { buf.WriteString(fmt.Sprintf(" @type:'%s'", d.Type())) } buf.WriteString(">") return buf.String() }
datautils/resource.go
0.629661
0.466542
resource.go
starcoder
package worktime import ( "fmt" "time" ) // Day is a full calendar day const Day = time.Hour * 24 // WorkTime represents a day that could be a normal week day with office hours type WorkTime struct { time.Time start time.Duration end time.Duration } // NewStandardWorkTime returns a WorkTime with 9-5 as the working hours func NewStandardWorkTime(t time.Time) WorkTime { return NewWorkTime( t, time.Hour*9, time.Hour*17, ) } // NewWorkTime allows you to specify the start and end of the work day func NewWorkTime(t time.Time, start, end time.Duration) WorkTime { return WorkTime{ Time: t, start: start, end: end, } } func (t WorkTime) String() string { return fmt.Sprintf("%s (%02.0f:%02.0f - %02.0f:%02.0f)", t.Format("2006-01-02T15:04:05 [Mon]"), t.start.Hours(), t.start.Minutes()-t.start.Hours()*60, t.end.Hours(), t.end.Minutes()-t.end.Hours()*60, ) } // IsWorkDay returns false if the time is a Saturday or Sunday, else true func (t WorkTime) IsWorkDay() bool { day := t.Weekday() if day == time.Sunday || day == time.Saturday { return false } return true } // Length returns the time.Duration of the working day func (t WorkTime) Length() time.Duration { diff := t.end - t.start if diff <= 0 { diff += Day } return diff } // SinceMidnight returns the time.Duration since the previous midnight func (t WorkTime) SinceMidnight() time.Duration { return t.Sub(t.Truncate(Day)) } // BeforeStart returns true if the time of day is before the start of the working day func (t WorkTime) BeforeStart() bool { return t.SinceMidnight() < t.start } // DuringOfficeHours returns true if the time is during the working hours func (t WorkTime) DuringOfficeHours() bool { return t.IsWorkDay() && !t.BeforeStart() && !t.AfterEnd() } // AfterEnd returns true if the time of day is after the end of the working day func (t WorkTime) AfterEnd() bool { return t.SinceMidnight() > t.end } // Start returns a WorkTime representing the working start of the current day func (t WorkTime) Start() WorkTime { return 
NewWorkTime( t.Truncate(Day).Add(t.start), t.start, t.end, ) } // NextStart returns the start of the next working day func (t WorkTime) NextStart() WorkTime { if !t.BeforeStart() { d := Day if t.Weekday() == time.Friday { d = Day * 3 } else if t.Weekday() == time.Saturday { d = Day * 2 } t = t.Add(d) } return t.Start() } // End returns a WorkTime representing the end of the working day func (t WorkTime) End() WorkTime { return NewWorkTime( t.Truncate(Day).Add(t.end), t.start, t.end, ) } // FromStart returns the amount of time since the start of the working day, can be negative if the // time of day is before the start of the working day func (t WorkTime) FromStart() time.Duration { return t.SinceMidnight() - t.start } // UntilEnd returns the amount of time until the end of the working day, can be negative if the // time of day is after the end of the working day func (t WorkTime) UntilEnd() time.Duration { return t.end - t.SinceMidnight() } // Add returns a WorkTime plus a time.Duration func (t WorkTime) Add(d time.Duration) WorkTime { return NewWorkTime(t.Time.Add(d), t.start, t.end) } // IsSameDay returns true if this WorkTime is the same day as d func (t WorkTime) IsSameDay(d time.Time) bool { tYear, tMonth, tDay := t.Date() dYear, dMonth, dDay := d.Date() return tYear == dYear && tMonth == dMonth && tDay == dDay } // Crossover works out the where this working days cross over, accounting for time zone but ignoring the current time func (t WorkTime) Crossover(v WorkTime) WorkTime { _, toffset := t.Zone() _, voffset := v.Zone() offset := time.Second * time.Duration(toffset-voffset) v.start += offset v.end += offset t.start = dmax(t.start, v.start) t.end = dmin(t.end, v.end) if t.start > t.end { t.start = 0 t.end = 0 } return t } func dmin(l, r time.Duration) time.Duration { if l < r { return l } return r } func dmax(l, r time.Duration) time.Duration { if l > r { return l } return r }
worktime.go
0.850158
0.514339
worktime.go
starcoder
package search import ( "bytes" "regexp" "github.com/VictorLowther/simplexml/dom" ) // Match is the basic type of a search function. // It takes a single element, and returns a boolean // indicating whether the element matched the func. type Match func(*dom.Element) bool // And takes any number of Match, and returns another // Match that will match if all of passed Match functions // match. func And(funcs ...Match) Match { return func(e *dom.Element) bool { for _, fn := range funcs { if !fn(e) { return false } } return true } } // Or takes any number of Match, and returns another Match // that will match if any of the passed Match functions match. func Or(funcs ...Match) Match { return func(e *dom.Element) bool { for _, fn := range funcs { if fn(e) { return true } } return false } } // Not takes a single Match, and returns another Match // that matches if fn does not match. func Not(fn Match) Match { return func(e *dom.Element) bool { return !fn(e) } } // NoParent returns a matcher that matches iff the element // does not have a parent func NoParent() Match { return func(e *dom.Element) bool { return e.Parent() == nil } } // Ancestor returns a matcher that matches iff the element has an // ancestor that matches the passed matcher func Ancestor(fn Match) Match { return func(e *dom.Element) bool { return First(fn, e.Ancestors()) != nil } } // AncestorN returns a matcher that matches against the // nth ancestor of the node being tested. // If n == 0, then the node itself will be tested as a degenerate case. // If there is no such ancestor the match fails. func AncestorN(fn Match, distance uint) Match { return func(e *dom.Element) bool { if distance == 0 { return fn(e) } ancestors := e.Ancestors() if len(ancestors) < int(distance) { return false } return fn(ancestors[distance-1]) } } // Parent returns a matcher that matches iff the element // has a parent and that parent matches the passed fn. 
func Parent(fn Match) Match { return func(e *dom.Element) bool { p := e.Parent() if p == nil { return false } return fn(p) } } // Child returns a matcher that matches iff the element has a // child that matches the passed fn. func Child(fn Match) Match { return func(e *dom.Element) bool { for _, c := range e.Children() { if fn(c) { return true } } return false } } // Always returns a matcher that always matches func Always() Match { return func(e *dom.Element) bool { return true } } //Never returns a matcher that never matches func Never() Match { return Not(Always()) } // All returns all the nodes that fn matches func All(fn Match, nodes []*dom.Element) []*dom.Element { res := make([]*dom.Element, 0, 0) for _, n := range nodes { if fn(n) { res = append(res, n) } } return res } // First returns the first element that fn matches func First(fn Match, nodes []*dom.Element) *dom.Element { for _, n := range nodes { if fn(n) { return n } } return nil } // Tag is a helper function for matching against a specific tag. // It takes a name and a namespace URL to match against. // If either name or space are "*", then they will match // any value. // Return is a Match. func Tag(name, space string) Match { return func(e *dom.Element) bool { return (space == "*" || space == e.Name.Space) && (name == "*" || name == e.Name.Local) } } // FirstTag finds the first element in the set of nodes that matches the tag name and namespace. func FirstTag(name, space string, nodes []*dom.Element) *dom.Element { return First(Tag(name, space), nodes) } // MustFirstTag is the same as FirstTag, but it panics if the tag cannot be found func MustFirstTag(name, space string, nodes []*dom.Element) *dom.Element { res := FirstTag(name, space, nodes) if res == nil { panic("Failed to find tag " + name + " in namespace " + space) } return res } // TagRE is a helper function for matching against a specific tag // using regular expressions. 
It follows roughly the same rules as // search.Tag // Return is a Match func TagRE(name, space *regexp.Regexp) Match { return func(e *dom.Element) bool { return (space == nil || space.MatchString(e.Name.Space)) && (name == nil || name.MatchString(e.Name.Local)) } } // Attr creates a Match against the attributes of an element. // It follows the same rules as Tag func Attr(name, space, value string) Match { return func(e *dom.Element) bool { for _, a := range e.Attributes { if (space == "*" || space == a.Name.Space) && (name == "*" || name == a.Name.Local) && (value == "*" || value == a.Value) { return true } } return false } } // AttrRE creates a Match against the attributes of an element. // It follows the same rules as MatchRE func AttrRE(name, space, value *regexp.Regexp) Match { return func(e *dom.Element) bool { for _, a := range e.Attributes { if (space == nil || space.MatchString(a.Name.Space)) && (name == nil || name.MatchString(a.Name.Local)) && (value == nil || value.MatchString(a.Value)) { return true } } return false } } // ContentExists creates a Match against an element that has non-empty // Content. func ContentExists() Match { return func(e *dom.Element) bool { return len(e.Content) > 0 } } // Content creates a Match against an element that tests to see if // it matches the supplied content. func Content(content []byte) Match { return func(e *dom.Element) bool { return bytes.Equal(e.Content, content) } } // ContentRE creates a Match against the Content of am element // that passes if the regex matches the content. func ContentRE(regex *regexp.Regexp) Match { return func(e *dom.Element) bool { return regex.Match(e.Content) } }
search/search.go
0.803637
0.510741
search.go
starcoder
package isaac import "unsafe" // Isaac64 represents ISAAC64 random generator type Isaac64 struct { randrsl [256]uint64 randmem [256]uint64 randcnt uint64 aa uint64 bb uint64 cc uint64 } // NewIsaac64 returns a new instance of ISAAC64. func NewIsaac64() *Isaac64 { return &Isaac64{ randmem: [256]uint64{}, randrsl: [256]uint64{}, randcnt: 0, aa: 0, bb: 0, cc: 0, } } // Seed initializes the state of ISAAC instance using given 64bit integer. func (ctx *Isaac64) Seed(seed int64) { ctx.randrsl[0] = uint64(seed) ctx.randInit(true) } // SeedBytes initializes the state of ISAAC instance using given byte sequence. func (ctx *Isaac64) SeedBytes(seed []byte) { if len(seed) > 2048 { seed = seed[:2048] } unsafeCopy(unsafe.Pointer(&ctx.randrsl[0]), unsafe.Pointer(&seed[0]), len(seed)) ctx.randInit(true) } // SeedString initializes the state of ISAAC instance using given string. func (ctx *Isaac64) SeedString(seed string) { ctx.SeedBytes([]byte(seed)) } // Int63 returns a non-negative 63-bit integer as an int64. func (ctx *Isaac64) Int63() int64 { return int64(ctx.Uint64() & uintMask) } // Uint32 returns a random 32-bit unsigned integer. func (ctx *Isaac64) Uint32() uint32 { return uint32(ctx.next()) } // Uint64 returns a random 64-bit unsigned integer. func (ctx *Isaac64) Uint64() uint64 { return ctx.next() } // Int31 returns a non-negative 31-bit integer as an int32. 
func (ctx *Isaac64) Int31() int32 { return int32(ctx.Int63() >> 32) } // Int returns a non-negative integer as an int func (ctx *Isaac64) Int() int { u := uint(ctx.Uint64()) return int(u << 1 >> 1) } func (ctx *Isaac64) isaac64() { var a, b, x uint64 mm := ctx.randmem[:] r := ctx.randrsl[:] ctx.cc++ a, b = ctx.aa, ctx.bb+ctx.cc for ii := 0; ii < 256; ii += 4 { var i uint8 = uint8(ii) x = mm[i] a = ^(a ^ (a << 21)) + mm[i+128] mm[i] = mm[(x>>3)&255] + a + b r[i] = mm[(mm[i]>>11)&255] + x b = r[i] x = mm[i+1] a = (a ^ (a >> 5)) + mm[i+129] mm[i+1] = mm[(x>>3)&255] + a + b r[i+1] = mm[(mm[i+1]>>11)&255] + x b = r[i+1] x = mm[i+2] a = (a ^ (a << 12)) + mm[i+130] mm[i+2] = mm[(x>>3)&255] + a + b r[i+2] = mm[(mm[i+2]>>11)&255] + x b = r[i+2] x = mm[i+3] a = (a ^ (a >> 33)) + mm[i+131] mm[i+3] = mm[(x>>3)&255] + a + b r[i+3] = mm[(mm[i+3]>>11)&255] + x b = r[i+3] } ctx.bb, ctx.aa = b, a } func (ctx *Isaac64) randInit(flag bool) { var a, b, c, d, e, f, g, h uint64 a, b, c, d, e, f, g, h = 0x9e3779b97f4a7c13, 0x9e3779b97f4a7c13, 0x9e3779b97f4a7c13, 0x9e3779b97f4a7c13, 0x9e3779b97f4a7c13, 0x9e3779b97f4a7c13, 0x9e3779b97f4a7c13, 0x9e3779b97f4a7c13 // scramble for i := 0; i < 4; i++ { a -= e f ^= h >> 9 h += a b -= f g ^= a << 9 a += b c -= g h ^= b >> 23 b += c d -= h a ^= c << 15 c += d e -= a b ^= d >> 14 d += e f -= b c ^= e << 20 e += f g -= c d ^= f >> 17 f += g h -= d e ^= g << 14 g += h } if flag { // initialize using seed for i := 0; i < 256; i += 8 { a += ctx.randrsl[i] b += ctx.randrsl[i+1] c += ctx.randrsl[i+2] d += ctx.randrsl[i+3] e += ctx.randrsl[i+4] f += ctx.randrsl[i+5] g += ctx.randrsl[i+6] h += ctx.randrsl[i+7] // mix a -= e f ^= h >> 9 h += a b -= f g ^= a << 9 a += b c -= g h ^= b >> 23 b += c d -= h a ^= c << 15 c += d e -= a b ^= d >> 14 d += e f -= b c ^= e << 20 e += f g -= c d ^= f >> 17 f += g h -= d e ^= g << 14 g += h ctx.randmem[i] = a ctx.randmem[i+1] = b ctx.randmem[i+2] = c ctx.randmem[i+3] = d ctx.randmem[i+4] = e ctx.randmem[i+5] = f 
ctx.randmem[i+6] = g ctx.randmem[i+7] = h } // second pass for i := 0; i < 256; i += 8 { a += ctx.randmem[i] b += ctx.randmem[i+1] c += ctx.randmem[i+2] d += ctx.randmem[i+3] e += ctx.randmem[i+4] f += ctx.randmem[i+5] g += ctx.randmem[i+6] h += ctx.randmem[i+7] // mix a -= e f ^= h >> 9 h += a b -= f g ^= a << 9 a += b c -= g h ^= b >> 23 b += c d -= h a ^= c << 15 c += d e -= a b ^= d >> 14 d += e f -= b c ^= e << 20 e += f g -= c d ^= f >> 17 f += g h -= d e ^= g << 14 g += h ctx.randmem[i] = a ctx.randmem[i+1] = b ctx.randmem[i+2] = c ctx.randmem[i+3] = d ctx.randmem[i+4] = e ctx.randmem[i+5] = f ctx.randmem[i+6] = g ctx.randmem[i+7] = h } } else { for i := 0; i < 256; i += 8 { // mix a -= e f ^= h >> 9 h += a b -= f g ^= a << 9 a += b c -= g h ^= b >> 23 b += c d -= h a ^= c << 15 c += d e -= a b ^= d >> 14 d += e f -= b c ^= e << 20 e += f g -= c d ^= f >> 17 f += g h -= d e ^= g << 14 g += h ctx.randmem[i] = a ctx.randmem[i+1] = b ctx.randmem[i+2] = c ctx.randmem[i+3] = d ctx.randmem[i+4] = e ctx.randmem[i+5] = f ctx.randmem[i+6] = g ctx.randmem[i+7] = h } } ctx.isaac64() ctx.randcnt = 256 } func (ctx *Isaac64) next() uint64 { if ctx.randcnt == 0 { ctx.isaac64() ctx.randcnt = 255 return ctx.randrsl[255] } ctx.randcnt-- return ctx.randrsl[ctx.randcnt] }
isaac64.go
0.558207
0.479686
isaac64.go
starcoder
package gwc import ( "math" "math/rand" ) // Builds a Node from the provided state function and neighbours. func NewNode(id NodeID, fn NodeStateFn, neighbours ...NodeID) Node { return &BaseNode{id, neighbours, fn} } // Builds a Node from the provided superposition and neighbours. func NewSuperpositionNode(id NodeID, super NodeSuperposition, neighbours ...NodeID) Node { return NewNode(id, SuperpositionStateFn(super), neighbours...) } type ( NodesMap map[NodeID]Node Nodes []Node Node interface { ID() NodeID Neighbours() NodeIDs Collapse(*rand.Rand, NodeEnvironment) NodeState } NodeIDs []NodeID NodeID = string NodeProbability = float64 NodeState = interface{} NodeStateFn = func(*rand.Rand, NodeEnvironment) NodeState ) // BaseNode can be used as base for a more concrete struct, which implements a concrete Collapse() method. type BaseNode struct { id NodeID neighbours NodeIDs fn NodeStateFn } func (n *BaseNode) ID() NodeID { return n.id } func (n *BaseNode) Neighbours() NodeIDs { return n.neighbours } func (n *BaseNode) Collapse(rnd *rand.Rand, env NodeEnvironment) NodeState { if n.fn != nil { return n.fn(rnd, env) } return nil } // Applies a logical AND to the two index lists and returns the product. func (ids NodeIDs) And(other NodeIDs) NodeIDs { xs := NodeIDs{} for _, a := range ids { for _, b := range other { if a == b { xs = append(xs, a) } } } return xs } // Applies a logical OR to the two index lists and returns the product. func (ids NodeIDs) Or(other NodeIDs) NodeIDs { xs := NodeIDs{} xs = append(xs, ids...) Outer: for _, b := range other { for _, x := range xs { if b == x { continue Outer } } xs = append(xs, b) } return xs } // Applies a logical XOR to the two index lists and returns the product. 
func (ids NodeIDs) Xor(other NodeIDs) NodeIDs { xs := NodeIDs{} Outer: for _, a := range ids { for _, b := range other { if a == b { continue Outer } } xs = append(xs, a) } Outer2: for _, b := range other { for _, a := range ids { if a == b { continue Outer2 } } xs = append(xs, b) } return xs } type ( NodeSuperpositionFn = func(*rand.Rand, NodeEnvironment) (NodeProbability, NodeState) NodeSuperposition = []NodeSuperpositionFn ) func SuperpositionStateFn(super NodeSuperposition) NodeStateFn { return func(rnd *rand.Rand, env NodeEnvironment) NodeState { // Stop early when the Node's superposition is empty. num := len(super) if num == 0 { return nil } // We'll later compare the generated probabilities against this float, in this order. // Both are generated now, so that collapsing the superposition below won't interfere with these values. compare := rnd.Float64() order := rnd.Perm(num) // Call all functions in the superposition and collect their probabilities and states. sum := float64(0.0) probabilities := make([]NodeProbability, num) states := make([]NodeState, num) for _, i := range order { ip, is := super[i](rnd, env) sum += ip probabilities[i] = ip states[i] = is } // Scale compare float according to the relative probability sum. compare *= math.Max(1, sum) // Collapse into the first state that had a high enough Nodeprobability to reach the compare float. for i, p := range probabilities { compare -= p if compare <= 0 { return states[i] } } // If no state was probable enough but there are states available, return a random one. if len(states) > 0 { return states[rnd.Intn(len(states))] } // Fallback to nil when there were no states. return nil } }
node.go
0.710528
0.488039
node.go
starcoder
package document import ( "fmt" "strconv" "time" "github.com/unchartedsoftware/deluge/util" ) // TSV represents a basic tsv based document. type TSV struct { Cols []string } // SetData sets the internal TSV column. func (d *TSV) SetData(data interface{}) error { // cast back to a string line, ok := data.(string) if !ok { return fmt.Errorf("Could not cast `%v` into type string", data) } // parse delimited fields cols, err := util.ParseFields(line, '\t') if err != nil { return err } d.Cols = cols return nil } // ColumnExists returns true if the provided column index exists in the row. func (d *TSV) ColumnExists(index int) bool { if index > len(d.Cols)-1 { return false } col := d.Cols[index] if col != "" && col != "null" { return true } return false } // Float64 returns the column as a float64. func (d *TSV) Float64(index int) (float64, bool) { if d.ColumnExists(index) { val, err := strconv.ParseFloat(d.Cols[index], 64) if err == nil { return val, true } } return 0, false } // Float32 returns the column as a float32. func (d *TSV) Float32(index int) (float32, bool) { if d.ColumnExists(index) { val, err := strconv.ParseFloat(d.Cols[index], 32) if err == nil { return float32(val), true } } return 0, false } // Int64 returns the column as an int64. func (d *TSV) Int64(index int) (int64, bool) { if d.ColumnExists(index) { val, err := strconv.ParseInt(d.Cols[index], 10, 64) if err == nil { return val, true } } return 0, false } // Int32 returns the column as an int32. func (d *TSV) Int32(index int) (int32, bool) { if d.ColumnExists(index) { val, err := strconv.ParseInt(d.Cols[index], 10, 32) if err == nil { return int32(val), true } } return 0, false } // Int returns the column as an int. func (d *TSV) Int(index int) (int, bool) { if d.ColumnExists(index) { val, err := strconv.ParseInt(d.Cols[index], 10, 64) if err == nil { return int(val), true } } return 0, false } // String returns the column as a string. 
func (d *TSV) String(index int) (string, bool) { if d.ColumnExists(index) { return d.Cols[index], true } return "", false } // Bool returns the column as a bool. func (d *TSV) Bool(index int) (bool, bool) { if d.ColumnExists(index) { col := d.Cols[index] if col == "true" || col == "1" { return true, true } return false, true } return false, false } // Time returns the column as a time.Time using the provided layout to parse. func (d *TSV) Time(index int, layout string) (time.Time, bool) { if d.ColumnExists(index) { t, err := time.Parse(layout, d.Cols[index]) if err != nil { return time.Time{}, false } return t, true } return time.Time{}, false }
document/tsv.go
0.740174
0.402421
tsv.go
starcoder
package goDataStructure import "fmt" type BSTreeMap struct { root *treeMapNode size int } type treeMapNode struct { key Comparable value interface{} left, right *treeMapNode } func CreateBSTreeMap() *BSTreeMap { return &BSTreeMap{ root: nil, size: 0, } } func (tm *BSTreeMap) Add(k interface{}, v interface{}) { tm.root = tm.add(tm.root, k, v) } func (tm *BSTreeMap) add(node *treeMapNode, k interface{}, v interface{}) *treeMapNode { if node == nil { tm.size++ return &treeMapNode{key: k.(Comparable), value: v, left: nil, right: nil} } if k.(Comparable).Compare(node.key) < 0 { node.left = tm.add(node.left, k, v) } else if k.(Comparable).Compare(node.key) > 0 { node.right = tm.add(node.right, k, v) } else { node.value = v } return node } func (tm *BSTreeMap) Remove(k interface{}) interface{} { ret := tm.Get(k) tm.root = tm.remove(tm.root, k) return ret } func (tm *BSTreeMap) remove(node *treeMapNode, k interface{}) *treeMapNode { if node == nil { return nil } if k.(Comparable).Compare(node.key) < 0 { node.left = tm.remove(node.left, k) return node } else if k.(Comparable).Compare(node.key) > 0 { node.right = tm.remove(node.right, k) return node } else { if node.left == nil { right := node.right node.right = nil tm.size-- return right } else if node.right == nil { left := node.left node.left = nil tm.size-- return left } else { ret := tm.min(node.right) ret.right = tm.remove(node.right, ret.key) ret.left = node.left node.left, node.right = nil, nil return ret } } } func (tm *BSTreeMap) min(node *treeMapNode) *treeMapNode { if node.left == nil { return node } return tm.min(node.left) } func (tm *BSTreeMap) max(node *treeMapNode) *treeMapNode { if node.right == nil { return node } return tm.max(node.right) } func (tm *BSTreeMap) Contains(key interface{}) bool { return tm.contains(tm.root, key) } func (tm *BSTreeMap) contains(node *treeMapNode, key interface{}) bool { if node == nil { return false } else if node.key == key { return true } if 
node.key.Compare(key.(Comparable)) < 0 { return tm.contains(node.left, key) } else { return tm.contains(node.right, key) } } func (tm *BSTreeMap) Get(key interface{}) interface{} { return tm.get(tm.root, key) } func (tm *BSTreeMap) get(node *treeMapNode, key interface{}) interface{} { if tm.size == 0 { return nil } if node == nil { return nil } else if node.key == key { return node.value } if key.(Comparable).Compare(node.key) < 0 { return tm.get(node.left, key) } else { return tm.get(node.right, key) } } func (tm *BSTreeMap) Set(key interface{}, newValue interface{}) { tm.set(tm.root, key, newValue) } func (tm *BSTreeMap) set(node *treeMapNode, key interface{}, newValue interface{}) { if tm.size == 0 { panic("bu cun zai zhe yang de key") } if node == nil { panic("bu cun zai zhe yang de key1") } if node.key == key { node.value = newValue return } if key.(Comparable).Compare(node.key) < 0 { tm.set(node.left, key, newValue) } else { tm.set(node.right, key, newValue) } } func (tm *BSTreeMap) GetSize() int { return tm.size } func (tm *BSTreeMap) IsEmpty() bool { return tm.size == 0 } func (tm *BSTreeMap) String() string { str := "" return tm.createString(tm.root, 0, str) } func (tm *BSTreeMap) createString(node *treeMapNode, depth int, str string) string { if node == nil { str += tm.depthString(depth) + "nil \n" return str } str += tm.depthString(depth) + "key:" + fmt.Sprint(node.key) + " value:" + fmt.Sprint(node.value) + "\n" str = tm.createString(node.left, depth+1, str) str = tm.createString(node.right, depth+1, str) return str } func (tm *BSTreeMap) depthString(depth int) string { str := fmt.Sprint("") for i := 0; i < depth; i++ { str += fmt.Sprint("--") } return str }
bsTreeMap.go
0.580233
0.438905
bsTreeMap.go
starcoder
package deref // BoolOr returns a dereferenced value or the given default value if p is nil. func BoolOr(p *bool, defVal bool) bool { if p == nil { return defVal } return *p } // Bool returns a dereferenced value or the zero value if p is nil. func Bool(p *bool) bool { var defVal bool return BoolOr(p, defVal) } // StringOr returns a dereferenced value or the given default value if p is nil. func StringOr(p *string, defVal string) string { if p == nil { return defVal } return *p } // String returns a dereferenced value or the zero value if p is nil. func String(p *string) string { var defVal string return StringOr(p, defVal) } // IntOr returns a dereferenced value or the given default value if p is nil. func IntOr(p *int, defVal int) int { if p == nil { return defVal } return *p } // Int returns a dereferenced value or the zero value if p is nil. func Int(p *int) int { var defVal int return IntOr(p, defVal) } // Int8Or returns a dereferenced value or the given default value if p is nil. func Int8Or(p *int8, defVal int8) int8 { if p == nil { return defVal } return *p } // Int8 returns a dereferenced value or the zero value if p is nil. func Int8(p *int8) int8 { var defVal int8 return Int8Or(p, defVal) } // Int16Or returns a dereferenced value or the given default value if p is nil. func Int16Or(p *int16, defVal int16) int16 { if p == nil { return defVal } return *p } // Int16 returns a dereferenced value or the zero value if p is nil. func Int16(p *int16) int16 { var defVal int16 return Int16Or(p, defVal) } // Int32Or returns a dereferenced value or the given default value if p is nil. func Int32Or(p *int32, defVal int32) int32 { if p == nil { return defVal } return *p } // Int32 returns a dereferenced value or the zero value if p is nil. func Int32(p *int32) int32 { var defVal int32 return Int32Or(p, defVal) } // Int64Or returns a dereferenced value or the given default value if p is nil. 
func Int64Or(p *int64, defVal int64) int64 { if p == nil { return defVal } return *p } // Int64 returns a dereferenced value or the zero value if p is nil. func Int64(p *int64) int64 { var defVal int64 return Int64Or(p, defVal) } // UintOr returns a dereferenced value or the given default value if p is nil. func UintOr(p *uint, defVal uint) uint { if p == nil { return defVal } return *p } // Uint returns a dereferenced value or the zero value if p is nil. func Uint(p *uint) uint { var defVal uint return UintOr(p, defVal) } // Uint8Or returns a dereferenced value or the given default value if p is nil. func Uint8Or(p *uint8, defVal uint8) uint8 { if p == nil { return defVal } return *p } // Uint8 returns a dereferenced value or the zero value if p is nil. func Uint8(p *uint8) uint8 { var defVal uint8 return Uint8Or(p, defVal) } // Uint16Or returns a dereferenced value or the given default value if p is nil. func Uint16Or(p *uint16, defVal uint16) uint16 { if p == nil { return defVal } return *p } // Uint16 returns a dereferenced value or the zero value if p is nil. func Uint16(p *uint16) uint16 { var defVal uint16 return Uint16Or(p, defVal) } // Uint32Or returns a dereferenced value or the given default value if p is nil. func Uint32Or(p *uint32, defVal uint32) uint32 { if p == nil { return defVal } return *p } // Uint32 returns a dereferenced value or the zero value if p is nil. func Uint32(p *uint32) uint32 { var defVal uint32 return Uint32Or(p, defVal) } // Uint64Or returns a dereferenced value or the given default value if p is nil. func Uint64Or(p *uint64, defVal uint64) uint64 { if p == nil { return defVal } return *p } // Uint64 returns a dereferenced value or the zero value if p is nil. func Uint64(p *uint64) uint64 { var defVal uint64 return Uint64Or(p, defVal) } // UintptrOr returns a dereferenced value or the given default value if p is nil. 
func UintptrOr(p *uintptr, defVal uintptr) uintptr { if p == nil { return defVal } return *p } // Uintptr returns a dereferenced value or the zero value if p is nil. func Uintptr(p *uintptr) uintptr { var defVal uintptr return UintptrOr(p, defVal) } // ByteOr returns a dereferenced value or the given default value if p is nil. func ByteOr(p *byte, defVal byte) byte { if p == nil { return defVal } return *p } // Byte returns a dereferenced value or the zero value if p is nil. func Byte(p *byte) byte { var defVal byte return ByteOr(p, defVal) } // RuneOr returns a dereferenced value or the given default value if p is nil. func RuneOr(p *rune, defVal rune) rune { if p == nil { return defVal } return *p } // Rune returns a dereferenced value or the zero value if p is nil. func Rune(p *rune) rune { var defVal rune return RuneOr(p, defVal) } // Float32Or returns a dereferenced value or the given default value if p is nil. func Float32Or(p *float32, defVal float32) float32 { if p == nil { return defVal } return *p } // Float32 returns a dereferenced value or the zero value if p is nil. func Float32(p *float32) float32 { var defVal float32 return Float32Or(p, defVal) } // Float64Or returns a dereferenced value or the given default value if p is nil. func Float64Or(p *float64, defVal float64) float64 { if p == nil { return defVal } return *p } // Float64 returns a dereferenced value or the zero value if p is nil. func Float64(p *float64) float64 { var defVal float64 return Float64Or(p, defVal) } // Complex64Or returns a dereferenced value or the given default value if p is nil. func Complex64Or(p *complex64, defVal complex64) complex64 { if p == nil { return defVal } return *p } // Complex64 returns a dereferenced value or the zero value if p is nil. func Complex64(p *complex64) complex64 { var defVal complex64 return Complex64Or(p, defVal) } // Complex128Or returns a dereferenced value or the given default value if p is nil. 
func Complex128Or(p *complex128, defVal complex128) complex128 { if p == nil { return defVal } return *p } // Complex128 returns a dereferenced value or the zero value if p is nil. func Complex128(p *complex128) complex128 { var defVal complex128 return Complex128Or(p, defVal) }
ptr/deref/deref.go
0.808672
0.524516
deref.go
starcoder
package colors // package colors contains functions to quickly and easily generate tetra3d.Color instances by name (i.e. "White()", "Blue()", "Green()", etc). import "github.com/solarlune/tetra3d" // White generates a tetra3d.Color instance of the provided name. func White() *tetra3d.Color { return tetra3d.NewColor(1, 1, 1, 1) } // Black generates a tetra3d.Color instance of the provided name. func Black() *tetra3d.Color { return tetra3d.NewColor(0, 0, 0, 1) } // Gray generates a tetra3d.Color instance of the provided name. func Gray() *tetra3d.Color { return tetra3d.NewColor(0.5, 0.5, 0.5, 1) } // LightGray generates a tetra3d.Color instance of the provided name. func LightGray() *tetra3d.Color { return tetra3d.NewColor(0.8, 0.8, 0.8, 1) } // DarkGray generates a tetra3d.Color instance of the provided name. func DarkGray() *tetra3d.Color { return tetra3d.NewColor(0.2, 0.2, 0.2, 1) } // Red generates a tetra3d.Color instance of the provided name. func Red() *tetra3d.Color { return tetra3d.NewColor(1, 0, 0, 1) } // Orange generates a tetra3d.Color instance of the provided name. func Orange() *tetra3d.Color { return tetra3d.NewColor(0.5, 1, 1, 1) } // Yellow generates a tetra3d.Color instance of the provided name. func Yellow() *tetra3d.Color { return tetra3d.NewColor(1, 1, 0, 1) } // Green generates a tetra3d.Color instance of the provided name. func Green() *tetra3d.Color { return tetra3d.NewColor(0, 1, 0, 1) } // SkyBlue generates a tetra3d.Color instance of the provided name. func SkyBlue() *tetra3d.Color { return tetra3d.NewColor(0, 0.5, 1, 1) } // Turquoise generates a tetra3d.Color instance of the provided name. func Turquoise() *tetra3d.Color { return tetra3d.NewColor(0, 1, 1, 1) } // Blue generates a tetra3d.Color instance of the provided name. func Blue() *tetra3d.Color { return tetra3d.NewColor(0, 0, 1, 1) } // Pink generates a tetra3d.Color instance of the provided name. 
func Pink() *tetra3d.Color { return tetra3d.NewColor(1, 0, 1, 1) } // Purple generates a tetra3d.Color instance of the provided name. func Purple() *tetra3d.Color { return tetra3d.NewColor(0.5, 0, 1, 1) }
colors/colors.go
0.868618
0.542379
colors.go
starcoder
package keras import ( "math/rand" ) // Matrix type is a 2D slice of float64 Values. type Matrix struct { Matrix [][]float64 } // ColNum returns the number of columns in a matrix. func ColNum(m Matrix) int { return len(m.Matrix[len(m.Matrix)-1]) } // RowNum returns the number of rows in a matrix. func RowNum(m Matrix) int { return len(m.Matrix) } //Dimensions returns the number of rows and columns of m. func Dimensions(m Matrix) (int, int) { return RowNum(m), ColNum(m) } // NumberOfElements returns the number of elements. func NumberOfElements(m Matrix) int { return RowNum(m) * ColNum(m) } // NewMatrix allocates the appropriate memory for an m x n matrix. func NewMatrix(m, n int) Matrix { ans := Matrix{} ans.Matrix = make([][]float64, n) for each := range ans.Matrix { ans.Matrix[each] = make([]float64, m) } return ans } // RandomMatrix will create a new matrix and randomize float64 values. func RandomMatrix(m, n int) Matrix { ans := NewMatrix(m, n) var i, j int for i = 0; i < len(ans.Matrix); i++ { for j = 0; j < len(ans.Matrix[0]); j++ { ans.Matrix[i][j] = rand.Float64() } } return ans } // Transpose will tranpose a matrix and modify a given matrix. func Transpose(m Matrix) Matrix { ans := NewMatrix(len(m.Matrix), len(m.Matrix[0])) var i, j int for i = 0; i < len(m.Matrix[0]); i++ { for j = 0; j < len(m.Matrix); j++ { ans.Matrix[i][j] = m.Matrix[j][i] } } return ans } // MapFunc applies f to every element func (m Matrix) MapFunc(f func(x float64) float64) Matrix { for i := 0; i < RowNum(m); i++ { for j := 0; j < ColNum(m); j++ { m.Matrix[i][j] = f(m.Matrix[i][j]) } } return m } // ToArray returns the matrix in array form. func ToArray(m Matrix) []float64 { var arr []float64 for i := 0; i < RowNum(m); i++ { for j := 0; j < ColNum(m); j++ { arr = append(arr, m.Matrix[i][j]) } } return arr } // ApplyMatrix returns the vector through a matrix transformation. 
func (v Vector) ApplyMatrix(matrix Matrix) Vector { var product Vector for _, r := range matrix.Matrix { for i := 0; i < len(r); i++ { product.row[i] = r[i] * v.row[i] } } return product } //Add performs elementary matrix addition func (m Matrix) Add(mat Matrix) Matrix { var product Matrix for i := 0; i < RowNum(m); i++ { for j := 0; j < ColNum(m); j++ { product.Matrix[i][j] = m.Matrix[i][j] + mat.Matrix[i][j] } } return product } //Subtract performs elementary matrix subtraction func (m Matrix) Subtract(mat Matrix) Matrix { var product Matrix for i := 0; i < RowNum(m); i++ { for j := 0; j < ColNum(m); j++ { product.Matrix[i][j] = m.Matrix[i][j] - mat.Matrix[i][j] } } return product } //Multiply performs elementary matrix multiplication func (m Matrix) Multiply(mat Matrix) Matrix { var product Matrix for i := 0; i < RowNum(m); i++ { for j := 0; j < ColNum(m); j++ { product.Matrix[i][j] = m.Matrix[i][j] * mat.Matrix[i][j] } } return product } //ScalarMultiplication multiplies every element with a scalar func (m Matrix) ScalarMultiplication(scalar float64) Matrix { for _, r := range m.Matrix { for i := range r { r[i] = r[i] * scalar } } return m } //FromArray returns a matrix from array func FromArray(arr []float64) Matrix { m := Zeros(len(arr), 1) for i := 0; i < len(arr); i++ { m.Matrix[i][0] = arr[0] } return m } //Zeros returns a matrix of zeros. func Zeros(row, column int) Matrix { b := make([][]float64, row) v := make([]float64, column) for i := 0; i < row; i++ { for j := 0; j < column; j++ { v[j] = 0 b[i] = v } } return Matrix{Matrix: b} } //ScalarAdition adds a scalar to every elements func (m Matrix) ScalarAdition(scalar float64) Matrix { for _, r := range m.Matrix { for i := range r { r[i] = r[i] + scalar } } return m } //NewVector returns a vector type /* func DotProduct(one, two Matrix) float64 { var ans float64 var i, j int for i = 0; i < len } */
keras/matrix.go
0.887564
0.813831
matrix.go
starcoder
package jopher import ( "errors" "reflect" ) // reflectFunction converts a supplied interface into a reflect.Value func reflectFunction(function interface{}) reflect.Value { reflected := reflect.ValueOf(function) if reflected.Kind() != reflect.Func { panic(errors.New("please supply a function")) } return reflected } // callReflected calls a reflected function, returning the a slice of results and an error func callReflected(fn reflect.Value, args ...interface{}) (interface{}, error) { // Reflect all the arguments and call the function reflectedArgs := reflectAll(args...) results := fn.Call(reflectedArgs) // Determine if the function returns an error as the last return value hasError := hasLastError(fn.Type()) // Split the results into a slice of interfaces and an error value result, err := splitResults(results, hasError) if err != nil { return nil, err } return result, nil } // reflectAll converts the supplied arguments to reflect values func reflectAll(args ...interface{}) []reflect.Value { reflected := make([]reflect.Value, len(args)) for i := range args { reflected[i] = reflect.ValueOf(args[i]) } return reflected } // unReflectAll converts the supplied reflect values to a slice of interfaces func unReflectAll(results []reflect.Value) []interface{} { outs := make([]interface{}, len(results)) for i := range results { outs[i] = results[i].Interface() } return outs } // splitResults splits a slice of results into an interface and an error. The interface could contain // nil (if no value was returned), a single value (if a single value was returned), or a slice // of interface{}s (if multiple values were returned). 
func splitResults(results []reflect.Value, lastError bool) (interface{}, error) { count := len(results) // Fish out the error at the end var err error if lastError && count > 0 { var errorValue reflect.Value results, errorValue = results[:count-1], results[count-1] if errorValue.IsValid() && !errorValue.IsNil() { err = errorValue.Interface().(error) } } // Clean up the returned result actualResults := unReflectAll(results) switch len(actualResults) { case 0: return nil, err case 1: return actualResults[0], err default: return actualResults, err } } // hasLastError determines if the last return argument of a function is an error func hasLastError(t reflect.Type) bool { count := t.NumOut() if count == 0 { return false } return t.Out(count-1) == reflect.ValueOf((*error)(nil)).Type().Elem() }
reflect.go
0.693473
0.416144
reflect.go
starcoder
// Package day17 solves AoC 2021 day 17. package day17 import ( "fmt" "math" "strconv" "github.com/fis/aoc/glue" "github.com/fis/aoc/util" ) func init() { glue.RegisterSolver(2021, 17, glue.RegexpSolver{ Solver: solve, Regexp: `^target area: x=(-?\d+)\.\.(-?\d+), y=(-?\d+)\.\.(-?\d+)$`, }) } func solve(lines [][]string) ([]string, error) { if len(lines) != 1 { return nil, fmt.Errorf("expected one input line, got %d", len(lines)) } var Tmin, Tmax util.P Tmin.X, _ = strconv.Atoi(lines[0][0]) Tmax.X, _ = strconv.Atoi(lines[0][1]) Tmin.Y, _ = strconv.Atoi(lines[0][2]) Tmax.Y, _ = strconv.Atoi(lines[0][3]) p1, p2 := findShots(Tmin, Tmax) return glue.Ints(p1, p2), nil } func findShots(Pmin, Pmax util.P) (height, count int) { vymin, vymax := Pmin.Y, -Pmin.Y-1 vxmin, vxmax := quadGe0(1, 1, -2*Pmin.X, 0, Pmin.X), Pmax.X for vy0true := vymin; vy0true <= vymax; vy0true++ { vy0, ty0 := vy0true, 0 if vy0 >= 0 { vy0, ty0 = -(vy0 + 1), 2*vy0+1 } tymin := quadGe0(1, -(2*vy0+1), 2*Pmax.Y, 0, Pmax.Y/vy0+1) + ty0 tymax := quadLe0(1, -(2*vy0+1), 2*Pmin.Y, 0, Pmin.Y/vy0+1) + ty0 for vx0 := vxmin; vx0 <= vxmax; vx0++ { txmin := quadGe0(-1, 2*vx0+1, -2*Pmin.X, 0, vx0) txmax := math.MaxInt if vx0*(vx0+1)/2 > Pmax.X { txmax = quadLe0(-1, 2*vx0+1, -2*Pmax.X, 0, vx0) } tmin, tmax := max(max(tymin, txmin), 0), min(tymax, txmax) if tmin <= tmax { if vy0true > 0 { if h := vy0true * (vy0true + 1) / 2; h > height { height = h } } count++ } } } return height, count } func quadGe0(a, b, c, x0, x1 int) (x int) { y0 := a*x0*x0 + b*x0 + c y1 := a*x1*x1 + b*x1 + c if y0 >= 0 || y1 < 0 { err := fmt.Sprintf("quadGe0: bad initial conditions: (%d)x^2 + (%d)x + (%c): %d -> %d, %d -> %d", a, b, c, x0, y0, x1, y1) panic(err) } for x1-x0 > 1 { x = x0 + (x1-x0)/2 y := a*x*x + b*x + c if y < 0 { x0 = x } else { x1 = x } } return x1 } func quadLe0(a, b, c, x0, x1 int) (x int) { y0 := a*x0*x0 + b*x0 + c y1 := a*x1*x1 + b*x1 + c if y0 > 0 || y1 <= 0 { err := fmt.Sprintf("quadLe0: bad initial conditions: (%d)x^2 + 
(%d)x + (%c): %d -> %d, %d -> %d", a, b, c, x0, y0, x1, y1) panic(err) } for x1-x0 > 1 { x = x0 + (x1-x0)/2 y := a*x*x + b*x + c if y <= 0 { x0 = x } else { x1 = x } } return x0 } func max(a, b int) int { if a > b { return a } return b } func min(a, b int) int { if a < b { return a } return b }
2021/day17/day17.go
0.597256
0.401131
day17.go
starcoder
package tree import ( "strings" ) type Tree struct { Label string Children []*Tree } func (t *Tree) Walk(cb func(t *Tree)) { cb(t) for _, c := range t.Children { c.Walk(cb) } } func (t *Tree) WalkPositions(cb func(t *Tree, p []int), p []int) { cb(t, p) for s, c := range t.Children { c.WalkPositions(cb, append(p, s)) } } func (t *Tree) Height() int { max := 0 for _, p := range t.Positions() { if max < len(p)+1 { max = len(p) + 1 } } return max } func (t *Tree) Size() int { return len(t.Positions()) } func (t *Tree) Positions() [][]int { ps := make([][]int, 0) t.WalkPositions(func(t *Tree, p []int) { ps = append(ps, p) }, []int{}) return ps } func (t *Tree) Subtrees() []*Tree { sts := make([]*Tree, 0) t.Walk(func(t *Tree) { sts = append(sts, t) }) return sts } func (t *Tree) SubtreeAtPosition(p []int) *Tree { if len(p) == 0 { return t } return t.Children[p[0]].SubtreeAtPosition(p[1:]) } func (t *Tree) ReplaceAtPosition(p []int, r *Tree) { if len(p) == 1 { t.Children[p[0]] = r } t.ReplaceAtPosition(p[1:], r) } func (t *Tree) LabelAtPosition(p []int) string { if len(p) == 0 { return t.Label } return t.Children[p[0]].LabelAtPosition(p[1:]) } func (t *Tree) Leaves() []*Tree { ls := make([]*Tree, 0) t.Walk(func(t *Tree) { if len(t.Children) == 0 { ls = append(ls, t) } }) return ls } func (t *Tree) LeafPositions() [][]int { lps := make([][]int, 0) t.WalkPositions(func(t *Tree, p []int) { if len(t.Children) == 0 { lps = append(lps, p) } }, []int{}) return lps } func (t *Tree) Edges() map[*Tree][]*Tree { es := make(map[*Tree][]*Tree) t.Walk(func(t *Tree) { if len(t.Children) != 0 { es[t] = t.Children } }) return es } func (t *Tree) EdgePositions() map[string][]string { eps := make(map[string][]string) t.WalkPositions(func(t *Tree, p []int) { if len(t.Children) == 0 { return } children := make([]string, 0) for s := range t.Children { children = append(children, Position(append(p, s)).String()) } eps[Position(p).String()] = children }, []int{}) return eps } func (t *Tree) 
Sentence() string { var sb strings.Builder for _, l := range t.Leaves() { sb.WriteString(" ") sb.WriteString(l.Label) } return sb.String()[1:] } func (t *Tree) String() string { var sb strings.Builder var walk func(t *Tree) walk = func(t *Tree) { sb.WriteString(" ") if len(t.Children) == 0 { sb.WriteString(t.Label) return } sb.WriteString("(") sb.WriteString(t.Label) for _, c := range t.Children { walk(c) } sb.WriteString(")") } walk(t) return sb.String()[1:] }
pkg/tree/tree.go
0.619471
0.403743
tree.go
starcoder
package ms import "github.com/ContextLogic/cldr" var calendar = cldr.Calendar{ Formats: cldr.CalendarFormats{ Date: cldr.CalendarDateFormat{Full: "EEEE, d MMMM y", Long: "d MMMM y", Medium: "d MMM y", Short: "d/MM/yy"}, Time: cldr.CalendarDateFormat{Full: "h:mm:ss a zzzz", Long: "h:mm:ss a z", Medium: "h:mm:ss a", Short: "h:mm a"}, DateTime: cldr.CalendarDateFormat{Full: "{1} {0}", Long: "{1} {0}", Medium: "{1} {0}", Short: "{1} {0}"}, }, FormatNames: cldr.CalendarFormatNames{ Months: cldr.CalendarMonthFormatNames{ Abbreviated: cldr.CalendarMonthFormatNameValue{Jan: "Jan", Feb: "Feb", Mar: "Mac", Apr: "Apr", May: "Mei", Jun: "Jun", Jul: "Jul", Aug: "Ogo", Sep: "Sep", Oct: "Okt", Nov: "Nov", Dec: "Dis"}, Narrow: cldr.CalendarMonthFormatNameValue{Jan: "J", Feb: "F", Mar: "M", Apr: "A", May: "M", Jun: "J", Jul: "J", Aug: "O", Sep: "S", Oct: "O", Nov: "N", Dec: "D"}, Short: cldr.CalendarMonthFormatNameValue{}, Wide: cldr.CalendarMonthFormatNameValue{Jan: "Januari", Feb: "Februari", Mar: "Mac", Apr: "April", May: "Mei", Jun: "Jun", Jul: "Julai", Aug: "Ogos", Sep: "September", Oct: "Oktober", Nov: "November", Dec: "Disember"}, }, Days: cldr.CalendarDayFormatNames{ Abbreviated: cldr.CalendarDayFormatNameValue{Sun: "Ahd", Mon: "Isn", Tue: "Sel", Wed: "Rab", Thu: "Kha", Fri: "Jum", Sat: "Sab"}, Narrow: cldr.CalendarDayFormatNameValue{Sun: "A", Mon: "I", Tue: "S", Wed: "R", Thu: "K", Fri: "J", Sat: "S"}, Short: cldr.CalendarDayFormatNameValue{Sun: "Ah", Mon: "Is", Tue: "Se", Wed: "Ra", Thu: "Kh", Fri: "Ju", Sat: "Sa"}, Wide: cldr.CalendarDayFormatNameValue{Sun: "Ahad", Mon: "Isnin", Tue: "Selasa", Wed: "Rabu", Thu: "Khamis", Fri: "Jumaat", Sat: "Sabtu"}, }, Periods: cldr.CalendarPeriodFormatNames{ Abbreviated: cldr.CalendarPeriodFormatNameValue{AM: "pg", PM: "ptg"}, Narrow: cldr.CalendarPeriodFormatNameValue{AM: "a", PM: "p"}, Short: cldr.CalendarPeriodFormatNameValue{}, Wide: cldr.CalendarPeriodFormatNameValue{AM: "PG", PM: "PTG"}, }, }, }
resources/locales/ms/calendar.go
0.533154
0.427337
calendar.go
starcoder
package influx2otel import ( "fmt" "strconv" "strings" "time" "github.com/influxdata/influxdb-observability/common" "go.opentelemetry.io/collector/model/pdata" ) func (b *MetricsBatch) addPointTelegrafPrometheusV2(measurement string, tags map[string]string, fields map[string]interface{}, ts time.Time, vType common.InfluxMetricValueType) error { if measurement != common.MeasurementPrometheus { return fmt.Errorf("unexpected measurement name '%s'", measurement) } vType = b.inferMetricValueTypeV2(vType, tags, fields) if vType == common.InfluxMetricValueTypeUntyped { return errValueTypeUnknown } if ts.IsZero() { ts = time.Now() } switch vType { case common.InfluxMetricValueTypeGauge: return b.convertGaugeV2(tags, fields, ts) case common.InfluxMetricValueTypeSum: return b.convertSumV2(tags, fields, ts) case common.InfluxMetricValueTypeHistogram: return b.convertHistogramV2(tags, fields, ts) case common.InfluxMetricValueTypeSummary: return b.convertSummaryV2(tags, fields, ts) default: return fmt.Errorf("impossible InfluxMetricValueType %d", vType) } } func (b *MetricsBatch) inferMetricValueTypeV2(vType common.InfluxMetricValueType, tags map[string]string, fields map[string]interface{}) common.InfluxMetricValueType { if vType != common.InfluxMetricValueTypeUntyped { return vType } for k := range tags { if k == common.MetricHistogramBoundKeyV2 || k == common.MetricSummaryQuantileKeyV2 { return common.InfluxMetricValueTypeHistogram } } for k := range fields { if strings.HasSuffix(k, common.MetricHistogramCountSuffix) || strings.HasSuffix(k, common.MetricHistogramSumSuffix) { return common.InfluxMetricValueTypeHistogram } } if len(fields) == 1 { return common.InfluxMetricValueTypeGauge } return common.InfluxMetricValueTypeUntyped } type dataPointKey string func newDataPointKey(unixNanos uint64, attributes pdata.AttributeMap) dataPointKey { attributes.Sort() components := make([]string, 0, attributes.Len()*2+1) components = append(components, strconv.FormatUint(unixNanos, 32)) 
var err error attributes.Range(func(k string, v pdata.AttributeValue) bool { var vv string vv, err = common.AttributeValueToInfluxTagValue(v) if err != nil { return false } components = append(components, k, vv) return true }) return dataPointKey(strings.Join(components, ":")) } func (b *MetricsBatch) convertGaugeV2(tags map[string]string, fields map[string]interface{}, ts time.Time) error { if len(fields) != 1 { return fmt.Errorf("gauge metric should have 1 field, found %d", len(fields)) } var metricName string var floatValue *float64 var intValue *int64 for k, fieldValue := range fields { metricName = k switch typedValue := fieldValue.(type) { case float64: floatValue = &typedValue case int64: intValue = &typedValue case uint64: convertedTypedValue := int64(typedValue) intValue = &convertedTypedValue default: return fmt.Errorf("unsupported gauge value type %T", fieldValue) } } metric, attributes, err := b.lookupMetric(metricName, tags, common.InfluxMetricValueTypeGauge) if err != nil { return err } dataPoint := metric.Gauge().DataPoints().AppendEmpty() attributes.CopyTo(dataPoint.Attributes()) dataPoint.SetTimestamp(pdata.NewTimestampFromTime(ts)) if floatValue != nil { dataPoint.SetDoubleVal(*floatValue) } else if intValue != nil { dataPoint.SetIntVal(*intValue) } else { panic("unreachable") } return nil } func (b *MetricsBatch) convertSumV2(tags map[string]string, fields map[string]interface{}, ts time.Time) error { if len(fields) != 1 { return fmt.Errorf("sum metric should have 1 field, found %d", len(fields)) } var metricName string var floatValue *float64 var intValue *int64 for k, fieldValue := range fields { metricName = k switch typedValue := fieldValue.(type) { case float64: floatValue = &typedValue case int64: intValue = &typedValue case uint64: convertedTypedValue := int64(typedValue) intValue = &convertedTypedValue default: return fmt.Errorf("unsupported gauge value type %T", fieldValue) } } metric, attributes, err := b.lookupMetric(metricName, tags, 
common.InfluxMetricValueTypeSum) if err != nil { return err } dataPoint := metric.Sum().DataPoints().AppendEmpty() attributes.CopyTo(dataPoint.Attributes()) dataPoint.SetTimestamp(pdata.NewTimestampFromTime(ts)) if floatValue != nil { dataPoint.SetDoubleVal(*floatValue) } else if intValue != nil { dataPoint.SetIntVal(*intValue) } else { panic("unreachable") } return nil } func (b *MetricsBatch) convertHistogramV2(tags map[string]string, fields map[string]interface{}, ts time.Time) error { var metricName string if _, found := tags[common.MetricHistogramBoundKeyV2]; found { if len(fields) != 1 { return fmt.Errorf("histogram metric 'le' tagged line should have 1 field, found %d", len(fields)) } for k := range fields { metricName = strings.TrimSuffix(k, common.MetricHistogramBucketSuffix) } } else if _, found = tags[common.MetricSummaryQuantileKeyV2]; found { if len(fields) != 1 { return fmt.Errorf("summary metric (interpreted as histogram) 'quantile' tagged line should have 1 field, found %d", len(fields)) } for k := range fields { metricName = k } } else { if len(fields) != 2 { return fmt.Errorf("histogram metric count+sum fields should have two values, found %d", len(fields)) } for k := range fields { if strings.HasSuffix(k, common.MetricHistogramCountSuffix) { metricName = strings.TrimSuffix(k, common.MetricHistogramCountSuffix) } else if strings.HasSuffix(k, common.MetricHistogramSumSuffix) { metricName = strings.TrimSuffix(k, common.MetricHistogramSumSuffix) } else { return fmt.Errorf("histogram count+sum field lacks _count or _sum suffix, found '%s'", k) } } } metric, attributes, err := b.lookupMetric(metricName, tags, common.InfluxMetricValueTypeHistogram) if err != nil { return err } dpk := newDataPointKey(uint64(ts.UnixNano()), attributes) dataPoint, found := b.histogramDataPointsByMDPK[metric][dpk] if !found { dataPoint = metric.Histogram().DataPoints().AppendEmpty() attributes.CopyTo(dataPoint.Attributes()) 
dataPoint.SetTimestamp(pdata.NewTimestampFromTime(ts)) b.histogramDataPointsByMDPK[metric][dpk] = dataPoint } if sExplicitBound, found := tags[common.MetricHistogramBoundKeyV2]; found { if iBucketCount, found := fields[metric.Name()+common.MetricHistogramBucketSuffix]; found { explicitBound, err := strconv.ParseFloat(sExplicitBound, 64) if err != nil { return fmt.Errorf("invalid value for histogram bucket bound: '%s'", sExplicitBound) } bucketCount, ok := iBucketCount.(float64) if !ok { return fmt.Errorf("invalid value type %T for histogram bucket count: %q", iBucketCount, iBucketCount) } dataPoint.SetExplicitBounds(append(dataPoint.ExplicitBounds(), explicitBound)) dataPoint.SetBucketCounts(append(dataPoint.BucketCounts(), uint64(bucketCount))) } else { return fmt.Errorf("histogram bucket bound has no matching count") } } else if _, found = fields[metric.Name()+common.MetricHistogramBucketSuffix]; found { return fmt.Errorf("histogram bucket count has no matching bound") } if sQuantile, found := tags[common.MetricSummaryQuantileKeyV2]; found { if iValue, found := fields[metric.Name()]; found { quantile, err := strconv.ParseFloat(sQuantile, 64) if err != nil { return fmt.Errorf("invalid value for summary (interpreted as histogram) quantile: '%s'", sQuantile) } value, ok := iValue.(float64) if !ok { return fmt.Errorf("invalid value type %T for summary (interpreted as histogram) quantile value: %q", iValue, iValue) } dataPoint.SetExplicitBounds(append(dataPoint.ExplicitBounds(), quantile)) dataPoint.SetBucketCounts(append(dataPoint.BucketCounts(), uint64(value))) } else { return fmt.Errorf("summary (interpreted as histogram) quantile has no matching value") } } else if _, found = fields[metric.Name()]; found { return fmt.Errorf("summary (interpreted as histogram) quantile value has no matching quantile") } if iCount, found := fields[metric.Name()+common.MetricHistogramCountSuffix]; found { if iSum, found := fields[metric.Name()+common.MetricHistogramSumSuffix]; found 
{ count, ok := iCount.(float64) if !ok { return fmt.Errorf("invalid value type %T for histogram count %q", iCount, iCount) } sum, ok := iSum.(float64) if !ok { return fmt.Errorf("invalid value type %T for histogram sum %q", iSum, iSum) } dataPoint.SetCount(uint64(count)) dataPoint.SetSum(sum) } else { return fmt.Errorf("histogram count has no matching sum") } } else if _, found = fields[metric.Name()+common.MetricHistogramSumSuffix]; found { return fmt.Errorf("histogram sum has no matching count") } return nil } func (b *MetricsBatch) convertSummaryV2(tags map[string]string, fields map[string]interface{}, ts time.Time) error { var metricName string if _, found := tags[common.MetricSummaryQuantileKeyV2]; found { if len(fields) != 1 { return fmt.Errorf("summary metric 'quantile' tagged line should have 1 field, found %d", len(fields)) } for k := range fields { metricName = k } } else { if len(fields) != 2 { return fmt.Errorf("summary metric count+sum fields should have two values, found %d", len(fields)) } for k := range fields { if strings.HasSuffix(k, common.MetricSummaryCountSuffix) { metricName = strings.TrimSuffix(k, common.MetricSummaryCountSuffix) } else if strings.HasSuffix(k, common.MetricSummarySumSuffix) { metricName = strings.TrimSuffix(k, common.MetricSummarySumSuffix) } else { return fmt.Errorf("summary count+sum field lacks _count or _sum suffix, found '%s'", k) } } } metric, attributes, err := b.lookupMetric(metricName, tags, common.InfluxMetricValueTypeSummary) if err != nil { return err } dpk := newDataPointKey(uint64(ts.UnixNano()), attributes) dataPoint, found := b.summaryDataPointsByMDPK[metric][dpk] if !found { dataPoint = metric.Summary().DataPoints().AppendEmpty() attributes.CopyTo(dataPoint.Attributes()) dataPoint.SetTimestamp(pdata.NewTimestampFromTime(ts)) b.summaryDataPointsByMDPK[metric][dpk] = dataPoint } if sQuantile, found := tags[common.MetricSummaryQuantileKeyV2]; found { if iValue, found := fields[metric.Name()]; found { quantile, 
err := strconv.ParseFloat(sQuantile, 64) if err != nil { return fmt.Errorf("invalid value for summary quantile: '%s'", sQuantile) } value, ok := iValue.(float64) if !ok { return fmt.Errorf("invalid value type %T for summary quantile value: %q", iValue, iValue) } valueAtQuantile := dataPoint.QuantileValues().AppendEmpty() valueAtQuantile.SetQuantile(quantile) valueAtQuantile.SetValue(value) } else { return fmt.Errorf("summary quantile has no matching value") } } else if _, found = fields[metric.Name()]; found { return fmt.Errorf("summary quantile value has no matching quantile") } if iCount, found := fields[metric.Name()+common.MetricSummaryCountSuffix]; found { if iSum, found := fields[metric.Name()+common.MetricSummarySumSuffix]; found { count, ok := iCount.(float64) if !ok { return fmt.Errorf("invalid value type %T for summary count %q", iCount, iCount) } sum, ok := iSum.(float64) if !ok { return fmt.Errorf("invalid value type %T for summary sum %q", iSum, iSum) } dataPoint.SetCount(uint64(count)) dataPoint.SetSum(sum) } else { return fmt.Errorf("summary count has no matching sum") } } else if _, found = fields[metric.Name()+common.MetricSummarySumSuffix]; found { return fmt.Errorf("summary sum has no matching count") } return nil }
influx2otel/metrics_telegraf_prometheus_v2.go
0.69368
0.608536
metrics_telegraf_prometheus_v2.go
starcoder
package constraint import ( "github.com/hecate-tech/engine/experimental/physics/equation" "github.com/hecate-tech/engine/math32" ) // Lock constraint. // Removes all degrees of freedom between the bodies. type Lock struct { PointToPoint rotEq1 *equation.Rotational rotEq2 *equation.Rotational rotEq3 *equation.Rotational xA *math32.Vector3 xB *math32.Vector3 yA *math32.Vector3 yB *math32.Vector3 zA *math32.Vector3 zB *math32.Vector3 } // NewLock creates and returns a pointer to a new Lock constraint object. func NewLock(bodyA, bodyB IBody, maxForce float32) *Lock { lc := new(Lock) // Set pivot point in between posA := bodyA.Position() posB := bodyB.Position() halfWay := math32.NewVec3().AddVectors(&posA, &posB) halfWay.MultiplyScalar(0.5) pivotB := bodyB.PointToLocal(halfWay) pivotA := bodyA.PointToLocal(halfWay) // The point-to-point constraint will keep a point shared between the bodies lc.initialize(bodyA, bodyB, &pivotA, &pivotB, maxForce) // Store initial rotation of the bodies as unit vectors in the local body spaces UnitX := math32.NewVector3(1,0,0) localA := bodyA.VectorToLocal(UnitX) localB := bodyB.VectorToLocal(UnitX) lc.xA = &localA lc.xB = &localB lc.yA = &localA lc.yB = &localB lc.zA = &localA lc.zB = &localB // ...and the following rotational equations will keep all rotational DOF's in place lc.rotEq1 = equation.NewRotational(bodyA, bodyB, maxForce) lc.rotEq2 = equation.NewRotational(bodyA, bodyB, maxForce) lc.rotEq3 = equation.NewRotational(bodyA, bodyB, maxForce) lc.AddEquation(lc.rotEq1) lc.AddEquation(lc.rotEq2) lc.AddEquation(lc.rotEq3) return lc } // Update updates the equations with data. 
func (lc *Lock) Update() { lc.PointToPoint.Update() // These vector pairs must be orthogonal xAw := lc.bodyA.VectorToWorld(lc.xA) yBw := lc.bodyA.VectorToWorld(lc.yB) yAw := lc.bodyA.VectorToWorld(lc.yA) zBw := lc.bodyB.VectorToWorld(lc.zB) zAw := lc.bodyA.VectorToWorld(lc.zA) xBw := lc.bodyB.VectorToWorld(lc.xB) lc.rotEq1.SetAxisA(&xAw) lc.rotEq1.SetAxisB(&yBw) lc.rotEq2.SetAxisA(&yAw) lc.rotEq2.SetAxisB(&zBw) lc.rotEq3.SetAxisA(&zAw) lc.rotEq3.SetAxisB(&xBw) }
experimental/physics/constraint/lock.go
0.719975
0.435301
lock.go
starcoder
package main

// SwaggerJSON contains the embedded Swagger 2.0 (OpenAPI) specification for
// the BTrDB v4 JSON/HTTP API as a raw JSON string.
// Fix: removed the stray trailing semicolon after the declaration (not
// gofmt-clean); the JSON payload itself is unchanged.
const SwaggerJSON = `{ "swagger": "2.0", "info": { "title": "BTrDB v4 API", "version": "v4.11.1" }, "schemes": [ "http", "https" ], "consumes": [ "application/json" ], "produces": [ "application/json" ], "paths": { "/v4/alignedwindows": { "post": { "operationId": "AlignedWindows", "responses": { "200": { "description": "(streaming responses)", "schema": { "$ref": "#/definitions/grpcinterfaceAlignedWindowsResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceAlignedWindowsParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/changes": { "post": { "operationId": "Changes", "responses": { "200": { "description": "(streaming responses)", "schema": { "$ref": "#/definitions/grpcinterfaceChangesResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceChangesParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/create": { "post": { "operationId": "Create", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceCreateResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceCreateParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/delete": { "post": { "operationId": "Delete", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceDeleteResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceDeleteParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/faultinject": { "post": { "operationId": "FaultInject", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceFaultInjectResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceFaultInjectParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/flush": { "post": { "operationId": "Flush",
"responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceFlushResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceFlushParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/generatecsv": { "post": { "operationId": "GenerateCSV", "responses": { "200": { "description": "(streaming responses)", "schema": { "$ref": "#/definitions/grpcinterfaceGenerateCSVResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceGenerateCSVParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/getmetadatausage": { "post": { "operationId": "GetMetadataUsage", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceMetadataUsageResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceMetadataUsageParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/info": { "post": { "operationId": "Info", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceInfoResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceInfoParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/insert": { "post": { "operationId": "Insert", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceInsertResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceInsertParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/listcollections": { "post": { "operationId": "ListCollections", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceListCollectionsResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceListCollectionsParams" } } ], "tags": [ "BTrDB" ] } },
"/v4/lookupstreams": { "post": { "operationId": "LookupStreams", "responses": { "200": { "description": "(streaming responses)", "schema": { "$ref": "#/definitions/grpcinterfaceLookupStreamsResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceLookupStreamsParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/nearest": { "post": { "operationId": "Nearest", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceNearestResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceNearestParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/obliterate": { "post": { "operationId": "Obliterate", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceObliterateResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceObliterateParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/rawvalues": { "post": { "operationId": "RawValues", "responses": { "200": { "description": "(streaming responses)", "schema": { "$ref": "#/definitions/grpcinterfaceRawValuesResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceRawValuesParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/setstreamannotations": { "post": { "operationId": "SetStreamAnnotations", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceSetStreamAnnotationsResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceSetStreamAnnotationsParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/streaminfo": { "post": { "operationId": "StreamInfo", "responses": { "200": { "description": "", "schema": { "$ref": "#/definitions/grpcinterfaceStreamInfoResponse" } } }, "parameters": [ { "name": "body", "in":
"body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceStreamInfoParams" } } ], "tags": [ "BTrDB" ] } }, "/v4/windows": { "post": { "operationId": "Windows", "responses": { "200": { "description": "(streaming responses)", "schema": { "$ref": "#/definitions/grpcinterfaceWindowsResponse" } } }, "parameters": [ { "name": "body", "in": "body", "required": true, "schema": { "$ref": "#/definitions/grpcinterfaceWindowsParams" } } ], "tags": [ "BTrDB" ] } } }, "definitions": { "GenerateCSVParamsQueryType": { "type": "string", "enum": [ "ALIGNED_WINDOWS_QUERY", "WINDOWS_QUERY", "RAW_QUERY" ], "default": "ALIGNED_WINDOWS_QUERY" }, "grpcinterfaceAlignedWindowsParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "start": { "type": "string", "format": "int64" }, "end": { "type": "string", "format": "int64" }, "versionMajor": { "type": "string", "format": "uint64" }, "pointWidth": { "type": "integer", "format": "int64" } } }, "grpcinterfaceAlignedWindowsResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "versionMajor": { "type": "string", "format": "uint64" }, "versionMinor": { "type": "string", "format": "uint64" }, "values": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceStatPoint" } } } }, "grpcinterfaceChangedRange": { "type": "object", "properties": { "start": { "type": "string", "format": "int64" }, "end": { "type": "string", "format": "int64" } } }, "grpcinterfaceChangesParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "fromMajor": { "type": "string", "format": "uint64" }, "toMajor": { "type": "string", "format": "uint64" }, "resolution": { "type": "integer", "format": "int64" } } }, "grpcinterfaceChangesResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "versionMajor": { "type": "string", "format": "uint64" }, "versionMinor": { "type": "string",
"format": "uint64" }, "ranges": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceChangedRange" } } } }, "grpcinterfaceCreateParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "collection": { "type": "string" }, "tags": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceKeyValue" } }, "annotations": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceKeyValue" } } } }, "grpcinterfaceCreateResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" } } }, "grpcinterfaceDeleteParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "start": { "type": "string", "format": "int64" }, "end": { "type": "string", "format": "int64" } } }, "grpcinterfaceDeleteResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "versionMajor": { "type": "string", "format": "uint64" }, "versionMinor": { "type": "string", "format": "uint64" } } }, "grpcinterfaceFaultInjectParams": { "type": "object", "properties": { "type": { "type": "string", "format": "uint64" }, "params": { "type": "string", "format": "byte" } } }, "grpcinterfaceFaultInjectResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "rv": { "type": "string", "format": "byte" } } }, "grpcinterfaceFlushParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" } } }, "grpcinterfaceFlushResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "versionMajor": { "type": "string", "format": "uint64" }, "versionMinor": { "type": "string", "format": "uint64" } } }, "grpcinterfaceGenerateCSVParams": { "type": "object", "properties": { "queryType": { "$ref": "#/definitions/GenerateCSVParamsQueryType" }, "startTime": { "type": "string", "format": "int64" }, "endTime": { "type": "string", "format": "int64"
}, "windowSize": { "type": "string", "format": "uint64" }, "depth": { "type": "integer", "format": "int64" }, "includeVersions": { "type": "boolean", "format": "boolean" }, "streams": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceStreamCSVConfig" } } } }, "grpcinterfaceGenerateCSVResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "isHeader": { "type": "boolean", "format": "boolean" }, "row": { "type": "array", "items": { "type": "string" } } } }, "grpcinterfaceInfoParams": { "type": "object" }, "grpcinterfaceInfoResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "mash": { "$ref": "#/definitions/grpcinterfaceMash" }, "majorVersion": { "type": "integer", "format": "int64" }, "minorVersion": { "type": "integer", "format": "int64" }, "build": { "type": "string" }, "proxy": { "$ref": "#/definitions/grpcinterfaceProxyInfo" } } }, "grpcinterfaceInsertParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "sync": { "type": "boolean", "format": "boolean" }, "values": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceRawPoint" } } } }, "grpcinterfaceInsertResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "versionMajor": { "type": "string", "format": "uint64" }, "versionMinor": { "type": "string", "format": "uint64" } } }, "grpcinterfaceKeyCount": { "type": "object", "properties": { "key": { "type": "string" }, "count": { "type": "string", "format": "uint64" } } }, "grpcinterfaceKeyOptValue": { "type": "object", "properties": { "key": { "type": "string" }, "val": { "$ref": "#/definitions/grpcinterfaceOptValue" } } }, "grpcinterfaceKeyValue": { "type": "object", "properties": { "key": { "type": "string" }, "value": { "type": "string", "format": "byte" } } }, "grpcinterfaceListCollectionsParams": { "type": "object", "properties": { "prefix": {
"type": "string" }, "startWith": { "type": "string" }, "limit": { "type": "string", "format": "uint64" } } }, "grpcinterfaceListCollectionsResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "collections": { "type": "array", "items": { "type": "string" } } } }, "grpcinterfaceLookupStreamsParams": { "type": "object", "properties": { "collection": { "type": "string" }, "isCollectionPrefix": { "type": "boolean", "format": "boolean" }, "tags": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceKeyOptValue" } }, "annotations": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceKeyOptValue" } } } }, "grpcinterfaceLookupStreamsResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "results": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceStreamDescriptor" } } } }, "grpcinterfaceMash": { "type": "object", "properties": { "revision": { "type": "string", "format": "int64" }, "leader": { "type": "string" }, "leaderRevision": { "type": "string", "format": "int64" }, "totalWeight": { "type": "string", "format": "int64" }, "healthy": { "type": "boolean", "format": "boolean" }, "unmapped": { "type": "number", "format": "double" }, "members": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceMember" } } } }, "grpcinterfaceMember": { "type": "object", "properties": { "hash": { "type": "integer", "format": "int64" }, "nodename": { "type": "string" }, "up": { "type": "boolean", "format": "boolean" }, "in": { "type": "boolean", "format": "boolean" }, "enabled": { "type": "boolean", "format": "boolean" }, "start": { "type": "string", "format": "int64" }, "end": { "type": "string", "format": "int64" }, "weight": { "type": "string", "format": "int64" }, "readPreference": { "type": "number", "format": "double" }, "httpEndpoints": { "type": "string" }, "grpcEndpoints": { "type": "string" } } },
"grpcinterfaceMetadataUsageParams": { "type": "object", "properties": { "prefix": { "type": "string" } } }, "grpcinterfaceMetadataUsageResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "tags": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceKeyCount" } }, "annotations": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceKeyCount" } } } }, "grpcinterfaceNearestParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "time": { "type": "string", "format": "int64" }, "versionMajor": { "type": "string", "format": "uint64" }, "backward": { "type": "boolean", "format": "boolean" } } }, "grpcinterfaceNearestResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "versionMajor": { "type": "string", "format": "uint64" }, "versionMinor": { "type": "string", "format": "uint64" }, "value": { "$ref": "#/definitions/grpcinterfaceRawPoint" } } }, "grpcinterfaceObliterateParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" } } }, "grpcinterfaceObliterateResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" } } }, "grpcinterfaceOptValue": { "type": "object", "properties": { "value": { "type": "string", "format": "byte" } } }, "grpcinterfaceProxyInfo": { "type": "object", "properties": { "proxyEndpoints": { "type": "array", "items": { "type": "string" } } } }, "grpcinterfaceRawPoint": { "type": "object", "properties": { "time": { "type": "string", "format": "int64" }, "value": { "type": "number", "format": "double" } } }, "grpcinterfaceRawValuesParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "start": { "type": "string", "format": "int64" }, "end": { "type": "string", "format": "int64" }, "versionMajor": { "type": "string", "format": "uint64" } } }, "grpcinterfaceRawValuesResponse": {
"type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "versionMajor": { "type": "string", "format": "uint64" }, "versionMinor": { "type": "string", "format": "uint64" }, "values": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceRawPoint" } } } }, "grpcinterfaceSetStreamAnnotationsParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "expectedAnnotationVersion": { "type": "string", "format": "uint64" }, "annotations": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceKeyOptValue" } } } }, "grpcinterfaceSetStreamAnnotationsResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" } } }, "grpcinterfaceStatPoint": { "type": "object", "properties": { "time": { "type": "string", "format": "int64" }, "min": { "type": "number", "format": "double" }, "mean": { "type": "number", "format": "double" }, "max": { "type": "number", "format": "double" }, "count": { "type": "string", "format": "uint64" } } }, "grpcinterfaceStatus": { "type": "object", "properties": { "code": { "type": "integer", "format": "int64" }, "msg": { "type": "string" }, "mash": { "$ref": "#/definitions/grpcinterfaceMash" } } }, "grpcinterfaceStreamCSVConfig": { "type": "object", "properties": { "version": { "type": "string", "format": "uint64" }, "label": { "type": "string" }, "uuid": { "type": "string", "format": "byte" } } }, "grpcinterfaceStreamDescriptor": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "collection": { "type": "string" }, "tags": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceKeyValue" } }, "annotations": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceKeyValue" } }, "annotationVersion": { "type": "string", "format": "uint64" } } }, "grpcinterfaceStreamInfoParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "omitVersion": {
"type": "boolean", "format": "boolean" }, "omitDescriptor": { "type": "boolean", "format": "boolean" } } }, "grpcinterfaceStreamInfoResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "versionMajor": { "type": "string", "format": "uint64" }, "versionMinor": { "type": "string", "format": "uint64" }, "descriptor": { "$ref": "#/definitions/grpcinterfaceStreamDescriptor" } } }, "grpcinterfaceWindowsParams": { "type": "object", "properties": { "uuid": { "type": "string", "format": "byte" }, "start": { "type": "string", "format": "int64" }, "end": { "type": "string", "format": "int64" }, "versionMajor": { "type": "string", "format": "uint64" }, "width": { "type": "string", "format": "uint64" }, "depth": { "type": "integer", "format": "int64" } } }, "grpcinterfaceWindowsResponse": { "type": "object", "properties": { "stat": { "$ref": "#/definitions/grpcinterfaceStatus" }, "versionMajor": { "type": "string", "format": "uint64" }, "versionMinor": { "type": "string", "format": "uint64" }, "values": { "type": "array", "items": { "$ref": "#/definitions/grpcinterfaceStatPoint" } } } } } } `
tools/apifrontend/swagger.json.go
0.588298
0.433142
swagger.json.go
starcoder
package iso20022

// Security that is a sub-set of an investment fund, and is governed by the same investment fund policy, eg, dividend option or valuation currency.
type FinancialInstrument22 struct {

	// Features of units offered by a fund. For example, a unit may have a specific load structure, eg, front end or back end, an income policy, eg, pay out or accumulate, or a trailer policy, eg, with or without. Fund classes are typically denoted by a single character, eg, 'Class A', 'Class 2'.
	ClassType *Max35Text `xml:"ClssTp,omitempty"`

	// Specifies the form, that is, ownership, of the security.
	SecuritiesForm *FormOfSecurity1Code `xml:"SctiesForm,omitempty"`

	// Income policy relating to a class type, that is, if income is paid out or retained in the fund.
	DistributionPolicy *DistributionPolicy1Code `xml:"DstrbtnPlcy,omitempty"`

	// Company specific description of a group of funds.
	ProductGroup *RestrictedFINXMax140Text `xml:"PdctGrp,omitempty"`

	// Name of the umbrella fund in which financial instrument is contained.
	UmbrellaName *Max35Text `xml:"UmbrllNm,omitempty"`

	// Currency of the investment fund class.
	BaseCurrency *ActiveCurrencyCode `xml:"BaseCcy,omitempty"`

	// Currency in which a security is issued or redenominated.
	DenominationCurrency *ActiveCurrencyCode `xml:"DnmtnCcy,omitempty"`

	// Currency to be used for pricing the fund. This currency must be among the set of currencies in which the price may be expressed, as stated in the prospectus.
	RequestedNAVCurrency *ActiveOrHistoricCurrencyCode `xml:"ReqdNAVCcy,omitempty"`

	// Indicates whether the fund has two prices.
	DualFundIndicator *YesNoIndicator `xml:"DualFndInd,omitempty"`

	// Country where the fund has legal domicile as reflected in the ISIN classification.
	CountryOfDomicile *CountryCode `xml:"CtryOfDmcl,omitempty"`

	// Countries where the fund is registered for distribution.
	RegisteredDistributionCountry []*CountryCode `xml:"RegdDstrbtnCtry,omitempty"`
}

// SetClassType converts value to Max35Text and stores it as the ClassType.
func (f *FinancialInstrument22) SetClassType(value string) {
	f.ClassType = (*Max35Text)(&value)
}

// SetSecuritiesForm converts value to FormOfSecurity1Code and stores it as the SecuritiesForm.
func (f *FinancialInstrument22) SetSecuritiesForm(value string) {
	f.SecuritiesForm = (*FormOfSecurity1Code)(&value)
}

// SetDistributionPolicy converts value to DistributionPolicy1Code and stores it as the DistributionPolicy.
func (f *FinancialInstrument22) SetDistributionPolicy(value string) {
	f.DistributionPolicy = (*DistributionPolicy1Code)(&value)
}

// SetProductGroup converts value to RestrictedFINXMax140Text and stores it as the ProductGroup.
func (f *FinancialInstrument22) SetProductGroup(value string) {
	f.ProductGroup = (*RestrictedFINXMax140Text)(&value)
}

// SetUmbrellaName converts value to Max35Text and stores it as the UmbrellaName.
func (f *FinancialInstrument22) SetUmbrellaName(value string) {
	f.UmbrellaName = (*Max35Text)(&value)
}

// SetBaseCurrency converts value to ActiveCurrencyCode and stores it as the BaseCurrency.
func (f *FinancialInstrument22) SetBaseCurrency(value string) {
	f.BaseCurrency = (*ActiveCurrencyCode)(&value)
}

// SetDenominationCurrency converts value to ActiveCurrencyCode and stores it as the DenominationCurrency.
func (f *FinancialInstrument22) SetDenominationCurrency(value string) {
	f.DenominationCurrency = (*ActiveCurrencyCode)(&value)
}

// SetRequestedNAVCurrency converts value to ActiveOrHistoricCurrencyCode and stores it as the RequestedNAVCurrency.
func (f *FinancialInstrument22) SetRequestedNAVCurrency(value string) {
	f.RequestedNAVCurrency = (*ActiveOrHistoricCurrencyCode)(&value)
}

// SetDualFundIndicator converts value to YesNoIndicator and stores it as the DualFundIndicator.
func (f *FinancialInstrument22) SetDualFundIndicator(value string) {
	f.DualFundIndicator = (*YesNoIndicator)(&value)
}

// SetCountryOfDomicile converts value to CountryCode and stores it as the CountryOfDomicile.
func (f *FinancialInstrument22) SetCountryOfDomicile(value string) {
	f.CountryOfDomicile = (*CountryCode)(&value)
}

// AddRegisteredDistributionCountry converts value to CountryCode and appends it to the RegisteredDistributionCountry list.
func (f *FinancialInstrument22) AddRegisteredDistributionCountry(value string) {
	f.RegisteredDistributionCountry = append(f.RegisteredDistributionCountry, (*CountryCode)(&value))
}
FinancialInstrument22.go
0.843992
0.427875
FinancialInstrument22.go
starcoder
package core

import (
	"encoding/binary"
	"fmt"
)

// All the Read* functions below will panic if something goes wrong.

// ReadAt reads len(b) bytes at address a in the inferior
// and stores them in b.
// A request may span several mappings; each iteration copies as many bytes
// as the current mapping holds and advances to the next one.
func (p *Process) ReadAt(b []byte, a Address) {
	for {
		m := p.findMapping(a)
		if m == nil {
			panic(fmt.Errorf("address %x is not mapped in the core file", a))
		}
		// Copy whatever this mapping can provide, starting at a's offset.
		n := copy(b, m.contents[a.Sub(m.min):])
		if n == len(b) {
			return
		}
		// Modify request to get data from the next mapping.
		b = b[n:]
		a = a.Add(int64(n))
	}
}

// ReadUint8 returns a uint8 read from address a of the inferior.
func (p *Process) ReadUint8(a Address) uint8 {
	m := p.findMapping(a)
	if m == nil {
		panic(fmt.Errorf("address %x is not mapped in the core file", a))
	}
	return m.contents[a.Sub(m.min)]
}

// ReadUint16 returns a uint16 read from address a of the inferior.
// Fast path: decode straight out of the mapping's contents. Slow path
// (value straddles the end of the mapping): assemble the bytes via ReadAt.
func (p *Process) ReadUint16(a Address) uint16 {
	m := p.findMapping(a)
	if m == nil {
		panic(fmt.Errorf("address %x is not mapped in the core file", a))
	}
	b := m.contents[a.Sub(m.min):]
	if len(b) < 2 {
		var buf [2]byte
		b = buf[:]
		p.ReadAt(b, a)
	}
	if p.littleEndian {
		return binary.LittleEndian.Uint16(b)
	}
	return binary.BigEndian.Uint16(b)
}

// ReadUint32 returns a uint32 read from address a of the inferior.
// Same fast/slow path structure as ReadUint16.
func (p *Process) ReadUint32(a Address) uint32 {
	m := p.findMapping(a)
	if m == nil {
		panic(fmt.Errorf("address %x is not mapped in the core file", a))
	}
	b := m.contents[a.Sub(m.min):]
	if len(b) < 4 {
		var buf [4]byte
		b = buf[:]
		p.ReadAt(b, a)
	}
	if p.littleEndian {
		return binary.LittleEndian.Uint32(b)
	}
	return binary.BigEndian.Uint32(b)
}

// ReadUint64 returns a uint64 read from address a of the inferior.
func (p *Process) ReadUint64(a Address) uint64 { m := p.findMapping(a) if m == nil { panic(fmt.Errorf("address %x is not mapped in the core file", a)) } b := m.contents[a.Sub(m.min):] if len(b) < 8 { var buf [8]byte b = buf[:] p.ReadAt(b, a) } if p.littleEndian { return binary.LittleEndian.Uint64(b) } return binary.BigEndian.Uint64(b) } // ReadInt8 returns an int8 read from address a of the inferior. func (p *Process) ReadInt8(a Address) int8 { return int8(p.ReadUint8(a)) } // ReadInt16 returns an int16 read from address a of the inferior. func (p *Process) ReadInt16(a Address) int16 { return int16(p.ReadUint16(a)) } // ReadInt32 returns an int32 read from address a of the inferior. func (p *Process) ReadInt32(a Address) int32 { return int32(p.ReadUint32(a)) } // ReadInt64 returns an int64 read from address a of the inferior. func (p *Process) ReadInt64(a Address) int64 { return int64(p.ReadUint64(a)) } // ReadUintptr returns a uint of pointer size read from address a of the inferior. func (p *Process) ReadUintptr(a Address) uint64 { if p.ptrSize == 4 { return uint64(p.ReadUint32(a)) } return p.ReadUint64(a) } // ReadInt returns an int (of pointer size) read from address a of the inferior. func (p *Process) ReadInt(a Address) int64 { if p.ptrSize == 4 { return int64(p.ReadInt32(a)) } return p.ReadInt64(a) } // ReadPtr returns a pointer loaded from address a of the inferior. func (p *Process) ReadPtr(a Address) Address { return Address(p.ReadUintptr(a)) } // ReadCString reads a null-terminated string starting at address a. func (p *Process) ReadCString(a Address) string { for n := int64(0); ; n++ { if p.ReadUint8(a.Add(n)) == 0 { b := make([]byte, n) p.ReadAt(b, a) return string(b) } } }
internal/core/read.go
0.640861
0.515925
read.go
starcoder
package aoc

import (
	"bufio"
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"math"
	"os"
)

// Unit offsets for the four cardinal directions on a grid whose Y axis
// grows downward (screen coordinates).
var (
	DirectionUp    Pos = Pos{0, -1}
	DirectionRight Pos = Pos{1, 0}
	DirectionDown  Pos = Pos{0, 1}
	DirectionLeft  Pos = Pos{-1, 0}
)

// Pos is an integer 2D coordinate.
type Pos struct {
	X int
	Y int
}

// NewPos returns the position (x, y).
func NewPos(x, y int) Pos {
	return Pos{x, y}
}

// IsZero reports whether the position is the origin (0, 0).
func (p Pos) IsZero() bool {
	var origin Pos
	return p == origin
}

// Add returns the vector sum p + q.
func (p Pos) Add(q Pos) Pos {
	return Pos{q.X + p.X, q.Y + p.Y}
}

// Subtract returns the vector difference p - q.
func (p Pos) Subtract(q Pos) Pos {
	return Pos{p.X - q.X, p.Y - q.Y}
}

// URDL returns the positions one step up, right, down and left of the position.
func (p Pos) URDL() [4]Pos {
	var steps [4]Pos
	for i, dir := range [4]Pos{DirectionUp, DirectionRight, DirectionDown, DirectionLeft} {
		steps[i] = p.Add(dir)
	}
	return steps
}

// Degrees returns the angle of these coordinates from the positive x axis,
// normalized to the range [0, 360).
func (p Pos) Degrees() float64 {
	angle := math.Atan2(float64(p.Y), float64(p.X))
	if angle < 0 {
		angle += 2 * math.Pi // normalize to range [0, 2π)
	}
	return (angle * 360) / (2 * math.Pi)
}

// Distance returns the line-of-sight distance between this point and (0, 0).
func (p Pos) Distance() float64 { x, y := float64(p.X), float64(p.Y) return math.Sqrt(x*x + y*y) } func (p Pos) String() string { return fmt.Sprintf("(%d, %d)", p.X, p.Y) } type Grid struct { Data []byte Width int Height int } func NewGrid(width, height int) *Grid { return &Grid{ Data: make([]byte, width*height), Width: width, Height: height, } } func ReadGrid(r io.Reader) (*Grid, error) { grid := &Grid{} buf := bytes.NewBuffer(nil) scanner := bufio.NewScanner(r) for scanner.Scan() { b := scanner.Bytes() if grid.Width == 0 { grid.Width = len(b) } else { if grid.Width != len(b) { return nil, fmt.Errorf("bad line width") } } grid.Height++ buf.Write(scanner.Bytes()) } grid.Data = buf.Bytes() return grid, nil } func OpenGrid(name string) (*Grid, error) { f, err := os.Open(name) if err != nil { return nil, err } defer f.Close() return ReadGrid(f) } func (c *Grid) Copy() *Grid { b := make([]byte, len(c.Data)) g := &Grid{ Data: b, Width: c.Width, Height: c.Height, } copy(g.Data, c.Data) return g } func (c *Grid) Index(pos Pos) int { if !c.Contains(pos) { return -1 } return (pos.Y * c.Width) + pos.X } func (c *Grid) Pos(i int) Pos { return Pos{i % c.Width, i / c.Width} } func (c *Grid) Contains(pos Pos) bool { return pos.X >= 0 && pos.X < c.Width && pos.Y >= 0 && pos.Y < c.Height } func (c *Grid) Get(pos Pos) byte { i := c.Index(pos) if i < 0 { panic(fmt.Sprintf("out of bounds: %v", pos)) } return c.Data[i] } func (c *Grid) GetWithDefault(pos Pos, def byte) byte { i := c.Index(pos) if i < 0 { return def } return c.Data[i] } func (c *Grid) Set(pos Pos, b byte) { i := c.Index(pos) if i < 0 { println(fmt.Sprintf("out of bounds: %v", pos)) } // fmt.Printf("set %v = %c (was %c)\n", pos, b, c.Data[i]) c.Data[i] = b } func (c *Grid) Count(b byte) (n int) { for i := 0; i < len(c.Data); i++ { if c.Data[i] == b { n++ } } return } func (c *Grid) FindOne(b byte) int { for i, a := range c.Data { if a == b { return i } } return -1 } func (c *Grid) FindAll(b byte) []int { v := 
make([]int, 0) for i, a := range c.Data { if a == b { v = append(v, i) } } return v } func (c *Grid) Line(y int) []byte { i := y * c.Width return c.Data[i : i+c.Width] } func (c *Grid) Print(w io.Writer) { newline := []byte{'\n'} for y := 0; y < c.Height; y++ { i := y * c.Width w.Write(c.Data[i : i+c.Width]) w.Write(newline) } } func (c *Grid) SHA256() string { sum := sha256.Sum256(c.Data) return fmt.Sprintf("%x", sum) }
go/2019/grid.go
0.710226
0.480662
grid.go
starcoder
package gp import ( "github.com/xlvector/hector/core" "math" "strconv" ) type GaussianProcessParameters struct { Dim int64 Theta float64 } type GaussianProcess struct { Params GaussianProcessParameters CovarianceFunc CovFunc CovMatrix *core.Matrix TargetValues *core.Vector InvCovTarget *core.Vector // inv(CovMatrix)*TargetValues DataSet *core.RealDataSet TrainingDataCount int64 } func (self *GaussianProcess) SaveModel(path string) { } func (self *GaussianProcess) LoadModel(path string) { } /* Given matrix m and vector v, compute inv(m)*v. Based on Gibbs and MacKay 1997, and <NAME>'s PhD dissertation Details: A - positive seminidefinite matrix u - a vector theta - positive number C = A + I*theta Returns inv(C)*u - So you need the diagonal noise term for covariance matrix in a sense. However, this algorithm is numerically stable, the noise term can be very small and the inversion can still be calculated... */ func (algo *GaussianProcess) ApproximateInversion(A *core.Matrix, u *core.Vector, theta float64, dim int64) *core.Vector { max_itr := 500 tol := 0.01 C := core.NewMatrix() for key, val := range A.Data { C.Data[key] = val.Copy() } // Add theta to diagonal elements for i := int64(0); i < dim; i++ { _, ok := C.Data[i] if !ok { C.Data[i] = core.NewVector() } C.Data[i].Data[i] = C.Data[i].Data[i] + theta } var Q_l float64 var Q_u float64 var dQ float64 u_norm := u.Dot(u) / 2 // Lower bound y_l := core.NewVector() g_l := u.Copy() h_l := u.Copy() lambda_l := float64(0) gamma_l := float64(0) var tmp_f1 float64 var tmp_f2 float64 var tmp_v1 *core.Vector tmp_f1 = g_l.Dot(g_l) tmp_v1 = C.MultiplyVector(h_l) // Upper bound y_u := core.NewVector() g_u := u.Copy() h_u := u.Copy() lambda_u := float64(0) gamma_u := float64(0) var tmp_f3 float64 var tmp_f4 float64 var tmp_v3 *core.Vector var tmp_v4 *core.Vector tmp_v3 = g_u.MultiplyMatrix(A) tmp_v4 = C.MultiplyVector(h_u) tmp_f3 = tmp_v1.Dot(g_u) for i := 0; i < max_itr; i++ { // Lower bound lambda_l = tmp_f1 / h_l.Dot(tmp_v1) 
y_l.AddVector(h_l, lambda_l) //y_l next Q_l = y_l.Dot(u) - 0.5*(y_l.MultiplyMatrix(C)).Dot(y_l) // Upper bound lambda_u = tmp_f3 / tmp_v3.Dot(tmp_v4) y_u.AddVector(h_u, lambda_u) //y_u next Q_u = (y_u.MultiplyMatrix(A)).Dot(u) - 0.5*((y_u.MultiplyMatrix(C)).MultiplyMatrix(A)).Dot(y_u) dQ = (u_norm-Q_u)/theta - Q_l if dQ < tol { break } // Lower bound var updates g_l.AddVector(tmp_v1, -lambda_l) //g_l next tmp_f2 = g_l.Dot(g_l) gamma_l = tmp_f2 / tmp_f1 for key, val := range h_l.Data { h_l.SetValue(key, val*gamma_l) } h_l.AddVector(g_l, 1) //h_l next tmp_f1 = tmp_f2 //tmp_f1 next tmp_v1 = C.MultiplyVector(h_l) //tmp_v1 next // Upper bound var updates g_u.AddVector(tmp_v4, -lambda_u) //g_u next tmp_v3 = g_u.MultiplyMatrix(A) //tmp_v3 next tmp_f4 = tmp_v3.Dot(g_u) gamma_u = tmp_f4 / tmp_f3 for key, val := range h_u.Data { h_u.SetValue(key, val*gamma_u) } h_u.AddVector(g_u, 1) //h_u next tmp_v4 = C.MultiplyVector(h_u) //tmp_v4 next tmp_f3 = tmp_f4 // tmp_f3 next } return y_l } func (algo *GaussianProcess) ExtractTargetValuesAsVector(samples []*core.RealSample) *core.Vector { targets := core.NewVector() for i := 0; i < len(samples); i++ { targets.SetValue(int64(i), samples[i].Value) } return targets } func (algo *GaussianProcess) Init(params map[string]string) { dim, _ := strconv.ParseInt(params["dim"], 10, 64) algo.Params = GaussianProcessParameters{} algo.Params.Dim = dim // Pass in dim as a param.. and require feature space to be continous. 
algo.Params.Theta = 1e-7 // Used by approximate inversion as the diagonal noise radius := 0.1 camp := 40.0 cf := CovSEARD{} radiuses := core.NewVector() for i := int64(1); i <= dim; i++ { radiuses.SetValue(i, radius) } cf.Init(radiuses, camp) algo.CovarianceFunc = cf.Cov } func (algo *GaussianProcess) Train(dataset *core.RealDataSet) { algo.DataSet = dataset algo.TrainingDataCount = int64(len(dataset.Samples)) algo.CovMatrix = CovMatrix(algo.DataSet.Samples, algo.CovarianceFunc) algo.TargetValues = algo.ExtractTargetValuesAsVector(algo.DataSet.Samples) algo.InvCovTarget = algo.ApproximateInversion(algo.CovMatrix, algo.TargetValues, algo.Params.Theta, algo.TrainingDataCount) } func (algo *GaussianProcess) Predict(sample *core.RealSample) float64 { k := CovVector(algo.DataSet.Samples, sample, algo.CovarianceFunc) pred := k.Dot(algo.InvCovTarget) return pred } func (algo *GaussianProcess) PredictStd(sample *core.RealSample) float64 { k := CovVector(algo.DataSet.Samples, sample, algo.CovarianceFunc) C_inv_k := algo.ApproximateInversion(algo.CovMatrix, k, algo.Params.Theta, algo.TrainingDataCount) std := math.Sqrt(algo.CovarianceFunc(sample.GetFeatureVector(), sample.GetFeatureVector()) - k.Dot(C_inv_k)) return std }
vendor/github.com/xlvector/hector/gp/gaussian_process.go
0.723114
0.441191
gaussian_process.go
starcoder
package cmb import ( "bytes" "fmt" "math" "regexp" ) // Parselet is a single combinable recognizer of grammatical structure. type Parselet func(s []byte, pos int, parser *Parser) (*ParseTreeNode, error) // Rule matches a named production rule. func Rule(name string) Parselet { return func(s []byte, pos int, parser *Parser) (*ParseTreeNode, error) { p, exists := parser.rules[name] if !exists { return nil, fmt.Errorf(`pos %d: rule "%s" not found`, pos, name) } memo, ok := parser.table[name][pos] if ok { return memo.val, memo.err } r, err := p(s, pos, parser) if err != nil { parser.table[name][pos] = &parseletResult{nil, err} return nil, err } val := &ParseTreeNode{name, r.Text, r.Start, r.End, s, r.Children} parser.table[name][pos] = &parseletResult{val, nil} return val, nil } } // Literal will match a substring in the parsed string. func Literal(strToFind string) Parselet { return func(s []byte, pos int, parser *Parser) (*ParseTreeNode, error) { cursor := ignore(s, pos, parser) strlen := len(strToFind) if pos+strlen > len(s) { return nil, fmt.Errorf("pos %d: Unexpected EOF", pos) } if !bytes.HasPrefix(s[cursor:], []byte(strToFind)) { return nil, fmt.Errorf("pos %d: Expected %s got %s", pos, strToFind, s[pos:pos+strlen]) } return &ParseTreeNode{"literal", s[cursor : cursor+strlen], pos, cursor + strlen, s, []*ParseTreeNode{}}, nil } } // Pattern will match a regex in the parsed string. func Pattern(regex *regexp.Regexp) Parselet { return func(s []byte, pos int, parser *Parser) (*ParseTreeNode, error) { cursor := ignore(s, pos, parser) bounds := regex.FindIndex(s[cursor:]) if bounds == nil || bounds[0] != 0 { maxBounds := int(math.Min(float64(pos+10), float64(len(s)))) return nil, fmt.Errorf("pos %d: expected number, got %s", pos, s[pos:maxBounds]) } start, end := bounds[0]+cursor, bounds[1]+cursor return &ParseTreeNode{"pattern", s[start:end], pos, end, s, []*ParseTreeNode{}}, nil } } // Sequence matches multiple rules one after another. 
func Sequence(items ...Parselet) Parselet { return func(s []byte, pos int, parser *Parser) (*ParseTreeNode, error) { cursor := pos children := []*ParseTreeNode{} for _, item := range items { r, err := item(s, cursor, parser) if err != nil { return nil, err } children = append(children, r) cursor = r.End } return &ParseTreeNode{"sequence", s[pos:cursor], pos, cursor, s, children}, nil } } // Choice matches several rules in the same place and returns whichever matches first. func Choice(items ...Parselet) Parselet { return func(s []byte, pos int, parser *Parser) (*ParseTreeNode, error) { for _, item := range items { r, err := item(s, pos, parser) if err == nil { return &ParseTreeNode{"choice", r.Text, r.Start, r.End, s, []*ParseTreeNode{r}}, nil } } return nil, fmt.Errorf("pos %d: none of the available options were valid", pos) } } // Optional matches a rule or the absence of the rule. func Optional(item Parselet) Parselet { return func(s []byte, pos int, parser *Parser) (*ParseTreeNode, error) { r, err := item(s, pos, parser) if err != nil { return &ParseTreeNode{"optional", []byte{}, pos, pos, s, []*ParseTreeNode{}}, nil } return &ParseTreeNode{"optional", r.Text, r.Start, r.End, s, []*ParseTreeNode{r}}, nil } } // ZeroOrMore matches a rule multiple times. func ZeroOrMore(item Parselet) Parselet { return func(s []byte, pos int, parser *Parser) (*ParseTreeNode, error) { children := []*ParseTreeNode{} cursor := pos for { r, err := item(s, cursor, parser) if err != nil { break } children = append(children, r) cursor = r.End } return &ParseTreeNode{"zeroOrMore", s[pos:cursor], pos, cursor, s, children}, nil } }
parselets.go
0.618665
0.478773
parselets.go
starcoder
package types import ( "bytes" "fmt" "io" "math" "strconv" "reflect" "github.com/lyraproj/issue/issue" "github.com/lyraproj/pcore/px" ) type ( IntegerType struct { min int64 max int64 } // integerValue represents int64 as a pcore.Value integerValue int64 ) var IntegerTypePositive = &IntegerType{0, math.MaxInt64} var IntegerTypeZero = &IntegerType{0, 0} var IntegerTypeOne = &IntegerType{1, 1} var ZERO = integerValue(0) var integerTypeDefault = &IntegerType{math.MinInt64, math.MaxInt64} var integerType8 = &IntegerType{math.MinInt8, math.MaxInt8} var integerType16 = &IntegerType{math.MinInt16, math.MaxInt16} var integerType32 = &IntegerType{math.MinInt32, math.MaxInt32} var integerTypeU8 = &IntegerType{0, math.MaxUint8} var integerTypeU16 = &IntegerType{0, math.MaxUint16} var integerTypeU32 = &IntegerType{0, math.MaxUint32} var integerTypeU64 = IntegerTypePositive // MaxUInt64 isn't supported at this time var IntegerMetaType px.ObjectType func init() { IntegerMetaType = newObjectType(`Pcore::IntegerType`, `Pcore::NumericType { attributes => { from => { type => Optional[Integer], value => undef }, to => { type => Optional[Integer], value => undef } } }`, func(ctx px.Context, args []px.Value) px.Value { return newIntegerType2(args...) 
}) newGoConstructor2(`Integer`, func(t px.LocalTypes) { t.Type(`Radix`, `Variant[Default, Integer[2,2], Integer[8,8], Integer[10,10], Integer[16,16]]`) t.Type(`Convertible`, `Variant[Numeric, Boolean, Pattern[/`+IntegerPattern+`/], Timespan, Timestamp]`) t.Type(`NamedArgs`, `Struct[{from => Convertible, Optional[radix] => Radix, Optional[abs] => Boolean}]`) }, func(d px.Dispatch) { d.Param(`Convertible`) d.OptionalParam(`Radix`) d.OptionalParam(`Boolean`) d.Function(func(c px.Context, args []px.Value) px.Value { r := 10 abs := false if len(args) > 1 { if radix, ok := args[1].(integerValue); ok { r = int(radix) } if len(args) > 2 { abs = args[2].(booleanValue).Bool() } } n := intFromConvertible(args[0], r) if abs && n < 0 { n = -n } return integerValue(n) }) }, func(d px.Dispatch) { d.Param(`NamedArgs`) d.Function(func(c px.Context, args []px.Value) px.Value { h := args[0].(*Hash) r := 10 abs := false if rx, ok := h.Get4(`radix`); ok { if radix, ok := rx.(integerValue); ok { r = int(radix) } } if ab, ok := h.Get4(`abs`); ok { abs = ab.(booleanValue).Bool() } n := intFromConvertible(h.Get5(`from`, undef), r) if abs && n < 0 { n = -n } return integerValue(n) }) }, ) } func intFromConvertible(from px.Value, radix int) int64 { switch from := from.(type) { case integerValue: return from.Int() case floatValue: return from.Int() case *Timestamp: return from.Int() case Timespan: return from.Int() case booleanValue: return from.Int() default: i, err := strconv.ParseInt(from.String(), radix, 64) if err == nil { return i } panic(px.Error(px.NotInteger, issue.H{`value`: from})) } } func DefaultIntegerType() *IntegerType { return integerTypeDefault } func PositiveIntegerType() *IntegerType { return IntegerTypePositive } func NewIntegerType(min int64, max int64) *IntegerType { if min == math.MinInt64 { if max == math.MaxInt64 { return DefaultIntegerType() } } else if min == 0 { if max == math.MaxInt64 { return PositiveIntegerType() } else if max == 0 { return IntegerTypeZero } } 
else if min == 1 && max == 1 { return IntegerTypeOne } if min > max { panic(illegalArguments(`Integer[]`, `min is not allowed to be greater than max`)) } return &IntegerType{min, max} } func newIntegerType2(limits ...px.Value) *IntegerType { argc := len(limits) if argc == 0 { return integerTypeDefault } min, ok := toInt(limits[0]) if !ok { if _, ok = limits[0].(*DefaultValue); !ok { panic(illegalArgumentType(`Integer[]`, 0, `Integer`, limits[0])) } min = math.MinInt64 } var max int64 switch len(limits) { case 1: max = math.MaxInt64 case 2: max, ok = toInt(limits[1]) if !ok { if _, ok = limits[1].(*DefaultValue); !ok { panic(illegalArgumentType(`Integer[]`, 1, `Integer`, limits[1])) } max = math.MaxInt64 } default: panic(illegalArgumentCount(`Integer[]`, `0 - 2`, len(limits))) } return NewIntegerType(min, max) } func (t *IntegerType) Default() px.Type { return integerTypeDefault } func (t *IntegerType) Accept(v px.Visitor, g px.Guard) { v(t) } func (t *IntegerType) Equals(o interface{}, g px.Guard) bool { if ot, ok := o.(*IntegerType); ok { return t.min == ot.min && t.max == ot.max } return false } func (t *IntegerType) Generic() px.Type { return integerTypeDefault } func (t *IntegerType) Get(key string) (px.Value, bool) { switch key { case `from`: v := px.Undef if t.min != math.MinInt64 { v = integerValue(t.min) } return v, true case `to`: v := px.Undef if t.max != math.MaxInt64 { v = integerValue(t.max) } return v, true default: return nil, false } } func (t *IntegerType) IsAssignable(o px.Type, g px.Guard) bool { if it, ok := o.(*IntegerType); ok { return t.min <= it.min && t.max >= it.max } return false } func (t *IntegerType) IsInstance(o px.Value, g px.Guard) bool { if n, ok := toInt(o); ok { return t.IsInstance2(n) } return false } func (t *IntegerType) IsInstance2(n int64) bool { return t.min <= n && n <= t.max } func (t *IntegerType) IsInstance3(n int) bool { return t.IsInstance2(int64(n)) } func (t *IntegerType) IsUnbounded() bool { return t.min == 
math.MinInt64 && t.max == math.MaxInt64 } func (t *IntegerType) Min() int64 { return t.min } func (t *IntegerType) Max() int64 { return t.max } func (t *IntegerType) MetaType() px.ObjectType { return IntegerMetaType } func (t *IntegerType) Name() string { return `Integer` } func (t *IntegerType) Parameters() []px.Value { if t.min == math.MinInt64 { if t.max == math.MaxInt64 { return px.EmptyValues } return []px.Value{WrapDefault(), integerValue(t.max)} } if t.max == math.MaxInt64 { return []px.Value{integerValue(t.min)} } return []px.Value{integerValue(t.min), integerValue(t.max)} } func (t *IntegerType) ReflectType(c px.Context) (reflect.Type, bool) { return reflect.TypeOf(int64(0)), true } func (t *IntegerType) SizeParameters() []px.Value { params := make([]px.Value, 2) params[0] = integerValue(t.min) if t.max == math.MaxInt64 { params[1] = WrapDefault() } else { params[1] = integerValue(t.max) } return params } func (t *IntegerType) CanSerializeAsString() bool { return true } func (t *IntegerType) SerializationString() string { return t.String() } func (t *IntegerType) String() string { return px.ToString2(t, None) } func (t *IntegerType) ToString(b io.Writer, s px.FormatContext, g px.RDetect) { TypeToString(t, b, s, g) } func (t *IntegerType) PType() px.Type { return &TypeType{t} } func WrapInteger(val int64) px.Integer { return integerValue(val) } func (iv integerValue) Abs() int64 { if iv < 0 { return -int64(iv) } return int64(iv) } func (iv integerValue) Equals(o interface{}, g px.Guard) bool { if ov, ok := o.(integerValue); ok { return iv == ov } return false } func (iv integerValue) Float() float64 { return float64(iv) } func (iv integerValue) Int() int64 { return int64(iv) } func (iv integerValue) Reflect(c px.Context) reflect.Value { return reflect.ValueOf(int64(iv)) } func (iv integerValue) ReflectTo(c px.Context, value reflect.Value) { if !value.CanSet() { panic(px.Error(px.AttemptToSetUnsettable, issue.H{`kind`: reflect.Int.String()})) } ok := true 
switch value.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: value.SetInt(int64(iv)) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: value.SetUint(uint64(iv)) case reflect.Interface: value.Set(reflect.ValueOf(int64(iv))) case reflect.Ptr: switch value.Type().Elem().Kind() { case reflect.Int64: v := int64(iv) value.Set(reflect.ValueOf(&v)) case reflect.Int: v := int(iv) value.Set(reflect.ValueOf(&v)) case reflect.Int8: v := int8(iv) value.Set(reflect.ValueOf(&v)) case reflect.Int16: v := int16(iv) value.Set(reflect.ValueOf(&v)) case reflect.Int32: v := int32(iv) value.Set(reflect.ValueOf(&v)) case reflect.Uint: v := uint(iv) value.Set(reflect.ValueOf(&v)) case reflect.Uint8: v := uint8(iv) value.Set(reflect.ValueOf(&v)) case reflect.Uint16: v := uint16(iv) value.Set(reflect.ValueOf(&v)) case reflect.Uint32: v := uint32(iv) value.Set(reflect.ValueOf(&v)) case reflect.Uint64: v := uint64(iv) value.Set(reflect.ValueOf(&v)) default: ok = false } default: ok = false } if !ok { panic(px.Error(px.AttemptToSetWrongKind, issue.H{`expected`: reflect.Int.String(), `actual`: value.Kind().String()})) } } func (iv integerValue) String() string { return fmt.Sprintf(`%d`, int64(iv)) } func (iv integerValue) ToKey(b *bytes.Buffer) { n := int64(iv) b.WriteByte(1) b.WriteByte(HkInteger) b.WriteByte(byte(n >> 56)) b.WriteByte(byte(n >> 48)) b.WriteByte(byte(n >> 40)) b.WriteByte(byte(n >> 32)) b.WriteByte(byte(n >> 24)) b.WriteByte(byte(n >> 16)) b.WriteByte(byte(n >> 8)) b.WriteByte(byte(n)) } func (iv integerValue) ToString(b io.Writer, s px.FormatContext, g px.RDetect) { f := px.GetFormat(s.FormatMap(), iv.PType()) var err error switch f.FormatChar() { case 'x', 'X', 'o', 'd': _, err = fmt.Fprintf(b, f.OrigFormat(), int64(iv)) case 'p', 'b', 'B': longVal := int64(iv) intString := strconv.FormatInt(longVal, integerRadix(f.FormatChar())) totWidth := 0 if f.Width() > 0 { totWidth = f.Width() } numWidth := 0 if 
f.Precision() > 0 { numWidth = f.Precision() } if numWidth > 0 && numWidth < len(intString) && f.FormatChar() == 'p' { intString = intString[:numWidth] } zeroPad := numWidth - len(intString) pfx := `` if f.IsAlt() && longVal != 0 && !(f.FormatChar() == 'o' && zeroPad > 0) { pfx = integerPrefixRadix(f.FormatChar()) } computedFieldWidth := len(pfx) + intMax(numWidth, len(intString)) for spacePad := totWidth - computedFieldWidth; spacePad > 0; spacePad-- { _, err = b.Write([]byte{' '}) if err != nil { break } } if err != nil { break } _, err = io.WriteString(b, pfx) if err != nil { break } if zeroPad > 0 { padChar := []byte{'0'} if f.FormatChar() == 'p' { padChar = []byte{' '} } for ; zeroPad > 0; zeroPad-- { _, err = b.Write(padChar) if err != nil { break } } } if err == nil { _, err = io.WriteString(b, intString) } case 'e', 'E', 'f', 'g', 'G', 'a', 'A': floatValue(iv.Float()).ToString(b, px.NewFormatContext(DefaultFloatType(), f, s.Indentation()), g) case 'c': bld := bytes.NewBufferString(``) bld.WriteRune(rune(int64(iv))) f.ApplyStringFlags(b, bld.String(), f.IsAlt()) case 's': f.ApplyStringFlags(b, strconv.Itoa(int(int64(iv))), f.IsAlt()) default: //noinspection SpellCheckingInspection panic(s.UnsupportedFormat(iv.PType(), `dxXobBeEfgGaAspc`, f)) } if err != nil { panic(err) } } func intMax(a int, b int) int { if a > b { return a } return b } func integerRadix(c byte) int { switch c { case 'b', 'B': return 2 case 'o': return 8 case 'x', 'X': return 16 default: return 10 } } func integerPrefixRadix(c byte) string { switch c { case 'x': return `0x` case 'X': return `0X` case 'o': return `0` case 'b': return `0b` case 'B': return `0B` default: return `` } } func (iv integerValue) PType() px.Type { v := int64(iv) return &IntegerType{v, v} }
types/integertype.go
0.682362
0.411525
integertype.go
starcoder
package utils import ( "errors" "regexp" "strconv" ) // ExtractUserIDFromMention takes in a mention (<@ID>) and spits out only the ID. func ExtractUserIDFromMention(mention string) string { if len(mention) >= 3 && mention[:2] == "<@" && mention[len(mention)-1:] == ">" { return mention[2:][:len(mention)-3] // -3 because we remove 2 chars. } return mention } // GetStringFromQuotes finds a "string which spans multiple spaces" in a split // message. It then takes that and replaces the Quote string with a single // string value of the quote contents. func GetStringFromQuotes(parts []string) []string { var ( // str is the string we're searching for in quotes. str string // startQuote holds the location of the quote startQuote int ) // the length of the original parts length := len(parts) startQuote = -1 val := "" for k := 0; k < length; k++ { val = parts[k] if len(val) == 0 { continue } switch { // If a startQuote hasn't been found, and the first byte is a quote, // then set the startQuote, and start the string. case val[0] == '"' && startQuote == -1: if val[len(val)-1] == '"' { parts[k] = val[:len(val)-1][1:] } else { startQuote = k str = val[1:] + " " } // If a startQuote has been found, and the last byte is a quote, then // remove the old parts and append the new ones. case val[len(val)-1] == '"' && startQuote >= 0: // Take the parts before startQuote, append the quote string (str) // to it, and take the parts after the current index and append // them to the new parts. parts = append(append(parts[:startQuote], str+val[:len(val)-1]), parts[k+1:]...) // Reset k to be at the index just after the current combined // string - so we don't check the combined string. newLen := len(parts) k = k - (length - newLen) + 1 length = newLen startQuote = -1 default: // If a start quote has been found, just add current value // to the current quote string. 
if startQuote >= 0 { str = str + val + " " } } } return parts } var hexColorRegex = regexp.MustCompile("^([A-Fa-f0-9]{6}|[A-Fa-f0-9]{3})$") // This holds the errors that HexColorToInt will throw. var ( ErrColorInvalid = errors.New("hex code is invalid") ) // HexColorToInt converts a hex color code to an int, which is usable in the // Discord API. The hexCode is without the "#" prefix. func HexColorToInt(hexCode string) (int, error) { if !hexColorRegex.MatchString(hexCode) { return 0, ErrColorInvalid } color, err := strconv.ParseInt(hexCode, 16, 32) if err != nil { return 0, err } return int(color), nil }
utils/utils.go
0.578924
0.404066
utils.go
starcoder
package tpdu // DeliverReport represents a SMS-Deliver-Report PDU as defined in 3GPP TS 23.038 Section 9.2.2.1a. type DeliverReport struct { TPDU FCS byte PI byte } // NewDeliverReport creates a DeliverReport TPDU and initialises non-zero fields. func NewDeliverReport() *DeliverReport { return &DeliverReport{TPDU: TPDU{FirstOctet: byte(MtDeliver)}} } // SetDCS sets the DeliverReport dcs field and the corresponding bit of the pi. func (d *DeliverReport) SetDCS(dcs byte) { d.PI = d.PI | 0x02 d.TPDU.DCS = dcs } // SetPID sets the DeliverReport pid field and the corresponding bit of the pi. func (d *DeliverReport) SetPID(pid byte) { d.PI = d.PI | 0x01 d.TPDU.PID = pid } // SetUD sets the DeliverReport ud field and the corresponding bit of the pi. func (d *DeliverReport) SetUD(ud UserData) { d.PI = d.PI | 0x04 d.TPDU.UD = ud } // SetUDH sets the User Data Header of the DeliverReport and the corresponding bit of the pi. func (d *DeliverReport) SetUDH(udh UserDataHeader) { d.PI = d.PI | 0x04 d.TPDU.SetUDH(udh) } // MarshalBinary marshals an SMS-Deliver-Report TPDU. func (d *DeliverReport) MarshalBinary() ([]byte, error) { b := []byte{d.FirstOctet, d.FCS, d.PI} if d.PI&0x01 == 0x01 { b = append(b, d.PID) } if d.PI&0x02 == 0x02 { b = append(b, d.DCS) } if d.PI&0x4 == 0x4 { ud, err := d.encodeUserData() if err != nil { return nil, EncodeError("ud", err) } b = append(b, ud...) } return b, nil } // UnmarshalBinary unmarshals an SMS-Deliver-Report TPDU. 
func (d *DeliverReport) UnmarshalBinary(src []byte) error { if len(src) < 1 { return DecodeError("firstOctet", 0, ErrUnderflow) } d.FirstOctet = src[0] ri := 1 if len(src) <= ri { return DecodeError("fcs", ri, ErrUnderflow) } d.FCS = src[ri] ri++ if len(src) <= ri { return DecodeError("pi", ri, ErrUnderflow) } d.PI = src[ri] ri++ if d.PI&0x01 == 0x01 { if len(src) <= ri { return DecodeError("pid", ri, ErrUnderflow) } d.PID = src[ri] ri++ } if d.PI&0x02 == 0x02 { if len(src) <= ri { return DecodeError("dcs", ri, ErrUnderflow) } d.DCS = src[ri] ri++ } if d.PI&0x04 == 0x04 { err := d.decodeUserData(src[ri:]) if err != nil { return DecodeError("ud", ri, err) } } return nil } func decodeDeliverReport(src []byte) (interface{}, error) { d := NewDeliverReport() if err := d.UnmarshalBinary(src); err != nil { return nil, err } return d, nil } // RegisterDeliverReportDecoder registers a decoder for the DeliverReport TPDU. func RegisterDeliverReportDecoder(d *Decoder) error { return d.RegisterDecoder(MtDeliver, MO, decodeDeliverReport) }
encoding/tpdu/deliverreport.go
0.701509
0.68762
deliverreport.go
starcoder
package openpose import ( "math" tf "github.com/tensorflow/tensorflow/tensorflow/go" "github.com/tensorflow/tensorflow/tensorflow/go/op" ) // nonMaxSuppression uppress performs non-max suppression on a sorted list of detections. func nonMaxSuppression(imap [][]float32, scale float64, threshold float32) ([][]float32, error) { l := len(imap) bbox, scores := generateBbox(imap, scale, threshold) tbbox, err := tf.NewTensor(bbox) if err != nil { return nil, err } tscore, err := tf.NewTensor(scores) if err != nil { return nil, err } s := op.NewScope() pbbox := op.Placeholder(s.SubScope("bbox"), tf.Float, op.PlaceholderShape(tf.MakeShape(-1, 4))) pscore := op.Placeholder(s.SubScope("score"), tf.Float, op.PlaceholderShape(tf.MakeShape(-1))) out := op.NonMaxSuppression(s, pbbox, pscore, op.Const(s.SubScope("max_len"), int32(l)), op.NonMaxSuppressionIouThreshold(threshold)) outs, err := runScope(s, map[tf.Output]*tf.Tensor{pbbox: tbbox, pscore: tscore}, []tf.Output{out}) if err != nil { return nil, err } ret := make([][]float32, l) for i, v := range imap { ret[i] = make([]float32, len(v)) } pick := outs[0] if pick != nil { if idx, ok := pick.Value().([]int32); ok { rowLen := len(imap[0]) for _, i := range idx { x := int(i) y := x / rowLen if x >= rowLen { x %= rowLen } // log.Printf("i:%d, x:%d, y:%d, score:%f, imap:%f\n", i, x, y, scores[i], imap[y][x]) ret[y][x] = scores[i] } } } return ret, nil } func resizeArea(imgs []*tf.Tensor, upsampleSize []int32) ([]*tf.Tensor, error) { ret := make([]*tf.Tensor, 0, len(imgs)) for _, img := range imgs { s := op.NewScope() input := op.Placeholder(s.SubScope("image"), tf.Float, op.PlaceholderShape(tf.MakeShape(img.Shape()...))) upsample, err := tf.NewTensor(upsampleSize) if err != nil { return nil, err } size := op.Placeholder(s.SubScope("upsample"), tf.Int32, op.PlaceholderShape(tf.MakeShape(2))) out := op.ResizeArea(s, input, size) outs, err := runScope(s, map[tf.Output]*tf.Tensor{input: img, size: upsample}, []tf.Output{out}) if err 
!= nil { return nil, err } ret = append(ret, outs[0]) } return ret, nil } func runScope(s *op.Scope, inputs map[tf.Output]*tf.Tensor, outputs []tf.Output) ([]*tf.Tensor, error) { graph, err := s.Finalize() if err != nil { return nil, err } session, err := tf.NewSession(graph, nil) if err != nil { return nil, err } defer session.Close() return session.Run(inputs, outputs, nil) } func generateBbox(imap [][]float32, scale float64, threshold float32) ([][]float32, []float32) { const ( Stride = 2.0 CellSize = 12.0 ) var ( l = len(imap) bbox = make([][]float32, 0, l*len(imap[0])) scores = make([]float32, 0, l*len(imap[0])) ) for i, x := range imap { for j, y := range x { n := []float32{float32(math.Floor((Stride*float64(j)+1.0)/scale + 0.5)), float32(math.Floor((Stride*float64(i)+1.0)/scale + 0.5)), float32(math.Floor((Stride*float64(j)+1.0+CellSize)/scale + 0.5)), float32(math.Floor((Stride*float64(i)+1.0+CellSize)/scale + 0.5)), } bbox = append(bbox, n) scores = append(scores, y) /* if y > threshold { scores = append(scores, y) } else { scores = append(scores, 0) } */ } } return bbox, scores }
nms.go
0.64969
0.459986
nms.go
starcoder
package blockchain

import (
	"bytes"
	"encoding/binary"
	"io"
	"math"

	"github.com/ubclaunchpad/cumulus/common/util"
	"gopkg.in/fatih/set.v0"
)

// TxHashPointer is a reference to a transaction on the blockchain.
type TxHashPointer struct {
	BlockNumber uint32
	Hash        Hash
	Index       uint32
}

// Marshal converts a TxHashPointer to a byte slice.
func (thp TxHashPointer) Marshal() []byte {
	var buf []byte
	buf = util.AppendUint32(buf, thp.BlockNumber)
	buf = append(buf, thp.Hash.Marshal()...)
	buf = util.AppendUint32(buf, thp.Index)
	return buf
}

// TxOutput defines an output to a transaction.
type TxOutput struct {
	Amount    uint64
	Recipient string
}

// Marshal converts a TxOutput to a byte slice: the little-endian amount
// followed by the raw recipient string.
func (to TxOutput) Marshal() []byte {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, to.Amount)
	buf = append(buf, []byte(to.Recipient)...)
	return buf
}

// TxBody contains all relevant information about a transaction.
type TxBody struct {
	Sender  Address
	Inputs  []TxHashPointer
	Outputs []TxOutput
}

// Len returns the length in bytes of a marshalled transaction body.
func (tb TxBody) Len() int {
	return len(tb.Marshal())
}

// Marshal converts a TxBody to a byte slice: the sender address, then every
// input, then every output, in order.
func (tb TxBody) Marshal() []byte {
	var buf []byte
	buf = append(buf, tb.Sender.Marshal()...)
	for _, in := range tb.Inputs {
		buf = append(buf, in.Marshal()...)
	}
	for _, out := range tb.Outputs {
		buf = append(buf, out.Marshal()...)
	}
	return buf
}

// Sign returns a signed Transaction from a TxBody, signing the body's hash
// with the given wallet and randomness source r.
func (tb TxBody) Sign(w Wallet, r io.Reader) (*Transaction, error) {
	digest := HashSum(tb)
	sig, err := w.Sign(digest, r)
	if err != nil {
		// Do not hand back a partially-built transaction on failure.
		return nil, err
	}
	return &Transaction{tb, sig}, nil
}

// Transaction contains a TxBody and a signature verifying it.
type Transaction struct {
	TxBody
	Sig Signature
}

// Len returns the length in bytes of a marshalled transaction.
func (t *Transaction) Len() int {
	return len(t.Marshal())
}

// Equal returns true if the given transactions marshal to identical byte
// slices (i.e. the transactions are the same).
func (t *Transaction) Equal(txnToCompare *Transaction) bool {
	// bytes.Equal is clearer and cheaper than bytes.Compare(...) == 0.
	return bytes.Equal(t.Marshal(), txnToCompare.Marshal())
}

// Marshal converts a Transaction to a byte slice: the marshalled body
// followed by the marshalled signature.
func (t *Transaction) Marshal() []byte {
	var buf []byte
	buf = append(buf, t.TxBody.Marshal()...)
	buf = append(buf, t.Sig.Marshal()...)
	return buf
}

// InputsEqualOutputs returns true if the total amount output by the given
// input transactions equals the total amount output by t.
func (t *Transaction) InputsEqualOutputs(other ...*Transaction) bool {
	var inAmount uint64
	for _, otherTransaction := range other {
		for _, output := range otherTransaction.Outputs {
			inAmount += output.Amount
		}
	}
	var outAmount uint64
	for _, output := range t.Outputs {
		outAmount += output.Amount
	}
	// Compare the uint64 totals directly. The previous int conversion
	// could truncate or wrap for amounts above math.MaxInt64 (or above
	// MaxInt32 on 32-bit platforms), yielding a false equality.
	return outAmount == inAmount
}

// GetTotalOutput sums the output amounts from the transaction.
func (t *Transaction) GetTotalOutput() uint64 {
	result := uint64(0)
	for _, out := range t.Outputs {
		result += out.Amount
	}
	return result
}

// GetTotalOutputFor sums the outputs addressed to a specific recipient.
// recipient is an address checksum hex string.
func (t *Transaction) GetTotalOutputFor(recipient string) uint64 {
	result := uint64(0)
	for _, out := range t.Outputs {
		if out.Recipient == recipient {
			result += out.Amount
		}
	}
	return result
}

// GetTotalInput sums the input amounts from the transaction.
// Requires the blockchain for lookups.
func (t *Transaction) GetTotalInput(bc *BlockChain) (uint64, error) {
	result := uint64(0)
	// Filter all input transactions by this sender's address and sum the
	// outputs directed back to it.
	inputs, err := bc.GetAllInputs(t)
	if err != nil {
		return 0, err
	}
	for _, in := range inputs {
		result += in.GetTotalOutputFor(t.Sender.Repr())
	}
	return result, nil
}

// GetBlockRange returns the lowest and highest block numbers referenced by
// the inputs to a transaction.
// NOTE(review): with zero inputs this returns (math.MaxUint32, 0) — callers
// must not treat that as a valid range.
func (bc *BlockChain) GetBlockRange(t *Transaction) (uint32, uint32) {
	min := uint32(math.MaxUint32)
	max := uint32(0)
	for _, in := range t.Inputs {
		if in.BlockNumber < min {
			min = in.BlockNumber
		}
		if in.BlockNumber > max {
			max = in.BlockNumber
		}
	}
	return min, max
}

// InputsIntersect returns true if the inputs of t intersect with those of other.
func (t *Transaction) InputsIntersect(other *Transaction) bool {
	return !t.InputIntersection(other).IsEmpty()
}

// InputIntersection returns the intersection of the inputs of t and other.
func (t *Transaction) InputIntersection(other *Transaction) set.Interface {
	return set.Intersection(t.InputSet(), other.InputSet())
}

// InputSet returns the transaction inputs as a set object.
func (t *Transaction) InputSet() *set.Set {
	a := make([]interface{}, len(t.Inputs))
	for i, v := range t.Inputs {
		a[i] = v
	}
	return set.New(a...)
}

// InputsSpentElsewhere returns true if inputs purported to be only spent
// on transaction t have been spent elsewhere after block index start.
func (t *Transaction) InputsSpentElsewhere(bc *BlockChain, start uint32) bool {
	// Get the set of inputs for t.
	inSet := t.InputSet()
	// Look at each transaction in the chain from start on.
	for _, b := range bc.Blocks[start:] {
		for _, txn := range b.Transactions {
			// A respend is a transaction from the same sender that
			// shares at least one input with t.
			if !set.Intersection(inSet, txn.InputSet()).IsEmpty() {
				if txn.Sender.Repr() == t.Sender.Repr() {
					return true
				}
			}
		}
	}
	// If we made it through all the transactions without finding inputs
	// respent anywhere, then we're good.
	return false
}
blockchain/transaction.go
0.804444
0.407628
transaction.go
starcoder
package stat

import (
	"math"
	"sort"
)

// IntSlice wraps a slice of ints and provides basic descriptive statistics.
type IntSlice struct {
	Data []int
}

// NewIntSlice wraps the given data in an IntSlice.
func NewIntSlice(data []int) *IntSlice {
	return &IntSlice{Data: data}
}

// Sort sorts the underlying data in ascending order.
func (s *IntSlice) Sort() {
	sort.Ints(s.Data)
}

// Percentile returns the p-th percentile (0 <= p <= 1) of the data, linearly
// interpolating between the two nearest ranks. The data must already be
// sorted (see Sort) for the result to be meaningful. Returns 0 when empty.
func (s *IntSlice) Percentile(p float64) float64 {
	if len(s.Data) == 0 {
		return 0
	}
	rank := float64(len(s.Data)-1) * p
	lo, hi := math.Floor(rank), math.Ceil(rank)
	if lo == hi {
		// The rank is integral; no interpolation needed.
		return float64(s.Data[int(rank)])
	}
	// Weight each neighbour by its distance from the fractional rank.
	below := float64(s.Data[int(lo)]) * (hi - rank)
	above := float64(s.Data[int(hi)]) * (rank - lo)
	return below + above
}

// Sum returns the total of all values.
func (s *IntSlice) Sum() int {
	total := 0
	for _, v := range s.Data {
		total += v
	}
	return total
}

// Mean returns the arithmetic mean, or 0 when the data is empty.
func (s *IntSlice) Mean() float64 {
	if len(s.Data) == 0 {
		return 0
	}
	return float64(s.Sum()) / float64(len(s.Data))
}

// Median returns the 50th percentile.
func (s *IntSlice) Median() float64 {
	return s.Percentile(0.5)
}

// IntMap is a histogram: each key is an observed value and each entry's
// value is that key's occurrence count. Sorted keys/values are cached.
type IntMap struct {
	Data map[int]int
	keys []int
	vals []int
}

// NewIntMap wraps the given histogram in an IntMap.
func NewIntMap(data map[int]int) *IntMap {
	return &IntMap{Data: data}
}

// Keys returns the map's keys in ascending order, caching the result.
func (m *IntMap) Keys() []int {
	if m.keys == nil {
		keys := []int{}
		for k := range m.Data {
			keys = append(keys, k)
		}
		sort.Ints(keys)
		m.keys = keys
	}
	return m.keys
}

// Vals returns the counts in the order of Keys(), caching the result.
func (m *IntMap) Vals() []int {
	if m.vals == nil {
		vals := make([]int, len(m.Data))
		for i, k := range m.Keys() {
			vals[i] = m.Data[k]
		}
		m.vals = vals
	}
	return m.vals
}

// Items returns the total number of observations (the sum of all counts).
func (m *IntMap) Items() int {
	total := 0
	for _, count := range m.Data {
		total += count
	}
	return total
}

// percentKey returns the key holding the i-th (0-based) observation when all
// observations are laid out in ascending key order.
func (m *IntMap) percentKey(i int) int {
	seen := -1
	for _, k := range m.Keys() {
		seen += m.Data[k]
		if seen >= i {
			return k
		}
	}
	return 0
}

// Percentile returns the p-th percentile (0 <= p <= 1) of the histogram,
// linearly interpolating between the two nearest ranks. Returns 0 when empty.
func (m *IntMap) Percentile(p float64) float64 {
	if len(m.Data) == 0 {
		return 0
	}
	rank := float64(m.Items()-1) * p
	lo, hi := math.Floor(rank), math.Ceil(rank)
	if lo == hi {
		return float64(m.percentKey(int(rank)))
	}
	below := float64(m.percentKey(int(lo))) * (hi - rank)
	above := float64(m.percentKey(int(hi))) * (rank - lo)
	return below + above
}

// Sum returns the total of all observations (each key times its count).
func (m *IntMap) Sum() int {
	total := 0
	for _, k := range m.Keys() {
		total += k * m.Data[k]
	}
	return total
}

// Mean returns the arithmetic mean of the observations, or 0 when empty.
func (m *IntMap) Mean() float64 {
	if len(m.Data) == 0 {
		return 0
	}
	return float64(m.Sum()) / float64(m.Items())
}

// Median returns the 50th percentile.
func (m *IntMap) Median() float64 {
	return m.Percentile(0.5)
}
stat/percentile.go
0.680135
0.417628
percentile.go
starcoder
package reactnative

// Workflow descriptions shown to the user for plain (non-Expo) React Native
// projects detected by the scanner.
const (
	// deployWorkflowDescription documents the generated deploy workflow.
	deployWorkflowDescription = `Tests, builds and deploys the app using *Deploy to bitrise.io* Step.

Next steps:
- Set up an [Apple service with API key](https://devcenter.bitrise.io/en/accounts/connecting-to-services/connecting-to-an-apple-service-with-api-key.html).
- Check out [Getting started with React Native apps](https://devcenter.bitrise.io/en/getting-started/getting-started-with-react-native-apps.html).
`

	// primaryWorkflowDescription documents the generated primary workflow
	// when tests were detected in the project.
	primaryWorkflowDescription = `Runs tests.

Next steps:
- Check out [Getting started with React Native apps](https://devcenter.bitrise.io/en/getting-started/getting-started-with-react-native-apps.html).
`

	// primaryWorkflowNoTestsDescription documents the generated primary
	// workflow when no tests were detected.
	primaryWorkflowNoTestsDescription = `Installs dependencies.

Next steps:
- Add tests to your project and configure the workflow to run them.
- Check out [Getting started with React Native apps](https://devcenter.bitrise.io/en/getting-started/getting-started-with-react-native-apps.html).
`
)

// Workflow descriptions for Expo-based React Native projects.
const (
	// expoDeployWorkflowDescription documents the generated EAS deploy
	// workflow when tests were detected.
	expoDeployWorkflowDescription = `Tests the app and runs a build on Expo Application Services (EAS).

Next steps:
- Configure the ` + "`Run Expo Application Services (EAS) build`" + ` Step's ` + "`Access Token`" + ` input.
- Check out [Getting started with Expo apps](https://devcenter.bitrise.io/en/getting-started/getting-started-with-expo-apps.html).
- For an alternative deploy workflow checkout the [(React Native) Expo: Build using Turtle CLI recipe](https://github.com/bitrise-io/workflow-recipes/blob/main/recipes/rn-expo-turtle-build.md).
`

	// expoDeployWorkflowNoTestsDescription documents the generated EAS
	// deploy workflow when no tests were detected.
	expoDeployWorkflowNoTestsDescription = `Runs a build on Expo Application Services (EAS).

Next steps:
- Configure the ` + "`Run Expo Application Services (EAS) build`" + ` Step's ` + "`Access Token`" + ` input.
- Check out [Getting started with Expo apps](https://devcenter.bitrise.io/en/getting-started/getting-started-with-expo-apps.html).
- For an alternative deploy workflow checkout the [(React Native) Expo: Build using Turtle CLI recipe](https://github.com/bitrise-io/workflow-recipes/blob/main/recipes/rn-expo-turtle-build.md).
`

	// expoPrimaryWorkflowDescription documents the generated primary
	// workflow when tests were detected.
	expoPrimaryWorkflowDescription = `Runs tests.

Next steps:
- Check out [Getting started with Expo apps](https://devcenter.bitrise.io/en/getting-started/getting-started-with-expo-apps.html).
`

	// expoPrimaryWorkflowNoTestsDescription documents the generated primary
	// workflow when no tests were detected.
	expoPrimaryWorkflowNoTestsDescription = `Installs dependencies.

Next steps:
- Add tests to your project and configure the workflow to run them.
- Check out [Getting started with Expo apps](https://devcenter.bitrise.io/en/getting-started/getting-started-with-expo-apps.html).
`
)
vendor/github.com/bitrise-io/bitrise-init/scanners/reactnative/const.go
0.796609
0.558146
const.go
starcoder
package chain

import (
	"bytes"
	"fmt"
	"log"
	"strings"

	"github.com/edotau/goFish/simpleio"
)

// Chain holds the header fields of one chain alignment record plus its
// alignment data lines.
type Chain struct {
	Score     int
	TName     string
	TSize     int
	TStrand   byte
	TStart    int
	TEnd      int
	QName     string
	QSize     int
	QStrand   byte
	QStart    int
	QEnd      int
	Alignment []Bases
	Id        int
}

// Bases is cigar-like info for one alignment block: the length/size of
// aligned bases, then the number of target gaps, then the number of query
// gaps.
type Bases struct {
	Size   int
	TBases int
	QBases int
}

// NewChain processes text into chain data fields. text is the header line
// ("chain score tName tSize tStrand ..."); the reader is then consumed for
// the following alignment-data lines. Exits via log.Fatalf if the header
// does not contain exactly 13 space-separated fields.
func NewChain(text string, reader *simpleio.SimpleReader) *Chain {
	data := strings.Split(text, " ")
	if len(data) == 13 {
		return &Chain{
			Score:     simpleio.StringToInt(data[1]),
			TName:     data[2],
			TSize:     simpleio.StringToInt(data[3]),
			TStrand:   data[4][0],
			TStart:    simpleio.StringToInt(data[5]),
			TEnd:      simpleio.StringToInt(data[6]),
			QName:     data[7],
			QSize:     simpleio.StringToInt(data[8]),
			QStrand:   data[9][0],
			QStart:    simpleio.StringToInt(data[10]),
			QEnd:      simpleio.StringToInt(data[11]),
			Alignment: chainingHelper(reader),
			Id:        simpleio.StringToInt(data[12]),
		}
	} else {
		log.Fatalf("Error: header line needs to contain 13 data fields\n")
		return nil // unreachable: log.Fatalf exits
	}
}

// chainingHelper reads tab-separated alignment-data lines from the reader.
// Three-column lines append a full Bases entry; a one-column line is the
// record terminator and triggers the final append plus one extra ReadLine
// to consume the blank separator line before returning.
func chainingHelper(reader *simpleio.SimpleReader) []Bases {
	var line *bytes.Buffer
	var data []string
	var answer []Bases
	var curr Bases
	for nextBytes, done := reader.Peek(1); nextBytes[0] != 0 && done == nil; nextBytes, done = reader.Peek(1) {
		line, _ = simpleio.ReadLine(reader)
		data = strings.Split(line.String(), "\t")
		if len(data) == 1 {
			// Last line of the record: only a size, no gap columns.
			curr = Bases{
				Size:   simpleio.StringToInt(data[0]),
				TBases: 0,
				QBases: 0,
			}
			answer = append(answer, curr)
			// Advance the reader past the blank line that separates
			// records, so the next Peek sees the next header.
			line, _ = simpleio.ReadLine(reader)
			return answer
		} else if len(data) == 3 {
			curr = Bases{
				Size:   simpleio.StringToInt(data[0]),
				TBases: simpleio.StringToInt(data[1]),
				QBases: simpleio.StringToInt(data[2]),
			}
			answer = append(answer, curr)
		} else {
			log.Fatalf("Error: expecting alignment data columns to be 3 or 1 but encountered %d\n", len(data))
		}
	}
	// Only reached if input ends without a one-column terminator line.
	return nil
}

// Chrom implements the bed interface: the chromosome name of the target
// sequence.
func (c *Chain) Chrom() string {
	return c.TName
}

// ChrStart implements the bed interface: the start position on the target
// sequence.
func (c *Chain) ChrStart() int {
	return c.TStart
}

// ChrEnd implements the bed interface: the end position on the target
// sequence.
func (c *Chain) ChrEnd() int {
	return c.TEnd
}

// ReadHeaderComments consumes '#'-prefixed comment lines that sometimes
// appear at the beginning of a chain file and returns them as a struct.
func ReadHeaderComments(er *simpleio.SimpleReader) *HeaderComments {
	var line *bytes.Buffer
	var commments HeaderComments
	for nextBytes, done := er.Peek(1); nextBytes[0] == '#' && done == nil; nextBytes, done = er.Peek(1) {
		line, _ = simpleio.ReadLine(er)
		commments.HashTag = append(commments.HashTag, line.String())
	}
	return &commments
}

// HeaderComments stores the comment lines from the beginning of a chain file.
type HeaderComments struct {
	HashTag []string
}

// Read reads a chain file (header comments skipped) and returns all records.
func Read(filename string) []Chain {
	reader := simpleio.NewReader(filename)
	ReadHeaderComments(reader)
	var ans []Chain
	for i, done := ParseChain(reader); !done; i, done = ParseChain(reader) {
		ans = append(ans, *i)
	}
	return ans
}

// ReadAll reads every file in the list and appends all records into a single
// slice.
func ReadAll(files []string) []Chain {
	var ans []Chain
	for _, each := range files {
		ans = append(ans, Read(each)...)
	}
	return ans
}

// ParseChain returns one chain record at a time plus a bool that is true at
// EOF (in which case the record is nil).
func ParseChain(reader *simpleio.SimpleReader) (*Chain, bool) {
	line, done := simpleio.ReadLine(reader)
	if !done {
		return NewChain(line.String(), reader), false
	} else {
		return nil, true
	}
}

// ToString converts a chain struct back to the original file format: a
// header line, the three-column alignment lines, and a final size-only line.
func ToString(ch *Chain) string {
	var answer string = fmt.Sprintf("chain %d %s %d %c %d %d %s %d %c %d %d %d\n", ch.Score, ch.TName, ch.TSize, ch.TStrand, ch.TStart, ch.TEnd, ch.QName, ch.QSize, ch.QStrand, ch.QStart, ch.QEnd, ch.Id)
	// Stop one short of the end: the last entry is printed as size only,
	// without the two gap columns.
	for i := 0; i < len(ch.Alignment)-1; i++ {
		answer += fmt.Sprintf("%d\t%d\t%d\n", ch.Alignment[i].Size, ch.Alignment[i].TBases, ch.Alignment[i].QBases)
	}
	answer = fmt.Sprintf("%s%d\n", answer, ch.Alignment[len(ch.Alignment)-1].Size)
	return answer
}

// PrettyFmt summarizes the chain header fields in a more human-readable,
// tab-separated format.
func PrettyFmt(c *Chain) string {
	return fmt.Sprintf("%s\t%s\t%d\t%d\t%s\t%s\t%d\t%d", c.TName, string(c.TStrand), c.TStart, c.TEnd, c.QName, string(c.QStrand), c.QStart, c.QEnd)
}

// Write writes the records to filename in chain format, with a blank line
// after each record.
func Write(filename string, chains []Chain) {
	writer := simpleio.NewWriter(filename)
	for _, c := range chains {
		writer.WriteString(ToString(&c))
		writer.WriteByte('\n')
	}
	writer.Close()
}
chain/chain.go
0.580709
0.401688
chain.go
starcoder
package volume

// Matrix is an array of Volumes. It embeds StaticMatrix (fixed-capacity
// storage declared elsewhere in this package) plus the number of channels
// currently in use. Value receivers below mutate a copy and return it.
type Matrix struct {
	StaticMatrix
	Channels int
}

// ApplyToMatrix multiplies this matrix by the incoming matrix mtx,
// channel-count-converting mtx first when the counts differ.
func (m Matrix) ApplyToMatrix(mtx Matrix) Matrix {
	if mtx.Channels == 0 {
		return m
	}
	if m.Channels == mtx.Channels {
		// simple straight-through: per-channel application
		for i := 0; i < m.Channels; i++ {
			m.StaticMatrix[i] = mtx.StaticMatrix[i].ApplySingle(m.StaticMatrix[i])
		}
		return m
	}
	// more complex applications follow...
	if mtx.Channels == 1 {
		// right (mtx) is mono, so just do direct mono application
		return m.Apply(mtx.StaticMatrix[0])
	}
	// NOTE: recursive — convert mtx to our channel count, then apply.
	return m.ApplyToMatrix(mtx.ToChannels(m.Channels))
}

// Apply multiplies every channel of the matrix by the single volume vol.
func (m Matrix) Apply(vol Volume) Matrix {
	for i := 0; i < m.Channels; i++ {
		m.StaticMatrix[i] = vol.ApplySingle(m.StaticMatrix[i])
	}
	return m
}

// Accumulate adds in to the matrix in place, converting in to the receiver's
// channel count first. A zero-channel receiver simply adopts in.
func (m *Matrix) Accumulate(in Matrix) {
	if m.Channels == 0 {
		*m = in
		return
	}
	dry := in.ToChannels(m.Channels)
	for i := 0; i < m.Channels; i++ {
		m.StaticMatrix[i] += dry.StaticMatrix[i]
	}
}

// Assign copies the first `channels` volumes from data into the matrix and
// sets the channel count.
// NOTE(review): no bounds check against the StaticMatrix capacity — confirm
// callers never pass more channels than it can hold.
func (m *Matrix) Assign(channels int, data []Volume) {
	m.Channels = channels
	for i := 0; i < channels; i++ {
		m.StaticMatrix[i] = data[i]
	}
}

// ToChannels converts the matrix to the requested channel count (1, 2 or 4).
// Unsupported counts yield an empty Matrix.
func (m Matrix) ToChannels(channels int) Matrix {
	if m.Channels == channels {
		return m
	}
	switch channels {
	case 1:
		return m.AsMono()
	case 2:
		return m.AsStereo()
	case 4:
		return m.AsQuad()
	default:
		return Matrix{}
	}
}

// Sum sums all the elements of the Matrix and returns the resulting Volume.
func (m Matrix) Sum() Volume {
	var v Volume
	for i := 0; i < m.Channels; i++ {
		v += m.StaticMatrix[i]
	}
	return v
}

// Set assigns the volume of channel ch.
func (m *Matrix) Set(ch int, vol Volume) {
	m.StaticMatrix[ch] = vol
}

// Get returns the volume of channel ch.
func (m Matrix) Get(ch int) Volume {
	return m.StaticMatrix[ch]
}

// AsMono downmixes the matrix to one channel by averaging all channels.
func (m Matrix) AsMono() Matrix {
	switch m.Channels {
	case 0:
		return Matrix{}
	case 1:
		return m
	default:
		return Matrix{
			StaticMatrix: StaticMatrix{m.Sum() / Volume(m.Channels)},
			Channels:     1,
		}
	}
}

// AsStereo converts the matrix to two channels: mono is duplicated, quad
// averages front/rear pairs. Other counts yield an empty Matrix.
func (m Matrix) AsStereo() Matrix {
	switch m.Channels {
	case 0:
		return Matrix{}
	case 1:
		return Matrix{
			StaticMatrix: StaticMatrix{m.StaticMatrix[0], m.StaticMatrix[0]},
			Channels:     2,
		}
	case 2:
		return m
	case 4:
		// Average channel pairs (0,2) and (1,3).
		return Matrix{
			StaticMatrix: StaticMatrix{(m.StaticMatrix[0] + m.StaticMatrix[2]) / 2.0, (m.StaticMatrix[1] + m.StaticMatrix[3]) / 2.0},
			Channels:     2,
		}
	default:
		return Matrix{}
	}
}

// AsQuad converts the matrix to four channels: mono is duplicated to all
// four, stereo is duplicated front/rear. Other counts yield an empty Matrix.
func (m Matrix) AsQuad() Matrix {
	switch m.Channels {
	case 0:
		return Matrix{}
	case 1:
		return Matrix{
			StaticMatrix: StaticMatrix{m.StaticMatrix[0], m.StaticMatrix[0], m.StaticMatrix[0], m.StaticMatrix[0]},
			Channels:     4,
		}
	case 2:
		return Matrix{
			StaticMatrix: StaticMatrix{m.StaticMatrix[0], m.StaticMatrix[1], m.StaticMatrix[0], m.StaticMatrix[1]},
			Channels:     4,
		}
	case 4:
		return m
	default:
		return Matrix{}
	}
}

// Lerp linearly interpolates from m (t=0) toward other (t=1), converting
// other to the receiver's channel count. t <= 0 or an empty other returns m
// unchanged.
func (m Matrix) Lerp(other Matrix, t float32) Matrix {
	if other.Channels == 0 || t <= 0 {
		return m
	}
	out := other.ToChannels(m.Channels)
	// lerp between m and the converted other, per channel
	for c := 0; c < m.Channels; c++ {
		a := m.StaticMatrix[c]
		b := out.StaticMatrix[c]
		out.StaticMatrix[c] = a + Volume(t)*(b-a)
	}
	return out
}
volume/matrix.go
0.86592
0.677179
matrix.go
starcoder
package math type Rect struct { Min, Max Point } func CreateRect(minX, minY, maxX, maxY int) Rect { return Rect{Point{minX, minY}, Point{maxX, maxY}} } func (r Rect) Mid() Point { return Point{ (r.Min.X + r.Max.X) / 2, (r.Min.Y + r.Max.Y) / 2, } } func (r Rect) W() int { return r.Max.X - r.Min.X } func (r Rect) H() int { return r.Max.Y - r.Min.Y } func (r Rect) TL() Point { return r.Min } func (r Rect) TC() Point { return Point{(r.Min.X + r.Max.X) / 2, r.Min.Y} } func (r Rect) TR() Point { return Point{r.Max.X, r.Min.Y} } func (r Rect) BL() Point { return Point{r.Min.X, r.Max.Y} } func (r Rect) BC() Point { return Point{(r.Min.X + r.Max.X) / 2, r.Max.Y} } func (r Rect) BR() Point { return r.Max } func (r Rect) ML() Point { return Point{r.Min.X, (r.Min.Y + r.Max.Y) / 2} } func (r Rect) MR() Point { return Point{r.Max.X, (r.Min.Y + r.Max.Y) / 2} } func (r Rect) Size() Size { return Size{r.Max.X - r.Min.X, r.Max.Y - r.Min.Y} } func (r Rect) ScaleAt(p Point, s Vec2) Rect { return Rect{ p.Add(r.Min.Sub(p).Scale(s)), p.Add(r.Max.Sub(p).Scale(s)), } } func (r Rect) ScaleS(s float32) Rect { return Rect{r.Min.ScaleS(s), r.Max.ScaleS(s)} } func (r Rect) Offset(p Point) Rect { return Rect{r.Min.Add(p), r.Max.Add(p)} } func (r Rect) OffsetX(i int) Rect { return r.Offset(Point{i, 0}) } func (r Rect) OffsetY(i int) Rect { return r.Offset(Point{0, i}) } func (r Rect) ClampXY(x, y int) (int, int) { return Clamp(x, r.Min.X, r.Max.X), Clamp(y, r.Min.Y, r.Max.Y) } func (r Rect) Lerp(v Vec2) Point { return r.Min.Add(r.Size().Scale(v).Point()) } func (r Rect) Frac(v Point) Vec2 { return v.Sub(r.Min).Vec2().Div(r.Size().Vec2()) } func (r Rect) Remap(from, to Rect) Rect { return Rect{r.Min.Remap(from, to), r.Max.Remap(from, to)} } func (r Rect) Expand(s Spacing) Rect { return Rect{ Point{r.Min.X - s.L, r.Min.Y - s.T}, Point{r.Max.X + s.R, r.Max.Y + s.B}, }.Canon() } func (r Rect) ExpandI(s int) Rect { return Rect{ Point{r.Min.X - s, r.Min.Y - s}, Point{r.Max.X + s, r.Max.Y + s}, 
}.Canon() } func (r Rect) Contract(s Spacing) Rect { return Rect{ Point{r.Min.X + s.L, r.Min.Y + s.T}, Point{r.Max.X - s.R, r.Max.Y - s.B}, }.Canon() } func (r Rect) ContractI(s int) Rect { return Rect{ Point{r.Min.X + s, r.Min.Y + s}, Point{r.Max.X - s, r.Max.Y - s}, }.Canon() } func (r Rect) Union(o Rect) Rect { return Rect{r.Min.Min(o.Min), r.Max.Max(o.Max)} } func (r Rect) Intersect(o Rect) Rect { return Rect{ r.Min.Max(o.Min), r.Max.Min(o.Max), }.Canon() } func (r Rect) Constrain(o Rect) Rect { overflowMin := o.Min.Sub(r.Min).Max(ZeroPoint) overflowMax := o.Max.Sub(r.Max).Min(ZeroPoint) return Rect{ r.Min.Add(overflowMax).Max(o.Min), r.Max.Add(overflowMin).Min(o.Max), } } func (r Rect) Canon() Rect { return Rect{ r.Min.Min(r.Max), r.Min.Max(r.Max), } } func (r Rect) Contains(p Point) bool { return r.Min.X <= p.X && r.Min.Y <= p.Y && r.Max.X > p.X && r.Max.Y > p.Y }
math/rect.go
0.877319
0.577019
rect.go
starcoder
package cryptypes

import "database/sql/driver"

// NOTE(review): every Scan below asserts value.([]byte) and will panic if
// the driver hands back a different type (e.g. string) — confirm the
// drivers in use always deliver []byte for these columns.

// EncryptedInt64 supports encrypting Int64 data.
type EncryptedInt64 struct {
	Field
	Raw int64
}

// Scan converts the value from the DB into a usable EncryptedInt64 value.
func (s *EncryptedInt64) Scan(value interface{}) error {
	return decrypt(value.([]byte), &s.Raw)
}

// Value converts an initialized EncryptedInt64 value into a value that can
// safely be stored in the DB.
func (s EncryptedInt64) Value() (driver.Value, error) {
	return encrypt(s.Raw)
}

// NullEncryptedInt64 supports encrypting nullable Int64 data.
type NullEncryptedInt64 struct {
	Field
	Raw   int64
	Empty bool // true when the DB value was NULL
}

// Scan converts the value from the DB into a usable NullEncryptedInt64
// value. A NULL column sets Empty and leaves Raw at zero.
func (s *NullEncryptedInt64) Scan(value interface{}) error {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		return nil
	}
	return decrypt(value.([]byte), &s.Raw)
}

// Value converts an initialized NullEncryptedInt64 value into a value that
// can safely be stored in the DB; Empty maps back to NULL.
func (s NullEncryptedInt64) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encrypt(s.Raw)
}

// SignedInt64 supports signing Int64 data.
type SignedInt64 struct {
	Field
	Raw   int64
	Valid bool // true when the stored signature verified
}

// Scan converts the value from the DB into a usable SignedInt64 value,
// recording whether the signature verified in Valid.
func (s *SignedInt64) Scan(value interface{}) (err error) {
	s.Valid, err = verify(value.([]byte), &s.Raw)
	return
}

// Value converts an initialized SignedInt64 value into a value that can
// safely be stored in the DB.
func (s SignedInt64) Value() (driver.Value, error) {
	return sign(s.Raw)
}

// NullSignedInt64 supports signing nullable Int64 data.
type NullSignedInt64 struct {
	Field
	Raw   int64
	Empty bool // true when the DB value was NULL
	Valid bool // true when the stored signature verified (NULL counts as valid)
}

// Scan converts the value from the DB into a usable NullSignedInt64 value.
func (s *NullSignedInt64) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		s.Valid = true
		return nil
	}
	s.Valid, err = verify(value.([]byte), &s.Raw)
	return
}

// Value converts an initialized NullSignedInt64 value into a value that can
// safely be stored in the DB; Empty maps back to NULL.
func (s NullSignedInt64) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return sign(s.Raw)
}

// SignedEncryptedInt64 supports signing and encrypting Int64 data.
type SignedEncryptedInt64 struct {
	Field
	Raw   int64
	Valid bool // true when the stored signature verified
}

// Scan converts the value from the DB into a usable SignedEncryptedInt64
// value.
func (s *SignedEncryptedInt64) Scan(value interface{}) (err error) {
	s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
	return
}

// Value converts an initialized SignedEncryptedInt64 value into a value that
// can safely be stored in the DB.
func (s SignedEncryptedInt64) Value() (driver.Value, error) {
	return encryptSign(s.Raw)
}

// NullSignedEncryptedInt64 supports signing and encrypting nullable Int64
// data.
type NullSignedEncryptedInt64 struct {
	Field
	Raw   int64
	Empty bool // true when the DB value was NULL
	Valid bool // true when the stored signature verified (NULL counts as valid)
}

// Scan converts the value from the DB into a usable NullSignedEncryptedInt64
// value.
func (s *NullSignedEncryptedInt64) Scan(value interface{}) (err error) {
	if value == nil {
		s.Raw = 0
		s.Empty = true
		s.Valid = true
		return nil
	}
	s.Valid, err = decryptVerify(value.([]byte), &s.Raw)
	return
}

// Value converts an initialized NullSignedEncryptedInt64 value into a value
// that can safely be stored in the DB; Empty maps back to NULL.
func (s NullSignedEncryptedInt64) Value() (driver.Value, error) {
	if s.Empty {
		return nil, nil
	}
	return encryptSign(s.Raw)
}
cryptypes/type_int64.go
0.800458
0.611817
type_int64.go
starcoder
package circuit

import (
	"sort"

	"github.com/heustis/tsp-solver-go/model"
	"github.com/heustis/tsp-solver-go/stats"
)

// minimumSignificance is the default disparity threshold for attaching a point.
const minimumSignificance = 1.0

// maxClones is the default cap on concurrently-tracked cloned circuits.
const maxClones uint16 = 1000

// DisparityClonable relies on the following priniciples to approximate the smallest concave circuit:
// 1. That the minimum convex hull of a 2D circuit must be traversed in that order to be optimal (i.e. swapping the order of any 2 vertices in the hull will result in edges intersecting.)
// 1a. This means that each convex hull vertex may have any number of interior points between and the next convex hull vertex, but that adding the interior vertices to the circuit cannot reorder these vertices.
// 2. Interior points are either near an edge, near a corner, or near the middle of a set of edges (i.e. similarly close to several edges, possibly all edges).
// 2a. A point that is close to a single edge will have a significant disparity between the distance increase of its closest edge, and the distance increase of all other edges.
// 2b. A point that is close to a corner of two edges will have a significant disparity between the distance increase of those two corner edges, and the distance increase of all other edges.
// 2c. A point that is near the middle of a group of edges may or may not have a significant disparity between its distance increase
// 3. As interior points are connected to the circuit, other points will move from '2c' to '2a' or '2b' (or become exterior points).
// 3a. This is because the new concave edges will be closer to the other interior points than the previous convex edges were.
// 3b. If a point becomes exterior, ignore edges that would intersect a closer edge if the point attached to the farther edge.
// In other words, if the exterior point is close to a concave corner, it could attach to either edge without intersecting the other.
// However, if it is near a convex corner, the farther edge would have to cross the closer edge to attach to the point.
// 3c. If all points are in 2c, clone the circuit once per edge and attach that edge to its closest edge, then solve each of those clones in parallel.
type DisparityClonable struct {
	significance     float64                    // minimum disparity required to attach without cloning
	maxClones        uint16                     // cap on tracked circuit clones
	perimeterBuilder model.PerimeterBuilder     // builds the initial convex-hull perimeter
	circuits         []*disparityClonableCircuit // candidate circuits, kept sorted shortest-first
}

// NewDisparityClonable builds the convex-hull perimeter for the vertices and
// seeds the candidate list with that single initial circuit, pre-computing
// the per-vertex distance-increase gaps for every unattached vertex.
func NewDisparityClonable(vertices []model.CircuitVertex, perimeterBuilder model.PerimeterBuilder) *DisparityClonable {
	circuitEdges, unattachedVertices := perimeterBuilder(vertices)

	initCircuit := &disparityClonableCircuit{
		edges:     circuitEdges,
		distances: make(map[model.CircuitVertex]*stats.DistanceGaps),
		length:    0.0,
	}
	circuits := []*disparityClonableCircuit{initCircuit}

	// Determine the distance increase from each point to each edge, and the gaps between those distance increases for each point.
	for vertex := range unattachedVertices {
		initCircuit.distances[vertex] = stats.NewDistanceGaps(vertex, circuitEdges)
	}

	for _, edge := range circuitEdges {
		initCircuit.length += edge.GetLength()
	}

	return &DisparityClonable{
		significance:     minimumSignificance,
		maxClones:        maxClones,
		circuits:         circuits,
		perimeterBuilder: perimeterBuilder,
	}
}

// FindNextVertexAndEdge returns any unattached vertex of the best circuit
// together with that vertex's closest edge, or (nil, nil) when none remain.
// Since updating may need to clone the circuits, and each circuit may need
// to be updated with a different vertex, any unattached point/edge suffices.
func (c *DisparityClonable) FindNextVertexAndEdge() (model.CircuitVertex, model.CircuitEdge) {
	if len(c.circuits) > 0 {
		for k, v := range c.circuits[0].distances {
			return k, v.ClosestEdges[0].Edge
		}
	}
	return nil, nil
}

// GetAttachedEdges returns the edges of the current best (shortest) circuit.
func (c *DisparityClonable) GetAttachedEdges() []model.CircuitEdge {
	if len(c.circuits) > 0 {
		return c.circuits[0].edges
	}
	return []model.CircuitEdge{}
}

// GetAttachedVertices returns the vertices of the current best circuit, in
// traversal order (each edge contributes its start vertex).
func (c *DisparityClonable) GetAttachedVertices() []model.CircuitVertex {
	if len(c.circuits) > 0 && len(c.circuits[0].edges) > 0 {
		vertices := make([]model.CircuitVertex, len(c.circuits[0].edges))
		for i, edge := range c.circuits[0].edges {
			vertices[i] = edge.GetStart()
		}
		return vertices
	}
	return []model.CircuitVertex{}
}

// GetLength returns the length of the current best circuit (0 if none).
func (c *DisparityClonable) GetLength() float64 {
	if len(c.circuits) > 0 {
		return c.circuits[0].length
	}
	return 0.0
}

// GetUnattachedVertices returns the set of vertices not yet attached to the
// current best circuit.
func (c *DisparityClonable) GetUnattachedVertices() map[model.CircuitVertex]bool {
	unattachedVertices := make(map[model.CircuitVertex]bool)
	if len(c.circuits) > 0 {
		for k := range c.circuits[0].distances {
			unattachedVertices[k] = true
		}
	}
	return unattachedVertices
}

// SetMaxClones overrides the cap on tracked circuit clones.
func (c *DisparityClonable) SetMaxClones(max uint16) {
	c.maxClones = max
}

// SetSignificance overrides the minimum disparity threshold.
func (c *DisparityClonable) SetSignificance(minSignificance float64) {
	c.significance = minSignificance
}

// Update advances every candidate circuit by one attachment step, collecting
// any clones those steps produce, then re-sorts the candidates by length per
// attached vertex and trims the list to maxClones. The supplied vertex/edge
// are ignored because each circuit chooses its own attachment.
func (c *DisparityClonable) Update(ignoredVertex model.CircuitVertex, ignoredEdge model.CircuitEdge) {
	// Don't update if the perimeter has not been built, nor if the shortest circuit is completed.
	if len(c.circuits) == 0 || len(c.circuits[0].distances) == 0 {
		return
	}

	// Note: track updated and cloned circuits in a separate array once we need to clone at least one circuit.
	// Do not mutate the 'circuits' array while we are iterating over it, replace it with the updated/cloned array afterward (if appropriate).
	var updatedCircuits []*disparityClonableCircuit
	useUpdated := false
	for i, circuit := range c.circuits {
		if clones := circuit.update(c.significance); len(clones) > 0 || useUpdated {
			// Add all previously processed circuits the first time the clone array is constructed.
			if !useUpdated {
				updatedCircuits = make([]*disparityClonableCircuit, 0, len(c.circuits)+len(clones))
				updatedCircuits = append(updatedCircuits, c.circuits[0:i]...)
				useUpdated = true
			}
			updatedCircuits = append(updatedCircuits, circuit)
			updatedCircuits = append(updatedCircuits, clones...)
		}
	}
	if useUpdated {
		c.circuits = updatedCircuits
	}

	// Sort the updated slice from smallest to largest, preferring circuits that are close to completion.
	sort.Slice(c.circuits, func(i, j int) bool {
		return c.circuits[i].getLengthPerVertex() < c.circuits[j].getLengthPerVertex()
	})

	// Drop the worst candidates once the clone budget is exceeded.
	if len(c.circuits) > int(c.maxClones) {
		c.circuits = c.circuits[0:c.maxClones]
	}
}

// Compile-time check that DisparityClonable satisfies model.Circuit.
var _ model.Circuit = (*DisparityClonable)(nil)
circuit/disparityclonable.go
0.805364
0.710176
disparityclonable.go
starcoder
package ui import ( "github.com/gdamore/tcell/v2" "github.com/rivo/tview" ) const ( logo = ` ____ ____ | | ______ __| _/____ ____ | |/ / _ \ / __ |/ _ \ / _ \ | ( (_) ) /_/ ( (_) | (_) ) |__|_ \____/\____ |\____/ \____/ \/ \/ ` note = `[yellow]Shortcuts are working in two modes : [yellow][red]1) Global[yellow]: see global shortcuts Type one of the keys [red]> ! : / #[yellow] to switch to this mode. [yellow][red]2) Table[yellow]: see table shortcuts Type the key [red]Esc[yellow] to go to this mode. ` ) func getLogo(options *Options) *tview.Grid { logoTextView := tview.NewTextView() logoTextView.SetText(logo) logoTextView.SetTextColor(options.Skin.TitleColor) logoTextView.SetBackgroundColor(options.Skin.BackgroundColor) logoGrid := tview.NewGrid(). SetRows(6). SetColumns(0) logoGrid.AddItem(logoTextView, 0, 0, 1, 1, 0, 0, false) return logoGrid } func getInfos(options *Options) *tview.Grid { infoGrid := tview.NewGrid(). SetRows(1, 1, 1, 1). SetColumns(8, 3, 0) infoGrid.SetBorder(true).SetTitle(" Informations ") infoGrid.SetBackgroundColor(options.Skin.BackgroundColor) infoGrid.SetBorderColor(options.Skin.BorderColor) infoGrid.SetTitleColor(options.Skin.TitleColor) addField(options, infoGrid, 0, 0, "URL", options.Config.URL, false, false) addField(options, infoGrid, 1, 0, "Version", options.Config.Version, false, false) addField(options, infoGrid, 2, 0, "", "", false, false) addField(options, infoGrid, 3, 0, "", "", false, false) return infoGrid } func getNote(options *Options) *tview.Grid { noteTextView := tview.NewTextView() noteTextView.SetText(note).SetDynamicColors(true) noteTextView.SetBackgroundColor(options.Skin.BackgroundColor) noteGrid := tview.NewGrid(). SetRows(0). 
SetColumns(0) noteGrid.SetBorder(true).SetTitle(" Notes ") noteGrid.SetBackgroundColor(options.Skin.BackgroundColor) noteGrid.SetBorderColor(options.Skin.BorderColor) noteGrid.SetTitleColor(options.Skin.TitleColor) noteGrid.AddItem(noteTextView, 0, 0, 1, 1, 0, 0, false) return noteGrid } func getQuickActions(options *Options) *tview.Grid { quickActionsGrid := tview.NewGrid(). SetRows(0). SetColumns(13, 3, 0) quickActionsGrid.SetBorder(true).SetTitle(" Quick Actions ") quickActionsGrid.SetBackgroundColor(options.Skin.BackgroundColor) quickActionsGrid.SetBorderColor(options.Skin.BorderColor) quickActionsGrid.SetTitleColor(options.Skin.TitleColor) addField(options, quickActionsGrid, 0, 0, "?", "Show help", false, false) addField(options, quickActionsGrid, 1, 0, ":q", "Exit the application", false, false) addField(options, quickActionsGrid, 2, 0, ":x", "List the servers", false, false) addField(options, quickActionsGrid, 3, 0, ":o", "List the macros", false, false) addField(options, quickActionsGrid, 4, 0, "@MacroName", "Execute the macro named <MacroName>", false, false) addField(options, quickActionsGrid, 5, 0, "#Object", "Show the fields of the object <Object>", false, false) addField(options, quickActionsGrid, 6, 0, ">Query", "Run a query (e.g: >sale.order id +name state state=draft partner_id.name~azure)", false, false) return quickActionsGrid } func getGlobalShortcuts(options *Options) *tview.Grid { globalShortcutsGrid := tview.NewGrid(). SetRows(0). 
SetColumns(13, 3, 0) globalShortcutsGrid.SetBorder(true).SetTitle(" Global shortcuts ") globalShortcutsGrid.SetBackgroundColor(options.Skin.BackgroundColor) globalShortcutsGrid.SetBorderColor(options.Skin.BorderColor) globalShortcutsGrid.SetTitleColor(options.Skin.TitleColor) addField(options, globalShortcutsGrid, 0, 0, "Ctrl-K", "Clear the input", false, false) addField(options, globalShortcutsGrid, 1, 0, "Ctrl-N", "Go to the next command", false, false) addField(options, globalShortcutsGrid, 2, 0, "Ctrl-P", "Go to the previous command", false, false) addField(options, globalShortcutsGrid, 3, 0, "Ctrl-F", "Filter records", false, false) addField(options, globalShortcutsGrid, 4, 0, "Ctrl-O", "List macros", false, false) addField(options, globalShortcutsGrid, 5, 0, "Ctrl-R", "Enable or disable auto-refresh", false, false) addField(options, globalShortcutsGrid, 6, 0, "Ctrl-X", "List servers", false, false) addField(options, globalShortcutsGrid, 7, 0, "Ctrl-H", "Show and hide the help", false, false) addField(options, globalShortcutsGrid, 8, 0, "Ctrl-D", "Change database or a user", false, false) addField(options, globalShortcutsGrid, 9, 0, "Ctrl-Q", "Exit the application", false, false) addField(options, globalShortcutsGrid, 10, 0, "Key UP/DOWN", "Navigate through command history", false, false) return globalShortcutsGrid } func getTableShortcuts(options *Options) *tview.Grid { tableShortcutsGrid := tview.NewGrid(). SetRows(0). 
SetColumns(13, 3, 0) tableShortcutsGrid.SetBorder(true).SetTitle(" Table shortcuts ") tableShortcutsGrid.SetBackgroundColor(options.Skin.BackgroundColor) tableShortcutsGrid.SetBorderColor(options.Skin.BorderColor) tableShortcutsGrid.SetTitleColor(options.Skin.TitleColor) addField(options, tableShortcutsGrid, 0, 0, "^np$", "Go to the first/next/previous/last page", false, false) addField(options, tableShortcutsGrid, 1, 0, "hjkl", "Navigate in the table", false, false) addField(options, tableShortcutsGrid, 2, 0, "gG", "Go to first/last line of the table", false, false) addField(options, tableShortcutsGrid, 3, 0, "r", "Refresh the data", false, false) addField(options, tableShortcutsGrid, 4, 0, "m", "Show metadata", false, false) addField(options, tableShortcutsGrid, 5, 0, "d", "Show details", false, false) addField(options, tableShortcutsGrid, 6, 0, "s", "Show links", false, false) addField(options, tableShortcutsGrid, 7, 0, "Key <SPACE>", "Select a record", false, false) addField(options, tableShortcutsGrid, 8, 0, "Key <ENTER>", "Open related records", false, false) addField(options, tableShortcutsGrid, 9, 0, "!FuncName", "Execute the remote function <FuncName>", false, false) addField(options, tableShortcutsGrid, 10, 0, "/filter", "Filter the records", false, false) return tableShortcutsGrid } func getHelpFooter(options *Options) *tview.Grid { helpFooterGrid := tview.NewGrid(). SetRows(0). 
SetColumns(0) helpFooterTextView := tview.NewTextView() const code string = ` [green]Type the shortcut [red]Ctrl-H[green] or [red]q[green] to quit the help mode[green] [green]You can also type one of the keys: [red]> @ :[green] or [red]#[green]` helpFooterTextView.SetText(code) helpFooterTextView.SetTextAlign(tview.AlignCenter) helpFooterTextView.SetDynamicColors(true) helpFooterTextView.SetBackgroundColor(options.Skin.BackgroundColor) helpFooterGrid.AddItem(helpFooterTextView, 0, 0, 1, 1, 0, 0, false) return helpFooterGrid } func getHomeGrid(options *Options) *tview.Grid { homeGrid := tview.NewGrid(). SetRows(6, 10, 13, 0). SetColumns(0, 0) homeGrid.AddItem(getLogo(options), 0, 0, 1, 1, 0, 0, false) homeGrid.AddItem(getInfos(options), 0, 1, 1, 1, 0, 0, false) homeGrid.AddItem(getNote(options), 1, 0, 1, 1, 0, 0, false) homeGrid.AddItem(getQuickActions(options), 1, 1, 1, 1, 0, 0, false) homeGrid.AddItem(getGlobalShortcuts(options), 2, 0, 1, 1, 0, 0, false) homeGrid.AddItem(getTableShortcuts(options), 2, 1, 1, 1, 0, 0, false) homeGrid.AddItem(getHelpFooter(options), 3, 0, 1, 2, 0, 0, false) homeGrid.SetInputCapture(func(event *tcell.EventKey) *tcell.EventKey { for _, r := range []rune{'>', '@', ':', '#'} { if event.Rune() == r { setSearchBarText(string(r), options) options.Pages.SwitchToPage("main") setSearchBarFocus(options) } } if event.Rune() == 'q' { options.Pages.SwitchToPage("main") } return event }) return homeGrid } func showHome(options *Options) { pageName, _ := options.Pages.GetFrontPage() if pageName == "home" { options.Pages.SwitchToPage("main") } else { options.Pages.SwitchToPage("home") } }
internal/ui/home.go
0.5083
0.445891
home.go
starcoder
package grader var _ BlockGrader = (*V2BlockGrader)(nil) // V2BlockGrader implements the V2 grading algorithm. // Entries are encoded in Protobuf with 25 winners each block. // Valid assets can be found in ´opr.V2Assets´ type V2BlockGrader struct { baseGrader } // Version 2 func (v2 *V2BlockGrader) Version() uint8 { return 2 } // WinnerAmount is the number of OPRs that receive a payout func (v2 *V2BlockGrader) WinnerAmount() int { return 25 } // AddOPR verifies and adds a V2 OPR. func (v2 *V2BlockGrader) AddOPR(entryhash []byte, extids [][]byte, content []byte) error { gopr, err := ValidateV2(entryhash, extids, v2.height, v2.prevWinners, content) if err != nil { return err } v2.oprs = append(v2.oprs, gopr) return nil } // Grade the OPRs. The V2 algorithm works the following way: // 1. Take the top 50 entries with the best proof of work // 2. Calculate the average of each of the 32 assets // 3. Calculate the distance of each OPR to the average, where distance is the sum of quadratic differences // to the average of each asset. If an asset is within `band`% of the average, that asset's // distance is 0. // 4. Throw out the OPR with the highest distance // 5. Repeat 3-4 until there are only 25 OPRs left // 6. Repeat 3 but this time don't apply the band and don't throw out OPRs, just reorder them // until you are left with one func (v2 *V2BlockGrader) Grade() GradedBlock { return v2.GradeCustom(50) } // GradeCustom grades the block using a custom cutoff for the top X func (v2 *V2BlockGrader) GradeCustom(cutoff int) GradedBlock { block := new(V2GradedBlock) block.cutoff = cutoff block.height = v2.height block.cloneOPRS(v2.oprs) block.filterDuplicates() block.sortByDifficulty(cutoff) block.grade() if len(block.oprs) < 25 { block.shorthashes = v2.prevWinners } else { block.createShortHashes(25) } return block } // Payout returns the amount of Pegtoshi awarded to the OPR at the specified index func (v2 *V2BlockGrader) Payout(index int) int64 { return V2Payout(index) }
modules/grader/v2grader.go
0.767341
0.437583
v2grader.go
starcoder
package main const pikari = `/** * @file Pikari API * @see https://github.com/olliNiinivaara/Pikari * @author <NAME> * @copyright <NAME> 2019 * @license MIT * @version 0.9 */ /** @namespace * @description The global Pikari object. To initialize, add listeners and call {@link Pikari.start}. * @global * */ window.Pikari = new Object() /** * @description Local copy of the database. * <br>Keys identify fields and values are field values. * <br>Keys should be strings but values can be any objects, even nested. * <br>If you want to delete a field from database, set it's value to null. * <br>Changes to fields committed by any user are automatically updated. * @type {Map<string, *>} * @example * if (await Pikari.setLocks("somefield", someotherfield)) { * Pikari.data.set("somefield", "some value") * Pikari.data.get(someotherfield)[someproperty] = "some value" * Pikari.commit() * } */ Pikari.data = new Map() /** * @description Helper function to generate a number that is unique enough for prototyping purposes. * @return {number} a random integer that is hopefully unique. */ Pikari.generateKey = function () { return Math.floor(Math.random() * Number.MAX_SAFE_INTEGER) } /** * @description Name of the user. Automatically generated if not explicitly given as user query param in url or at {@link Pikari.start}. * Maximum length of a name is 200 letters. * @type {string} */ Pikari.user = "Anon-" + Pikari.generateKey() /** * @description Names of locks that are currently held by the current user. * <br>Locks can be acquired with {@link Pikari.setLocks}. * <br>At least one lock must be held before commit can be called. * @type {string[]} */ Pikari.mylocks = [] /** @typedef Pikari.Lock @type {Object} @property {string} lockedby current lock owner @property {string} lockedsince The start time of locking */ /** * @description Locks that are currently held. * <br>Object's property names (keys) identify the locks and properties are of type {@link Pikari.Lock}. 
* <br>Changes to locks can be listened with {@link Pikari.addLockListener}. * @type {Object.<string, Pikari.Lock>} */ Pikari.locks = {} /** * @description Users currenty online. * <br>Keys identify users and values tell the times when users became online. * <br>Changes to user presence can be listened with {@link Pikari.addUserListener}. * <br>The users map is ordered by ascending connection time (which means that {@link Pikari.user} is last at {@link Pikari.EVENT.START}) * @type {Map<string,Date>} */ Pikari.users = new Map() /** * @description Describes the reason for data change in {@link {changeCallback}} * @typedef {Object} EVENT * @memberOf Pikari * @property {string} START - Connection with server is established (see {@link Pikari.start}) or server restarted itself * @property {string} COMMIT - Someone committed data (see {@link Pikari.commit}) * @property {string} ROLLBACK - The local user rollbacked (see {@link Pikari.rollback}) */ Pikari.EVENT = { START: "START", COMMIT: "COMMIT", ROLLBACK: "ROLLBACK" } Object.freeze(Pikari.EVENT) /** * @description Helper function to clean a string so that it can be safely used as innerHTML. * @param {string} str - the string to be cleaned * @return {string} the cleaned version. */ Pikari.clean = function (str) { return String(str).trim().replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;') } /** * @description Connects the Pikari client to the Pikari server with given name and password (both optional). * If a user with same name is already connected, existing user will be immediately disconnected. * @param {string} user - user name (max 200 letters). If user is false, user name from URL query param is used. * If URL query param does not exist and user is "prompt", user name is asked. Finally, if no user name is given, a random name is used. 
* @param {string} password - if password is set for application, this must match it */ Pikari.start = function (user, password) { if (!Pikari._startstate) { Pikari._startstate = "starting" let style = document.createElement('style') document.head.appendChild(style) style.sheet.insertRule(" body.waiting * { cursor: wait; }", 0) } Pikari.waiting(true) if (!user || user == "prompt") { const userparam = new URLSearchParams(window.location.search).get('user') if (userparam) user = userparam else if (user == "prompt") user = prompt(document.title+"\nUser name") } if (user) Pikari.user = user Pikari.user = Pikari.user.substring(0, 200) if (!password) password = "" Pikari._password = btoa(String(password)) Pikari._startWebSocket() } /** * @callback stopCallback * @memberOf Pikari * @description Triggered at most once, only if connection to server is lost (usually might happen when server is closed). * @return {boolean} - Return true to prevent default handling (which disables all elements and shows an alert). */ /** * @description Set handler for the stop event. * @param {stopCallback} handler - {@link Pikari.stopCallback} */ Pikari.setStopListener = function (handler) { Pikari._stoplistener = handler } /** * @callback changeCallback * @memberOf Pikari * @description Triggered whenever data might have been changed. * @param {EVENT} changetype - {@link Pikari.EVENT} * @param {string[]} changedfields - an array of changed field names * @param {string} changer - changer's name ("" if EVENT == START) */ /** * @description Add handler for change events. * @param {changeCallback} handler - {@link Pikari.changeCallback} */ Pikari.addChangeListener = function (handler) { Pikari._changelisteners.push(handler) } /** * @callback messageCallback * @memberOf Pikari * @description Triggered when a user sends a message. * @param {string} sender - sender of the message * @param {string} message - the message itself */ /** * @description Add handler for messages sent by other users. 
* @param {messageCallback} handler - {@link Pikari.messageCallback} */ Pikari.addMessageListener = function (handler) { Pikari._messagelisteners.push(handler) } /** * @callback lockCallback * @memberOf Pikari * @description Triggered when locks are acquired or released. * <br>Note: also local {@link Pikari.setLocks} calls will trigger the lockCallback. */ /** * @description Add handler for changes in lock ownerships. * @param {lockCallback} handler - {@link Pikari.lockCallback} */ Pikari.addLockListener = function (handler) { Pikari._locklisteners.push(handler) } /** * @callback userCallback * @memberOf Pikari * @description Triggered when a user logs in or disconnects. * @param {string} user - the user that logged in or disconnected * @param {boolean} login - true iff the user logged in */ /** * @description Add handler for changes in on-line users. * @param {userCallback} handler - {@link Pikari.userCallback} */ Pikari.addUserListener = function (handler) { Pikari._userlisteners.push(handler) } /** * @description A helper function to get field value or default if undefined. * The default will be set to local data but not sent to server database. * @param {string} field - Field name. * @param {*} defaultvalue - The default value if field is undefined. * @return {*} The field value or defaultvalue. */ Pikari.getOrDefault = function (field, defaultvalue) { let result = Pikari.data.get(field) if (result !== undefined) return result Pikari.data.set(field, defaultvalue) return defaultStatus } /** * @description A helper function to get data field names as an array. * @return {string[]} Array of field names. 
*/ Pikari.getFields = function () { return Array.from(Pikari.data.keys()) } /** * @description A helper function set wait cursor and disable all elements * @param {boolean} waiting - true for waiting, false for defaulting cursor and re-enabling elements */ Pikari.waiting = function (waiting) { if (waiting) { document.body.className += (" waiting") Pikari._activeElement = document.activeElement document.querySelectorAll("body *").forEach(el => { el.disabled = true }) } else { const n = document.body.className if (n.endsWith(" waiting")) document.body.className = n.substr(0, n.length - 8) else document.body.className = n.replace(" waiting ", " ") document.querySelectorAll("body *").forEach(el => { el.disabled = false }) if (Pikari._activeElement) Pikari._activeElement.focus() } } /** * @description Acquire locks to data before making changes and committing ("start a transaction"). * <br>Locks prevent concurrent modification by multiple users. * <br>The basic procedure is to use names-of-fields-to-be-changed as lock names. * <br>If lock setting fails (because required locks were already locked by other user(s)), all currently held locks by the user are released (to prevent dead-locks). * <br>Note: Remember to await for the return value, this is an async function. * @async * @param {...string} locks - names (or array(s) of names) of the locks to lock. If locks is missing or empty, tries to acquire all existing field names. * @return {boolean} true if locking was successful. False if user lost all locks and is not eligible to {@link Pikari.commit}. 
*/ Pikari.setLocks = async function (...locks) { if (!locks || locks.length == 0) locks = Pikari.getFields() locks = locks.flat() Pikari.locks = "inflight" Pikari.waiting(true) let response try { response = await fetch(Pikari._basepath+"setlocks", { method: "post", body: JSON.stringify({ "user": Pikari.user, "pw": Pikari._password, "locks": locks }) }) if (Pikari.locks === "inflight") { if (response.status != 200) { Pikari.locks = new Map() let w = window.open() w.document.write(response.text()) return false } Pikari.locks = await response.json() if (Pikari.locks["error"]) { Pikari._reportError(Pikari.locks["error"]) Pikari.locks = new Map() return false } Pikari.mylocks = [] Object.keys(Pikari.locks).forEach(l => { if (Pikari.locks[l].lockedby === Pikari.user) Pikari.mylocks.push(l) }) } if (Pikari.mylocks.length == 0) return false Pikari._oldData = new Map() Pikari.data.forEach((value, field) => { Pikari._oldData.set(field, JSON.stringify(value)) }) return true } catch(e) { alert(e.toString()) } finally { Pikari.waiting(false) } } /** * @description Commit changes to data fields. * <br>Only fields that are changed will be transferred to server. * <br>Will release all locks. * @throws if no locks are held ("no transaction is active") an error will be thrown */ Pikari.commit = function () { if (Pikari.mylocks.length == 0) throw ("No transaction to commit") let newdata = {} Pikari.data.forEach((value, field) => { const newvalue = JSON.stringify(value) if (!Pikari._oldData.has(field) || Pikari._oldData.get(field) != newvalue) newdata[field] = newvalue }) newdata = JSON.stringify(newdata) Pikari._sendToServer("commit", newdata) } /** * @description Rollback any changes to data fields. * <br>Will cause a local {@link Pikari.changeCallback} with a list of rolled-back (modified) fields. 
* @throws if no locks are held ("no transaction is active") an error will be thrown */ Pikari.rollback = function () { if (Pikari.mylocks.length == 0) throw ("No transaction to rollback") let changedfields = [] Pikari._oldData.forEach((oldvalue, field) => { const modifiedvalue = JSON.stringify(Pikari.data.get(field)) if (oldvalue != modifiedvalue) changedfields.push(field) Pikari._oldData.set(field, JSON.parse(oldvalue)) }) Pikari.data = Pikari._oldData fetch(Pikari._basepath+"setlocks", { method: "post", body: JSON.stringify({ "user": Pikari.user, "pw": Pikari._password, "locks": [] }) }) for (let l of Pikari._changelisteners) l(Pikari.EVENT.ROLLBACK, changedfields, Pikari.user) } /** * @description Send a message to other on-line users. * @param message - the message to send * @param {string|string[]} - a receiver or array of receivers. If missing or empty, the message will be sent to all users. */ Pikari.sendMessage = function (message, receivers) { if (!receivers) receivers = [] if (!Array.isArray(receivers)) receivers = [receivers] Pikari._ws.send(JSON.stringify({ "sender": Pikari.user, "pw": Pikari._password, "receivers": receivers, "messagetype": "message", "message": message })) } /** * @description Send some string to be added to the server-side log. * @param {string} event - the string to be added; will be truncated to 10000 letters. */ Pikari.log = function (event) { if (typeof event !== "string") return event = event.substring(0, 10000) Pikari._sendToServer("log", event) } /** * @description Log out user. Pikari will respond by closing the web socket connection which triggers {@link Pikari.stopCallback}. 
*/ Pikari.logOut = function () { Pikari._sendToServer("logout") } //private stuff------------------------------------------- Pikari._stoplistener = null Pikari._locklisteners = [] Pikari._changelisteners = [] Pikari._messagelisteners = [] Pikari._userlisteners = [] Pikari._app Pikari._basepath Pikari._activeElement Pikari._reportError = function (error) { error = "Pikari client error - " + error console.log(error) if (!error.includes("Web socket problem: ")) Pikari.log(error) alert(error) throw error } Pikari._sendToServer = function (messagetype, message) { if (!Pikari._ws) alert("No connection server!") else Pikari._ws.send(JSON.stringify({ "sender": Pikari.user, "pw": Pikari._password, "messagetype": messagetype, "message": message })) } Pikari._handleStart = function (d) { try { if (d.message == "wrongpassword") return alert("Wrong password") if (d.message == "passwordrequired") return alert("Password required") let startdata = JSON.parse(d.message) Object.entries(JSON.parse(startdata.Db)).forEach(([field, data]) => { Pikari.data.set(field, data) }) const userbag = JSON.parse(startdata.Users) let userlist = Object.keys(userbag).sort((a, b) => { return userbag[a] - userbag[b] }) userlist.forEach((name) => { Pikari.users.set(name, new Date(userbag[name] * 1000)) }) const changedfields = Pikari.getFields() for (let l of Pikari._changelisteners) l(Pikari.EVENT.START, changedfields, "") Pikari._startstate = "started" } finally { Pikari.waiting(false) } } Pikari._handleChange = function (d) { const newdata = JSON.parse(d.message) if (Object.keys(newdata).length == 0) Pikari.data = new Map() Object.entries(newdata).forEach(([field, data]) => { if (data == "null") Pikari.data.delete(field); else Pikari.data.set(field, JSON.parse(data)) }) for (let l of Pikari._changelisteners) l(Pikari.EVENT.COMMIT, Object.keys(newdata), d.sender) } Pikari._handleLocking = function (d) { Pikari.locks = JSON.parse(d.message) Pikari.mylocks = [] Object.keys(Pikari.locks).forEach(l => { 
if (Pikari.locks[l].lockedby === Pikari.user) Pikari.mylocks.push(l) }) for (let l of Pikari._locklisteners) l(d.sender) } Pikari._handleUser = function (d) { if (d.message === Pikari.user) return if (d.pw === "in") Pikari.users.set(d.message, new Date()) else Pikari.users.delete(d.message) for (let l of Pikari._userlisteners) l(d.message, d.pw === "in") } Pikari._getUrl = function () { if (Pikari._app == "index") { let url = location.href url = url.replace("https://", "wss://") url = url.replace("http://", "ws://") const end = url.indexOf("?") if (end > -1) url = url.substring(0, end) return url+"ws?user=" + Pikari.user } const protocol = location.protocol === "https:" ? "wss://" : "ws://" let host = location.hostname if (location.port) host += ":" + location.port Pikari._app = location.pathname.substring(1, location.pathname.length) if (Pikari._app.endsWith("/")) Pikari._app = Pikari._app.substring(0, Pikari._app.length-1) const split = Pikari._app.lastIndexOf('/') Pikari._basepath = "/" if (split > -1) { Pikari._app = Pikari._app.substring(split+1) Pikari._basepath = location.pathname.substring(0, split+1) + "/" } return protocol + host + Pikari._basepath + "ws?user=" + Pikari.user + "&app=" + Pikari._app } Pikari._startWebSocket = function () { Pikari._ws = new WebSocket(Pikari._getUrl()) Pikari._ws.onopen = function () { Pikari._sendToServer("start") } Pikari._ws.onclose = function () { Pikari._ws = null Pikari.data = new Map() if (Pikari._startstate != "started") return let preventdefault = false if (Pikari._stoplistener) preventdefault = Pikari._stoplistener() if (!preventdefault) { Pikari.waiting(false) alert("Connection to Pikari server was lost!") } } Pikari._ws.onmessage = function (evt) { const d = JSON.parse(evt.data) switch (d.messagetype) { case "start": { Pikari._handleStart(d); break } case "message": { for (let l of Pikari._messagelisteners) l(d.sender, d.message); break } case "lock": { Pikari._handleLocking(d); break } case "change": { 
Pikari._handleChange(d); break } case "sign": { Pikari._handleUser(d); break } default: Pikari._reportError("Unrecognized message type received: " + d.messagetype) } } Pikari._ws.onerror = function (evt) { Pikari._reportError("Web socket problem: " + evt.data) } }`
pikarijs.go
0.885953
0.465205
pikarijs.go
starcoder
// Package geo contains the base types for spatial data type operations. package geo import ( "github.com/cockroachdb/cockroach/pkg/geo/geopb" "github.com/cockroachdb/cockroach/pkg/geo/geos" "github.com/golang/geo/s2" "github.com/twpayne/go-geom" "github.com/twpayne/go-geom/encoding/ewkb" // Force these into vendor until they're used. _ "github.com/twpayne/go-geom/encoding/ewkbhex" _ "github.com/twpayne/go-geom/encoding/geojson" _ "github.com/twpayne/go-geom/encoding/kml" _ "github.com/twpayne/go-geom/encoding/wkb" _ "github.com/twpayne/go-geom/encoding/wkbhex" _ "github.com/twpayne/go-geom/encoding/wkt" ) // spatialObjectBase is the base for spatial objects. type spatialObjectBase struct { ewkb geopb.EWKB // TODO: denormalize SRID from EWKB. } // Geometry is planar spatial object. type Geometry struct { spatialObjectBase } // NewGeometry returns a new Geometry. func NewGeometry(ewkb geopb.EWKB) *Geometry { return &Geometry{spatialObjectBase{ewkb: ewkb}} } // ParseGeometry parses a Geometry from a given text. func ParseGeometry(str geopb.WKT) (*Geometry, error) { wkb, err := geos.WKTToWKB(str) if err != nil { return nil, err } return NewGeometry(geopb.EWKB(wkb)), nil } // Geography is a spherical spatial object. type Geography struct { spatialObjectBase } // NewGeography returns a new Geography. func NewGeography(ewkb geopb.EWKB) *Geography { return &Geography{spatialObjectBase{ewkb: ewkb}} } // ParseGeography parses a Geography from a given text. // TODO(otan): when we have our own WKT parser, move this to geo. func ParseGeography(str geopb.WKT) (*Geography, error) { // TODO(otan): set SRID of EWKB to 4326. wkb, err := geos.WKTToWKB(str) if err != nil { return nil, err } return NewGeography(geopb.EWKB(wkb)), nil } // AsS2 converts a given Geography into it's S2 form. func (g *Geography) AsS2() ([]s2.Region, error) { // TODO(otan): parse EWKB ourselves. 
geomRepr, err := ewkb.Unmarshal(g.ewkb) if err != nil { return nil, err } // TODO(otan): convert by reading from S2 directly. return s2RegionsFromGeom(geomRepr), nil } // s2RegionsFromGeom converts an geom representation of an object // to s2 regions. func s2RegionsFromGeom(geomRepr geom.T) []s2.Region { var regions []s2.Region switch repr := geomRepr.(type) { case *geom.Point: regions = []s2.Region{ s2.PointFromLatLng(s2.LatLngFromDegrees(repr.Y(), repr.X())), } case *geom.LineString: latLngs := make([]s2.LatLng, repr.NumCoords()) for i := 0; i < repr.NumCoords(); i++ { p := repr.Coord(i) latLngs[i] = s2.LatLngFromDegrees(p.Y(), p.X()) } regions = []s2.Region{ s2.PolylineFromLatLngs(latLngs), } case *geom.Polygon: loops := make([]*s2.Loop, repr.NumLinearRings()) // The first ring is a "shell", which is represented as CCW. // Following rings are "holes", which are CW. For S2, they are CCW and automatically figured out. for ringIdx := 0; ringIdx < repr.NumLinearRings(); ringIdx++ { linearRing := repr.LinearRing(ringIdx) points := make([]s2.Point, linearRing.NumCoords()) for pointIdx := 0; pointIdx < linearRing.NumCoords(); pointIdx++ { p := linearRing.Coord(pointIdx) pt := s2.PointFromLatLng(s2.LatLngFromDegrees(p.Y(), p.X())) if ringIdx == 0 { points[pointIdx] = pt } else { points[len(points)-pointIdx-1] = pt } } loops[ringIdx] = s2.LoopFromPoints(points) } regions = []s2.Region{ s2.PolygonFromLoops(loops), } case *geom.GeometryCollection: for _, geom := range repr.Geoms() { regions = append(regions, s2RegionsFromGeom(geom)...) } case *geom.MultiPoint: for i := 0; i < repr.NumPoints(); i++ { regions = append(regions, s2RegionsFromGeom(repr.Point(i))...) } case *geom.MultiLineString: for i := 0; i < repr.NumLineStrings(); i++ { regions = append(regions, s2RegionsFromGeom(repr.LineString(i))...) } case *geom.MultiPolygon: for i := 0; i < repr.NumPolygons(); i++ { regions = append(regions, s2RegionsFromGeom(repr.Polygon(i))...) } } return regions }
pkg/geo/geo.go
0.53048
0.431225
geo.go
starcoder
package entropy import ( "bufio" "fmt" "io" "math" "strings" ) // NGramCounter contains counts and totals for Ngrams of a // particular size type NGramCounter struct { Size int Counts map[string]uint64 Total uint64 } // NewNGramCounter returns a new ngram counter func NewNGramCounter(maxNGramSize int) (counter *NGramCounter) { counter = new(NGramCounter) counter.Size = maxNGramSize counter.Counts = make(map[string]uint64) return } // Prediction is the log probability of a string and other data type Prediction struct { LogProbAverage float64 LogProbTotal float64 NumberOfNGrams int Text string } // Update updates the counter for a newly seen string func (counter *NGramCounter) Update(line string) { counter.UpdateWithMultiplier(line, 1) } // UpdateWithMultiplier updates the counter for a string, using // a multiplier func (counter *NGramCounter) UpdateWithMultiplier(line string, multiplier uint64) { for _, key := range sliding([]rune(line), counter.Size) { counter.Counts[string(key)] += multiplier counter.Total += multiplier } } // Count returns the number of ngrams in a particular counter. 
// returns default if not found func (counter *NGramCounter) Count(key string, ifNotFound uint64) (count uint64) { count, ok := counter.Counts[key] if !ok { count = ifNotFound } return } // Model contains a max size and a map from its to character models type Model struct { Size int Counter *NGramCounter } // New creates a Model with maximum ngram size of `MaxNGramSize` func New(MaxNGramSize int) (model *Model) { model = new(Model) model.Size = MaxNGramSize model.Counter = NewNGramCounter(MaxNGramSize) return } // Update for Models send string to each counter func (model *Model) Update(line string) { model.UpdateWithMultiplier(line, 1) } // UpdateWithMultiplier for Models send string to each counter with multiplier func (model *Model) UpdateWithMultiplier(line string, multiplier uint64) { model.Counter.UpdateWithMultiplier(line, multiplier) } // LogProb returns the best matching log probability for a key given // a set of models func (model *Model) LogProb(key string) (logProb float64) { if model.Size == 0 || len(key) == 0 { logProb = math.Inf(-1) return } counter := model.Counter count := counter.Count(key, 0) if count == 0 { logProb = math.Log2(0.5) - math.Log2(float64(counter.Total)) } else { logProb = math.Log2(float64(count)) - math.Log2(float64(counter.Total)) } return } // Dump sends a set of ngram models to a writer func (model *Model) Dump(f io.Writer) { counter := model.Counter sz := model.Size for key, value := range counter.Counts { outs := fmt.Sprintf("%v\t%s\t%d\n", sz, key, value) _, err := f.Write([]byte(outs)) if err != nil { panic(err) } } } // Predict returns a prediction for a string func (model *Model) Predict(text string) (prediction *Prediction) { prediction = new(Prediction) prediction.Text = text keys := sliding([]rune(text), model.Size) nKeys := len(keys) prediction.NumberOfNGrams = nKeys if len(keys) == 0 { return } var logProbTotal float64 for _, key := range keys { lp := model.LogProb(key) logProbTotal += lp } logProbAverage := 
logProbTotal / float64(nKeys) prediction.LogProbAverage = logProbAverage prediction.LogProbTotal = logProbTotal return } func (model *Model) Entropy(text string) (entropy float64) { entropy = -model.Predict(text).LogProbAverage return } // Read reads a model in func Read(f io.Reader) (model *Model) { model = new(Model) scanner := bufio.NewScanner(f) var counter *NGramCounter var ngramSize int var linenum uint for scanner.Scan() { linenum++ var size int var ngram string var count uint64 text := scanner.Text() _, err := fmt.Sscanf(text, "%v\t%s\t%d", &size, &ngram, &count) if err != nil { fmt.Printf("Invalid line at %v: %v\n", linenum, text) } if ngramSize == 0 { ngramSize = size counter = NewNGramCounter(ngramSize) } // fmt.Printf("size: %v, ngram: '%v', count: %v\n", size, ngram, count) counter.Total += count counter.Counts[ngram] = count } model.Size = ngramSize model.Counter = counter return } // Train trains a set of ngram models from a file. Models must be // initialized. returns the number of example lines used func (model *Model) Train(f io.Reader) (exampleCount int) { sc := bufio.NewScanner(f) for sc.Scan() { text := sc.Text() exampleCount++ model.Update(strings.TrimSpace(text)) } return } // TrainWithMultiplier trains a set of ngram models from a file. Models must be // initialized. returns the number of example lines used. // format is token <tab> count func (model *Model) TrainWithMultiplier(f io.Reader) (exampleCount int) { sc := bufio.NewScanner(f) for sc.Scan() { text := sc.Text() exampleCount++ var ngram string var count uint64 _, err := fmt.Sscanf(text, "%s\t%d", &ngram, &count) if err != nil { fmt.Printf("Invalid line at %v: %v\n", exampleCount, text) } model.UpdateWithMultiplier(text, count) } return } // sliding window function func sliding(s []rune, length int) (windows []string) { for i := 0; i+length <= len(s); i++ { windows = append(windows, string(s[i:i+length])) } return }
entropy.go
0.750278
0.40869
entropy.go
starcoder
package bwt

import (
	"fmt"
	"sort"
	"strings"
)

// Bstring holds one cyclic permutation of the input string to BWT
type Bstring struct {
	Pos int    // rotation offset into the shared input strX
	Ch  string // last character of this rotation (the BWT output char)
}

// Bstrings holds a list of Bstring
type Bstrings []Bstring

// FIXME: This sucks big time. Was the quickest thing to get sorting work though.
var strX string

// LessThan compares two Bstring's and returns which should have a "lower" position in the BWT.
// Rotations are compared lazily against the shared strX rather than
// materialising each rotated string.
func (bs Bstring) LessThan(other Bstring) bool {
	a, b, n := bs.Pos, other.Pos, len(strX)
	// lim is the longest stretch comparable without either rotation
	// wrapping past the end of strX.
	lim := n - 1
	if a > b {
		lim -= a
	} else {
		lim -= b
	}
	if strX[a:a+lim] != strX[b:b+lim] {
		return strX[a:a+lim] < strX[b:b+lim]
	}
	a += lim
	b += lim
	// Byte-by-byte comparison with wrap-around for the remainder.
	// NOTE(review): this loop only terminates when the rotations differ
	// somewhere; a fully periodic input would spin forever — presumably
	// callers always include a unique sentinel. TODO confirm.
	for {
		if strX[a] != strX[b] {
			return strX[a] < strX[b]
		}
		a, b = (a+1)%n, (b+1)%n
	}
	return false // unreachable: the loop above exits only via return
}

func (bs Bstrings) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
func (bs Bstrings) Len() int      { return len(bs) }
func (bs Bstrings) Less(i, j int) bool {
	return bs[i].LessThan(bs[j])
}

// BurrowsWheelerTransform implements the operation with the same name upon str
func BurrowsWheelerTransform(str string) (out string) {
	strX = str
	n := len(str)
	sx := make(Bstrings, n)
	for k := range str {
		// The BWT output character of rotation k is the character just
		// before position k, i.e. the rotation's last character.
		end := (k + n - 1) % n
		sx[k] = Bstring{k, string(str[end])}
	}
	sort.Sort(sx)
	// NOTE(review): string += in a loop is O(n^2); a strings.Builder
	// would be linear.
	for _, bstr := range sx {
		out += bstr.Ch
	}
	return
}

// BurrowsWheelerTransformNaive implements the operation with the same name upon str,
// using a naive (and thus suboptimal) approach: every rotation is
// materialised explicitly before sorting.
func BurrowsWheelerTransformNaive(str string) (out string) {
	strX = str
	sx := make([]string, len(str))
	for k := range str {
		curr := str[k:] + str[:k]
		sx[k] = curr
	}
	sort.Strings(sx)
	out = ""
	for _, line := range sx {
		out += fmt.Sprintf("%s", string(line[len(line)-1]))
	}
	return
}

// Inverse BWT
// Pos seems to be pretty much the same as Bstring.
// FIXME: reconcile them BUT only after adding tests.
type Pos struct {
	Ch  uint8 // character byte value
	Pos int   // 1-based occurrence number of Ch within its column
}

// sortedString returns the characters of str in sorted order.
func sortedString(str string) string {
	chars := strings.Split(str, "")
	sort.Strings(chars)
	return strings.Join(chars, "")
}

// countOccurences returns, for each position in str, the running count of
// how many times that character has appeared so far (1-based).
func countOccurences(str string) (occ []int) {
	occ = make([]int, len(str))
	seen := map[rune]int{}
	for idx, r := range str {
		seen[r]++
		occ[idx] = seen[r]
	}
	return
}

// InvertedBurrowsWheelerTransform implements the operation with the same name upon str.
// It rebuilds the original text (terminated by '$') by repeatedly following
// the last-column -> first-column mapping of the BWT matrix.
func InvertedBurrowsWheelerTransform(str string) string {
	firstCol := sortedString(str)
	occFirst := countOccurences(firstCol)
	occLast := countOccurences(str)

	// next maps an occurrence in the last column to the matching
	// occurrence in the first column.
	next := map[Pos]Pos{}
	for i := range str {
		next[Pos{str[i], occLast[i]}] = Pos{firstCol[i], occFirst[i]}
	}

	var sb strings.Builder
	cur := next[Pos{'$', 1}]
	for {
		sb.WriteByte(cur.Ch)
		if cur.Ch == '$' {
			break
		}
		cur = next[cur]
	}
	return sb.String()
}
burrows_wheeler_transform.go
0.534855
0.520984
burrows_wheeler_transform.go
starcoder
package state

import (
	"bytes"
	"fmt"

	"github.com/cc14514/go-ydcoin/common"
	"github.com/cc14514/go-ydcoin/rlp"
	"github.com/cc14514/go-ydcoin/trie"
)

// NodeIterator is an iterator to traverse the entire state trie post-order,
// including all of the contract code and contract state tries.
type NodeIterator struct {
	state *StateDB // State being iterated

	stateIt trie.NodeIterator // Primary iterator for the global state trie
	dataIt  trie.NodeIterator // Secondary iterator for the data trie of a contract

	accountHash common.Hash // Hash of the node containing the account
	codeHash    common.Hash // Hash of the contract source code
	code        []byte      // Source code associated with a contract

	Hash   common.Hash // Hash of the current entry being iterated (nil if not standalone)
	Parent common.Hash // Hash of the first full ancestor node (nil if current is the root)

	Error error // Failure set in case of an internal error in the iterator
}

// NewNodeIterator creates an post-order state node iterator.
func NewNodeIterator(state *StateDB) *NodeIterator {
	return &NodeIterator{
		state: state,
	}
}

// Next moves the iterator to the next node, returning whether there are any
// further nodes. In case of an internal error this method returns false and
// sets the Error field to the encountered failure.
func (it *NodeIterator) Next() bool {
	// If the iterator failed previously, don't do anything
	if it.Error != nil {
		return false
	}
	// Otherwise step forward with the iterator and report any errors
	if err := it.step(); err != nil {
		it.Error = err
		return false
	}
	// retrieve caches the entry advanced to (if any) and reports liveness.
	return it.retrieve()
}

// step moves the iterator to the next entry of the state trie.
func (it *NodeIterator) step() error {
	// Abort if we reached the end of the iteration
	if it.state == nil {
		return nil
	}
	// Initialize the iterator if we've just started
	if it.stateIt == nil {
		it.stateIt = it.state.trie.NodeIterator(nil)
	}
	// If we had data nodes previously, we surely have at least state nodes
	if it.dataIt != nil {
		if cont := it.dataIt.Next(true); !cont {
			// Storage trie exhausted (or errored); fall back to the
			// account-level iterator on the following call.
			if it.dataIt.Error() != nil {
				return it.dataIt.Error()
			}
			it.dataIt = nil
		}
		return nil
	}
	// If we had source code previously, discard that
	if it.code != nil {
		it.code = nil
		return nil
	}
	// Step to the next state trie node, terminating if we're out of nodes
	if cont := it.stateIt.Next(true); !cont {
		if it.stateIt.Error() != nil {
			return it.stateIt.Error()
		}
		it.state, it.stateIt = nil, nil
		return nil
	}
	// If the state trie node is an internal entry, leave as is
	if !it.stateIt.Leaf() {
		return nil
	}
	// Otherwise we've reached an account node, initiate data iteration
	var account Account
	if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil {
		return err
	}
	dataTrie, err := it.state.db.OpenStorageTrie(common.BytesToHash(it.stateIt.LeafKey()), account.Root)
	if err != nil {
		return err
	}
	it.dataIt = dataTrie.NodeIterator(nil)
	if !it.dataIt.Next(true) {
		// Empty storage trie: nothing to iterate below this account.
		it.dataIt = nil
	}
	// Load the contract code, if the account has any.
	if !bytes.Equal(account.CodeHash, emptyCodeHash) {
		it.codeHash = common.BytesToHash(account.CodeHash)
		addrHash := common.BytesToHash(it.stateIt.LeafKey())
		it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash))
		if err != nil {
			return fmt.Errorf("code %x: %v", account.CodeHash, err)
		}
	}
	it.accountHash = it.stateIt.Parent()
	return nil
}

// retrieve pulls and caches the current state entry the iterator is traversing.
// The method returns whether there are any more data left for inspection.
func (it *NodeIterator) retrieve() bool { // Clear out any previously set values it.Hash = common.Hash{} // If the iteration's done, return no available data if it.state == nil { return false } // Otherwise retrieve the current entry switch { case it.dataIt != nil: it.Hash, it.Parent = it.dataIt.Hash(), it.dataIt.Parent() if it.Parent == (common.Hash{}) { it.Parent = it.accountHash } case it.code != nil: it.Hash, it.Parent = it.codeHash, it.accountHash case it.stateIt != nil: it.Hash, it.Parent = it.stateIt.Hash(), it.stateIt.Parent() } return true }
core/state/iterator.go
0.58439
0.514644
iterator.go
starcoder
package main

import (
	"fmt"
	"strings"
)

// marshal creates a function that encodes the structs in SSZ format. It creates two functions:
// 1. MarshalTo(dst []byte) marshals the content to the target array.
// 2. Marshal() marshals the content to a newly created array.
func (e *env) marshal(name string, v *Value) string {
	tmpl := `// MarshalSSZ ssz marshals the {{.name}} object
func (:: *{{.name}}) MarshalSSZ() ([]byte, error) {
	buf := make([]byte, ::.SizeSSZ())
	return ::.MarshalSSZTo(buf[:0])
}

// MarshalSSZTo ssz marshals the {{.name}} object to a target array
func (:: *{{.name}}) MarshalSSZTo(dst []byte) ([]byte, error) {
	var err error
	{{.offset}}
	{{.marshal}}
	return dst, err
}`
	data := map[string]interface{}{
		"name":    name,
		"marshal": v.marshalContainer(true),
		"offset":  "",
	}
	if !v.isFixed() {
		// offset is the position where the offset starts
		data["offset"] = fmt.Sprintf("offset := int(%d)\n", v.n)
	}
	str := execTmpl(tmpl, data)
	return appendObjSignature(str, v)
}

// marshal emits the generated Go statements that SSZ-encode this value
// into dst, dispatching on the value's SSZ type.
func (v *Value) marshal() string {
	switch v.t {
	case TypeContainer:
		return v.marshalContainer(false)
	case TypeBytes:
		if v.isFixed() {
			// fixed. It ensures that the size is correct
			return fmt.Sprintf("if dst, err = ssz.MarshalFixedBytes(dst, ::.%s, %d); err != nil {\n return nil, errMarshalFixedBytes\n}", v.name, v.s)
		}
		// dynamic
		return fmt.Sprintf("if len(::.%s) > %d {\n return nil, errMarshalDynamicBytes\n}\ndst = append(dst, ::.%s...)", v.name, v.m, v.name)
	case TypeUint:
		return fmt.Sprintf("dst = ssz.Marshal%s(dst, ::.%s)", uintVToName(v), v.name)
	case TypeBitList:
		return fmt.Sprintf("dst = append(dst, ::.%s...)", v.name)
	case TypeBool:
		return fmt.Sprintf("dst = ssz.MarshalBool(dst, ::.%s)", v.name)
	case TypeVector:
		if v.e.isFixed() {
			return v.marshalVector()
		}
		// vectors of dynamic elements are encoded like lists
		fallthrough
	case TypeList:
		return v.marshalList()
	default:
		panic(fmt.Errorf("marshal not implemented for type %s", v.t.String()))
	}
}

// marshalList generates the encoding loop for a list value; dynamic
// element types get an offset table before the element data.
func (v *Value) marshalList() string {
	v.e.name = v.name + "[ii]"

	// bound check
	str := fmt.Sprintf("if len(::.%s) > %d {\n return nil, errMarshalList\n}\n", v.name, v.s)

	if v.e.isFixed() {
		tmpl := `for ii := 0; ii < len(::.{{.name}}); ii++ {
	{{.dynamic}}
}`
		str += execTmpl(tmpl, map[string]interface{}{
			"name":    v.name,
			"dynamic": v.e.marshal(),
		})
		return str
	}

	// encode a list of dynamic objects:
	// 1. write offsets for each
	// 2. marshal each element
	tmpl := `{
	offset = 4 * len(::.{{.name}})
	for ii := 0; ii < len(::.{{.name}}); ii++ {
		dst = ssz.WriteOffset(dst, offset)
		{{.size}}
	}
}
for ii := 0; ii < len(::.{{.name}}); ii++ {
	{{.marshal}}
}`
	str += execTmpl(tmpl, map[string]interface{}{
		"name":    v.name,
		"size":    v.e.size("offset"),
		"marshal": v.e.marshal(),
	})
	return str
}

// marshalVector generates the encoding loop for a fixed-size vector,
// including the length validation.
func (v *Value) marshalVector() (str string) {
	v.e.name = fmt.Sprintf("%s[ii]", v.name)

	tmpl := `if len(::.{{.name}}) != {{.size}} {
	return nil, errMarshalVector
}
for ii := 0; ii < {{.size}}; ii++ {
	{{.marshal}}
}`
	return execTmpl(tmpl, map[string]interface{}{
		"name":    v.name,
		"size":    v.s,
		"marshal": v.e.marshal(),
	})
}

// marshalContainer generates the encoding of a container. When start is
// true it emits the top-level field-by-field layout (fixed parts and
// offsets first, dynamic parts afterwards); otherwise it delegates to the
// nested object's MarshalSSZTo.
func (v *Value) marshalContainer(start bool) string {
	if !start {
		tmpl := `{{ if .check }}if ::.{{.name}} == nil {
	::.{{.name}} = new({{.obj}})
}
{{ end }}if dst, err = ::.{{.name}}.MarshalSSZTo(dst); err != nil {
	return nil, err
}`
		// validate only for fixed structs
		check := v.isFixed()
		if v.isListElem() {
			check = false
		}
		return execTmpl(tmpl, map[string]interface{}{
			"name":  v.name,
			"obj":   v.objRef(),
			"check": check,
		})
	}

	offset := v.n
	out := []string{}

	for indx, i := range v.o {
		var str string
		if i.isFixed() {
			// write the content
			str = fmt.Sprintf("// Field (%d) '%s'\n%s\n", indx, i.name, i.marshal())
		} else {
			// write the offset
			str = fmt.Sprintf("// Offset (%d) '%s'\ndst = ssz.WriteOffset(dst, offset)\n%s\n", indx, i.name, i.size("offset"))
			offset += i.n
		}
		out = append(out, str)
	}

	// write the dynamic parts
	for indx, i := range v.o {
		if !i.isFixed() {
			out = append(out, fmt.Sprintf("// Field (%d) '%s'\n%s\n", indx, i.name, i.marshal()))
		}
	}
	return strings.Join(out, "\n")
}
sszgen/marshal.go
0.657648
0.422743
marshal.go
starcoder
package phy

import (
	"math"

	"github.com/Tnze/go-mc/bot/world"
)

// MinMax is a closed interval on one axis.
type MinMax struct {
	Min, Max float64
}

// Extends adjusts the bounds of the MinMax. A negative number will reduce the
// minimum bound, whereas a positive number will increase the maximum bound.
func (mm MinMax) Extend(delta float64) MinMax {
	if delta < 0 {
		return MinMax{
			Min: mm.Min + delta,
			Max: mm.Max,
		}
	}
	return MinMax{
		Min: mm.Min,
		Max: mm.Max + delta,
	}
}

// Contract reduces both the minimum and maximum bound by the provided amount,
// such that the difference between the bounds decreases for positive values.
func (mm MinMax) Contract(amt float64) MinMax {
	return MinMax{
		Min: mm.Min + amt,
		Max: mm.Max - amt,
	}
}

// Expand changes the minimum and maximum bounds by the provided amount, such
// that the difference between the bounds increases for positive values.
func (mm MinMax) Expand(amt float64) MinMax {
	return MinMax{
		Min: mm.Min - amt,
		Max: mm.Max + amt,
	}
}

// Offset adds the provided value to both the minimum and maximum value.
func (mm MinMax) Offset(amt float64) MinMax {
	return MinMax{
		Min: mm.Min + amt,
		Max: mm.Max + amt,
	}
}

// AABB implements Axis Aligned Bounding Box operations.
type AABB struct {
	X, Y, Z MinMax
	Block   world.BlockStatus
}

// Extend adjusts the minimum (for negative values) or maximum bounds (for
// positive values) by the provided scalar for each dimension.
func (bb AABB) Extend(dx, dy, dz float64) AABB {
	// Fix: the Y and Z axes previously reused dx, silently ignoring the
	// dy/dz arguments.
	return AABB{
		X:     bb.X.Extend(dx),
		Y:     bb.Y.Extend(dy),
		Z:     bb.Z.Extend(dz),
		Block: bb.Block,
	}
}

// Contract reduces the difference between the min/max bounds (for positive
// values) for each dimension.
func (bb AABB) Contract(x, y, z float64) AABB {
	return AABB{
		X:     bb.X.Contract(x),
		Y:     bb.Y.Contract(y),
		Z:     bb.Z.Contract(z),
		Block: bb.Block,
	}
}

// Expand increases both the minimum and maximum bounds by the provided amount
// (for positive values) for each dimension.
func (bb AABB) Expand(x, y, z float64) AABB {
	out := bb
	out.X = out.X.Expand(x)
	out.Y = out.Y.Expand(y)
	out.Z = out.Z.Expand(z)
	return out
}

// Offset moves both the minimum and maximum bound by the provided value for
// each dimension.
func (bb AABB) Offset(x, y, z float64) AABB {
	out := bb
	out.X = out.X.Offset(x)
	out.Y = out.Y.Offset(y)
	out.Z = out.Z.Offset(z)
	return out
}

func (bb AABB) XOffset(o AABB, xOffset float64) float64 {
	// A collision on X is only possible when the boxes overlap on both
	// other axes.
	if o.Y.Max <= bb.Y.Min || o.Y.Min >= bb.Y.Max || o.Z.Max <= bb.Z.Min || o.Z.Min >= bb.Z.Max {
		return xOffset
	}
	switch {
	case xOffset > 0.0 && o.X.Max <= bb.X.Min:
		// Moving towards +X: clamp to the gap up to bb's near face.
		return math.Min(bb.X.Min-o.X.Max, xOffset)
	case xOffset < 0.0 && o.X.Min >= bb.X.Max:
		// Moving towards -X: clamp to the (negative) gap to bb's far face.
		return math.Max(bb.X.Max-o.X.Min, xOffset)
	default:
		return xOffset
	}
}

func (bb AABB) YOffset(o AABB, yOffset float64) float64 {
	// A collision on Y is only possible when the boxes overlap on both
	// other axes.
	if o.X.Max <= bb.X.Min || o.X.Min >= bb.X.Max || o.Z.Max <= bb.Z.Min || o.Z.Min >= bb.Z.Max {
		return yOffset
	}
	switch {
	case yOffset > 0.0 && o.Y.Max <= bb.Y.Min:
		return math.Min(bb.Y.Min-o.Y.Max, yOffset)
	case yOffset < 0.0 && o.Y.Min >= bb.Y.Max:
		return math.Max(bb.Y.Max-o.Y.Min, yOffset)
	default:
		return yOffset
	}
}

func (bb AABB) ZOffset(o AABB, zOffset float64) float64 {
	// A collision on Z is only possible when the boxes overlap on both
	// other axes.
	if o.X.Max <= bb.X.Min || o.X.Min >= bb.X.Max || o.Y.Max <= bb.Y.Min || o.Y.Min >= bb.Y.Max {
		return zOffset
	}
	switch {
	case zOffset > 0.0 && o.Z.Max <= bb.Z.Min:
		return math.Min(bb.Z.Min-o.Z.Max, zOffset)
	case zOffset < 0.0 && o.Z.Min >= bb.Z.Max:
		return math.Max(bb.Z.Max-o.Z.Min, zOffset)
	default:
		return zOffset
	}
}

func (bb AABB) Intersects(o AABB) bool {
	// Strict overlap is required on every axis; touching faces do not count.
	xOverlap := bb.X.Min < o.X.Max && bb.X.Max > o.X.Min
	yOverlap := bb.Y.Min < o.Y.Max && bb.Y.Max > o.Y.Min
	zOverlap := bb.Z.Min < o.Z.Max && bb.Z.Max > o.Z.Min
	return xOverlap && yOverlap && zOverlap
}
bot/phy/aabb.go
0.864682
0.72829
aabb.go
starcoder
package v1 func (CloudInitNoCloudSource) SwaggerDoc() map[string]string { return map[string]string{ "": "Represents a cloud-init nocloud user data source.\nMore info: http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html", "secretRef": "UserDataSecretRef references a k8s secret that contains NoCloud userdata.\n+ optional", "userDataBase64": "UserDataBase64 contains NoCloud cloud-init userdata as a base64 encoded string.\n+ optional", "userData": "UserData contains NoCloud inline cloud-init userdata.\n+ optional", } } func (DomainSpec) SwaggerDoc() map[string]string { return map[string]string{ "resources": "Resources describes the Compute Resources required by this vmi.", "cpu": "CPU allow specified the detailed CPU topology inside the vmi.\n+optional", "memory": "Memory allow specifying the VMI memory features.\n+optional", "machine": "Machine type.\n+optional", "firmware": "Firmware.\n+optional", "clock": "Clock sets the clock and timers of the vmi.\n+optional", "features": "Features like acpi, apic, hyperv.\n+optional", "devices": "Devices allows adding disks, network interfaces, ...", } } func (DomainPresetSpec) SwaggerDoc() map[string]string { return map[string]string{ "resources": "Resources describes the Compute Resources required by this vmi.", "cpu": "CPU allow specified the detailed CPU topology inside the vmi.\n+optional", "memory": "Memory allow specifying the VMI memory features.\n+optional", "machine": "Machine type.\n+optional", "firmware": "Firmware.\n+optional", "clock": "Clock sets the clock and timers of the vmi.\n+optional", "features": "Features like acpi, apic, hyperv.\n+optional", "devices": "Devices allows adding disks, network interfaces, ...\n+optional", } } func (ResourceRequirements) SwaggerDoc() map[string]string { return map[string]string{ "requests": "Requests is a description of the initial vmi resources.\nValid resource keys are \"memory\" and \"cpu\".\n+optional", "limits": "Limits describes the maximum amount of 
compute resources allowed.\nValid resource keys are \"memory\" and \"cpu\".\n+optional", "overcommitGuestOverhead": "Don't ask the scheduler to take the guest-management overhead into account. Instead\nput the overhead only into the requested memory limits. This can lead to crashes if\nall memory is in use on a node. Defaults to false.", } } func (CPU) SwaggerDoc() map[string]string { return map[string]string{ "": "CPU allows specifying the CPU topology.", "cores": "Cores specifies the number of cores inside the vmi.\nMust be a value greater or equal 1.", "model": "Model specifies the CPU model inside the VMI.\nList of available models https://github.com/libvirt/libvirt/blob/master/src/cpu/cpu_map.xml.\nIt is possible to specify special cases like \"host-passthrough\" to get the same CPU as the node\nand \"host-model\" to get CPU closest to the node one.\nFor more information see https://libvirt.org/formatdomain.html#elementsCPU.\nDefaults to host-model.\n+optional", } } func (Memory) SwaggerDoc() map[string]string { return map[string]string{ "": "Memory allows specifying the VirtualMachineInstance memory features.", "hugepages": "Hugepages allow to use hugepages for the VirtualMachineInstance instead of regular memory.\n+optional", "guest": "Guest allows to specifying the amount of memory which is visible inside the Guest OS.\nThe Guest must lie between Requests and Limits from the resources section.\nDefaults to the requested memory in the resources section if not specified.\n+ optional", } } func (Hugepages) SwaggerDoc() map[string]string { return map[string]string{ "": "Hugepages allow to use hugepages for the VirtualMachineInstance instead of regular memory.", "pageSize": "PageSize specifies the hugepage size, for x86_64 architecture valid values are 1Gi and 2Mi.", } } func (Machine) SwaggerDoc() map[string]string { return map[string]string{ "type": "QEMU machine type is the actual chipset of the VirtualMachineInstance.", } } func (Firmware) SwaggerDoc() 
map[string]string { return map[string]string{ "uuid": "UUID reported by the vmi bios.\nDefaults to a random generated uid.", } } func (Devices) SwaggerDoc() map[string]string { return map[string]string{ "disks": "Disks describes disks, cdroms, floppy and luns which are connected to the vmi.", "watchdog": "Watchdog describes a watchdog device which can be added to the vmi.", "interfaces": "Interfaces describe network interfaces which are added to the vm.", "autoattachPodInterface": "Whether to attach a pod network interface. Defaults to true.", "autoattachGraphicsDevice": "Wheater to attach the default graphics device or not.\nVNC will not be available if set to false. Defaults to true.", } } func (Disk) SwaggerDoc() map[string]string { return map[string]string{ "name": "Name is the device name", "volumeName": "Name of the volume which is referenced.\nMust match the Name of a Volume.", "bootOrder": "BootOrder is an integer value > 0, used to determine ordering of boot devices.\nLower values take precedence.\nEach disk or interface that has a boot order must have a unique value.\nDisks without a boot order are not tried if a disk with a boot order exists.\n+optional", "serial": "Serial provides the ability to specify a serial number for the disk device.\n+optional", } } func (DiskDevice) SwaggerDoc() map[string]string { return map[string]string{ "": "Represents the target of a volume to mount.\nOnly one of its members may be specified.", "disk": "Attach a volume as a disk to the vmi.", "lun": "Attach a volume as a LUN to the vmi.", "floppy": "Attach a volume as a floppy to the vmi.", "cdrom": "Attach a volume as a cdrom to the vmi.", } } func (DiskTarget) SwaggerDoc() map[string]string { return map[string]string{ "bus": "Bus indicates the type of disk device to emulate.\nsupported values: virtio, sata, scsi, ide.", "readonly": "ReadOnly.\nDefaults to false.", } } func (LunTarget) SwaggerDoc() map[string]string { return map[string]string{ "bus": "Bus indicates the 
type of disk device to emulate.\nsupported values: virtio, sata, scsi, ide.", "readonly": "ReadOnly.\nDefaults to false.", } } func (FloppyTarget) SwaggerDoc() map[string]string { return map[string]string{ "readonly": "ReadOnly.\nDefaults to false.", "tray": "Tray indicates if the tray of the device is open or closed.\nAllowed values are \"open\" and \"closed\".\nDefaults to closed.\n+optional", } } func (CDRomTarget) SwaggerDoc() map[string]string { return map[string]string{ "bus": "Bus indicates the type of disk device to emulate.\nsupported values: virtio, sata, scsi, ide.", "readonly": "ReadOnly.\nDefaults to true.", "tray": "Tray indicates if the tray of the device is open or closed.\nAllowed values are \"open\" and \"closed\".\nDefaults to closed.\n+optional", } } func (Volume) SwaggerDoc() map[string]string { return map[string]string{ "": "Volume represents a named volume in a vmi.", "name": "Volume's name.\nMust be a DNS_LABEL and unique within the vmi.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", } } func (VolumeSource) SwaggerDoc() map[string]string { return map[string]string{ "": "Represents the source of a volume to mount.\nOnly one of its members may be specified.", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace.\nDirectly attached to the vmi via qemu.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional", "cloudInitNoCloud": "CloudInitNoCloud represents a cloud-init NoCloud user-data source.\nThe NoCloud data will be added as a disk to the vmi. 
A proper cloud-init installation is required inside the guest.\nMore info: http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html\n+optional", "registryDisk": "RegistryDisk references a docker image, embedding a qcow or raw disk.\nMore info: https://kubevirt.gitbooks.io/user-guide/registry-disk.html\n+optional", "ephemeral": "Ephemeral is a special volume source that \"wraps\" specified source and provides copy-on-write image on top of it.\n+optional", "emptyDisk": "EmptyDisk represents a temporary disk which shares the vmis lifecycle.\nMore info: https://kubevirt.gitbooks.io/user-guide/disks-and-volumes.html\n+optional", } } func (EphemeralVolumeSource) SwaggerDoc() map[string]string { return map[string]string{ "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace.\nDirectly attached to the vmi via qemu.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional", } } func (EmptyDiskSource) SwaggerDoc() map[string]string { return map[string]string{ "": "EmptyDisk represents a temporary disk which shares the vmis lifecycle.", "capacity": "Capacity of the sparse disk.", } } func (RegistryDiskSource) SwaggerDoc() map[string]string { return map[string]string{ "": "Represents a docker image with an embedded disk.", "image": "Image is the name of the image with the embedded disk.", "imagePullSecret": "ImagePullSecret is the name of the Docker registry secret required to pull the image. The secret must already exist.", } } func (ClockOffset) SwaggerDoc() map[string]string { return map[string]string{ "": "Exactly one of its members must be set.", "utc": "UTC sets the guest clock to UTC on each boot. 
If an offset is specified,\nguest changes to the clock will be kept during reboots and are not reset.", "timezone": "Timezone sets the guest clock to the specified timezone.\nZone name follows the TZ environment variable format (e.g. 'America/New_York').", } } func (ClockOffsetUTC) SwaggerDoc() map[string]string { return map[string]string{ "": "UTC sets the guest clock to UTC on each boot.", "offsetSeconds": "OffsetSeconds specifies an offset in seconds, relative to UTC. If set,\nguest changes to the clock will be kept during reboots and not reset.", } } func (Clock) SwaggerDoc() map[string]string { return map[string]string{ "": "Represents the clock and timers of a vmi.", "timer": "Timer specifies whih timers are attached to the vmi.", } } func (Timer) SwaggerDoc() map[string]string { return map[string]string{ "": "Represents all available timers in a vmi.", "hpet": "HPET (High Precision Event Timer) - multiple timers with periodic interrupts.", "kvm": "KVM \t(KVM clock) - lets guests read the host’s wall clock time (paravirtualized). For linux guests.", "pit": "PIT (Programmable Interval Timer) - a timer with periodic interrupts.", "rtc": "RTC (Real Time Clock) - a continuously running timer with periodic interrupts.", "hyperv": "Hyperv (Hypervclock) - lets guests read the host’s wall clock time (paravirtualized). 
For windows guests.", } } func (RTCTimer) SwaggerDoc() map[string]string { return map[string]string{ "tickPolicy": "TickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest.\nOne of \"delay\", \"catchup\".", "present": "Enabled set to false makes sure that the machine type or a preset can't add the timer.\nDefaults to true.\n+optional", "track": "Track the guest or the wall clock.", } } func (HPETTimer) SwaggerDoc() map[string]string { return map[string]string{ "tickPolicy": "TickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest.\nOne of \"delay\", \"catchup\", \"merge\", \"discard\".", "present": "Enabled set to false makes sure that the machine type or a preset can't add the timer.\nDefaults to true.\n+optional", } } func (PITTimer) SwaggerDoc() map[string]string { return map[string]string{ "tickPolicy": "TickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest.\nOne of \"delay\", \"catchup\", \"discard\".", "present": "Enabled set to false makes sure that the machine type or a preset can't add the timer.\nDefaults to true.\n+optional", } } func (KVMTimer) SwaggerDoc() map[string]string { return map[string]string{ "present": "Enabled set to false makes sure that the machine type or a preset can't add the timer.\nDefaults to true.\n+optional", } } func (HypervTimer) SwaggerDoc() map[string]string { return map[string]string{ "present": "Enabled set to false makes sure that the machine type or a preset can't add the timer.\nDefaults to true.\n+optional", } } func (Features) SwaggerDoc() map[string]string { return map[string]string{ "acpi": "ACPI enables/disables ACPI insidejsondata guest.\nDefaults to enabled.\n+optional", "apic": "Defaults to the machine type setting.\n+optional", "hyperv": "Defaults to the machine type setting.\n+optional", } } func (FeatureState) SwaggerDoc() map[string]string { return map[string]string{ "": "Represents if a 
feature is enabled or disabled.", "enabled": "Enabled determines if the feature should be enabled or disabled on the guest.\nDefaults to true.\n+optional", } } func (FeatureAPIC) SwaggerDoc() map[string]string { return map[string]string{ "enabled": "Enabled determines if the feature should be enabled or disabled on the guest.\nDefaults to true.\n+optional", "endOfInterrupt": "EndOfInterrupt enables the end of interrupt notification in the guest.\nDefaults to false.\n+optional", } } func (FeatureSpinlocks) SwaggerDoc() map[string]string { return map[string]string{ "enabled": "Enabled determines if the feature should be enabled or disabled on the guest.\nDefaults to true.\n+optional", "spinlocks": "Retries indicates the number of retries.\nMust be a value greater or equal 4096.\nDefaults to 4096.\n+optional", } } func (FeatureVendorID) SwaggerDoc() map[string]string { return map[string]string{ "enabled": "Enabled determines if the feature should be enabled or disabled on the guest.\nDefaults to true.\n+optional", "vendorid": "VendorID sets the hypervisor vendor id, visible to the vmi.\nString up to twelve characters.", } } func (FeatureHyperv) SwaggerDoc() map[string]string { return map[string]string{ "": "Hyperv specific features.", "relaxed": "Relaxed relaxes constraints on timer.\nDefaults to the machine type setting.\n+optional", "vapic": "VAPIC indicates whether virtual APIC is enabled.\nDefaults to the machine type setting.\n+optional", "spinlocks": "Spinlocks indicates if spinlocks should be made available to the guest.\n+optional", "vpindex": "VPIndex enables the Virtual Processor Index to help windows identifying virtual processors.\nDefaults to the machine type setting.\n+optional", "runtime": "Runtime.\nDefaults to the machine type setting.\n+optional", "synic": "SyNIC enable Synthetic Interrupt Controller.\nDefaults to the machine type setting.\n+optional", "synictimer": "SyNICTimer enable Synthetic Interrupt Controller timer.\nDefaults to the machine 
type setting.\n+optional", "reset": "Reset enables Hyperv reboot/reset for the vmi.\nDefaults to the machine type setting.\n+optional", "vendorid": "VendorID allows setting the hypervisor vendor id.\nDefaults to the machine type setting.\n+optional", } } func (Watchdog) SwaggerDoc() map[string]string { return map[string]string{ "": "Named watchdog device.", "name": "Name of the watchdog.", } } func (WatchdogDevice) SwaggerDoc() map[string]string { return map[string]string{ "": "Hardware watchdog device.\nExactly one of its members must be set.", "i6300esb": "i6300esb watchdog device.\n+optional", } } func (I6300ESBWatchdog) SwaggerDoc() map[string]string { return map[string]string{ "": "i6300esb watchdog device.", "action": "The action to take. Valid values are poweroff, reset, shutdown.\nDefaults to reset.", } } func (Interface) SwaggerDoc() map[string]string { return map[string]string{ "name": "Logical name of the interface as well as a reference to the associated networks.\nMust match the Name of a Network.", "model": "Interface model.\nOne of: e1000, e1000e, ne2k_pci, pcnet, rtl8139, virtio.\nDefaults to virtio.", "ports": "List of ports to be forwarded to the virtual machine.", "macAddress": "Interface MAC address. For example: de:ad:00:00:be:af or DE-AD-00-00-BE-AF.", "bootOrder": "BootOrder is an integer value > 0, used to determine ordering of boot devices.\nLower values take precedence.\nEach interface or disk that has a boot order must have a unique value.\nInterfaces without a boot order are not tried.\n+optional", "pciAddress": "If specified, the virtual network interface will be placed on the guests pci address with the specifed PCI address. 
For example: 0000:81:01.10\n+optional", } } func (InterfaceBindingMethod) SwaggerDoc() map[string]string { return map[string]string{ "": "Represents the method which will be used to connect the interface to the guest.\nOnly one of its members may be specified.", } } func (InterfaceBridge) SwaggerDoc() map[string]string { return map[string]string{} } func (InterfaceSlirp) SwaggerDoc() map[string]string { return map[string]string{} } func (Port) SwaggerDoc() map[string]string { return map[string]string{ "": "Port repesents a port to expose from the virtual machine.\nDefault protocol TCP.\nThe port field is mandatory", "name": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services.\n+optional", "protocol": "Protocol for port. Must be UDP or TCP.\nDefaults to \"TCP\".\n+optional", "port": "Number of port to expose for the virtual machine.\nThis must be a valid port number, 0 < x < 65536.", } } func (Network) SwaggerDoc() map[string]string { return map[string]string{ "": "Network represents a network type and a resource that should be connected to the vm.", "name": "Network name.\nMust be a DNS_LABEL and unique within the vm.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", } } func (NetworkSource) SwaggerDoc() map[string]string { return map[string]string{ "": "Represents the source resource that will be connected to the vm.\nOnly one of its members may be specified.", } } func (PodNetwork) SwaggerDoc() map[string]string { return map[string]string{ "": "Represents the stock pod network interface.", "vmNetworkCIDR": "CIDR for vm network.\nDefault 10.0.2.0/24 if not specified.", } }
pkg/api/v1/schema_swagger_generated.go
0.891162
0.41739
schema_swagger_generated.go
starcoder
package input import ( "context" "fmt" "time" "github.com/benthosdev/benthos/v4/internal/component/input" "github.com/benthosdev/benthos/v4/internal/component/metrics" "github.com/benthosdev/benthos/v4/internal/docs" "github.com/benthosdev/benthos/v4/internal/interop" "github.com/benthosdev/benthos/v4/internal/log" "github.com/benthosdev/benthos/v4/internal/message" ) func init() { Constructors[TypeResource] = TypeSpec{ constructor: fromSimpleConstructor(NewResource), Summary: ` Resource is an input type that runs a resource input by its name.`, Description: ` This input allows you to reference the same configured input resource in multiple places, and can also tidy up large nested configs. For example, the config: ` + "```yaml" + ` input: broker: inputs: - kafka: addresses: [ TODO ] topics: [ foo ] consumer_group: foogroup - gcp_pubsub: project: bar subscription: baz ` + "```" + ` Could also be expressed as: ` + "```yaml" + ` input: broker: inputs: - resource: foo - resource: bar input_resources: - label: foo kafka: addresses: [ TODO ] topics: [ foo ] consumer_group: foogroup - label: bar gcp_pubsub: project: bar subscription: baz ` + "```" + ` You can find out more about resources [in this document.](/docs/configuration/resources)`, Categories: []string{ "Utility", }, Config: docs.FieldString("", "").HasDefault(""), } } //------------------------------------------------------------------------------ // Resource is an input that wraps an input resource. type Resource struct { mgr interop.Manager name string log log.Modular } // NewResource returns a resource input. 
func NewResource( conf Config, mgr interop.Manager, log log.Modular, stats metrics.Type, ) (input.Streamed, error) { if !mgr.ProbeInput(conf.Resource) { return nil, fmt.Errorf("input resource '%v' was not found", conf.Resource) } return &Resource{ mgr: mgr, name: conf.Resource, log: log, }, nil } //------------------------------------------------------------------------------ // TransactionChan returns a transactions channel for consuming messages from // this input type. func (r *Resource) TransactionChan() (tChan <-chan message.Transaction) { if err := r.mgr.AccessInput(context.Background(), r.name, func(i input.Streamed) { tChan = i.TransactionChan() }); err != nil { r.log.Errorf("Failed to obtain input resource '%v': %v", r.name, err) } return } // Connected returns a boolean indicating whether this input is currently // connected to its target. func (r *Resource) Connected() (isConnected bool) { if err := r.mgr.AccessInput(context.Background(), r.name, func(i input.Streamed) { isConnected = i.Connected() }); err != nil { r.log.Errorf("Failed to obtain input resource '%v': %v", r.name, err) } return } // CloseAsync shuts down the processor and stops processing requests. func (r *Resource) CloseAsync() { } // WaitForClose blocks until the processor has closed down. func (r *Resource) WaitForClose(timeout time.Duration) error { return nil }
internal/old/input/resource.go
0.627381
0.509642
resource.go
starcoder
package sq import ( "bytes" "fmt" "strings" ) // InsertBuilder builds SQL INSERT statements. type InsertBuilder interface { // Prefix adds an expression to the beginning of the query. Prefix(sql string, args ...interface{}) InsertBuilder // Options adds keyword options before the INTO clause of the query. Options(options ...string) InsertBuilder // Into sets the INTO clause of the query. Into(into string) InsertBuilder // Columns adds insert columns to the query. Columns(columns ...string) InsertBuilder // Values adds a single row's values to the query. Values(values ...interface{}) InsertBuilder // Suffix adds an expression to the end of the query. Suffix(sql string, args ...interface{}) InsertBuilder // SetMap set columns and values for insert builder from a map of column name and value // note that it will reset all previous columns and values was set if any. SetMap(clauses map[string]interface{}) InsertBuilder ToSQL() (sqlStr string, args []interface{}, err error) } type insertBuilder struct { prefixes exprs options []string into string columns []string values [][]interface{} suffixes exprs } // NewInsertBuilder creates new instance of InsertBuilder func NewInsertBuilder() InsertBuilder { return &insertBuilder{} } func (b *insertBuilder) ToSQL() (sqlStr string, args []interface{}, err error) { if len(b.into) == 0 { err = fmt.Errorf("insert statements must specify a table") return } if len(b.values) == 0 { err = fmt.Errorf("insert statements must have at least one set of values") return } sql := &bytes.Buffer{} if len(b.prefixes) > 0 { args, _ = b.prefixes.AppendToSQL(sql, " ", args) sql.WriteString(" ") } sql.WriteString("INSERT ") if len(b.options) > 0 { sql.WriteString(strings.Join(b.options, " ")) sql.WriteString(" ") } sql.WriteString("INTO ") sql.WriteString(b.into) sql.WriteString(" ") if len(b.columns) > 0 { sql.WriteString("(") sql.WriteString(strings.Join(b.columns, ",")) sql.WriteString(") ") } sql.WriteString("VALUES ") valuesStrings := 
make([]string, len(b.values)) for r, row := range b.values { valueStrings := make([]string, len(row)) for v, val := range row { switch typedVal := val.(type) { case StatementBuilder: var valSQL string var valArgs []interface{} valSQL, valArgs, err = typedVal.ToSQL() if err != nil { return } valueStrings[v] = valSQL args = append(args, valArgs...) default: valueStrings[v] = "?" args = append(args, val) } } valuesStrings[r] = fmt.Sprintf("(%s)", strings.Join(valueStrings, ",")) } sql.WriteString(strings.Join(valuesStrings, ",")) if len(b.suffixes) > 0 { sql.WriteString(" ") args, _ = b.suffixes.AppendToSQL(sql, " ", args) } sqlStr = sql.String() return } func (b *insertBuilder) Prefix(sql string, args ...interface{}) InsertBuilder { b.prefixes = append(b.prefixes, expr{sql: sql, args: args}) return b } func (b *insertBuilder) Options(options ...string) InsertBuilder { b.options = append(b.options, options...) return b } func (b *insertBuilder) Into(into string) InsertBuilder { b.into = into return b } func (b *insertBuilder) Columns(columns ...string) InsertBuilder { b.columns = append(b.columns, columns...) return b } func (b *insertBuilder) Values(values ...interface{}) InsertBuilder { b.values = append(b.values, values) return b } func (b *insertBuilder) Suffix(sql string, args ...interface{}) InsertBuilder { b.suffixes = append(b.suffixes, expr{sql: sql, args: args}) return b } func (b *insertBuilder) SetMap(clauses map[string]interface{}) InsertBuilder { // TODO: replace resetting previous values with extending existing ones? cols := make([]string, 0, len(clauses)) vals := make([]interface{}, 0, len(clauses)) for col, val := range clauses { cols = append(cols, col) vals = append(vals, val) } b.columns = cols b.values = [][]interface{}{vals} return b }
insert.go
0.583441
0.406862
insert.go
starcoder
package yqlib import ( "container/list" "fmt" yaml "gopkg.in/yaml.v3" ) func entrySeqFor(key *yaml.Node, value *yaml.Node) *yaml.Node { var keyKey = &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Value: "key"} var valueKey = &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Value: "value"} return &yaml.Node{ Kind: yaml.MappingNode, Tag: "!!map", Content: []*yaml.Node{keyKey, key, valueKey, value}, } } func toEntriesFromMap(candidateNode *CandidateNode) *CandidateNode { var sequence = &yaml.Node{Kind: yaml.SequenceNode, Tag: "!!seq"} var entriesNode = candidateNode.CreateReplacement(sequence) var contents = unwrapDoc(candidateNode.Node).Content for index := 0; index < len(contents); index = index + 2 { key := contents[index] value := contents[index+1] sequence.Content = append(sequence.Content, entrySeqFor(key, value)) } return entriesNode } func toEntriesfromSeq(candidateNode *CandidateNode) *CandidateNode { var sequence = &yaml.Node{Kind: yaml.SequenceNode, Tag: "!!seq"} var entriesNode = candidateNode.CreateReplacement(sequence) var contents = unwrapDoc(candidateNode.Node).Content for index := 0; index < len(contents); index = index + 1 { key := &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!int", Value: fmt.Sprintf("%v", index)} value := contents[index] sequence.Content = append(sequence.Content, entrySeqFor(key, value)) } return entriesNode } func toEntriesOperator(d *dataTreeNavigator, context Context, expressionNode *ExpressionNode) (Context, error) { var results = list.New() for el := context.MatchingNodes.Front(); el != nil; el = el.Next() { candidate := el.Value.(*CandidateNode) candidateNode := unwrapDoc(candidate.Node) switch candidateNode.Kind { case yaml.MappingNode: results.PushBack(toEntriesFromMap(candidate)) case yaml.SequenceNode: results.PushBack(toEntriesfromSeq(candidate)) default: if candidateNode.Tag != "!!null" { return Context{}, fmt.Errorf("%v has no keys", candidate.Node.Tag) } } } return context.ChildContext(results), nil } func 
parseEntry(entry *yaml.Node, position int) (*yaml.Node, *yaml.Node, error) { prefs := traversePreferences{DontAutoCreate: true} candidateNode := &CandidateNode{Node: entry} keyResults, err := traverseMap(Context{}, candidateNode, "key", prefs, false) if err != nil { return nil, nil, err } else if keyResults.Len() != 1 { return nil, nil, fmt.Errorf("Expected to find one 'key' entry but found %v in position %v", keyResults.Len(), position) } valueResults, err := traverseMap(Context{}, candidateNode, "value", prefs, false) if err != nil { return nil, nil, err } else if valueResults.Len() != 1 { return nil, nil, fmt.Errorf("Expected to find one 'value' entry but found %v in position %v", valueResults.Len(), position) } return keyResults.Front().Value.(*CandidateNode).Node, valueResults.Front().Value.(*CandidateNode).Node, nil } func fromEntries(candidateNode *CandidateNode) (*CandidateNode, error) { var node = &yaml.Node{Kind: yaml.MappingNode, Tag: "!!map"} var mapCandidateNode = candidateNode.CreateReplacement(node) var contents = unwrapDoc(candidateNode.Node).Content for index := 0; index < len(contents); index = index + 1 { key, value, err := parseEntry(contents[index], index) if err != nil { return nil, err } node.Content = append(node.Content, key, value) } return mapCandidateNode, nil } func fromEntriesOperator(d *dataTreeNavigator, context Context, expressionNode *ExpressionNode) (Context, error) { var results = list.New() for el := context.MatchingNodes.Front(); el != nil; el = el.Next() { candidate := el.Value.(*CandidateNode) candidateNode := unwrapDoc(candidate.Node) switch candidateNode.Kind { case yaml.SequenceNode: mapResult, err := fromEntries(candidate) if err != nil { return Context{}, err } results.PushBack(mapResult) default: return Context{}, fmt.Errorf("from entries only runs against arrays") } } return context.ChildContext(results), nil } func withEntriesOperator(d *dataTreeNavigator, context Context, expressionNode *ExpressionNode) (Context, 
error) { //to_entries on the context toEntries, err := toEntriesOperator(d, context, expressionNode) if err != nil { return Context{}, err } var results = list.New() for el := toEntries.MatchingNodes.Front(); el != nil; el = el.Next() { //run expression against entries // splat toEntries and pipe it into Rhs splatted, err := splat(context.SingleChildContext(el.Value.(*CandidateNode)), traversePreferences{}) if err != nil { return Context{}, err } result, err := d.GetMatchingNodes(splatted, expressionNode.Rhs) log.Debug("expressionNode.Rhs %v", expressionNode.Rhs.Operation.OperationType) log.Debug("result %v", result) if err != nil { return Context{}, err } selfExpression := &ExpressionNode{Operation: &Operation{OperationType: selfReferenceOpType}} collected, err := collectTogether(d, result, selfExpression) if err != nil { return Context{}, err } fromEntries, err := fromEntriesOperator(d, context.SingleChildContext(collected), expressionNode) if err != nil { return Context{}, err } results.PushBackList(fromEntries.MatchingNodes) } //from_entries on the result return context.ChildContext(results), nil }
pkg/yqlib/operator_entries.go
0.526099
0.434221
operator_entries.go
starcoder
package streamer import ( "github.com/lyraproj/dgo/dgo" "github.com/lyraproj/dgo/vf" ) // A BasicCollector is an extendable basic implementation of the Consumer interface type BasicCollector struct { // Values is an array of all values that are added to the BasicCollector. When adding // a reference, the reference is considered to be an index in this array. Values dgo.Array // The Stack of values that is used when adding nested constructs (arrays and maps) Stack []dgo.Array } // NewCollector returns a new BasicCollector instance func NewCollector() Collector { c := &BasicCollector{} c.Init() return c } // Init initializes the internal stack and reference storage func (c *BasicCollector) Init() { c.Values = vf.MutableValues() c.Stack = make([]dgo.Array, 1, 8) c.Stack[0] = vf.MutableValues() } // AddArray initializes and adds a new array and then calls the function with is supposed to // add the elements. func (c *BasicCollector) AddArray(cap int, doer dgo.Doer) { a := vf.ArrayWithCapacity(cap) c.Add(a) top := len(c.Stack) c.Stack = append(c.Stack, a) doer() c.Stack = c.Stack[0:top] } // AddMap initializes and adds a new map and then calls the function with is supposed to // add an even number of elements as a sequence of key, value, [key, value, ...] func (c *BasicCollector) AddMap(cap int, doer dgo.Doer) { h := vf.MapWithCapacity(cap) c.Add(h) a := vf.ArrayWithCapacity(cap * 2) top := len(c.Stack) c.Stack = append(c.Stack, a) doer() c.Stack = c.Stack[0:top] h.PutAll(a.ToMap()) } // Add adds a new value func (c *BasicCollector) Add(element dgo.Value) { c.StackTop().Add(element) c.Values.Add(element) } // AddRef adds the nth value of the values that has been added once again. 
func (c *BasicCollector) AddRef(ref int) { c.StackTop().Add(c.Values.Get(ref)) } // CanDoBinary returns true func (c *BasicCollector) CanDoBinary() bool { return true } // CanDoTime returns true func (c *BasicCollector) CanDoTime() bool { return true } // CanDoComplexKeys returns true func (c *BasicCollector) CanDoComplexKeys() bool { return true } // StringDedupThreshold returns 0 func (c *BasicCollector) StringDedupThreshold() int { return 0 } // Value returns the last value added to this collector func (c *BasicCollector) Value() dgo.Value { return c.Stack[0].Get(0) } // StackTop returns the Array at the top of the collector stack. func (c *BasicCollector) StackTop() dgo.Array { return c.Stack[len(c.Stack)-1] } // PeekLast returns the last added value from func (c *BasicCollector) PeekLast() dgo.Value { a := c.StackTop() return a.Get(a.Len() - 1) } // ReplaceLast replaces the last added value with the given value func (c *BasicCollector) ReplaceLast(v dgo.Value) { a := c.StackTop() a.Set(a.Len()-1, v) }
streamer/basiccollector.go
0.802362
0.46478
basiccollector.go
starcoder
package preprocessing import ( "github.com/Wieku/gosu-pp/beatmap/difficulty" "github.com/Wieku/gosu-pp/beatmap/objects" "github.com/Wieku/gosu-pp/math/math32" "github.com/Wieku/gosu-pp/math/vector" "math" ) const ( NormalizedRadius = 50.0 CircleSizeBuffThreshold = 30.0 MinDeltaTime = 25 ) type DifficultyObject struct { diff *difficulty.Difficulty BaseObject objects.IHitObject lastObject objects.IHitObject lastLastObject objects.IHitObject DeltaTime float64 StartTime float64 EndTime float64 JumpDistance float64 MovementDistance float64 TravelDistance float64 Angle float64 MovementTime float64 TravelTime float64 StrainTime float64 } func NewDifficultyObject(hitObject, lastLastObject, lastObject objects.IHitObject, d *difficulty.Difficulty) *DifficultyObject { obj := &DifficultyObject{ diff: d, BaseObject: hitObject, lastObject: lastObject, lastLastObject: lastLastObject, DeltaTime: (hitObject.GetStartTime() - lastObject.GetStartTime()) / d.Speed, StartTime: hitObject.GetStartTime() / d.Speed, EndTime: hitObject.GetEndTime() / d.Speed, Angle: math.NaN(), } obj.StrainTime = math.Max(obj.DeltaTime, MinDeltaTime) obj.setDistances() return obj } func (o *DifficultyObject) setDistances() { _, ok1 := o.BaseObject.(*objects.Spinner) _, ok2 := o.lastObject.(*objects.Spinner) if ok1 || ok2 { return } scalingFactor := NormalizedRadius / float32(o.diff.CircleRadius) if o.diff.CircleRadius < CircleSizeBuffThreshold { scalingFactor *= 1.0 + math32.Min(CircleSizeBuffThreshold-float32(o.diff.CircleRadius), 5.0)/50.0 } lastCursorPosition := getEndCursorPosition(o.lastObject, o.diff) o.JumpDistance = float64((o.BaseObject.GetStackedStartPositionMod(o.diff.Mods).Scl(scalingFactor)).Dst(lastCursorPosition.Scl(scalingFactor))) if lastSlider, ok := o.lastObject.(*LazySlider); ok { o.TravelDistance = float64(lastSlider.LazyTravelDistance) o.TravelTime = math.Max(lastSlider.LazyTravelTime/o.diff.Speed, MinDeltaTime) o.MovementTime = math.Max(o.StrainTime-o.TravelTime, MinDeltaTime) // Jump 
distance from the slider tail to the next object, as opposed to the lazy position of JumpDistance. tailJumpDistance := lastSlider.GetStackedEndPositionMod(o.diff.Mods).Dst(o.BaseObject.GetStackedStartPositionMod(o.diff.Mods)) * scalingFactor // For hitobjects which continue in the direction of the slider, the player will normally follow through the slider, // such that they're not jumping from the lazy position but rather from very close to (or the end of) the slider. // In such cases, a leniency is applied by also considering the jump distance from the tail of the slider, and taking the minimum jump distance. // Additional distance is removed based on position of jump relative to slider follow circle radius. // JumpDistance is the leniency distance beyond the assumed_slider_radius. tailJumpDistance is maximum_slider_radius since the full distance of radial leniency is still possible. o.MovementDistance = math.Max(0, math.Min(o.JumpDistance-float64(maximumSliderRadius-assumedSliderRadius), float64(tailJumpDistance-maximumSliderRadius))) } else { o.MovementTime = o.StrainTime o.MovementDistance = o.JumpDistance } if o.lastLastObject != nil { if _, ok := o.lastLastObject.(*objects.Spinner); ok { return } lastLastCursorPosition := getEndCursorPosition(o.lastLastObject, o.diff) v1 := lastLastCursorPosition.Sub(o.lastObject.GetStackedStartPositionMod(o.diff.Mods)) v2 := o.BaseObject.GetStackedStartPositionMod(o.diff.Mods).Sub(lastCursorPosition) dot := v1.Dot(v2) det := v1.X*v2.Y - v1.Y*v2.X o.Angle = float64(math32.Abs(math32.Atan2(det, dot))) } } func getEndCursorPosition(obj objects.IHitObject, d *difficulty.Difficulty) (pos vector.Vector2f) { pos = obj.GetStackedStartPositionMod(d.Mods) if s, ok := obj.(*LazySlider); ok { pos = s.LazyEndPosition } return }
performance/osu/preprocessing/object.go
0.679285
0.443239
object.go
starcoder
package casbin import "github.com/Knetic/govaluate" // GetAllSubjects gets the list of subjects that show up in the current policy. func (e *Enforcer) GetAllSubjects() []string { return e.model.GetValuesForFieldInPolicyAllTypes("p", 0) } // GetAllNamedSubjects gets the list of subjects that show up in the current named policy. func (e *Enforcer) GetAllNamedSubjects(ptype string) []string { return e.model.GetValuesForFieldInPolicy("p", ptype, 0) } // GetAllObjects gets the list of objects that show up in the current policy. func (e *Enforcer) GetAllObjects() []string { return e.model.GetValuesForFieldInPolicyAllTypes("p", 1) } // GetAllNamedObjects gets the list of objects that show up in the current named policy. func (e *Enforcer) GetAllNamedObjects(ptype string) []string { return e.model.GetValuesForFieldInPolicy("p", ptype, 1) } // GetAllActions gets the list of actions that show up in the current policy. func (e *Enforcer) GetAllActions() []string { return e.model.GetValuesForFieldInPolicyAllTypes("p", 2) } // GetAllNamedActions gets the list of actions that show up in the current named policy. func (e *Enforcer) GetAllNamedActions(ptype string) []string { return e.model.GetValuesForFieldInPolicy("p", ptype, 2) } // GetAllRoles gets the list of roles that show up in the current policy. func (e *Enforcer) GetAllRoles() []string { return e.model.GetValuesForFieldInPolicyAllTypes("g", 1) } // GetAllNamedRoles gets the list of roles that show up in the current named policy. func (e *Enforcer) GetAllNamedRoles(ptype string) []string { return e.model.GetValuesForFieldInPolicy("g", ptype, 1) } // GetPolicy gets all the authorization rules in the policy. func (e *Enforcer) GetPolicy() [][]string { return e.GetNamedPolicy("p") } // GetFilteredPolicy gets all the authorization rules in the policy, field filters can be specified. 
func (e *Enforcer) GetFilteredPolicy(fieldIndex int, fieldValues ...string) [][]string { return e.GetFilteredNamedPolicy("p", fieldIndex, fieldValues...) } // GetNamedPolicy gets all the authorization rules in the named policy. func (e *Enforcer) GetNamedPolicy(ptype string) [][]string { return e.model.GetPolicy("p", ptype) } // GetFilteredNamedPolicy gets all the authorization rules in the named policy, field filters can be specified. func (e *Enforcer) GetFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) [][]string { return e.model.GetFilteredPolicy("p", ptype, fieldIndex, fieldValues...) } // GetGroupingPolicy gets all the role inheritance rules in the policy. func (e *Enforcer) GetGroupingPolicy() [][]string { return e.GetNamedGroupingPolicy("g") } // GetFilteredGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. func (e *Enforcer) GetFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) [][]string { return e.GetFilteredNamedGroupingPolicy("g", fieldIndex, fieldValues...) } // GetNamedGroupingPolicy gets all the role inheritance rules in the policy. func (e *Enforcer) GetNamedGroupingPolicy(ptype string) [][]string { return e.model.GetPolicy("g", ptype) } // GetFilteredNamedGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. func (e *Enforcer) GetFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) [][]string { return e.model.GetFilteredPolicy("g", ptype, fieldIndex, fieldValues...) } // HasPolicy determines whether an authorization rule exists. func (e *Enforcer) HasPolicy(params ...interface{}) bool { return e.HasNamedPolicy("p", params...) } // HasNamedPolicy determines whether a named authorization rule exists. 
func (e *Enforcer) HasNamedPolicy(ptype string, params ...interface{}) bool { if strSlice, ok := params[0].([]string); len(params) == 1 && ok { return e.model.HasPolicy("p", ptype, strSlice) } policy := make([]string, 0) for _, param := range params { policy = append(policy, param.(string)) } return e.model.HasPolicy("p", ptype, policy) } // AddPolicy adds an authorization rule to the current policy. // If the rule already exists, the function returns false and the rule will not be added. // Otherwise the function returns true by adding the new rule. func (e *Enforcer) AddPolicy(params ...interface{}) (bool, error) { return e.AddNamedPolicy("p", params...) } // AddPolicies adds authorization rules to the current policy. // If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. // Otherwise the function returns true for the corresponding rule by adding the new rule. func (e *Enforcer) AddPolicies(rules [][]string) (bool, error) { return e.AddNamedPolicies("p", rules) } // AddNamedPolicy adds an authorization rule to the current named policy. // If the rule already exists, the function returns false and the rule will not be added. // Otherwise the function returns true by adding the new rule. func (e *Enforcer) AddNamedPolicy(ptype string, params ...interface{}) (bool, error) { if strSlice, ok := params[0].([]string); len(params) == 1 && ok { strSlice = append(make([]string, 0, len(strSlice)), strSlice...) return e.addPolicy("p", ptype, strSlice) } policy := make([]string, 0) for _, param := range params { policy = append(policy, param.(string)) } return e.addPolicy("p", ptype, policy) } // AddNamedPolicies adds authorization rules to the current named policy. // If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. // Otherwise the function returns true for the corresponding by adding the new rule. 
func (e *Enforcer) AddNamedPolicies(ptype string, rules [][]string) (bool, error) { return e.addPolicies("p", ptype, rules) } // RemovePolicy removes an authorization rule from the current policy. func (e *Enforcer) RemovePolicy(params ...interface{}) (bool, error) { return e.RemoveNamedPolicy("p", params...) } // UpdatePolicy updates an authorization rule from the current policy. func (e *Enforcer) UpdatePolicy(oldPolicy []string, newPolicy []string) (bool, error) { return e.UpdateNamedPolicy("p", oldPolicy, newPolicy) } func (e *Enforcer) UpdateNamedPolicy(ptype string, p1 []string, p2 []string) (bool, error) { return e.updatePolicy("p", ptype, p1, p2) } // RemovePolicies removes authorization rules from the current policy. func (e *Enforcer) RemovePolicies(rules [][]string) (bool, error) { return e.RemoveNamedPolicies("p", rules) } // RemoveFilteredPolicy removes an authorization rule from the current policy, field filters can be specified. func (e *Enforcer) RemoveFilteredPolicy(fieldIndex int, fieldValues ...string) (bool, error) { return e.RemoveFilteredNamedPolicy("p", fieldIndex, fieldValues...) } // RemoveNamedPolicy removes an authorization rule from the current named policy. func (e *Enforcer) RemoveNamedPolicy(ptype string, params ...interface{}) (bool, error) { if strSlice, ok := params[0].([]string); len(params) == 1 && ok { return e.removePolicy("p", ptype, strSlice) } policy := make([]string, 0) for _, param := range params { policy = append(policy, param.(string)) } return e.removePolicy("p", ptype, policy) } // RemoveNamedPolicies removes authorization rules from the current named policy. func (e *Enforcer) RemoveNamedPolicies(ptype string, rules [][]string) (bool, error) { return e.removePolicies("p", ptype, rules) } // RemoveFilteredNamedPolicy removes an authorization rule from the current named policy, field filters can be specified. 
func (e *Enforcer) RemoveFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { return e.removeFilteredPolicy("p", ptype, fieldIndex, fieldValues...) } // HasGroupingPolicy determines whether a role inheritance rule exists. func (e *Enforcer) HasGroupingPolicy(params ...interface{}) bool { return e.HasNamedGroupingPolicy("g", params...) } // HasNamedGroupingPolicy determines whether a named role inheritance rule exists. func (e *Enforcer) HasNamedGroupingPolicy(ptype string, params ...interface{}) bool { if strSlice, ok := params[0].([]string); len(params) == 1 && ok { return e.model.HasPolicy("g", ptype, strSlice) } policy := make([]string, 0) for _, param := range params { policy = append(policy, param.(string)) } return e.model.HasPolicy("g", ptype, policy) } // AddGroupingPolicy adds a role inheritance rule to the current policy. // If the rule already exists, the function returns false and the rule will not be added. // Otherwise the function returns true by adding the new rule. func (e *Enforcer) AddGroupingPolicy(params ...interface{}) (bool, error) { return e.AddNamedGroupingPolicy("g", params...) } // AddGroupingPolicies adds role inheritance rules to the current policy. // If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. // Otherwise the function returns true for the corresponding policy rule by adding the new rule. func (e *Enforcer) AddGroupingPolicies(rules [][]string) (bool, error) { return e.AddNamedGroupingPolicies("g", rules) } // AddNamedGroupingPolicy adds a named role inheritance rule to the current policy. // If the rule already exists, the function returns false and the rule will not be added. // Otherwise the function returns true by adding the new rule. 
func (e *Enforcer) AddNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { var ruleAdded bool var err error if strSlice, ok := params[0].([]string); len(params) == 1 && ok { ruleAdded, err = e.addPolicy("g", ptype, strSlice) } else { policy := make([]string, 0) for _, param := range params { policy = append(policy, param.(string)) } ruleAdded, err = e.addPolicy("g", ptype, policy) } return ruleAdded, err } // AddNamedGroupingPolicies adds named role inheritance rules to the current policy. // If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. // Otherwise the function returns true for the corresponding policy rule by adding the new rule. func (e *Enforcer) AddNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { return e.addPolicies("g", ptype, rules) } // RemoveGroupingPolicy removes a role inheritance rule from the current policy. func (e *Enforcer) RemoveGroupingPolicy(params ...interface{}) (bool, error) { return e.RemoveNamedGroupingPolicy("g", params...) } // RemoveGroupingPolicies removes role inheritance rules from the current policy. func (e *Enforcer) RemoveGroupingPolicies(rules [][]string) (bool, error) { return e.RemoveNamedGroupingPolicies("g", rules) } // RemoveFilteredGroupingPolicy removes a role inheritance rule from the current policy, field filters can be specified. func (e *Enforcer) RemoveFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) (bool, error) { return e.RemoveFilteredNamedGroupingPolicy("g", fieldIndex, fieldValues...) } // RemoveNamedGroupingPolicy removes a role inheritance rule from the current named policy. 
func (e *Enforcer) RemoveNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { var ruleRemoved bool var err error if strSlice, ok := params[0].([]string); len(params) == 1 && ok { ruleRemoved, err = e.removePolicy("g", ptype, strSlice) } else { policy := make([]string, 0) for _, param := range params { policy = append(policy, param.(string)) } ruleRemoved, err = e.removePolicy("g", ptype, policy) } return ruleRemoved, err } // RemoveNamedGroupingPolicies removes role inheritance rules from the current named policy. func (e *Enforcer) RemoveNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { return e.removePolicies("g", ptype, rules) } func (e *Enforcer) UpdateGroupingPolicy(oldRule []string, newRule []string) (bool, error) { return e.UpdateNamedGroupingPolicy("g", oldRule, newRule) } func (e *Enforcer) UpdateNamedGroupingPolicy(ptype string, oldRule []string, newRule []string) (bool, error) { return e.updatePolicy("g", ptype, oldRule, newRule) } // RemoveFilteredNamedGroupingPolicy removes a role inheritance rule from the current named policy, field filters can be specified. func (e *Enforcer) RemoveFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { return e.removeFilteredPolicy("g", ptype, fieldIndex, fieldValues...) } // AddFunction adds a customized function. func (e *Enforcer) AddFunction(name string, function govaluate.ExpressionFunction) { e.fm.AddFunction(name, function) }
management_api.go
0.762778
0.420897
management_api.go
starcoder
package graph import ( i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55 "github.com/microsoft/kiota/abstractions/go/serialization" ) // AverageComparativeScore provides operations to manage the security singleton. type AverageComparativeScore struct { // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. additionalData map[string]interface{}; // Average score within specified basis. averageScore *float64; // Scope type. The possible values are: AllTenants, TotalSeats, IndustryTypes. basis *string; } // NewAverageComparativeScore instantiates a new averageComparativeScore and sets the default values. func NewAverageComparativeScore()(*AverageComparativeScore) { m := &AverageComparativeScore{ } m.SetAdditionalData(make(map[string]interface{})); return m } // CreateAverageComparativeScoreFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value func CreateAverageComparativeScoreFromDiscriminatorValue(parseNode i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.Parsable, error) { return NewAverageComparativeScore(), nil } // GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. func (m *AverageComparativeScore) GetAdditionalData()(map[string]interface{}) { if m == nil { return nil } else { return m.additionalData } } // GetAverageScore gets the averageScore property value. Average score within specified basis. func (m *AverageComparativeScore) GetAverageScore()(*float64) { if m == nil { return nil } else { return m.averageScore } } // GetBasis gets the basis property value. Scope type. The possible values are: AllTenants, TotalSeats, IndustryTypes. 
func (m *AverageComparativeScore) GetBasis()(*string) { if m == nil { return nil } else { return m.basis } } // GetFieldDeserializers the deserialization information for the current model func (m *AverageComparativeScore) GetFieldDeserializers()(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) { res := make(map[string]func(interface{}, i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode)(error)) res["averageScore"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error { val, err := n.GetFloat64Value() if err != nil { return err } if val != nil { m.SetAverageScore(val) } return nil } res["basis"] = func (o interface{}, n i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.ParseNode) error { val, err := n.GetStringValue() if err != nil { return err } if val != nil { m.SetBasis(val) } return nil } return res } func (m *AverageComparativeScore) IsNil()(bool) { return m == nil } // Serialize serializes information the current object func (m *AverageComparativeScore) Serialize(writer i04eb5309aeaafadd28374d79c8471df9b267510b4dc2e3144c378c50f6fd7b55.SerializationWriter)(error) { { err := writer.WriteFloat64Value("averageScore", m.GetAverageScore()) if err != nil { return err } } { err := writer.WriteStringValue("basis", m.GetBasis()) if err != nil { return err } } { err := writer.WriteAdditionalData(m.GetAdditionalData()) if err != nil { return err } } return nil } // SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well. func (m *AverageComparativeScore) SetAdditionalData(value map[string]interface{})() { if m != nil { m.additionalData = value } } // SetAverageScore sets the averageScore property value. Average score within specified basis. 
func (m *AverageComparativeScore) SetAverageScore(value *float64)() { if m != nil { m.averageScore = value } } // SetBasis sets the basis property value. Scope type. The possible values are: AllTenants, TotalSeats, IndustryTypes. func (m *AverageComparativeScore) SetBasis(value *string)() { if m != nil { m.basis = value } }
models/microsoft/graph/average_comparative_score.go
0.831246
0.406626
average_comparative_score.go
starcoder
package utils /* * bit.go - some collection of bitwise operations * see more @ * - https://en.wikipedia.org/wiki/Bitwise_operation * - https://en.wikipedia.org/wiki/Bitwise_operations_in_C * - http://www.cprogramming.com/tutorial/bitwise_operators.html * - https://discuss.leetcode.com/topic/50315/ */ import ( "fmt" ) var ( binaryFormat = "%s%s%s%s%s%s%s%s" // BinaryString is an array of all 4-bit binary representation BinaryString = map[string][]string{ "b": []string{ "0000", "0001", "0010", "0011", "0100", "0101", "0110", "0111", "1000", "1001", "1010", "1011", "1100", "1101", "1110", "1111", }, "x": []string{ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", }, } ) // BitAllOne returns integer with all bits are 1 func BitAllOne() int64 { return ^0 } // BitCheck checks on nth bit of x func BitCheck(x int64, n uint8) bool { return (x & 1 << n) != 0 } // BitClear sets 0 on nth bit of x func BitClear(x int64, n uint8) int64 { return x & ^(1 << n) } // BitCountOne returns number of 1 in x (aka Hamming weight) func BitCountOne(x int64) int { count := 0 for x != 0 { x = x & (x - 1) count++ } return count } // BitCountOneUint64 returns number of 1 in x (aka Hamming weight) func BitCountOneUint64(x uint64) int { var count int var mask uint64 = 1 for i := 0; i < 64; i++ { if mask&x != 0 { count++ } mask = mask << 1 } return count } // BitIntersection applies bitwise AND (&) operator on a and b (interaction) func BitIntersection(a, b int64) int64 { return a & b } // BitInverse returns inverted x func BitInverse(x int64) int64 { return ^x } // BitIsPowerOf2 checks if the number is power of 2 func BitIsPowerOf2(number int64) bool { return number > 0 && 0 == number&(number-1) } // BitIsPowerOf4 checks if the number is power of 4 func BitIsPowerOf4(number int64) bool { return number > 0 && 0 == number&(number-1) && 0 == number&0x5555555555555555 } // BitNegativeInt returns negative number of x (0 - x) func BitNegativeInt(x int64) int64 { var y 
int64 = 1 return BitSumInt64(^x, y) } // BitSet sets 1 on nth bit of x func BitSet(x int64, n uint8) int64 { return x | 1<<n } // BitString converts uint64 x to zero-padding bits representation func BitString(x uint64, key, delimiter string) string { var mask uint64 = 0xF var sFmt = binaryFormat + binaryFormat var xbin = key == "b" var xpad = delimiter != "" var xmap []string var xval string var okay bool if xpad { sFmt += sFmt } if xmap, okay = BinaryString[key]; !okay { return "" } Debug("\nx= %d [0x%x / %b], xmap= %+v\n", x, x, x, xmap) bnum := len(xmap) data := make([]interface{}, 0, bnum*2) for n := 1; n <= bnum; n++ { ndx := int(mask & (x >> uint(4*(bnum-n)))) Debug("n= %2d, shift= %2d, ndx= %2d [%b]\n", n, 4*(bnum-n), ndx, ndx) data = append(data, xmap[ndx]) if xpad { if n < bnum && (xbin || n%2 == 0) { data = append(data, delimiter) } else { data = append(data, "") } } } Debug("%+v\n", data) xval = fmt.Sprintf(sFmt, data...) return xval } // BitSubstraction applies A & ~B func BitSubstraction(a, b int64) int64 { return a & ^b } // BitSumInt64 calculates sum of two integers without using arithmetic operators func BitSumInt64(x, y int64) int64 { if y != 0 { return BitSumInt64(x^y, (x&y)<<1) } return x } // BitSumInt calculates sum of two integers without using arithmetic operators func BitSumInt(x, y int) int { // Iterate till there is no carry for y != 0 { // carry now contains common set bits of x and y carry := x & y // XOR on bits of x and y where at least one of the bits is not set x = x ^ y // carry is shifted by one so that adding it to x gives the required sum y = carry << 1 } return x } // BitUnion applies bitwise OR (|) operator on a and b (union) func BitUnion(a, b int64) int64 { return a | b } // ToBinaryString converts uint64 x to zero-padding binary representation func ToBinaryString(x uint64, delimiter string) string { return BitString(x, "b", delimiter) } // ToHexString converts uint64 x to zero-padding hexidecimal representation func 
ToHexString(x uint64, delimiter string) string { return BitString(x, "x", delimiter) }
utils/bit.go
0.830181
0.489442
bit.go
starcoder
package parse import ( "fmt" "go/ast" "strings" ) func parseLiteralExpression(expr *ast.BasicLit) *Expression { // Literal value of int: 0, 1, 2, ... res := &Expression{ Type: ExpressionTypeDefault, Name: expr.Value, NameType: strings.ToLower(expr.Kind.String()), } return res } func parseIdentifierExpression(expr *ast.Ident) *Expression { if expr.Obj == nil { // Literal value of boolean: //true, false res := &Expression{ Type: ExpressionTypeDefault, Name: expr.Name, NameType: "bool", // bool } return res } else { // Variable res := &Expression{ Type: ExpressionTypeDefault, Name: expr.Obj.Name, NameType: strings.ToLower(expr.Obj.Kind.String()), // int, string, .. } return res } } func parseBinaryExpression(expr *ast.BinaryExpr) *Expression { // Binary expression like: // i < 10 op := expr.Op.String() left := parseExpression(&expr.X) right := parseExpression(&expr.Y) value := fmt.Sprintf("%s %s %s", left, op, right) res := &Expression{ Type: ExpressionTypeDefault, Name: value, NameType: op, } return res } func parseUnaryExpression(expr *ast.UnaryExpr) *Expression { // Unary expression like: // -1 op := expr.Op.String() inside := parseExpression(&expr.X) value := fmt.Sprintf("%s%s", op, inside) res := &Expression{ Type: ExpressionTypeDefault, Name: value, NameType: op, } return res } func parseParenthesesExpression(expr *ast.ParenExpr) *Expression { // Parentheses expression like: // (1 + 2) inside := parseExpression(&expr.X) res := &Expression{ Type: ExpressionTypeParentheses, Children: Expressions{inside}, } return res } func parseArrayExpression(expr *ast.CompositeLit) *Expression { // Composite literal like: // []int{1, 2} nameType := parseNameType(&expr.Type) res := &Expression{ Type: ExpressionTypeArray, NameType: nameType, // int* } res.Children = Expressions{} for _, elt := range expr.Elts { res.Children = append(res.Children, parseExpression(&elt)) } return res } func parseCallExpression(expr *ast.CallExpr) *Expression { // Call expression like: // 
append(array, 1) res := &Expression{ Type: ExpressionTypeCall, Name: parseExpression(&expr.Fun).String(), } res.Children = Expressions{} for _, arg := range expr.Args { res.Children = append(res.Children, parseExpression(&arg)) } return res } func parseIndexExpression(expr *ast.IndexExpr) *Expression { // Index expression like: // array[123] index := parseExpression(&expr.Index) res := &Expression{ Type: ExpressionTypeIndex, Name: expr.X.(*ast.Ident).Name, // array Children: Expressions{index}, // 123 } return res } func parseStarExpression(expr *ast.StarExpr) *Expression { // Star expression like: // *flag inside := parseExpression(&expr.X) res := &Expression{ Type: ExpressionTypeStar, Children: Expressions{inside}, // flag } return res } func parseSelectorExpression(expr *ast.SelectorExpr) *Expression { // Selector expression like: // fmt.Printf before := parseExpression(&expr.X) // fmt after := expr.Sel.Name // Printf res := &Expression{ Type: ExpressionTypeSelector, Name: after, // Printf Children: Expressions{before}, // fmt } return res } func parseExpression(expr *ast.Expr) *Expression { e, ok := (*expr).(*ast.BasicLit) if ok { return parseLiteralExpression(e) } e2, ok := (*expr).(*ast.Ident) if ok { return parseIdentifierExpression(e2) } e3, ok := (*expr).(*ast.BinaryExpr) if ok { return parseBinaryExpression(e3) } e4, ok := (*expr).(*ast.UnaryExpr) if ok { return parseUnaryExpression(e4) } e5, ok := (*expr).(*ast.ParenExpr) if ok { return parseParenthesesExpression(e5) } e6, ok := (*expr).(*ast.CompositeLit) if ok { return parseArrayExpression(e6) } e7, ok := (*expr).(*ast.CallExpr) if ok { return parseCallExpression(e7) } e8, ok := (*expr).(*ast.IndexExpr) if ok { return parseIndexExpression(e8) } e9, ok := (*expr).(*ast.StarExpr) if ok { return parseStarExpression(e9) } e10, ok := (*expr).(*ast.SelectorExpr) if ok { return parseSelectorExpression(e10) } panic("parseExpression(): unknown expression type") }
parse/expression_parse.go
0.520009
0.497253
expression_parse.go
starcoder
package condition import ( "encoding/json" "github.com/Jeffail/benthos/lib/log" "github.com/Jeffail/benthos/lib/metrics" "github.com/Jeffail/benthos/lib/types" ) //------------------------------------------------------------------------------ func init() { Constructors[TypeNot] = TypeSpec{ constructor: NewNot, description: ` Not is a condition that returns the opposite (NOT) of its child condition. The body of a not object is the child condition, i.e. in order to express 'part 0 NOT equal to "foo"' you could have the following YAML config: ` + "``` yaml" + ` type: not not: type: text text: operator: equal part: 0 arg: foo ` + "```" + ` Or, the same example as JSON: ` + "``` json" + ` { "type": "not", "not": { "type": "text", "text": { "operator": "equal", "part": 0, "arg": "foo" } } } ` + "```", sanitiseConfigFunc: func(conf Config) (interface{}, error) { if conf.Not.Config == nil { return struct{}{}, nil } return SanitiseConfig(*conf.Not.Config) }, } } //------------------------------------------------------------------------------ // NotConfig is a configuration struct containing fields for the Not condition. type NotConfig struct { *Config } // NewNotConfig returns a NotConfig with default values. func NewNotConfig() NotConfig { return NotConfig{ Config: nil, } } //------------------------------------------------------------------------------ // MarshalJSON prints an empty object instead of nil. func (m NotConfig) MarshalJSON() ([]byte, error) { if m.Config != nil { return json.Marshal(m.Config) } return json.Marshal(struct{}{}) } // MarshalYAML prints an empty object instead of nil. func (m NotConfig) MarshalYAML() (interface{}, error) { if m.Config != nil { return *m.Config, nil } return struct{}{}, nil } //------------------------------------------------------------------------------ // UnmarshalJSON ensures that when parsing child config it is initialised. 
func (m *NotConfig) UnmarshalJSON(bytes []byte) error { if m.Config == nil { nConf := NewConfig() m.Config = &nConf } return json.Unmarshal(bytes, m.Config) } // UnmarshalYAML ensures that when parsing child config it is initialised. func (m *NotConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if m.Config == nil { nConf := NewConfig() m.Config = &nConf } return unmarshal(m.Config) } //------------------------------------------------------------------------------ // Not is a condition that returns the opposite of a child condition. type Not struct { child Type } // NewNot returns a Not condition. func NewNot( conf Config, mgr types.Manager, log log.Modular, stats metrics.Type, ) (Type, error) { childConf := conf.Not.Config if childConf == nil { newConf := NewConfig() childConf = &newConf } child, err := New(*childConf, mgr, log, stats) if err != nil { return nil, err } return &Not{ child: child, }, nil } //------------------------------------------------------------------------------ // Check attempts to check a message part against a configured condition. func (c *Not) Check(msg types.Message) bool { return !c.child.Check(msg) } //------------------------------------------------------------------------------
lib/processor/condition/not.go
0.722918
0.714697
not.go
starcoder
package iso20022 // Summary information about amount financed. type FinancingAllowedSummary1 struct { // Number of invoices/instalments financed. FinancedItemNumber *Number `xml:"FincdItmNb"` // Sum of the original total amounts of the invoices accepted for financing. TotalAcceptedItemsAmount *ActiveCurrencyAndAmount `xml:"TtlAccptdItmsAmt"` // Percentage rate applied to calculate the total amount financed related to the total amounts of the invoices accepted for financing. It represents the average percentage rate applied to all single invoice requests financed. It can be calculated as result of "TotalFinancedAmount" divided by "TotalAcceptedItemsAmount". AppliedPercentage *PercentageRate `xml:"ApldPctg,omitempty"` // Total amount financed, defined as the entire financed amount of the requests. TotalFinancedAmount *ActiveCurrencyAndAmount `xml:"TtlFincdAmt"` // Set of dates (eg book date, credit date) related to the crediting of the financed amount. FinancingDateDetails *FinancingDateDetails1 `xml:"FincgDtDtls,omitempty"` // Unambiguous identification of the account, held by Financing Requestor, actually used for crediting the amount financed. CreditAccount *CashAccount7 `xml:"CdtAcct,omitempty"` // Unambiguous identification of the internal bank account actually used by First Agent to manage the line of credit granted to Financing Requestor. 
FinancingAccount *CashAccount7 `xml:"FincgAcct,omitempty"` } func (f *FinancingAllowedSummary1) SetFinancedItemNumber(value string) { f.FinancedItemNumber = (*Number)(&value) } func (f *FinancingAllowedSummary1) SetTotalAcceptedItemsAmount(value, currency string) { f.TotalAcceptedItemsAmount = NewActiveCurrencyAndAmount(value, currency) } func (f *FinancingAllowedSummary1) SetAppliedPercentage(value string) { f.AppliedPercentage = (*PercentageRate)(&value) } func (f *FinancingAllowedSummary1) SetTotalFinancedAmount(value, currency string) { f.TotalFinancedAmount = NewActiveCurrencyAndAmount(value, currency) } func (f *FinancingAllowedSummary1) AddFinancingDateDetails() *FinancingDateDetails1 { f.FinancingDateDetails = new(FinancingDateDetails1) return f.FinancingDateDetails } func (f *FinancingAllowedSummary1) AddCreditAccount() *CashAccount7 { f.CreditAccount = new(CashAccount7) return f.CreditAccount } func (f *FinancingAllowedSummary1) AddFinancingAccount() *CashAccount7 { f.FinancingAccount = new(CashAccount7) return f.FinancingAccount }
FinancingAllowedSummary1.go
0.831998
0.405272
FinancingAllowedSummary1.go
starcoder
package math32 // Plane represents a plane in 3D space by its normal vector and a constant. // When the the normal vector is the unit vector the constant is the distance from the origin. type Plane struct { normal Vector3 constant float32 } // NewPlane creates and returns a new plane from a normal vector and a constant. func NewPlane(normal *Vector3, constant float32) *Plane { p := new(Plane) if normal != nil { p.normal = *normal } p.constant = constant return p } // Set sets this plane normal vector and constant. // Returns pointer to this updated plane. func (p *Plane) Set(normal *Vector3, constant float32) *Plane { p.normal = *normal p.constant = constant return p } // SetComponents sets this plane normal vector components and constant. // Returns pointer to this updated plane. func (p *Plane) SetComponents(x, y, z, w float32) *Plane { p.normal.Set(x, y, z) p.constant = w return p } // SetFromNormalAndCoplanarPoint sets this plane from a normal vector and a point on the plane. // Returns pointer to this updated plane. func (p *Plane) SetFromNormalAndCoplanarPoint(normal *Vector3, point *Vector3) *Plane { p.normal = *normal p.constant = -point.Dot(&p.normal) return p } // SetFromCoplanarPoints sets this plane from three coplanar points. // Returns pointer to this updated plane. func (p *Plane) SetFromCoplanarPoints(a, b, c *Vector3) *Plane { var v1 Vector3 var v2 Vector3 normal := v1.SubVectors(c, b).Cross(v2.SubVectors(a, b)).Normalize() // Q: should an error be thrown if normal is zero (e.g. degenerate plane)? p.SetFromNormalAndCoplanarPoint(normal, a) return p } // Copy sets this plane to a copy of other. // Returns pointer to this updated plane. func (p *Plane) Copy(other *Plane) *Plane { p.normal.Copy(&other.normal) p.constant = other.constant return p } // Normalize normalizes this plane normal vector and adjusts the constant. // Note: will lead to a divide by zero if the plane is invalid. // Returns pointer to this updated plane. 
func (p *Plane) Normalize() *Plane { inverseNormalLength := 1.0 / p.normal.Length() p.normal.MultiplyScalar(inverseNormalLength) p.constant *= inverseNormalLength return p } // Negate negates this plane normal. // Returns pointer to this updated plane. func (p *Plane) Negate() *Plane { p.constant *= -1 p.normal.Negate() return p } // DistanceToPoint returns the distance of this plane from point. func (p *Plane) DistanceToPoint(point *Vector3) float32 { return p.normal.Dot(point) + p.constant } // DistanceToSphere returns the distance of this place from the sphere. func (p *Plane) DistanceToSphere(sphere *Sphere) float32 { return p.DistanceToPoint(&sphere.Center) - sphere.Radius } // IsIntersectionLine returns the line intersects this plane. func (p *Plane) IsIntersectionLine(line *Line3) bool { startSign := p.DistanceToPoint(&line.start) endSign := p.DistanceToPoint(&line.end) return (startSign < 0 && endSign > 0) || (endSign < 0 && startSign > 0) } // IntersectLine calculates the point in the plane which intersets the specified line. // Sets the optionalTarget, if not nil to this point, and also returns it. // Returns nil if the line does not intersects the plane. func (p *Plane) IntersectLine(line *Line3, optionalTarget *Vector3) *Vector3 { var v1 Vector3 var result *Vector3 if optionalTarget == nil { result = NewVector3(0, 0, 0) } else { result = optionalTarget } direction := line.Delta(&v1) denominator := p.normal.Dot(direction) if denominator == 0 { // line is coplanar, return origin if p.DistanceToPoint(&line.start) == 0 { return result.Copy(&line.start) } // Unsure if this is the correct method to handle this case. return nil } var t = -(line.start.Dot(&p.normal) + p.constant) / denominator if t < 0 || t > 1 { return nil } return result.Copy(direction).MultiplyScalar(t).Add(&line.start) } // CoplanarPoint sets the optionalTarget to a point in the plane and also returns it. // The point set and returned is the closest point from the origin. 
func (p *Plane) CoplanarPoint(optionalTarget *Vector3) *Vector3 { var result *Vector3 if optionalTarget == nil { result = NewVector3(0, 0, 0) } else { result = optionalTarget } return result.Copy(&p.normal).MultiplyScalar(-p.constant) } // Translate translates this plane in the direction of its normal by offset. // Returns pointer to this updated plane. func (p *Plane) Translate(offset *Vector3) *Plane { p.constant = p.constant - offset.Dot(&p.normal) return p } // Equals returns if this plane is equal to other func (p *Plane) Equals(other *Plane) bool { return other.normal.Equals(&p.normal) && (other.constant == p.constant) } // Clone creates and returns a pointer to a copy of this plane. func (p *Plane) Clone(plane *Plane) *Plane { return NewPlane(&plane.normal, plane.constant) }
math32/plane.go
0.93315
0.845751
plane.go
starcoder
package assertions import ( "fmt" "reflect" ) import ( "github.com/smartystreets/oglematchers" ) // ShouldContain receives exactly two parameters. The first is a slice and the // second is a proposed member. Membership is determined using ShouldEqual. func ShouldContain(actual interface{}, expected ...interface{}) string { if fail := need(1, expected); fail != success { return fail } if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil { typeName := reflect.TypeOf(actual) if fmt.Sprintf("%v", matchError) == "which is not a slice or array" { return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName) } return fmt.Sprintf(shouldHaveContained, typeName, expected[0]) } return success } // ShouldNotContain receives exactly two parameters. The first is a slice and the // second is a proposed member. Membership is determinied using ShouldEqual. func ShouldNotContain(actual interface{}, expected ...interface{}) string { if fail := need(1, expected); fail != success { return fail } typeName := reflect.TypeOf(actual) if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil { if fmt.Sprintf("%v", matchError) == "which is not a slice or array" { return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName) } return success } return fmt.Sprintf(shouldNotHaveContained, typeName, expected[0]) } // ShouldBeIn receives at least 2 parameters. The first is a proposed member of the collection // that is passed in either as the second parameter, or of the collection that is comprised // of all the remaining parameters. This assertion ensures that the proposed member is in // the collection (using ShouldEqual). 
func ShouldBeIn(actual interface{}, expected ...interface{}) string { if fail := atLeast(1, expected); fail != success { return fail } if len(expected) == 1 { return shouldBeIn(actual, expected[0]) } return shouldBeIn(actual, expected) } func shouldBeIn(actual interface{}, expected interface{}) string { if matchError := oglematchers.Contains(actual).Matches(expected); matchError != nil { return fmt.Sprintf(shouldHaveBeenIn, actual, reflect.TypeOf(expected)) } return success } // ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of the collection // that is passed in either as the second parameter, or of the collection that is comprised // of all the remaining parameters. This assertion ensures that the proposed member is NOT in // the collection (using ShouldEqual). func ShouldNotBeIn(actual interface{}, expected ...interface{}) string { if fail := atLeast(1, expected); fail != success { return fail } if len(expected) == 1 { return shouldNotBeIn(actual, expected[0]) } return shouldNotBeIn(actual, expected) } func shouldNotBeIn(actual interface{}, expected interface{}) string { if matchError := oglematchers.Contains(actual).Matches(expected); matchError == nil { return fmt.Sprintf(shouldNotHaveBeenIn, actual, reflect.TypeOf(expected)) } return success }
src/github.com/smartystreets/goconvey/assertions/collections.go
0.79909
0.602909
collections.go
starcoder
package onshape import ( "encoding/json" ) // BTPAnnotation231 struct for BTPAnnotation231 type BTPAnnotation231 struct { Atomic *bool `json:"atomic,omitempty"` BtType *string `json:"btType,omitempty"` DocumentationType *string `json:"documentationType,omitempty"` EndSourceLocation *int32 `json:"endSourceLocation,omitempty"` NodeId *string `json:"nodeId,omitempty"` ShortDescriptor *string `json:"shortDescriptor,omitempty"` SpaceAfter *BTPSpace10 `json:"spaceAfter,omitempty"` SpaceBefore *BTPSpace10 `json:"spaceBefore,omitempty"` SpaceDefault *bool `json:"spaceDefault,omitempty"` StartSourceLocation *int32 `json:"startSourceLocation,omitempty"` Value *BTPLiteralMap256 `json:"value,omitempty"` } // NewBTPAnnotation231 instantiates a new BTPAnnotation231 object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewBTPAnnotation231() *BTPAnnotation231 { this := BTPAnnotation231{} return &this } // NewBTPAnnotation231WithDefaults instantiates a new BTPAnnotation231 object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewBTPAnnotation231WithDefaults() *BTPAnnotation231 { this := BTPAnnotation231{} return &this } // GetAtomic returns the Atomic field value if set, zero value otherwise. func (o *BTPAnnotation231) GetAtomic() bool { if o == nil || o.Atomic == nil { var ret bool return ret } return *o.Atomic } // GetAtomicOk returns a tuple with the Atomic field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetAtomicOk() (*bool, bool) { if o == nil || o.Atomic == nil { return nil, false } return o.Atomic, true } // HasAtomic returns a boolean if a field has been set. 
func (o *BTPAnnotation231) HasAtomic() bool { if o != nil && o.Atomic != nil { return true } return false } // SetAtomic gets a reference to the given bool and assigns it to the Atomic field. func (o *BTPAnnotation231) SetAtomic(v bool) { o.Atomic = &v } // GetBtType returns the BtType field value if set, zero value otherwise. func (o *BTPAnnotation231) GetBtType() string { if o == nil || o.BtType == nil { var ret string return ret } return *o.BtType } // GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetBtTypeOk() (*string, bool) { if o == nil || o.BtType == nil { return nil, false } return o.BtType, true } // HasBtType returns a boolean if a field has been set. func (o *BTPAnnotation231) HasBtType() bool { if o != nil && o.BtType != nil { return true } return false } // SetBtType gets a reference to the given string and assigns it to the BtType field. func (o *BTPAnnotation231) SetBtType(v string) { o.BtType = &v } // GetDocumentationType returns the DocumentationType field value if set, zero value otherwise. func (o *BTPAnnotation231) GetDocumentationType() string { if o == nil || o.DocumentationType == nil { var ret string return ret } return *o.DocumentationType } // GetDocumentationTypeOk returns a tuple with the DocumentationType field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetDocumentationTypeOk() (*string, bool) { if o == nil || o.DocumentationType == nil { return nil, false } return o.DocumentationType, true } // HasDocumentationType returns a boolean if a field has been set. func (o *BTPAnnotation231) HasDocumentationType() bool { if o != nil && o.DocumentationType != nil { return true } return false } // SetDocumentationType gets a reference to the given string and assigns it to the DocumentationType field. 
func (o *BTPAnnotation231) SetDocumentationType(v string) { o.DocumentationType = &v } // GetEndSourceLocation returns the EndSourceLocation field value if set, zero value otherwise. func (o *BTPAnnotation231) GetEndSourceLocation() int32 { if o == nil || o.EndSourceLocation == nil { var ret int32 return ret } return *o.EndSourceLocation } // GetEndSourceLocationOk returns a tuple with the EndSourceLocation field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetEndSourceLocationOk() (*int32, bool) { if o == nil || o.EndSourceLocation == nil { return nil, false } return o.EndSourceLocation, true } // HasEndSourceLocation returns a boolean if a field has been set. func (o *BTPAnnotation231) HasEndSourceLocation() bool { if o != nil && o.EndSourceLocation != nil { return true } return false } // SetEndSourceLocation gets a reference to the given int32 and assigns it to the EndSourceLocation field. func (o *BTPAnnotation231) SetEndSourceLocation(v int32) { o.EndSourceLocation = &v } // GetNodeId returns the NodeId field value if set, zero value otherwise. func (o *BTPAnnotation231) GetNodeId() string { if o == nil || o.NodeId == nil { var ret string return ret } return *o.NodeId } // GetNodeIdOk returns a tuple with the NodeId field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetNodeIdOk() (*string, bool) { if o == nil || o.NodeId == nil { return nil, false } return o.NodeId, true } // HasNodeId returns a boolean if a field has been set. func (o *BTPAnnotation231) HasNodeId() bool { if o != nil && o.NodeId != nil { return true } return false } // SetNodeId gets a reference to the given string and assigns it to the NodeId field. func (o *BTPAnnotation231) SetNodeId(v string) { o.NodeId = &v } // GetShortDescriptor returns the ShortDescriptor field value if set, zero value otherwise. 
func (o *BTPAnnotation231) GetShortDescriptor() string { if o == nil || o.ShortDescriptor == nil { var ret string return ret } return *o.ShortDescriptor } // GetShortDescriptorOk returns a tuple with the ShortDescriptor field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetShortDescriptorOk() (*string, bool) { if o == nil || o.ShortDescriptor == nil { return nil, false } return o.ShortDescriptor, true } // HasShortDescriptor returns a boolean if a field has been set. func (o *BTPAnnotation231) HasShortDescriptor() bool { if o != nil && o.ShortDescriptor != nil { return true } return false } // SetShortDescriptor gets a reference to the given string and assigns it to the ShortDescriptor field. func (o *BTPAnnotation231) SetShortDescriptor(v string) { o.ShortDescriptor = &v } // GetSpaceAfter returns the SpaceAfter field value if set, zero value otherwise. func (o *BTPAnnotation231) GetSpaceAfter() BTPSpace10 { if o == nil || o.SpaceAfter == nil { var ret BTPSpace10 return ret } return *o.SpaceAfter } // GetSpaceAfterOk returns a tuple with the SpaceAfter field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetSpaceAfterOk() (*BTPSpace10, bool) { if o == nil || o.SpaceAfter == nil { return nil, false } return o.SpaceAfter, true } // HasSpaceAfter returns a boolean if a field has been set. func (o *BTPAnnotation231) HasSpaceAfter() bool { if o != nil && o.SpaceAfter != nil { return true } return false } // SetSpaceAfter gets a reference to the given BTPSpace10 and assigns it to the SpaceAfter field. func (o *BTPAnnotation231) SetSpaceAfter(v BTPSpace10) { o.SpaceAfter = &v } // GetSpaceBefore returns the SpaceBefore field value if set, zero value otherwise. 
func (o *BTPAnnotation231) GetSpaceBefore() BTPSpace10 { if o == nil || o.SpaceBefore == nil { var ret BTPSpace10 return ret } return *o.SpaceBefore } // GetSpaceBeforeOk returns a tuple with the SpaceBefore field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetSpaceBeforeOk() (*BTPSpace10, bool) { if o == nil || o.SpaceBefore == nil { return nil, false } return o.SpaceBefore, true } // HasSpaceBefore returns a boolean if a field has been set. func (o *BTPAnnotation231) HasSpaceBefore() bool { if o != nil && o.SpaceBefore != nil { return true } return false } // SetSpaceBefore gets a reference to the given BTPSpace10 and assigns it to the SpaceBefore field. func (o *BTPAnnotation231) SetSpaceBefore(v BTPSpace10) { o.SpaceBefore = &v } // GetSpaceDefault returns the SpaceDefault field value if set, zero value otherwise. func (o *BTPAnnotation231) GetSpaceDefault() bool { if o == nil || o.SpaceDefault == nil { var ret bool return ret } return *o.SpaceDefault } // GetSpaceDefaultOk returns a tuple with the SpaceDefault field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetSpaceDefaultOk() (*bool, bool) { if o == nil || o.SpaceDefault == nil { return nil, false } return o.SpaceDefault, true } // HasSpaceDefault returns a boolean if a field has been set. func (o *BTPAnnotation231) HasSpaceDefault() bool { if o != nil && o.SpaceDefault != nil { return true } return false } // SetSpaceDefault gets a reference to the given bool and assigns it to the SpaceDefault field. func (o *BTPAnnotation231) SetSpaceDefault(v bool) { o.SpaceDefault = &v } // GetStartSourceLocation returns the StartSourceLocation field value if set, zero value otherwise. 
func (o *BTPAnnotation231) GetStartSourceLocation() int32 { if o == nil || o.StartSourceLocation == nil { var ret int32 return ret } return *o.StartSourceLocation } // GetStartSourceLocationOk returns a tuple with the StartSourceLocation field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetStartSourceLocationOk() (*int32, bool) { if o == nil || o.StartSourceLocation == nil { return nil, false } return o.StartSourceLocation, true } // HasStartSourceLocation returns a boolean if a field has been set. func (o *BTPAnnotation231) HasStartSourceLocation() bool { if o != nil && o.StartSourceLocation != nil { return true } return false } // SetStartSourceLocation gets a reference to the given int32 and assigns it to the StartSourceLocation field. func (o *BTPAnnotation231) SetStartSourceLocation(v int32) { o.StartSourceLocation = &v } // GetValue returns the Value field value if set, zero value otherwise. func (o *BTPAnnotation231) GetValue() BTPLiteralMap256 { if o == nil || o.Value == nil { var ret BTPLiteralMap256 return ret } return *o.Value } // GetValueOk returns a tuple with the Value field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTPAnnotation231) GetValueOk() (*BTPLiteralMap256, bool) { if o == nil || o.Value == nil { return nil, false } return o.Value, true } // HasValue returns a boolean if a field has been set. func (o *BTPAnnotation231) HasValue() bool { if o != nil && o.Value != nil { return true } return false } // SetValue gets a reference to the given BTPLiteralMap256 and assigns it to the Value field. 
func (o *BTPAnnotation231) SetValue(v BTPLiteralMap256) { o.Value = &v } func (o BTPAnnotation231) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if o.Atomic != nil { toSerialize["atomic"] = o.Atomic } if o.BtType != nil { toSerialize["btType"] = o.BtType } if o.DocumentationType != nil { toSerialize["documentationType"] = o.DocumentationType } if o.EndSourceLocation != nil { toSerialize["endSourceLocation"] = o.EndSourceLocation } if o.NodeId != nil { toSerialize["nodeId"] = o.NodeId } if o.ShortDescriptor != nil { toSerialize["shortDescriptor"] = o.ShortDescriptor } if o.SpaceAfter != nil { toSerialize["spaceAfter"] = o.SpaceAfter } if o.SpaceBefore != nil { toSerialize["spaceBefore"] = o.SpaceBefore } if o.SpaceDefault != nil { toSerialize["spaceDefault"] = o.SpaceDefault } if o.StartSourceLocation != nil { toSerialize["startSourceLocation"] = o.StartSourceLocation } if o.Value != nil { toSerialize["value"] = o.Value } return json.Marshal(toSerialize) } type NullableBTPAnnotation231 struct { value *BTPAnnotation231 isSet bool } func (v NullableBTPAnnotation231) Get() *BTPAnnotation231 { return v.value } func (v *NullableBTPAnnotation231) Set(val *BTPAnnotation231) { v.value = val v.isSet = true } func (v NullableBTPAnnotation231) IsSet() bool { return v.isSet } func (v *NullableBTPAnnotation231) Unset() { v.value = nil v.isSet = false } func NewNullableBTPAnnotation231(val *BTPAnnotation231) *NullableBTPAnnotation231 { return &NullableBTPAnnotation231{value: val, isSet: true} } func (v NullableBTPAnnotation231) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableBTPAnnotation231) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
onshape/model_btp_annotation_231.go
0.76882
0.452354
model_btp_annotation_231.go
starcoder
package ugfx import ( "image" "image/png" "math" "os" ) // Describes a literal color using four 32-bit floating-point numbers in RGBA order. type Rgba32 struct { // Red component R float32 // Green component G float32 // Blue component B float32 // Alpha component A float32 } // Converts the specified `vals` to a newly initialized `Rgba32` instance. // // The first 4 `vals` are used for `R`, `G`, `B`, and `A` in that order, if present. // `A` is set to 1 if `vals[3]` is not present. func NewRgba32(vals ...float64) (me *Rgba32) { me = &Rgba32{} if len(vals) > 0 { if me.R = float32(vals[0]); len(vals) > 1 { if me.G = float32(vals[1]); len(vals) > 2 { if me.B = float32(vals[2]); len(vals) > 3 { me.A = float32(vals[3]) } else { me.A = 1 } } } } return } // Describes a literal color using four 64-bit floating-point numbers in RGBA order. type Rgba64 struct { // Red component R float64 // Green component G float64 // Blue component B float64 // Alpha component A float64 } // Converts the specified `vals` to a newly initialized `Rgba64` instance. // // The first 4 `vals` are used for `R`, `G`, `B`, and `A` in that order, if present. // `A` is set to 1 if `vals[3]` is not present. func NewRgba64(vals ...float64) (me *Rgba64) { me = &Rgba64{} if len(vals) > 0 { if me.R = vals[0]; len(vals) > 1 { if me.G = vals[1]; len(vals) > 2 { if me.B = vals[2]; len(vals) > 3 { me.A = vals[3] } else { me.A = 1 } } } } return } // Converts the given value from gamma to linear color space. func GammaToLinearSpace(f float64) float64 { if f > 0.0404482362771082 { return math.Pow((f+0.055)/1.055, 2.4) } return f / 12.92 } // If 2 dimensions are represented in a 1-dimensional linear array, this function provides a way to return a 1D index addressing the specified 2D coordinate. func Index2D(x, y, ysize int) int { return (x * ysize) + y } // If 3 dimensions are represented in a 1-dimensional linear array, this function provides a way to return a 1D index addressing the specified 3D coordinate. 
func Index3D(x, y, z, xsize, ysize int) int { return (((z * xsize) + x) * ysize) + y } // Converts the given value from linear to gamma color space. func LinearToGammaSpace(f float64) float64 { if f > 0.00313066844250063 { return 1.055*math.Pow(f, 1/2.4) - 0.055 } return f * 12.92 } // Saves any given `Image` as a local PNG file. func SavePngImageFile(img image.Image, filePath string) error { file, err := os.Create(filePath) if err == nil { defer file.Close() err = png.Encode(file, img) } return err }
util/gfx/gfx.go
0.746046
0.424054
gfx.go
starcoder
package card import ( "context" "github.com/xendit/xendit-go" ) /* Charge */ // CreateCharge creates new card charge func CreateCharge(data *CreateChargeParams) (*xendit.CardCharge, *xendit.Error) { return CreateChargeWithContext(context.Background(), data) } // CreateChargeWithContext creates new card charge with context func CreateChargeWithContext(ctx context.Context, data *CreateChargeParams) (*xendit.CardCharge, *xendit.Error) { client, err := getClient() if err != nil { return nil, err } return client.CreateChargeWithContext(ctx, data) } // CaptureCharge captures a card charge func CaptureCharge(data *CaptureChargeParams) (*xendit.CardCharge, *xendit.Error) { return CaptureChargeWithContext(context.Background(), data) } // CaptureChargeWithContext captures a card charge with context func CaptureChargeWithContext(ctx context.Context, data *CaptureChargeParams) (*xendit.CardCharge, *xendit.Error) { client, err := getClient() if err != nil { return nil, err } return client.CaptureChargeWithContext(ctx, data) } // GetCharge gets a card charge func GetCharge(data *GetChargeParams) (*xendit.CardCharge, *xendit.Error) { return GetChargeWithContext(context.Background(), data) } // GetChargeWithContext gets a card charge with context func GetChargeWithContext(ctx context.Context, data *GetChargeParams) (*xendit.CardCharge, *xendit.Error) { client, err := getClient() if err != nil { return nil, err } return client.GetChargeWithContext(ctx, data) } // CreateRefund gets a card charge func CreateRefund(data *CreateRefundParams) (*xendit.CardRefund, *xendit.Error) { return CreateRefundWithContext(context.Background(), data) } // CreateRefundWithContext gets a card charge with context func CreateRefundWithContext(ctx context.Context, data *CreateRefundParams) (*xendit.CardRefund, *xendit.Error) { client, err := getClient() if err != nil { return nil, err } return client.CreateRefundWithContext(ctx, data) } /* Authorization */ // ReverseAuthorization reverses a card 
authorization func ReverseAuthorization(data *ReverseAuthorizationParams) (*xendit.CardReverseAuthorization, *xendit.Error) { return ReverseAuthorizationWithContext(context.Background(), data) } // ReverseAuthorizationWithContext reverses a card authorization with context func ReverseAuthorizationWithContext(ctx context.Context, data *ReverseAuthorizationParams) (*xendit.CardReverseAuthorization, *xendit.Error) { client, err := getClient() if err != nil { return nil, err } return client.ReverseAuthorizationWithContext(ctx, data) } func getClient() (*Client, *xendit.Error) { return &Client{ Opt: &xendit.Opt, APIRequester: xendit.GetAPIRequester(), }, nil }
card/card.go
0.719186
0.503906
card.go
starcoder
package iex // BalanceSheets pulls balance sheet data. Available quarterly (4 quarters) and // annually (4 years). type BalanceSheets struct { Symbol string `json:"symbol"` Statements []BalanceSheet `json:"balancesheet"` } // BalanceSheet models one balance sheet statement. Normally the amounts // returned are integers, although the currentCash for UBNT returned is a // float; therefore, these are all floats. type BalanceSheet struct { ReportDate Date `json:"reportDate"` CurrentCash float64 `json:"currentCash"` ShortTermInvestments float64 `json:"shortTermInvestments"` Receivables float64 `json:"receivables"` Inventory float64 `json:"inventory"` OtherCurrentAssets float64 `json:"otherCurrentAssets"` CurrentAssets float64 `json:"currentAssets"` LongTermInvestments float64 `json:"longTermInvestments"` PropertyPlanetEquipment float64 `json:"propertyPlantEquipment"` Goodwill float64 `json:"goodwill"` IntangibleAssets float64 `json:"intangibleAssets"` OtherAssets float64 `json:"otherAssets"` TotalAssets float64 `json:"totalAssets"` AccountsPayable float64 `json:"accountsPayable"` CurrentLongTermDebt float64 `json:"currentLongTermDebt"` OtherCurrentLiabilities float64 `json:"otherCurrentLiabilities"` TotalCurrentLiabilities float64 `json:"totalCurrentLiabilities"` LongTermDebt float64 `json:"longTermDebt"` OtherLiablities float64 `json:"otherLiabilities"` MinorityInterest float64 `json:"minorityInterest"` TotalLiabilities float64 `json:"totalLiabilities"` CommonStock float64 `json:"commonStock"` RetainedEarnings float64 `json:"retainedEarnings"` TreasuryStock float64 `json:"treasuryStock"` CapitalSurplus float64 `json:"capitalSurplus"` ShareholderEquity float64 `json:"shareholderEquity"` NetTangibleAssets float64 `json:"netTangibleAssets"` } // Book models the data returned from the /book endpoint. 
type Book struct { Quote Quote `json:"quote"` Bids []BidAsk `json:"bids"` Asks []BidAsk `json:"asks"` Trades []Trade `json:"trades"` SystemEvent SystemEvent `json:"systemEvent"` } // BidAsk models a bid or an ask for a quote. type BidAsk struct { Price float64 `json:"price"` Size int `json:"size"` Timestamp EpochTime `json:"timestamp"` } // Trade models a trade for a quote. type Trade struct { Price float64 `json:"price"` Size int `json:"size"` TradeID int `json:"tradeId"` IsISO bool `json:"isISO"` IsOddLot bool `json:"isOddLot"` IsOutsideRegularHours bool `json:"isOutsideRegularHours"` IsSinglePriceCross bool `json:"isSinglePriceCross"` IsTradeThroughExempt bool `json:"isTradeThroughExempt"` Timestamp EpochTime `json:"timestamp"` } // Auction models auction data for a security type Auction struct { AuctionType string `json:"auctionType"` PairedShares int `json:"pairedShares"` ImbalanceShares int `json:"imbalanceShares"` ReferencePrice float64 `json:"referencePrice"` IndicativePrice float64 `json:"indicativePrice"` AuctionBookPrice float64 `json:"auctionBookPrice"` CollarReferencePrice float64 `json:"collarReferencePrice"` LowerCollarPrice float64 `json:"lowerCollarPrice"` UpperCollarPrice float64 `json:"upperCollarPrice"` ExtensionNumber int `json:"extensionNumber"` StartTime EpochTime `json:"startTime"` LastUpdate EpochTime `json:"lastUpdate"` } // SystemEvent models a system event for a quote. 
type SystemEvent struct { SystemEvent string `json:"systemEvent"` Timestamp EpochTime `json:"timestamp"` } // SecurityEvent models events which apply to a specific security type SecurityEvent struct { SecurityEvent string `json:"securityEvent"` Timestamp EpochTime `json:"timestamp"` } // TradingStatus models the current trading status of a security type TradingStatus struct { Status string `json:"status"` Reason string `json:"reason"` Timestamp EpochTime `json:"timestamp"` } // OpHaltStatus models the operational halt status of a security type OpHaltStatus struct { IsHalted bool `json:"isHalted"` Timestamp EpochTime `json:"timestamp"` } // SSRStatus models the short sale price test status for a security type SSRStatus struct { IsSSR bool `json:"isSSR"` Detail string `json:"detail"` Timestamp EpochTime `json:"timestamp"` } // CashFlows pulls cash flow data. Available quarterly (4 quarters) or annually // (4 years). type CashFlows struct { Symbol string `json:"symbol"` Statements []CashFlow `json:"cashflow"` } // CashFlow models one cash flow statement. type CashFlow struct { ReportDate Date `json:"reportDate"` NetIncome float64 `json:"netIncome"` Depreciation float64 `json:"depreciation"` ChangesInReceivables float64 `json:"changesInReceivables"` ChangesInInventories float64 `json:"changesInInventories"` CashChange float64 `json:"cashChange"` CashFlow float64 `json:"cashFlow"` CapitalExpenditures float64 `json:"capitalExpenditures"` Investment float64 `json:"investments"` InvestingActivityOther float64 `json:"investingActivityOther"` TotalInvestingCashFloes float64 `json:"totalInvestingCashFlows"` DividensPaid float64 `json:"dividendsPaid"` NetBorrowings float64 `json:"netBorrowings"` OtherFinancingCashFlows float64 `json:"otherFinancingCashFlows"` CashFlowFinancing float64 `json:"cashFlowFinancing"` ExchangeRateEffect float64 `json:"exchangeRateEffect"` } // Company models the company data from the /company endpoint. 
type Company struct { Symbol string `json:"symbol"` Name string `json:"companyName"` Exchange string `json:"exchange"` Industry string `json:"industry"` Website string `json:"website"` Description string `json:"description"` CEO string `json:"CEO"` IssueType IssueType `json:"issueType"` Sector string `json:"sector"` Employees int `json:"employees"` Tags []string `json:"tags"` } // Dividend models one dividend. type Dividend struct { ExDate Date `json:"exDate"` PaymentDate Date `json:"paymentDate"` RecordDate Date `json:"recordDate"` DeclaredDate Date `json:"declaredDate"` Amount float64 `json:"amount"` Flag string `json:"flag"` } // Earnings provides earnings data for a given company including the actual // EPS, consensus, and fiscal period. Earnings are available quarterly (last 4 // quarters) and annually (last 4 years). type Earnings struct { Symbol string `json:"symbol"` Earnings []Earning `json:"earnings"` } // Earning models the earnings for one date. type Earning struct { ActualEPS float64 `json:"actualEPS"` ConsensusEPS float64 `json:"consensusEPS"` AnnounceTime AnnounceTime `json:"announcetime"` NumberOfEstimates int `json:"numberOfEstimates"` EPSSurpriseDollar float64 `json:"EPSSurpriseDollar"` EPSReportDate Date `json:"EPSReportDate"` FiscalPeriod string `json:"fiscalPeriod"` FiscalEndDate Date `json:"fiscalEndDate"` YearAgo float64 `json:"yearAgo"` YearAgoChangePercent float64 `json:"yearAgoChangePercent"` } // EarningsToday models the earning that will be reported today as two arrays: // before the open and after market close. Each array contains an object with // all keys from earnings, a quote object, and a headline key. type EarningsToday struct { BeforeOpen []TodayEarning `json:"bto"` AfterClose []TodayEarning `json:"amc"` } // TodayEarning models a single earning being reported today containing all // keys from earnings, a quote object, and a headline. 
type TodayEarning struct { Earning EstimatedChangePercent float64 `json:"estimatedChangePercent"` SymbolID int `json:"symbolId"` Symbol string `json:"symbol"` Quote Quote `json:"quote"` Headline string `json:"headline"` } // DelayedQuote returns the 15 minute delayed market quote. type DelayedQuote struct { Symbol string `json:"symbol"` DelayedPrice float64 `json:"delayedPrice"` DelayedSize int `json:"delayedSize"` DelayedPriceTime int `json:"delayedPriceTime"` High float64 `json:"High"` Low float64 `json:"Low"` TotalVolume int `json:"totalVolume"` ProcessedTime int `json:"processedTime"` } // EffectiveSpread models the effective spread, eligible volume, and price // improvement of a stock by market. type EffectiveSpread struct { Volume int `json:"volume"` Venue string `json:"venue"` VenueName string `json:"venueName"` EffectiveSpread float64 `json:"effectiveSpread"` EffectiveQuoted float64 `json:"effectiveQuoted"` PriceImprovement float64 `json:"priceImprovement"` } // Estimates models the latest consensus esimtate for the next fiscal period. type Estimates struct { Symbol string `json:"symbol"` Estimates []Estimate `json:"estimates"` } // Estimate models one estimate. type Estimate struct { ConsensusEPS float64 `json:"consensusEPS"` NumberOfEstimates int `json:"numberOfEstimates"` FiscalPeriod string `json:"fiscalPeriod"` FiscalEndDate Date `json:"fiscalEndDate"` ReportDate Date `json:"reportDate"` } // Financials models income statement, balance sheet, and cash flow data from // the most recent reported quarter. type Financials struct { Symbol string `json:"symbol"` Financials []Financial `json:"financials"` } // Financial pulls income statement, balance sheet, and cash flow data from // the most recent reported quarter. 
type Financial struct { ReportDate Date `json:"reportDate"` GrossProfit float64 `json:"grossProfit"` CostOfRevenue float64 `json:"costOfRevenue"` OperatingRevenue float64 `json:"operatingRevenue"` TotalRevenue float64 `json:"totalRevenue"` OperatingIncome float64 `json:"operatingIncome"` NetIncome float64 `json:"netIncome"` ResearchAndDevelopment float64 `json:"researchAndDevelopment"` OperatingExpense float64 `json:"operatingExpense"` CurrentAssets float64 `json:"currentAssets"` TotalAssets float64 `json:"totalAssets"` TotalLiabilities float64 `json:"totalLiabilities"` CurrentCash float64 `json:"currentCash"` TotalCash float64 `json:"totalCash"` TotalDebt float64 `json:"totalDebt"` ShareholderEquity float64 `json:"shareholderEquity"` CashChange float64 `json:"cashChange"` CashFlow float64 `json:"cashFlow"` OperatingGainsLosses string `json:"operatingGainsLosses"` } // FundOwner models a fund owning a stock. type FundOwner struct { AdjustedHolding float64 `json:"adjHolding"` AdjustedMarketValue float64 `json:"adjMv"` Name string `json:"entityProperName"` ReportDate EpochTime `json:"reportDate"` ReportedHolding float64 `json:"reportedHolding"` ReportedMarketValue float64 `json:"reportedMv"` } // HistoricalPrice models the data for a historical stock price. type HistoricalPrice struct { Date string `json:"date"` } // IncomeStatements pulls income statement data. Available quarterly (4 quarters) and // annually (4 years). type IncomeStatements struct { Symbol string `json:"symbol"` Statements []IncomeStatement `json:"income"` } // IncomeStatement models one income statement. 
type IncomeStatement struct { ReportDate Date `json:"reportDate"` TotalRevenue float64 `json:"totalRevenue"` CostOfRevenue float64 `json:"costOfRevenue"` GrossProfit float64 `json:"grossProfit"` ResearchAndDevelopment float64 `json:"researchAndDevelopment"` SellingGeneralAndAdmin float64 `json:"sellingGeneralAndAdmin"` OperatingExpense float64 `json:"operatingExpense"` OperatingIncome float64 `json:"operatingIncome"` OtherIncomeExpenseNet float64 `json:"otherIncomeExpenseNet"` EBIT float64 `json:"ebit"` InterestIncome float64 `json:"interestIncome"` PretaxIncome float64 `json:"pretaxIncome"` IncomeTax float64 `json:"incomeTax"` MinorityInterest float64 `json:"minorityInterest"` NetIncome float64 `json:"netIncome"` NetIncomeBasic float64 `json:"netIncomeBasic"` } // InsiderRoster models the top 10 insiders with the most recent information. type InsiderRoster struct { EntityName string `json:"entityName"` Position int `json:"position"` ReportDate Date `json:"reportDate"` } // InsiderSummary models a summary of insider information. type InsiderSummary struct { Name string `json:"fullName"` NetTransaction int `json:"netTransaction"` ReportedTitle string `json:"reportedTitle"` TotalBought int `json:"totalBought"` TotalSold int `json:"totalSold"` } // InsiderTransaction models a buy or sell transaction made by an insider of a // company. type InsiderTransaction struct { EffectiveDate EpochTime `json:"effectiveDate"` Name string `json:"fullName"` ReportedTitle string `json:"reportedTitle"` Price float64 `json:"tranPrice"` Shares int `json:"tranShares"` Value float64 `json:"tranValue"` } // InstitutionalOwner models an institutional owner of a stock. type InstitutionalOwner struct { EntityName string `json:"entityProperName"` AdjustedHolding float64 `json:"adjHolding"` AdjustedMarketValue float64 `json:"adjMv"` ReportDate EpochTime `json:"reportDate"` ReportedHolding float64 `json:"reportedHolding"` } // KeyStats models the data returned from IEX Cloud's /stats endpoint. 
type KeyStats struct { Name string `json:"companyName"` MarketCap int `json:"marketCap"` Week52High float64 `json:"week52High"` Week52Low float64 `json:"week52Low"` Week52Change float64 `json:"week52Change"` SharesOutstanding int `json:"sharesOutstanding"` Avg30Volume int `json:"avg30Volume"` Avg10Volume int `json:"avg10Volume"` Float int `json:"float"` Symbol string `json:"symbol"` Employees int `json:"employees"` TTMEPS float64 `json:"ttmEPS"` TTMDividendRate float64 `json:"ttmDividendRate"` DividendYield float64 `json:"dividendYield"` NextDividendDate Date `json:"nextDividendDate"` ExDividendDate Date `json:"exDividendDate"` NextEarningsDate Date `json:"nextEarningsDate"` PERatio float64 `json:"peRatio"` Day200MovingAvg float64 `json:"day200MovingAvg"` Day50MovingAvg float64 `json:"day50MovingAvg"` MaxChangePercent float64 `json:"maxChangePercent"` Year5ChangePercent float64 `json:"year5ChangePercent"` Year2ChangePercent float64 `json:"year2ChangePercent"` Year1ChangePercent float64 `json:"year1ChangePercent"` YTDChangePercent float64 `json:"ytdChangePercent"` Month6ChangePercent float64 `json:"month6ChangePercent"` Month3ChangePercent float64 `json:"month3ChangePercent"` Month1ChangePercent float64 `json:"month1ChangePercent"` Day30ChangePercent float64 `json:"day30ChangePercent"` Day5ChangePercent float64 `json:"day5ChangePercent"` } // LargestTrade models the 15 minute delayed, last sale eligible trades. type LargestTrade struct { Price float64 `json:"price"` Size int `json:"size"` Time int `json:"time"` TimeLabel string `json:"timeLabel"` Venue string `json:"venue"` VenueName string `json:"venueName"` } // Logo models the /logo endpoint. type Logo struct { URL string `json:"url"` } // Market models the traded volume on U.S. markets. 
type Market struct { MIC string `json:"mic"` TapeID string `json:"tapeId"` Venue string `json:"venueName"` Volume int `json:"volume"` TapeA int `json:"tapeA"` TapeB int `json:"tapeB"` TapeC int `json:"tapeC"` Percent float64 `json:"marketPercent"` LastUpdated EpochTime `json:"lastUpdated"` } // News models a news item either for the market or for an individual stock. type News struct { Time EpochTime `json:"datetime"` Headline string `json:"headline"` Source string `json:"source"` URL string `json:"url"` Summary string `json:"summary"` Related string `json:"related"` Image string `json:"image"` Language string `json:"lang"` HasPaywall bool `json:"hasPaywall"` } // OHLC models the open, high, low, close for a stock. type OHLC struct { Open OpenClose `json:"open"` Close OpenClose `json:"close"` High float64 `json:"high"` Low float64 `json:"low"` } // OpenClose provides the price and time for either the open or close price of // a stock. type OpenClose struct { Price float64 `json:"price"` Time int `json:"Time"` } // PreviousDay models the previous day adjusted price data. type PreviousDay struct { Symbol string `json:"symbol"` Date Date `json:"date"` Open float64 `json:"open"` High float64 `json:"high"` Low float64 `json:"Low"` Close float64 `json:"close"` Volume int `json:"volume"` UnadjustedVolume int `json:"unadjustedVolume"` Change float64 `json:"change"` ChangePercent float64 `json:"changePercent"` } // PriceTarget models the latest average, high, and low analyst price target for // a symbol. type PriceTarget struct { Symbol string `json:"symbol"` UpdatedDate Date `json:"updatedDate"` Average float64 `json:"priceTargetAverage"` High float64 `json:"priceTargetHigh"` Low float64 `json:"priceTargetLow"` NumAnalysts int `json:"numberOfAnalysts"` } // Quote models the data returned from the IEX Cloud /quote endpoint. 
type Quote struct { Symbol string `json:"symbol"` CompanyName string `json:"companyName"` CalculationPrice string `json:"calculationPrice"` Open float64 `json:"open"` OpenTime EpochTime `json:"openTime"` Close float64 `json:"close"` CloseTime EpochTime `json:"closeTime"` High float64 `json:"high"` Low float64 `json:"low"` LatestPrice float64 `json:"latestPrice"` LatestSource string `json:"latestSource"` LatestTime string `json:"latestTime"` LatestUpdate EpochTime `json:"latestUpdate"` LatestVolume int `json:"latestVolume"` IEXRealtimePrice float64 `json:"iexRealtimePrice"` IEXRealtimeSize int `json:"iexRealtimeSize"` IEXLastUpdated EpochTime `json:"iexLastUpdated"` DelayedPrice float64 `json:"delayedPrice"` DelayedPriceTime EpochTime `json:"delayedPriceTime"` ExtendedPrice float64 `json:"extendedPrice"` ExtendedChange float64 `json:"extendedChange"` ExtendedChangePercent float64 `json:"extendedChangePercent"` ExtendedPriceTime EpochTime `json:"extendedPriceTime"` PreviousClose float64 `json:"previousClose"` Change float64 `json:"change"` ChangePercent float64 `json:"changePercent"` IEXMarketPercent float64 `json:"iexMarketPercent"` IEXVolume int `json:"iexVolume"` AvgTotalVolume int `json:"avgTotalVolume"` IEXBidPrice float64 `json:"iexBidPrice"` IEXBidSize int `json:"iexBidSize"` IEXAskPrice float64 `json:"iexAskPrice"` IEXAskSize int `json:"iexAskSize"` MarketCap int `json:"marketCap"` Week52High float64 `json:"week52High"` Week52Low float64 `json:"week52Low"` YTDChange float64 `json:"ytdChange"` PERatio float64 `json:"peRatio"` } // Recommendation models the buy, hold, sell recommendations for a stock. 
type Recommendation struct { ConsensusEndDate EpochTime `json:"consensusEndDate"` ConsensusStartDate EpochTime `json:"consensusStartDate"` BuyRatings int `json:"ratingBuy"` HoldRatings int `json:"ratingHold"` NoRatings int `json:"ratingNone"` OverweightRatings int `json:"ratingOverweight"` SellRatings int `json:"ratingSell"` UnderweightRatings int `json:"ratingUnderweight"` ConsensusRating float64 `json:"ratingScaleMark"` } // RelevantStocks models a list of relevant stocks that may or may not be // peers. type RelevantStocks struct { Peers bool `json:"peers"` Symbols []string `json:"symbols"` } // SectorPerformance models the performance based on each sector ETF. type SectorPerformance struct { Type string `json:"sector"` Name string `json:"name"` Performance float64 `json:"performance"` LastUpdated EpochTime `json:"lastUpdated"` } // Split models the a stock split. type Split struct { ExDate Date `json:"exDate"` DeclaredDate Date `json:"declaredDate"` Ratio float64 `json:"ratio"` FromFactor float64 `json:"fromFactor"` Description string `json:"description"` } // Volume models the 15 minute delayed and 30 day average consolidated volume // percentage of a stock by market. type Volume struct { Volume int `json:"volume"` Venue string `json:"venue"` VenueName string `json:"venueName"` Date Date `json:"date"` MarketPercent float64 `json:"marketPercent"` AverageMarketPercent float64 `json:"avgMarketPercent"` }
stocks.go
0.708616
0.424949
stocks.go
starcoder
package interp import ( "fmt" "math" "strconv" "github.com/benhoyt/goawk/internal/strutil" ) type valueType uint8 const ( typeNull valueType = iota typeStr typeNum typeNumStr ) // An AWK value (these are passed around by value) type value struct { typ valueType // Type of value s string // String value (for typeStr) n float64 // Numeric value (for typeNum and typeNumStr) } // Create a new null value func null() value { return value{} } // Create a new number value func num(n float64) value { return value{typ: typeNum, n: n} } // Create a new string value func str(s string) value { return value{typ: typeStr, s: s} } // Create a new value for a "numeric string" context, converting the // string to a number if possible. func numStr(s string) value { f, err := strconv.ParseFloat(strutil.TrimSpace(s), 64) if err != nil { // Doesn't parse as number, make it a "true string" return value{typ: typeStr, s: s} } return value{typ: typeNumStr, s: s, n: f} } // Create a numeric value from a Go bool func boolean(b bool) value { if b { return num(1) } return num(0) } // Return true if value is a "true string" (string but not a "numeric // string") func (v value) isTrueStr() bool { return v.typ == typeStr } // Return Go bool value of AWK value. For numbers or numeric strings, // zero is false and everything else is true. For strings, empty // string is false and everything else is true. func (v value) boolean() bool { if v.isTrueStr() { return v.s != "" } else { return v.n != 0 } } // Return value's string value, or convert to a string using given // format if a number value. Integers are a special case and don't // use floatFormat. 
func (v value) str(floatFormat string) string { if v.typ == typeNum { switch { case math.IsNaN(v.n): return "nan" case math.IsInf(v.n, 0): if v.n < 0 { return "-inf" } else { return "inf" } case v.n == float64(int(v.n)): return strconv.Itoa(int(v.n)) default: return fmt.Sprintf(floatFormat, v.n) } } // For typeStr and typeStrNum we already have the string, for // typeNull v.s == "". return v.s } // Return value's number value, converting from string if necessary func (v value) num() float64 { if v.typ == typeStr { // Ensure string starts with a float and convert it return parseFloatPrefix(v.s) } // Handle case for typeNum and typeStrNum. If it's a numeric // string, we already have the float value from the numStr() // call. For typeNull v.n == 0. return v.n } // Like strconv.ParseFloat, but parses at the start of string and // allows things like "1.5foo" func parseFloatPrefix(s string) float64 { // Skip whitespace at start i := 0 for i < len(s) && strutil.IsASCIISpace(s[i]) { i++ } start := i // Parse mantissa: optional sign, initial digit(s), optional '.', // then more digits gotDigit := false if i < len(s) && (s[i] == '+' || s[i] == '-') { i++ } for i < len(s) && s[i] >= '0' && s[i] <= '9' { gotDigit = true i++ } if i < len(s) && s[i] == '.' { i++ } for i < len(s) && s[i] >= '0' && s[i] <= '9' { gotDigit = true i++ } if !gotDigit { return 0 } // Parse exponent ("1e" and similar are allowed, but ParseFloat // rejects them) end := i if i < len(s) && (s[i] == 'e' || s[i] == 'E') { i++ if i < len(s) && (s[i] == '+' || s[i] == '-') { i++ } for i < len(s) && s[i] >= '0' && s[i] <= '9' { i++ end = i } } floatStr := s[start:end] f, _ := strconv.ParseFloat(floatStr, 64) return f // Returns infinity in case of "value out of range" error }
interp/value.go
0.705075
0.453504
value.go
starcoder
package factor import ( "fmt" "math/big" "sort" "strings" ) // Value captures a single factor. It is either a number or a symbol. type Value struct { num *big.Rat pow int sym string } // IsNum indicates that v is a rational number. func (v Value) IsNum() bool { return v.num != nil } // Num simply returns the num value of the term. func (v Value) Num() *big.Rat { return v.num } // String displays a single factor. func (v Value) String() string { if v.num != nil { return v.num.RatString() } if v.sym != "" { if v.pow == 1 { return v.sym } return fmt.Sprintf("%s^%d", v.sym, v.pow) } return "<ERROR>" } // zero is a constant zero for comparisons. var zero = big.NewRat(0, 1) // one is a constant one for comparisons. var one = big.NewRat(1, 1) // minusOne is a constant -one for comparisons. var minusOne = big.NewRat(-1, 1) // R copies a rational value into a number value. func R(n *big.Rat) Value { c := &big.Rat{} return Value{num: c.Set(n)} } // D converts two integers to a rational number value. func D(num, den int64) Value { return Value{num: big.NewRat(num, den)} } // S converts a string into a symbol value. func S(sym string) Value { return Value{sym: sym, pow: 1} } // Sp converts a string, power to a symbol value. func Sp(sym string, pow int) Value { if pow == 0 { return D(1, 1) } return Value{sym: sym, pow: pow} } type ByAlpha []Value func (a ByAlpha) Len() int { return len(a) } func (a ByAlpha) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a ByAlpha) Less(i, j int) bool { if a[i].sym < a[j].sym { return true } if a[i].sym > a[j].sym { return false } // Higher powers first (after simplify this is moot). return a[i].pow > a[j].pow } // Simplify condenses an unsorted array (product) of values into a // simplified (ordered) form. 
func Simplify(vs ...Value) []Value { if len(vs) == 0 { return nil } var syms []Value n := big.NewRat(1, 1) for _, v := range vs { if v.num != nil { if zero.Cmp(v.num) == 0 { return nil } n.Mul(n, v.num) continue } syms = append(syms, v) } sort.Sort(ByAlpha(syms)) res := []Value{R(n)} for _, s := range syms { i := len(res) - 1 last := res[i] if last.sym != s.sym { res = append(res, s) continue } last.pow += s.pow if last.pow == 0 { res = res[:i] continue } res = append(res[:i], last) } return res } // Prod returns a string representing an product of values. This // function does not attempt to simplify the array first. func Prod(vs ...Value) string { if len(vs) == 0 { return "0" } var x []string prefix := "" for i, v := range vs { if v.num != nil && i == 0 && len(vs) != 1 { if one.Cmp(v.num) == 0 { continue } if minusOne.Cmp(v.num) == 0 { prefix = "-" continue } } x = append(x, v.String()) } return prefix + strings.Join(x, "*") } // Segment simplifies a set of factors and returns the numerical // coefficient, the non-numeric array of factors and a string // representation of this array of non-numeric factors. func Segment(vs ...Value) (*big.Rat, []Value, string) { x := Simplify(vs...) if len(x) == 0 { return nil, nil, "" } return x[0].num, x[1:], Prod(x[1:]...) } // Replace replaces copies of b found in a with c. The number of times b // appeared in a is returned as well as the replaced array of factors. func Replace(a, b, c []Value, max int) (int, []Value) { pn, pf, _ := Segment(b...) qf := Simplify(a...) r := pn.Inv(pn) n := 0 for len(pf) > 0 && (max <= 0 || n < max) { var nf []Value i := 0 j := 0 GIVEUP: for i < len(pf) && j < len(qf) { t := pf[i] for j < len(qf) { u := qf[j] j++ if u.num != nil || t.sym != u.sym { nf = append(nf, u) continue } if t.pow*u.pow < 0 { // Same symbol, but we require that // the sign of the power is the same. 
break GIVEUP } np := u.pow - t.pow if np*t.pow < 0 { break GIVEUP } if np != 0 { nf = append(nf, Sp(t.sym, np)) } i++ break } } if i != len(pf) { break } // Whole match found. qf = Simplify(append(append(nf, qf[j:]...), append(c, R(r))...)...) n++ } return n, qf }
src/algex/factor/factor.go
0.747247
0.525004
factor.go
starcoder
package work import ( "context" "database/sql" "time" ) type DayStats struct { Date time.Time Total time.Duration Worked time.Duration Expected time.Duration } type Stats struct { Total time.Duration Worked time.Duration Expected time.Duration DayStats []DayStats From time.Time To time.Time } func statsWork(db *sql.DB, from, to time.Time) (Stats, error) { expected := calculateExpected(from, to) stats := Stats{Expected: expected, From: from, To: to} total, err := getTotalDuration(db, from, to) if err != nil { return Stats{}, err } stats.Worked = total stats.Total = total - expected dayStats, err := getDayStats(db, from, to) if err != nil { return Stats{}, nil } stats.DayStats = dayStats return stats, nil } func calculateExpected(from time.Time, to time.Time) time.Duration { fromDate := time.Date(from.Year(), from.Month(), from.Day(), 0, 0, 0, 0, time.UTC) toDate := time.Date(to.Year(), to.Month(), to.Day(), 0, 0, 0, 0, time.UTC) result := 0 * time.Hour for d := fromDate; d.Unix() <= toDate.Unix(); d = d.AddDate(0, 0, 1) { if d.Weekday() != time.Sunday && d.Weekday() != time.Saturday { result += 8 * time.Hour } } return result } func getTotalDuration(db *sql.DB, from, to time.Time) (time.Duration, error) { var seconds sql.NullInt64 ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() err := db.QueryRowContext( ctx, `SELECT SUM(ended_at - started_at) FROM work_log WHERE started_at >= $1 AND ended_at <= $2 AND started_at IS NOT NULL AND ended_at IS NOT NULL`, from.Unix(), to.Unix(), ).Scan(&seconds) if err == sql.ErrNoRows { return 0, nil } if err != nil { return 0, err } if seconds.Valid { return time.Duration(seconds.Int64) * time.Second, nil } return 0, nil } func getDayStats(db *sql.DB, from, to time.Time) ([]DayStats, error) { ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() rows, err := db.QueryContext( ctx, `SELECT STRFTIME('%Y-%m-%d', started_at, 'unixepoch') AS day, SUM(ended_at - 
started_at) AS duration FROM work_log WHERE started_at >= $1 AND ended_at <= $2 AND started_at IS NOT NULL AND ended_at IS NOT NULL GROUP BY 1`, from.Unix(), to.Unix(), ) if err != nil { return nil, err } var stats []DayStats for rows.Next() { var dayStr string var seconds int64 err := rows.Scan(&dayStr, &seconds) if err != nil { return nil, err } date, err := time.ParseInLocation("2006-01-02", dayStr, time.Local) if err != nil { return nil, err } item := DayStats{} item.Date = date item.Expected = 8 * time.Hour item.Worked = time.Duration(seconds) * time.Second item.Total = item.Worked - item.Expected stats = append(stats, item) } return stats, nil } func getDateRangeForLog(now time.Time, weekOffset int) (time.Time, time.Time) { normWeekday := int(now.Weekday()) if normWeekday == 0 { normWeekday = 7 } var fromOffset, toOffset int if weekOffset == 0 { fromOffset = normWeekday - 1 if normWeekday >= 1 && normWeekday <= 5 { toOffset = 0 } else if normWeekday == 6 { toOffset = 1 } else if normWeekday == 7 { toOffset = 2 } } else { fromOffset = 7 * weekOffset + normWeekday - 1 toOffset = (7 * (weekOffset - 1)) + normWeekday + 2 } from := time.Date(now.Year(), now.Month(), now.Day()-fromOffset, 0, 0, 0, 0, now.Location()) to := time.Date(now.Year(), now.Month(), now.Day()-toOffset, 23, 59, 59, 0, now.Location()) return from, to }
stats.go
0.576423
0.407333
stats.go
starcoder
package constraint import ( "reflect" "strings" "time" "github.com/jt0/gomer/flect" "github.com/jt0/gomer/gomerr" ) type ComparisonType = string const ( EQ ComparisonType = "EQ" NEQ = "NEQ" GT = "GT" GTE = "GTE" LT = "LT" LTE = "LTE" ) func Gte(ft reflect.Type, compareTo *interface{}) { } // IntCompare compares the tested value to compareTo. While compareTo is an int64, the tested value can be any of the // integer types (e.g. int, int16, etc). If the tested value is not an integer type, the constraint will fail. func IntCompare(comparisonType ComparisonType, compareTo *int64) Constraint { comparisonType = strings.ToUpper(comparisonType) comparator, ok := intComparators[comparisonType] if !ok { panic("Unrecognized comparison type: " + comparisonType) } return New("Int"+comparisonType, compareTo, func(toTest interface{}) (ge gomerr.Gomerr) { if compareTo == nil { return nil } ttv, ok := flect.ReadableIndirectValue(toTest) if !ok { return nil // should be NotSatisfied? } defer func() { if r := recover(); r != nil { ge = gomerr.Unprocessable("toTest is not an int (or *int)", toTest) } }() if !comparator(ttv.Int(), *compareTo) { return NotSatisfied(toTest) } return nil }) } // IntBetween determines whether the provided value is (inclusively) between the lower and upper values provided. // Stated explicitly, this tests for lower <= value <= upper. 
func IntBetween(lower, upper *int64) Constraint { return And(IntCompare(GTE, lower), IntCompare(LTE, upper)) } var intComparators = map[ComparisonType]func(int64, int64) bool{ EQ: func(value, compareTo int64) bool { return value == compareTo }, NEQ: func(value, compareTo int64) bool { return value != compareTo }, GT: func(value, compareTo int64) bool { return value > compareTo }, GTE: func(value, compareTo int64) bool { return value >= compareTo }, LT: func(value, compareTo int64) bool { return value < compareTo }, LTE: func(value, compareTo int64) bool { return value <= compareTo }, } // UintCompare compares a tested value to compareTo. While compareTo is an uint64, the tested value can be any of the // unsigned integer types (e.g. uint, uint16, etc). If the tested value is not an unsigned integer type, the constraint // will fail. func UintCompare(comparisonType ComparisonType, compareTo *uint64) Constraint { comparisonType = strings.ToUpper(comparisonType) comparator, ok := uintComparators[comparisonType] if !ok { panic("Unrecognized comparison type: " + comparisonType) } return New("Uint"+comparisonType, compareTo, func(toTest interface{}) (ge gomerr.Gomerr) { if compareTo == nil { return nil } ttv, ok := flect.ReadableIndirectValue(toTest) if !ok { return nil // should be NotSatisfied? } defer func() { if r := recover(); r != nil { ge = gomerr.Unprocessable("toTest is not a uint (or *uint)", toTest) } }() if !comparator(ttv.Uint(), *compareTo) { return NotSatisfied(toTest) } return nil }) } // UintBetween determines whether the provided value is (inclusively) between the lower and upper values provided. // Stated explicitly, this tests for lower <= value <= upper. 
func UintBetween(lower, upper *uint64) Constraint { return And(UintCompare(GTE, lower), UintCompare(LTE, upper)) } var uintComparators = map[ComparisonType]func(uint64, uint64) bool{ EQ: func(value, compareTo uint64) bool { return value == compareTo }, NEQ: func(value, compareTo uint64) bool { return value != compareTo }, GT: func(value, compareTo uint64) bool { return value > compareTo }, GTE: func(value, compareTo uint64) bool { return value >= compareTo }, LT: func(value, compareTo uint64) bool { return value < compareTo }, LTE: func(value, compareTo uint64) bool { return value <= compareTo }, } // FloatCompare compares a tested value to compareTo. While compareTo is an float64, the tested value can be either // float32/float64. If the value is not a float type, the constraint will fail. func FloatCompare(comparisonType ComparisonType, compareTo *float64) Constraint { comparisonType = strings.ToUpper(comparisonType) comparator, ok := floatComparators[comparisonType] if !ok { panic("Unrecognized comparison type: " + comparisonType) } return New("Float"+comparisonType, compareTo, func(toTest interface{}) (ge gomerr.Gomerr) { if compareTo == nil { return nil } ttv, ok := flect.ReadableIndirectValue(toTest) if !ok { return nil // should be NotSatisfied? } defer func() { if r := recover(); r != nil { ge = gomerr.Unprocessable("toTest is not a float (or *float)", toTest) } }() if !comparator(ttv.Float(), *compareTo) { return NotSatisfied(toTest) } return nil }) } // FloatBetween determines whether the provided value is (inclusively) between the lower and upper values provided. // Stated explicitly, this tests for lower <= value <= upper. 
func FloatBetween(lower, upper *float64) Constraint { return And(FloatCompare(GTE, lower), FloatCompare(LTE, upper)) } var floatComparators = map[ComparisonType]func(float64, float64) bool{ EQ: func(value, compareTo float64) bool { return value == compareTo }, NEQ: func(value, compareTo float64) bool { return value != compareTo }, GT: func(value, compareTo float64) bool { return value > compareTo }, GTE: func(value, compareTo float64) bool { return value >= compareTo }, LT: func(value, compareTo float64) bool { return value < compareTo }, LTE: func(value, compareTo float64) bool { return value <= compareTo }, } // TimeCompare compares a tested value to compareTo. If the tested value is not a time.Time, the constraint will fail. func TimeCompare(comparisonType ComparisonType, compareTo *time.Time) Constraint { comparisonType = strings.ToUpper(comparisonType) comparator, ok := timeComparators[comparisonType] if !ok { panic("Unrecognized comparison type: " + comparisonType) } return New("Time"+comparisonType, compareTo, func(toTest interface{}) (ge gomerr.Gomerr) { if compareTo == nil { return nil } ttv, ok := flect.ReadableIndirectValue(toTest) if !ok { return nil // should be NotSatisfied? } defer func() { if r := recover(); r != nil { ge = gomerr.Unprocessable("toTest is not a time.Time (or *time.Time)", toTest) } }() if !comparator(ttv.Interface().(time.Time), *compareTo) { return NotSatisfied(toTest) } return nil }) } // TimeBetween determines whether the provided value is (inclusively) between the lower and upper values provided. // Stated explicitly, this tests for lower <= value <= upper. 
func TimeBetween(lower, upper *time.Time) Constraint { return And(TimeCompare(GTE, lower), TimeCompare(LTE, upper)) } var timeComparators = map[ComparisonType]func(time.Time, time.Time) bool{ EQ: func(value, compareTo time.Time) bool { return value.Equal(compareTo) }, NEQ: func(value, compareTo time.Time) bool { return !value.Equal(compareTo) }, GT: func(value, compareTo time.Time) bool { return value.After(compareTo) }, GTE: func(value, compareTo time.Time) bool { return value.After(compareTo) || value.Equal(compareTo) }, LT: func(value, compareTo time.Time) bool { return value.Before(compareTo) }, LTE: func(value, compareTo time.Time) bool { return value.Before(compareTo) || value.Equal(compareTo) }, }
constraint/comparison.go
0.773088
0.586404
comparison.go
starcoder
package spec import ( "testing" "time" "github.com/256dpi/gomqtt/client" "github.com/256dpi/gomqtt/packet" "github.com/stretchr/testify/assert" ) // OfflineSubscriptionTest tests the broker for properly handling offline // subscriptions. func OfflineSubscriptionTest(t *testing.T, config *Config, topic string, sub, pub packet.QOS, await bool) { id := config.clientID() options := client.NewConfigWithClientID(config.URL, id) options.CleanSession = false assert.NoError(t, client.ClearSession(options, 10*time.Second)) offlineSubscriber := client.New() cf, err := offlineSubscriber.Connect(options) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) sf, err := offlineSubscriber.Subscribe(topic, sub) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{sub}, sf.ReturnCodes()) err = offlineSubscriber.Disconnect() assert.NoError(t, err) publisher := client.New() cf, err = publisher.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) pf, err := publisher.Publish(topic, testPayload, pub, false) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) err = publisher.Disconnect() assert.NoError(t, err) wait := make(chan struct{}) offlineReceiver := client.New() offlineReceiver.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, topic, msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, lower(sub, pub), msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err = offlineReceiver.Connect(options) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.True(t, cf.SessionPresent()) if await { safeReceive(wait) } 
time.Sleep(config.NoMessageWait) err = offlineReceiver.Disconnect() assert.NoError(t, err) } // OfflineSubscriptionRetainedTest tests the broker for properly handling // retained messages and offline subscriptions. func OfflineSubscriptionRetainedTest(t *testing.T, config *Config, topic string, sub, pub packet.QOS, await bool) { id := config.clientID() options := client.NewConfigWithClientID(config.URL, id) options.CleanSession = false assert.NoError(t, client.ClearRetainedMessage(options, topic, 10*time.Second)) assert.NoError(t, client.ClearSession(options, 10*time.Second)) time.Sleep(config.MessageRetainWait) offlineSubscriber := client.New() cf, err := offlineSubscriber.Connect(options) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) sf, err := offlineSubscriber.Subscribe(topic, sub) assert.NoError(t, err) assert.NoError(t, sf.Wait(10*time.Second)) assert.Equal(t, []packet.QOS{sub}, sf.ReturnCodes()) err = offlineSubscriber.Disconnect() assert.NoError(t, err) publisher := client.New() cf, err = publisher.Connect(client.NewConfig(config.URL)) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.False(t, cf.SessionPresent()) pf, err := publisher.Publish(topic, testPayload, pub, true) assert.NoError(t, err) assert.NoError(t, pf.Wait(10*time.Second)) time.Sleep(config.MessageRetainWait) err = publisher.Disconnect() assert.NoError(t, err) wait := make(chan struct{}) offlineReceiver := client.New() offlineReceiver.Callback = func(msg *packet.Message, err error) error { assert.NoError(t, err) assert.Equal(t, topic, msg.Topic) assert.Equal(t, testPayload, msg.Payload) assert.Equal(t, packet.QOS(sub), msg.QOS) assert.False(t, msg.Retain) close(wait) return nil } cf, err = offlineReceiver.Connect(options) assert.NoError(t, err) assert.NoError(t, cf.Wait(10*time.Second)) 
assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode()) assert.True(t, cf.SessionPresent()) if await { safeReceive(wait) } time.Sleep(config.NoMessageWait) err = offlineReceiver.Disconnect() assert.NoError(t, err) }
spec/offline.go
0.62681
0.493226
offline.go
starcoder
package examples import ( "encoding/json" "fmt" "os" ) // Go offers built-in support for JSON encoding and decoding, // including to and from built-in and custom data types. // We’ll use these two structs to demonstrate encoding and decoding of custom types below. type response1 struct { Page int Fruits []string } type response2 struct { Page int `json:"page"` Fruits []string `json:"fruits"` } // JSON function to illstrate json capabilites in go func JSON() { // First we’ll look at encoding basic data types to JSON strings. // Here are some examples for atomic values. bolB, _ := json.Marshal(true) fmt.Println(string(bolB)) intB, _ := json.Marshal(1) fmt.Println(string(intB)) fltB, _ := json.Marshal(2.15) fmt.Println(string(fltB)) strB, _ := json.Marshal("GOLANG") fmt.Println(string(strB)) // And here are some for slices and maps, // which encode to JSON arrays and objects as you’d expect. slcD := []string{"apple", "peach", "pear"} slcB, _ := json.Marshal(slcD) fmt.Println(string(slcB)) mapD := map[string]int{"apple": 5, "lettuce": 7} mapB, _ := json.Marshal(mapD) fmt.Println(string(mapB)) // The JSON package can automatically encode your custom data types. // It will only include exported fields in the encoded output // and will by default use those names as the JSON keys. res1D := &response1{ Page: 1, Fruits: []string{"apple", "peach", "pear"}} res1B, _ := json.Marshal(res1D) fmt.Println(string(res1B)) // You can use tags on struct field declarations // to customize the encoded JSON key names. // Check the definition of response2 above to see an example of such tags. res2D := &response2{ Page: 1, Fruits: []string{"apple", "pear", "peach"}} res2B, _ := json.Marshal(res2D) fmt.Println(string(res2B)) // ------------------------------------------------------------------------------------- // Now let’s look at decoding JSON data into Go values. // Here’s an example for a generic data structure. 
byt := []byte(`{"num": 6.13, "strs": ["a", "b"]}`) // We need to provide a variable where the JSON package can put the decoded data. // This map[string]interface{} will hold a map of strings to arbitrary data types. var dat map[string]interface{} // Here’s the actual decoding, and a check for associated errors. if err := json.Unmarshal(byt, &dat); err != nil { panic(err) } fmt.Println(dat) // In order to use the values in the decoded map, // we’ll need to cast them to their appropriate type. // For example here we cast the value in num to the expected float64 type. num := dat["num"].(float64) fmt.Println(num) // Accessing nested data requires a series of casts. strs := dat["strs"].([]interface{}) str1 := strs[0].(string) fmt.Println(str1) // We can also decode JSON into custom data types. // This has the advantages of adding additional type-safety to // our programs and eliminating the need for type assertions when // accessing the decoded data. str := `{"page": 1, "fruits": ["apple", "peach"]}` res := response2{} json.Unmarshal([]byte(str), &res) fmt.Println(res) fmt.Println(res.Fruits[0]) // In the examples above we always used bytes and strings as intermediates // between the data and JSON representation on standard out. // We can also stream JSON encodings directly to os.Writers // like os.Stdout or even HTTP response bodies. enc := json.NewEncoder(os.Stdout) d := map[string]int{"apple": 5, "pear": 3} enc.Encode(d) // For in-depth learning : // https://blog.golang.org/json-and-go // https://golang.org/pkg/encoding/json/ }
examples/json.go
0.695648
0.422803
json.go
starcoder
package spdx // CreationInfo2_1 is a Document Creation Information section of an // SPDX Document for version 2.1 of the spec. type CreationInfo2_1 struct { // 2.1: SPDX Version; should be in the format "SPDX-2.1" // Cardinality: mandatory, one SPDXVersion string // 2.2: Data License; should be "CC0-1.0" // Cardinality: mandatory, one DataLicense string // 2.3: SPDX Identifier; should be "DOCUMENT" to represent // mandatory identifier of SPDXRef-DOCUMENT // Cardinality: mandatory, one SPDXIdentifier ElementID // 2.4: Document Name // Cardinality: mandatory, one DocumentName string // 2.5: Document Namespace // Cardinality: mandatory, one DocumentNamespace string // 2.6: External Document References // Cardinality: optional, one or many ExternalDocumentReferences map[string]ExternalDocumentRef2_1 // 2.7: License List Version // Cardinality: optional, one LicenseListVersion string // 2.8: Creators: may have multiple keys for Person, Organization // and/or Tool // Cardinality: mandatory, one or many CreatorPersons []string CreatorOrganizations []string CreatorTools []string // 2.9: Created: data format YYYY-MM-DDThh:mm:ssZ // Cardinality: mandatory, one Created string // 2.10: Creator Comment // Cardinality: optional, one CreatorComment string // 2.11: Document Comment // Cardinality: optional, one DocumentComment string } // ExternalDocumentRef2_1 is a reference to an external SPDX document // as defined in section 2.6 for version 2.1 of the spec. type ExternalDocumentRef2_1 struct { // DocumentRefID is the ID string defined in the start of the // reference. It should _not_ contain the "DocumentRef-" part // of the mandatory ID string. DocumentRefID string // URI is the URI defined for the external document URI string // Alg is the type of hash algorithm used, e.g. "SHA1", "SHA256" Alg string // Checksum is the actual hash data Checksum string } // CreationInfo2_2 is a Document Creation Information section of an // SPDX Document for version 2.2 of the spec. 
type CreationInfo2_2 struct { // 2.1: SPDX Version; should be in the format "SPDX-2.2" // Cardinality: mandatory, one SPDXVersion string // 2.2: Data License; should be "CC0-1.0" // Cardinality: mandatory, one DataLicense string // 2.3: SPDX Identifier; should be "DOCUMENT" to represent // mandatory identifier of SPDXRef-DOCUMENT // Cardinality: mandatory, one SPDXIdentifier ElementID // 2.4: Document Name // Cardinality: mandatory, one DocumentName string // 2.5: Document Namespace // Cardinality: mandatory, one DocumentNamespace string // 2.6: External Document References // Cardinality: optional, one or many ExternalDocumentReferences map[string]ExternalDocumentRef2_2 // 2.7: License List Version // Cardinality: optional, one LicenseListVersion string // 2.8: Creators: may have multiple keys for Person, Organization // and/or Tool // Cardinality: mandatory, one or many CreatorPersons []string CreatorOrganizations []string CreatorTools []string // 2.9: Created: data format YYYY-MM-DDThh:mm:ssZ // Cardinality: mandatory, one Created string // 2.10: Creator Comment // Cardinality: optional, one CreatorComment string // 2.11: Document Comment // Cardinality: optional, one DocumentComment string } // ExternalDocumentRef2_2 is a reference to an external SPDX document // as defined in section 2.6 for version 2.2 of the spec. type ExternalDocumentRef2_2 struct { // DocumentRefID is the ID string defined in the start of the // reference. It should _not_ contain the "DocumentRef-" part // of the mandatory ID string. DocumentRefID string // URI is the URI defined for the external document URI string // Alg is the type of hash algorithm used, e.g. "SHA1", "SHA256" Alg string // Checksum is the actual hash data Checksum string }
spdx/creation_info.go
0.679498
0.452717
creation_info.go
starcoder
package rrule import "time" // validFunc is a kind of function that checks if a time is valid against a rule. It returns true if the time is valid. // A pointer is accepted in order to avoid the memory copy of the entire time structure. Nil is never considered valid. type validFunc func(t *time.Time) bool func alwaysValid(t *time.Time) bool { return t != nil } func validSecond(seconds []int) validFunc { if len(seconds) == 0 { return alwaysValid } m := intmap(seconds) return func(t *time.Time) bool { if t == nil { return false } return m[t.Second()] } } func validMinute(minutes []int) validFunc { if len(minutes) == 0 { return alwaysValid } m := intmap(minutes) return func(t *time.Time) bool { if t == nil { return false } return m[t.Minute()] } } func validHour(hours []int) validFunc { if len(hours) == 0 { return alwaysValid } m := intmap(hours) return func(t *time.Time) bool { if t == nil { return false } return m[t.Hour()] } } // validWeekday ignores the N modifier of QualifiedWeekday func validWeekday(weekdays []QualifiedWeekday) validFunc { if len(weekdays) == 0 { return alwaysValid } m := weekdaymap(weekdays) return func(t *time.Time) bool { if t == nil { return false } return m[t.Weekday()] } } func validMonthDay(monthdays []int) validFunc { if len(monthdays) == 0 { return alwaysValid } m := intmap(monthdays) return func(t *time.Time) bool { if t == nil { return false } return m[t.Day()] } } func validWeek(weeks []int) validFunc { if len(weeks) == 0 { return alwaysValid } m := intmap(weeks) return func(t *time.Time) bool { if t == nil { return false } return m[1+t.YearDay()/7] } } func validMonth(months []time.Month) validFunc { if len(months) == 0 { return alwaysValid } m := monthmap(months) return func(t *time.Time) bool { if t == nil { return false } return m[t.Month()] } } func validYearDay(yeardays []int) validFunc { if len(yeardays) == 0 { return alwaysValid } m := intmap(yeardays) return func(t *time.Time) bool { if t == nil { return false } return 
m[t.YearDay()] } }
validators.go
0.70253
0.554591
validators.go
starcoder
// Package entity defines entities used in sdk package entity import ( "encoding/binary" "errors" "math" "github.com/milvus-io/milvus-sdk-go/internal/proto/schema" ) //go:generate go run gen/gen.go // Column interface field type for column-based data frame type Column interface { Name() string Type() FieldType Len() int FieldData() *schema.FieldData } // Vector interface vector used int search type Vector interface { Dim() int Serialize() []byte } // FloatVector float32 vector wrapper type FloatVector []float32 // Dim returns vector dimension func (fv FloatVector) Dim() int { return len(fv) } // Serialize serialize vector into byte slice, used in search placeholder // LittleEndian is used for convention func (fv FloatVector) Serialize() []byte { data := make([]byte, 0, 4*len(fv)) // float32 occupies 4 bytes buf := make([]byte, 4) for _, f := range fv { binary.LittleEndian.PutUint32(buf, math.Float32bits(f)) data = append(data, buf...) } return data } // BinaryVector []byte vector wrapper type BinaryVector []byte // Dim return vector dimension, note that binary vector is bits count func (bv BinaryVector) Dim() int { return 8 * len(bv) } // Serialize just return bytes func (bv BinaryVector) Serialize() []byte { return bv } var errFieldDataTypeNotMatch = errors.New("FieldData type not matched") // IDColumns converts schema.IDs to corresponding column // currently Int64 / string may be in IDs func IDColumns(idField *schema.IDs, begin, end int) (Column, error) { var idColumn Column if idField == nil { return nil, errors.New("nil Ids from response") } switch field := idField.GetIdField().(type) { case *schema.IDs_IntId: if end >= 0 { idColumn = NewColumnInt64("", field.IntId.GetData()[begin:end]) } else { idColumn = NewColumnInt64("", field.IntId.GetData()[begin:]) } case *schema.IDs_StrId: if end >= 0 { idColumn = NewColumnString("", field.StrId.GetData()[begin:end]) } else { idColumn = NewColumnString("", field.StrId.GetData()[begin:]) } default: return nil, 
errors.New("unsupported id type") } return idColumn, nil } // FieldDataColumn converts schema.FieldData to Column, used int search result conversion logic // begin, end specifies the start and end positions func FieldDataColumn(fd *schema.FieldData, begin, end int) (Column, error) { switch fd.GetType() { case schema.DataType_Bool: data, ok := fd.GetScalars().GetData().(*schema.ScalarField_BoolData) if !ok { return nil, errFieldDataTypeNotMatch } return NewColumnBool(fd.GetFieldName(), data.BoolData.GetData()[begin:end]), nil case schema.DataType_Int8: data, ok := fd.GetScalars().GetData().(*schema.ScalarField_IntData) if !ok { return nil, errFieldDataTypeNotMatch } values := make([]int8, 0, len(data.IntData.GetData())) for _, v := range data.IntData.GetData() { values = append(values, int8(v)) } return NewColumnInt8(fd.GetFieldName(), values[begin:end]), nil case schema.DataType_Int16: data, ok := fd.GetScalars().GetData().(*schema.ScalarField_IntData) if !ok { return nil, errFieldDataTypeNotMatch } values := make([]int16, 0, len(data.IntData.GetData())) for _, v := range data.IntData.GetData() { values = append(values, int16(v)) } return NewColumnInt16(fd.GetFieldName(), values[begin:end]), nil case schema.DataType_Int32: data, ok := fd.GetScalars().GetData().(*schema.ScalarField_IntData) if !ok { return nil, errFieldDataTypeNotMatch } return NewColumnInt32(fd.GetFieldName(), data.IntData.GetData()[begin:end]), nil case schema.DataType_Int64: data, ok := fd.GetScalars().GetData().(*schema.ScalarField_LongData) if !ok { return nil, errFieldDataTypeNotMatch } return NewColumnInt64(fd.GetFieldName(), data.LongData.GetData()[begin:end]), nil case schema.DataType_Float: data, ok := fd.GetScalars().GetData().(*schema.ScalarField_FloatData) if !ok { return nil, errFieldDataTypeNotMatch } return NewColumnFloat(fd.GetFieldName(), data.FloatData.GetData()[begin:end]), nil case schema.DataType_Double: data, ok := fd.GetScalars().GetData().(*schema.ScalarField_DoubleData) if 
!ok { return nil, errFieldDataTypeNotMatch } return NewColumnDouble(fd.GetFieldName(), data.DoubleData.GetData()[begin:end]), nil case schema.DataType_String: data, ok := fd.GetScalars().GetData().(*schema.ScalarField_StringData) if !ok { return nil, errFieldDataTypeNotMatch } return NewColumnString(fd.GetFieldName(), data.StringData.GetData()[begin:end]), nil default: return nil, errors.New("unsupported data type") } }
entity/columns.go
0.632843
0.406626
columns.go
starcoder
package dht import ( "math" "math/big" ) // Full token range. const ( Murmur3MinToken = int64(math.MinInt64) Murmur3MaxToken = int64(math.MaxInt64) ) // Murmur3Partitioner see // https://github.com/scylladb/scylla/blob/master/dht/murmur3_partitioner.hh // https://github.com/scylladb/scylla/blob/master/dht/murmur3_partitioner.cc type Murmur3Partitioner struct { shardCount uint shardingIgnoreMsbBits uint shardStart []uint64 } // NewMurmur3Partitioner creates a new Murmur3Partitioner instance. func NewMurmur3Partitioner(shardCount, shardingIgnoreMsbBits uint) *Murmur3Partitioner { if shardCount <= 1 { shardingIgnoreMsbBits = 0 } p := &Murmur3Partitioner{ shardCount: shardCount, shardingIgnoreMsbBits: shardingIgnoreMsbBits, } p.initZeroBasedShardStart() return p } func (p *Murmur3Partitioner) initZeroBasedShardStart() { p.shardStart = make([]uint64, p.shardCount) if p.shardCount == 1 { return } var ( t uint64 token = new(big.Int) shards = new(big.Int) ) shards.SetUint64(uint64(p.shardCount)) for s := uint(0); s < p.shardCount; s++ { // uint64_t token = (uint128_t(s) << 64) / shards; token.SetUint64(uint64(s)) token.Lsh(token, 64) token.Div(token, shards) // token >>= sharding_ignore_msb_bits; t = token.Uint64() t >>= p.shardingIgnoreMsbBits // Token is the start of the next shard, and can be slightly before due // to rounding errors adjust. for p.zeroBasedShardOf(t) != s { t++ } p.shardStart[s] = t } } func (p *Murmur3Partitioner) zeroBasedShardOf(t uint64) uint { var ( token = new(big.Int) shards = new(big.Int) ) // token <<= sharding_ignore_msb_bits; token.SetUint64(t << p.shardingIgnoreMsbBits) // (uint128_t(token) * shards) >> 64; shards.SetUint64(uint64(p.shardCount)) token.Mul(token, shards) token.Rsh(token, 64) return uint(token.Uint64()) } // ShardCount returns the number of shards. func (p *Murmur3Partitioner) ShardCount() uint { return p.shardCount } // ShardOf returns shard the token belongs to. 
func (p *Murmur3Partitioner) ShardOf(t int64) uint { return p.zeroBasedShardOf(zeroBased(t)) } // PrevShard returns id of a previous shard in a round robin fashion. func (p *Murmur3Partitioner) PrevShard(shard uint) uint { prev := shard if prev == 0 { prev = p.ShardCount() } return prev - 1 } // TokenForPrevShard returns the start token for the shard -1. func (p *Murmur3Partitioner) TokenForPrevShard(t int64, shard uint) int64 { z := zeroBased(t) s := p.zeroBasedShardOf(z) if p.shardingIgnoreMsbBits == 0 { if shard > s { return Murmur3MinToken } return unzeroBased(p.shardStart[shard]) } l := z >> (64 - p.shardingIgnoreMsbBits) if shard > s { l-- } l <<= 64 - p.shardingIgnoreMsbBits return unzeroBased(l | p.shardStart[shard]) } func zeroBased(t int64) uint64 { mid := uint64(1 << 63) if t >= 0 { return mid + uint64(t) } return mid - uint64(-t) } func unzeroBased(z uint64) int64 { mid := uint64(1 << 63) if z >= mid { return int64(z - mid) } return -int64(mid - z) }
pkg/dht/murmur3partitioner.go
0.77081
0.413359
murmur3partitioner.go
starcoder
package cmd import ( "io" "github.com/minio/minio/pkg/bpool" ) // ReadFile reads as much data as requested from the file under the given volume and path and writes the data to the provided writer. // The algorithm and the keys/checksums are used to verify the integrity of the given file. ReadFile will read data from the given offset // up to the given length. If parts of the file are corrupted ReadFile tries to reconstruct the data. func (s ErasureStorage) ReadFile(writer io.Writer, volume, path string, offset, length int64, totalLength int64, checksums [][]byte, algorithm BitrotAlgorithm, blocksize int64, pool *bpool.BytePool) (f ErasureFileInfo, err error) { if offset < 0 || length < 0 { return f, traceError(errUnexpected) } if offset+length > totalLength { return f, traceError(errUnexpected) } if !algorithm.Available() { return f, traceError(errBitrotHashAlgoInvalid) } f.Checksums = make([][]byte, len(s.disks)) verifiers := make([]*BitrotVerifier, len(s.disks)) for i, disk := range s.disks { if disk == OfflineDisk { continue } verifiers[i] = NewBitrotVerifier(algorithm, checksums[i]) } errChans := make([]chan error, len(s.disks)) for i := range errChans { errChans[i] = make(chan error, 1) } lastBlock := totalLength / blocksize startOffset := offset % blocksize chunksize := getChunkSize(blocksize, s.dataBlocks) blocks := make([][]byte, len(s.disks)) for off := offset / blocksize; length > 0; off++ { blockOffset := off * chunksize pool.Reset() if currentBlock := (offset + f.Size) / blocksize; currentBlock == lastBlock { blocksize = totalLength % blocksize chunksize = getChunkSize(blocksize, s.dataBlocks) } err = s.readConcurrent(volume, path, blockOffset, chunksize, blocks, verifiers, errChans, pool) if err != nil { return f, traceError(errXLReadQuorum) } writeLength := blocksize - startOffset if length < writeLength { writeLength = length } n, err := writeDataBlocks(writer, blocks, s.dataBlocks, startOffset, writeLength) if err != nil { return f, err } 
startOffset = 0 f.Size += n length -= n } f.Algorithm = algorithm for i, disk := range s.disks { if disk == OfflineDisk { continue } f.Checksums[i] = verifiers[i].Sum(nil) } return f, nil } func erasureCountMissingBlocks(blocks [][]byte, limit int) int { missing := 0 for i := range blocks[:limit] { if blocks[i] == nil { missing++ } } return missing } // readConcurrent reads all requested data concurrently from the disks into blocks. It returns an error if // too many disks failed while reading. func (s *ErasureStorage) readConcurrent(volume, path string, offset int64, length int64, blocks [][]byte, verifiers []*BitrotVerifier, errChans []chan error, pool *bpool.BytePool) (err error) { errs := make([]error, len(s.disks)) for i := range blocks { blocks[i], err = pool.Get() if err != nil { return traceErrorf("failed to get new buffer from pool: %v", err) } blocks[i] = blocks[i][:length] } erasureReadBlocksConcurrent(s.disks[:s.dataBlocks], volume, path, offset, blocks[:s.dataBlocks], verifiers[:s.dataBlocks], errs[:s.dataBlocks], errChans[:s.dataBlocks]) missingDataBlocks := erasureCountMissingBlocks(blocks, s.dataBlocks) mustReconstruct := missingDataBlocks > 0 if mustReconstruct { requiredReads := s.dataBlocks + missingDataBlocks if requiredReads > s.dataBlocks+s.parityBlocks { return errXLReadQuorum } erasureReadBlocksConcurrent(s.disks[s.dataBlocks:requiredReads], volume, path, offset, blocks[s.dataBlocks:requiredReads], verifiers[s.dataBlocks:requiredReads], errs[s.dataBlocks:requiredReads], errChans[s.dataBlocks:requiredReads]) if erasureCountMissingBlocks(blocks, requiredReads) > 0 { erasureReadBlocksConcurrent(s.disks[requiredReads:], volume, path, offset, blocks[requiredReads:], verifiers[requiredReads:], errs[requiredReads:], errChans[requiredReads:]) } } if err = reduceReadQuorumErrs(errs, []error{}, s.dataBlocks); err != nil { return err } if mustReconstruct { if err = s.ErasureDecodeDataBlocks(blocks); err != nil { return err } } return nil } // 
erasureReadBlocksConcurrent reads all data from each disk to each data block in parallel. // Therefore disks, blocks, verifiers errors and locks must have the same length. func erasureReadBlocksConcurrent(disks []StorageAPI, volume, path string, offset int64, blocks [][]byte, verifiers []*BitrotVerifier, errors []error, errChans []chan error) { for i := range errChans { go erasureReadFromFile(disks[i], volume, path, offset, blocks[i], verifiers[i], errChans[i]) } for i := range errChans { errors[i] = <-errChans[i] // blocks until the go routine 'i' is done - no data race if errors[i] != nil { disks[i] = OfflineDisk blocks[i] = nil } } } // erasureReadFromFile reads data from the disk to buffer in parallel. // It sends the returned error through the error channel. func erasureReadFromFile(disk StorageAPI, volume, path string, offset int64, buffer []byte, verifier *BitrotVerifier, errChan chan<- error) { if disk == OfflineDisk { errChan <- traceError(errDiskNotFound) return } var err error if !verifier.IsVerified() { _, err = disk.ReadFileWithVerify(volume, path, offset, buffer, verifier) } else { _, err = disk.ReadFile(volume, path, offset, buffer) } errChan <- err }
cmd/erasure-readfile.go
0.501465
0.4184
erasure-readfile.go
starcoder
package main import ( "flag" "fmt" "math" "math/cmplx" "github.com/pointlander/datum/iris" ) const ( // LFSRMask is a LFSR mask with a maximum period LFSRMask = 0x80000057 // LFSRInit is an initial LFSR state LFSRInit = 0x55555555 ) var ( // FlagXOR xor flag FlagXOR = flag.Bool("xor", false, "xor mode") // FlagQXOR xor flag FlagQXOR = flag.Bool("qxor", false, "quantum xor mode") // FlagIRIS iris flag FlagIRIS = flag.Bool("iris", false, "iris mode") ) // Rand is a random number generator type Rand uint32 // Float32 returns a random float32 between 0 and 1 func (r *Rand) Float32() float32 { lfsr := *r if lfsr&1 == 1 { lfsr = (lfsr >> 1) ^ LFSRMask } else { lfsr = lfsr >> 1 } *r = lfsr return float32(lfsr) / ((1 << 32) - 1) } // Uint32 returns a random uint32 func (r *Rand) Uint32() uint32 { lfsr := *r if lfsr&1 == 1 { lfsr = (lfsr >> 1) ^ LFSRMask } else { lfsr = lfsr >> 1 } *r = lfsr return uint32(lfsr) } // Neuron is a neuron type Neuron struct { Inputs []float32 Output float32 Learn float32 Weights []float32 DWeights []float32 } // Layer is a neural network layer type Layer []Neuron // Layers is layers of networks type Layers []Layer // Inference computes the neuron func (n *Neuron) Inference() { var sum, sumWeight, sumInput float32 for i := range n.Weights { sum += n.Weights[i] * n.Inputs[i] sumWeight += n.Weights[i] sumInput += n.Inputs[i] } e := float32(math.Exp(float64(sum))) i := float32(math.Exp(float64(-sum))) n.Output = (e - i) / (e + i) sum -= sumWeight * sumInput e = float32(math.Exp(float64(sum))) i = float32(math.Exp(float64(-sum))) n.Learn = (e - i) / (e + i) } // QuantumNeuron is a quantum neuron type QuantumNeuron struct { Inputs []complex64 Output complex64 Learn complex64 Weights []complex64 DWeights []complex64 } // QuantumLayer is a quantum neural network layer type QuantumLayer []QuantumNeuron // QuantumLayers is layers of quantum networks type QuantumLayers []QuantumLayer // Inference computes the neuron func (n *QuantumNeuron) Inference() { 
var sum, sumWeight, sumInput complex64 for i := range n.Weights { sum += n.Weights[i] * n.Inputs[i] sumWeight += n.Weights[i] sumInput += n.Inputs[i] } e := complex64(cmplx.Exp(complex128(sum))) i := complex64(cmplx.Exp(complex128(-sum))) n.Output = (e - i) / (e + i) sum -= sumWeight * sumInput e = complex64(cmplx.Exp(complex128(sum))) i = complex64(cmplx.Exp(complex128(-sum))) n.Learn = (e - i) / (e + i) } func main() { flag.Parse() if *FlagXOR { XOR() return } else if *FlagQXOR { QXOR() return } else if *FlagIRIS { IRIS() return } } // XOR mode func XOR() { network := make([]Layer, 2) network[0] = make([]Neuron, 2) network[1] = make([]Neuron, 1) g, factor := Rand(LFSRInit), float32(math.Sqrt(2/float64(2))) for i := range network { for j := range network[i] { network[i][j].Inputs = make([]float32, 2) network[i][j].Weights = make([]float32, 2) network[i][j].DWeights = make([]float32, 2) for k := range network[i][j].Weights { network[i][j].Weights[k] = (2*g.Float32() - 1) * factor } } } xor := [][]float32{ []float32{-1, -1, -1}, []float32{1, -1, 1}, []float32{-1, 1, 1}, []float32{1, 1, -1}, } n := float32(.01) for i := 0; i < 16; i++ { cost := float32(0) for j := range xor { network[0][0].Inputs[0] = xor[j][0] network[0][0].Inputs[1] = xor[j][1] network[0][0].Inference() network[0][1].Inputs[0] = xor[j][0] network[0][1].Inputs[1] = xor[j][1] network[0][1].Inference() network[1][0].Inputs[0] = network[0][0].Output network[1][0].Inputs[1] = network[0][1].Output network[1][0].Inference() for k := range network { for l := range network[k] { for m := range network[k][l].Inputs { network[k][l].DWeights[m] -= n * network[k][l].Inputs[m] * network[k][l].Learn } } } diff := network[1][0].Output - xor[j][2] fmt.Println(network[1][0].Output, xor[j][2]) cost += diff * diff } for k := range network { for l := range network[k] { for m := range network[k][l].Inputs { network[k][l].Weights[m] += network[k][l].DWeights[m] network[k][l].DWeights[m] = 0 } } } fmt.Println(cost) } } // 
QXOR mode func QXOR() { network := make([]QuantumLayer, 2) network[0] = make([]QuantumNeuron, 2) network[1] = make([]QuantumNeuron, 1) g, factor := Rand(LFSRInit), float32(math.Sqrt(2/float64(2))) for i := range network { for j := range network[i] { network[i][j].Inputs = make([]complex64, 2) network[i][j].Weights = make([]complex64, 2) network[i][j].DWeights = make([]complex64, 2) for k := range network[i][j].Weights { network[i][j].Weights[k] = complex((2*g.Float32()-1)*factor, (2*g.Float32()-1)*factor) } } } xor := [][]complex64{ []complex64{-1, -1, -1}, []complex64{1, -1, 1}, []complex64{-1, 1, 1}, []complex64{1, 1, -1}, } n := complex64(.01) for i := 0; i < 16; i++ { cost := float32(0) for j := range xor { network[0][0].Inputs[0] = xor[j][0] network[0][0].Inputs[1] = xor[j][1] network[0][0].Inference() network[0][1].Inputs[0] = xor[j][0] network[0][1].Inputs[1] = xor[j][1] network[0][1].Inference() network[1][0].Inputs[0] = network[0][0].Output network[1][0].Inputs[1] = network[0][1].Output network[1][0].Inference() for k := range network { for l := range network[k] { for m := range network[k][l].Inputs { network[k][l].DWeights[m] -= n * network[k][l].Inputs[m] * network[k][l].Learn } } } diff := float32(cmplx.Abs(complex128(network[1][0].Output - xor[j][2]))) fmt.Println(network[1][0].Output, xor[j][2]) cost += diff * diff } for k := range network { for l := range network[k] { for m := range network[k][l].Inputs { network[k][l].Weights[m] += network[k][l].DWeights[m] network[k][l].DWeights[m] = 0 } } } fmt.Println(cost) } } // IRIS mode func IRIS() { network := make([]Layer, 2) network[0] = make([]Neuron, 4) network[1] = make([]Neuron, 4) g, factor := Rand(LFSRInit), float32(math.Sqrt(2/float64(2))) for i := range network { for j := range network[i] { network[i][j].Inputs = make([]float32, 4) network[i][j].Weights = make([]float32, 4) network[i][j].DWeights = make([]float32, 4) for k := range network[i][j].Weights { network[i][j].Weights[k] = (2*g.Float32() - 
1) * factor } } } datum, err := iris.Load() if err != nil { panic(err) } n := float32(.5) for i := 0; i < 1; i++ { for _, flower := range datum.Fisher { for neuron := range network[0] { for i, value := range flower.Measures { network[0][neuron].Inputs[i] = float32(value) } network[0][neuron].Inference() } for neuron := range network[1] { network[1][neuron].Inputs[0] = network[0][0].Output network[1][neuron].Inputs[1] = network[0][1].Output network[1][neuron].Inputs[2] = network[0][2].Output network[1][neuron].Inputs[3] = network[0][3].Output network[1][neuron].Inference() } for k := range network { for l := range network[k] { for m := range network[k][l].Inputs { network[k][l].DWeights[m] -= n * network[k][l].Inputs[m] * network[k][l].Learn } } } fmt.Println(iris.Labels[flower.Label], network[1][0].Output, network[1][1].Output, network[1][2].Output, network[1][3].Output) } for k := range network { for l := range network[k] { for m := range network[k][l].Inputs { network[k][l].Weights[m] += network[k][l].DWeights[m] network[k][l].DWeights[m] = 0 } } } } }
main.go
0.59749
0.439567
main.go
starcoder
package model /* we deviate from the Constructor convention (New...) by intention, to enable concise expressions like a := And(Equals(Variable(7), Variable(6)), Equals(Variable(5), Variable(5))) */ type Expression interface { Interprete(map[string]Expression) bool //Interprete(Map<String,Expression> variables) bool } type VariableExpression struct { Value bool } func Variable(value bool) *VariableExpression { return &VariableExpression{Value: value} } func (s *VariableExpression) Interprete(variables map[string]Expression) bool { return s.Value } type EqualsExpression struct { leftOperand Expression rightOperand Expression } func Equals(leftOperand Expression, rightOperand Expression) *EqualsExpression { return &EqualsExpression{leftOperand: leftOperand, rightOperand: rightOperand} } func (s *EqualsExpression) Interprete(variables map[string]Expression) bool { return s.leftOperand.Interprete(variables) == s.rightOperand.Interprete(variables) } type OrExpression struct { leftOperand Expression rightOperand Expression } func Or(leftOperand Expression, rightOperand Expression) *OrExpression { return &OrExpression{leftOperand: leftOperand, rightOperand: rightOperand} } func (s *OrExpression) Interprete(variables map[string]Expression) bool { return s.leftOperand.Interprete(variables) || s.rightOperand.Interprete(variables) } type AndExpression struct { leftOperand Expression rightOperand Expression } func And(leftOperand Expression, rightOperand Expression) *AndExpression { return &AndExpression{leftOperand: leftOperand, rightOperand: rightOperand} } func (s *AndExpression) Interprete(variables map[string]Expression) bool { return s.leftOperand.Interprete(variables) && s.rightOperand.Interprete(variables) } type XorExpression struct { leftOperand Expression rightOperand Expression } func Xor(leftOperand Expression, rightOperand Expression) *XorExpression { return &XorExpression{leftOperand: leftOperand, rightOperand: rightOperand} } func (s *XorExpression) 
Interprete(variables map[string]Expression) bool { return s.leftOperand.Interprete(variables) != s.rightOperand.Interprete(variables) } type NotExpression struct { operand Expression } func Not(operand Expression) *NotExpression { return &NotExpression{operand: operand} } func (s *NotExpression) Interprete(variables map[string]Expression) bool { return !s.operand.Interprete(variables) }
model/interpreter.go
0.808483
0.683183
interpreter.go
starcoder
package renderer import ( "math" "math/rand" ) type PathTracer struct { } var defaultColor Vector3 = NewVector3(0, 0, 0) var M_PI float64 = math.Pi var M_1_PI float64 = 1. / M_PI var nbSamples int = 4 func (r PathTracer) Sample(x, y uint, camera Camera, scene Scene, options RenderingOptions, rnd *rand.Rand) Vector3 { // See: https://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-generating-camera-rays/generating-camera-rays w := float64(options.Width) h := float64(options.Height) cam := NewRay(camera.position, camera.direction) aspectRatio := w / h scale := math.Tan(0.5 * degToRad(options.Fov)) pixelColor := NewVector3(0, 0, 0) for sy := 0; sy < 2; sy++ { for sx := 0; sx < 2; sx++ { acc := NewVector3(0, 0, 0) for subSample := 0; subSample < nbSamples; subSample++ { r1 := 2. * rand.Float64() dx := ternaryFloat64(r1 < 1, math.Sqrt(r1)-1., 1.-math.Sqrt(2.-r1)) r2 := 2. * rand.Float64() dy := ternaryFloat64(r2 < 1, math.Sqrt(r2)-1., 1.-math.Sqrt(2.-r2)) // Normalized Device Coordinates ([0,1]) // We add 0.5 because we want to pass through the center of the pixel, not the top left corner pixelNdcX := ((dx+float64(sx))/2. + float64(x) + 0.5) / w pixelNdcY := ((dy+float64(sy))/2. + float64(y) + 0.5) / h // Screen space ([-1,1]) pixelScreenX := 2*pixelNdcX - 1 pixelScreenY := 1 - 2*pixelNdcY // We want the Y axis to go UP, not DOWN so we "inverse" it // Camera space (Applying aspect ratio, scale and camera transform) pixelCameraX := pixelScreenX * aspectRatio * scale pixelCameraY := pixelScreenY * scale pixelCameraSpace := camera.cameraToWorld.MultDirection(NewVector3(pixelCameraX, pixelCameraY, -1)).Normalize() // Compute color for that pixel radiance := r.radiance(NewRay(cam.Origin, pixelCameraSpace), scene, options, 0, rnd, 1) acc = acc.Add(radiance.MulScalar(1. 
/ float64(nbSamples))) } // Sum up pixel color pixelColor = pixelColor.Add(NewVector3(clamp(acc.X), clamp(acc.Y), clamp(acc.Z)).MulScalar(.25)) } } pixelColor.X = gammaCorrection(pixelColor.X) pixelColor.Y = gammaCorrection(pixelColor.Y) pixelColor.Z = gammaCorrection(pixelColor.Z) return pixelColor } func (r PathTracer) intersect(ray Ray, scene Scene, ignoreLights bool) (hit Hit, index int) { tnear := math.MaxFloat64 collisionIndex := -1 var nearestHit = Hit{Valid: false} // Compute nearest intersection for i := 0; i < len(scene.Objects); i++ { obj := scene.Objects[i] if ignoreLights && obj.Material().EmissionColor != Vector3Zero { continue } hit := scene.Objects[i].Intersects(ray) if hit.Valid { if hit.Distance < tnear { tnear = hit.Distance collisionIndex = i nearestHit = hit } } } return nearestHit, collisionIndex } func (r PathTracer) radiance(ray Ray, scene Scene, options RenderingOptions, depth uint, rnd *rand.Rand, E float64) Vector3 { depth = depth + 1 // We don't want to draw the light spheres ignoreLights := false if depth == 1 { ignoreLights = true } nearestHit, collisionIndex := r.intersect(ray, scene, ignoreLights) if collisionIndex == -1 { return defaultColor } collidingObject := scene.Objects[collisionIndex] material := collidingObject.Material() objectColor := material.Color // Intersection point phit := nearestHit.Position // Normal at intersection nhit := nearestHit.Normal nhitCleaned := nhit // Inside the sphere if nhit.Dot(ray.Direction) >= 0 { nhitCleaned = nhit.MulScalar(-1) } // Russian Roulette p := objectColor.Z if objectColor.X > objectColor.Y && objectColor.X > objectColor.Z { p = objectColor.X } else if objectColor.Y > objectColor.Z { p = objectColor.Y } if depth > 5 || p == 0 { if rand.Float64() < p { objectColor = objectColor.MulScalar(1 / p) } else { return material.EmissionColor.MulScalar(E) } } // Pure diffuse material if material.Reflectivity == 0 && material.Transparency == 0 { r1 := 2. 
* M_PI * rand.Float64() r2 := rand.Float64() r2s := math.Sqrt(r2) // Create orthonormal coordinate frame (w,u,v) w := nhitCleaned u := NewVector3(1, 1, 1) if math.Abs(w.X) > .1 { u = NewVector3(0, 1, 0) } u = u.Cross(w).Normalize() v := w.Cross(u).Normalize() d1 := u.MulScalar(math.Cos(r1) * r2s) d2 := v.MulScalar(math.Sin(r1) * r2s) d3 := w.MulScalar(math.Sqrt(1 - r2)) // Random reflection ray d := d1.Add(d2).Add(d3).Normalize() e := NewVector3(0, 0, 0) for i := 0; i < len(scene.Objects); i++ { if scene.Objects[i].Material().EmissionColor == Vector3Zero { continue } light := scene.Objects[i] lightMaterial := light.Material() //lightDistance := phit.DistanceTo(light.Position()) sw := light.Position().Sub(phit) su := NewVector3(1, 1, 1) if math.Abs(sw.X) > .1 { su = NewVector3(0, 1, 0) } su = su.Cross(sw).Normalize() sv := sw.Cross(su).Normalize() p := phit.Sub(light.Position()) rad := 1.5 // TODO: ? cos_a_max := math.Sqrt(1 - (rad*rad)/p.Dot(p)) eps1 := rand.Float64() eps2 := rand.Float64() cos_a := 1 - eps1 + eps1*cos_a_max sin_a := math.Sqrt(1 - cos_a*cos_a) phi := 2 * M_PI * eps2 l1 := su.MulScalar(math.Cos(phi) * sin_a) l2 := sv.MulScalar(math.Sin(phi) * sin_a) l3 := sw.MulScalar(cos_a) lightDirection := l1.Add(l2).Add(l3).Normalize() lightRay := NewRay(phit, lightDirection) collision, index := r.intersect(lightRay, scene, false) // The only collision was the light itself if collision.Valid && index == i { omega := 2 * M_PI * (1 - cos_a_max) e = e.Add(objectColor.Mul(lightMaterial.EmissionColor.MulScalar(lightDirection.Dot(nhitCleaned) * omega)).MulScalar(M_1_PI)) } } return material.EmissionColor.MulScalar(E). Add(e). Add(objectColor.Mul(r.radiance(NewRay(phit, d), scene, options, depth, rnd, 0))) } reflectionDirection := ray.Direction.Sub(nhit.MulScalar(2 * nhit.Dot(ray.Direction))) reflectionRay := NewRay(phit, reflectionDirection) // Specular reflection if material.Transparency == 0 { return material.EmissionColor. 
Add(objectColor.Mul(r.radiance(reflectionRay, scene, options, depth, rnd, 1))) } // Reflection + Refraction (dielectric (glass)) into := nhit.Dot(nhitCleaned) > 0 nc := 1. nt := 1.5 nnt := ternaryFloat64(into, nc/nt, nt/nc) ddn := ray.Direction.Dot(nhitCleaned) cost2t := 1 - nnt*nnt*(1-ddn*ddn) // Total internal reflection if cost2t < 0 { return material.EmissionColor. Add(objectColor.Mul(r.radiance(reflectionRay, scene, options, depth, rnd, 1))) } // Choose reflection or refraction coeff := ternaryFloat64(into, 1, -1) tdir1 := ray.Direction.MulScalar(nnt) tdir2 := nhit.MulScalar(coeff * (ddn * nnt * math.Sqrt(cost2t))) tdir := tdir1.Sub(tdir2).Normalize() a := nt - nc b := nt + nc c := 1 - ternaryFloat64(into, -ddn, tdir.Dot(nhit)) R0 := a * a / (b * b) Re := R0 + (1-R0)*c*c*c*c*c Tr := 1 - Re P := .25 + .5*Re RP := Re / P TP := Tr / (1 - P) // TODO ? colorDelta := Vector3Zero if depth > 2 { // Russian Roulette if rand.Float64() < P { colorDelta = r.radiance(reflectionRay, scene, options, depth, rnd, 1).MulScalar(RP) } else { colorDelta = r.radiance(NewRay(phit, tdir), scene, options, depth, rnd, 1).MulScalar(TP) } } else { c1 := r.radiance(reflectionRay, scene, options, depth, rnd, 1).MulScalar(Re) c2 := r.radiance(NewRay(phit, tdir), scene, options, depth, rnd, 1).MulScalar(Tr) colorDelta = c1.Add(c2) } return material.EmissionColor.Add(material.Color.Mul(colorDelta)) } func clamp(x float64) float64 { if x < 0 { return 0 } if x > 1 { return 1 } return x } func gammaCorrection(x float64) float64 { // Gamma correction of 2.2 return math.Pow(clamp(x), 1./2.2)*255. + .5 } func ternaryFloat64(condition bool, a, b float64) float64 { if condition { return a } return b }
renderer/pathtracer.go
0.790288
0.551393
pathtracer.go
starcoder
package math type Box3 struct { min Vector3 max Vector3 } // Equivalent to makeEmpty func NewDefaultBox3() *Box3 { return NewBox3( NewVector3Inf(1), NewVector3Inf(-1), ) } func NewBox3(min *Vector3, max *Vector3) *Box3 { return &Box3{ min: Vector3{Vector2: Vector2{X: min.X, Y: min.Y}, Z: min.Z}, max: Vector3{Vector2: Vector2{X: max.X, Y: max.Y}, Z: max.Z}, } } func NewBox3FromComponents(minX, minY, minZ, maxX, maxY, maxZ float32) *Box3 { return &Box3{ min: Vector3{Vector2: Vector2{X: minX, Y: minY}, Z: minZ}, max: Vector3{Vector2: Vector2{X: maxX, Y: maxY}, Z: maxZ}, } } func NewBox3FromPoints(points []*Vector3) *Box3 { box := NewDefaultBox3() box.SetFromPoints(points) return box } func NewBox3FromCenterAndSize(center *Vector3, size *Vector3) *Box3 { halfSize := size.Clone() halfSize.MultiplyScalar(0.5) nb := &Box3{ min: *center.Clone(), max: *center.Clone(), } nb.min.Sub(halfSize) nb.max.Add(halfSize) return nb } func (box *Box3) Clone() *Box3 { return &Box3{ min: *box.min.Clone(), max: *box.max.Clone(), } } func (box *Box3) Copy(source *Box3) { box.min.Copy(&source.min) box.max.Copy(&source.max) } func (box *Box3) SetFromPoints(points []*Vector3) { for i := 0; i < len(points); i++ { box.ExpandByPoint(points[i]) } } func (box *Box3) IsEmpty() bool { return box.max.X < box.min.X || box.max.Y < box.min.Y || box.max.Z < box.min.Z } func (box *Box3) GetCenter() *Vector3 { if box.IsEmpty() { return NewVector3(0, 0, 0) } else { c := NewDefaultVector3() c.SetAddVectors(&box.min, &box.max) c.MultiplyScalar(0.5) return c } } func (box *Box3) GetSize() *Vector3 { if box.IsEmpty() { return NewVector3(0, 0, 0) } else { c := NewDefaultVector3() c.SetSubVectors(&box.min, &box.max) return c } } func (box *Box3) ExpandByPoint(point *Vector3) { box.min.Min(point) box.max.Max(point) } func (box *Box3) ExpandByVector(vector *Vector3) { box.min.Sub(vector) box.max.Add(vector) } func (box *Box3) ExpandByScalar(scalar float32) { box.min.SubScalar(scalar) box.max.AddScalar(scalar) } 
func (box *Box3) ContainsPoint(point *Vector3) bool { return !(point.X < box.min.X || point.X > box.max.X || point.Y < box.min.Y || point.Y > box.max.Y || point.Z < box.min.Z || point.Z > box.max.Z) } func (box *Box3) ContainsBox(b *Box3) bool { return box.min.X <= b.min.X && b.max.X <= box.max.X && box.min.Y <= b.min.Y && b.max.Y <= box.max.Y && box.min.Z <= b.min.Z && b.max.Z <= box.max.Z } func (box *Box3) IntersectsBox(b *Box3) bool { return !(b.max.X < box.min.X || b.min.X > box.max.X || b.max.Y < box.min.Y || b.min.Y > box.max.Y || b.max.Z < box.min.Z || b.min.Z > box.max.Z) } func (box *Box3) ClampPoint(point *Vector3) *Vector3 { target := NewDefaultVector3() target.Copy(point) target.Clamp(&box.min, &box.max) return target } func (box *Box3) DistanceToPoint(point *Vector3) float32 { v1 := point.Clone() v1.Clamp(&box.min, &box.max) v1.Sub(point) return v1.GetLength() } func (box *Box3) Intersect(b *Box3) { box.min.Max(&b.min) box.max.Min(&b.max) } func (box *Box3) Union(b *Box3) { box.min.Min(&b.min) box.max.Max(&b.max) } func (box *Box3) Translate(offset *Vector3) { box.min.Add(offset) box.max.Add(offset) } func (box *Box3) Equals(b *Box3) bool { return box.min.Equals(&b.min) && box.max.Equals(&b.max) }
box3.go
0.864839
0.593315
box3.go
starcoder
package jsonassert import ( "fmt" ) // Printer is any type that has a testing.T-like Errorf function. // You probably want to pass in a *testing.T instance here if you are using // this in your tests. type Printer interface { Errorf(msg string, args ...interface{}) } // Asserter represents the main type within the jsonassert package. // See Asserter.Assertf for the main use of this package. type Asserter struct { tt } /* New creates a new *jsonassert.Asserter for making assertions against JSON payloads. This type can be reused. I.e. if you are using jsonassert as part of your tests, you only need one *jsonassert.Asseter per (sub)test. In most cases, this will look something like ja := jsonassert.New(t) */ func New(p Printer) *Asserter { // Initially this package was written without the assumption that the // provided Printer will implement testing.tt, which includes the Helper() // function to get better stacktraces in your testing utility functions. // This assumption was later added in order to get more accurate stackframe // information in test failures. In most cases users will pass in a // *testing.T to this function, which does adhere to that interface. // However, in order to be backwards compatible we also permit the use of // printers that do not implement Helper(). This is done by wrapping the // provided Printer into another struct that implements a NOOP Helper // method. if t, ok := p.(tt); ok { return &Asserter{tt: t} } return &Asserter{tt: &noopHelperTT{Printer: p}} } /* Assertf takes two strings, the first being the 'actual' JSON that you wish to make assertions against. The second string is the 'expected' JSON, which can be treated as a template for additional format arguments. If any discrepancies are found, these will be given to the Errorf function in the Printer. E.g. for the JSON {"hello": "world"} you may use an expected JSON of {"hello": "%s"} along with the "world" format argument. 
For example: ja.Assertf(`{"hello": "world"}`, `{"hello":"%s"}`, "world") You may also use format arguments in the case when your expected JSON contains a percent character, which would otherwise be interpreted as a format-directive. ja.Assertf(`{"averageTestScore": "99%"}`, `{"averageTestScore":"%s"}`, "99%") You may wish to make assertions against the *presence* of a value, but not against its value. For example: ja.Assertf(`{"uuid": "94ae1a31-63b2-4a55-a478-47764b60c56b"}`, `{"uuid":"<<PRESENCE>>"}`) will verify that the UUID field is present, but does not check its actual value. You may use "<<PRESENCE>>" against any type of value. The only exception is null, which will result in an assertion failure. If you don't know / care about the order of the elements in an array in your payload, you can ignore the ordering: payload := `["bar", "foo", "baz"]` ja.Assertf(payload, `["<<UNORDERED>>", "foo", "bar", "baz"]`) The above will verify that "foo", "bar", and "baz" are exactly the elements in the payload, but will ignore the order in which they appear. */ func (a *Asserter) Assertf(actualJSON, expectedJSON string, fmtArgs ...interface{}) { a.tt.Helper() a.pathassertf("$", actualJSON, fmt.Sprintf(expectedJSON, fmtArgs...)) }
exports.go
0.728941
0.471223
exports.go
starcoder
package main import ( "fmt" "math" "math/bits" "sort" "github.com/pointlander/datum/iris" ) // SharedLayer is a neural network layer with shared weights type SharedLayer struct { Rows int Columns int Weights []float32 Rand Rand } // SharedNetwork is a neural network with shared weights type SharedNetwork []SharedLayer // Inference performs inference on a neural network func (n SharedNetwork) Inference(inputs, outputs []float32) { last := len(n) - 1 for i, layer := range n { rnd := layer.Rand columns := len(outputs) if i < len(n)-1 { columns = n[i+1].Columns } mask, values := uint32((1<<bits.TrailingZeros(uint(len(layer.Weights))))-1), make([]float32, columns) for j := 0; j < layer.Rows; j++ { sum := layer.Weights[rnd.Uint32()&mask] for k := 0; k < layer.Columns; k++ { sum += inputs[k] * layer.Weights[rnd.Uint32()&mask] } e := float32(math.Exp(float64(sum))) values[j] = e / (e + 1) } if i == last { copy(outputs, values) } else { inputs = values } } } // Copy copies a network func (n SharedNetwork) Copy() SharedNetwork { var network SharedNetwork for _, layer := range n { l := SharedLayer{ Rows: layer.Rows, Columns: layer.Columns, Weights: make([]float32, len(layer.Weights)), Rand: layer.Rand, } copy(l.Weights, layer.Weights) network = append(network, l) } return network } // SharedNetworkModel is the real network with shared weights func SharedNetworkModel(seed int) float64 { rnd := Rand(LFSRInit + seed) type Genome struct { Network SharedNetwork Fitness float32 } var genomes []Genome addNetwork := func(i int) { var network SharedNetwork layer := SharedLayer{ Rows: 4, Columns: 4, Weights: make([]float32, 4), Rand: Rand(LFSRInit + i + seed + NumGenomes), } factor := float32(math.Sqrt(2 / float64(4))) for i := range layer.Weights { layer.Weights[i] = (2*rnd.Float32() - 1) * factor } network = append(network, layer) layer = SharedLayer{ Rows: 3, Columns: 4, Weights: make([]float32, 4), Rand: Rand(LFSRInit + i + seed + 2*NumGenomes), } factor = float32(math.Sqrt(2 / 
float64(3))) for i := range layer.Weights { layer.Weights[i] = (2*rnd.Float32() - 1) * factor } network = append(network, layer) genomes = append(genomes, Genome{ Network: network, }) } for i := 0; i < NumGenomes; i++ { addNetwork(i) } datum, err := iris.Load() if err != nil { panic(err) } inputs, outputs := make([]float32, 4), make([]float32, 3) get := func() int { for { for i, genome := range genomes { if rnd.Float32() > genome.Fitness { return i } } } } i := 0 for { for j, genome := range genomes { sum := float32(0) for _, flower := range datum.Fisher { for k, value := range flower.Measures { inputs[k] = float32(value) } genome.Network.Inference(inputs, outputs) expected := make([]float32, 3) expected[iris.Labels[flower.Label]] = 1 loss := float32(0) for l, output := range outputs { diff := expected[l] - output loss += diff * diff } loss = float32(math.Sqrt(float64(loss))) sum += loss } sum /= float32(len(datum.Fisher)) * float32(math.Sqrt(3)) genomes[j].Fitness = sum } sort.Slice(genomes, func(i, j int) bool { if math.IsNaN(float64(genomes[i].Fitness)) { return false } if math.IsNaN(float64(genomes[j].Fitness)) { return true } return genomes[i].Fitness < genomes[j].Fitness }) genomes = genomes[:NumGenomes] i++ if i > 127 { break } for i := 0; i < 256; i++ { a, b := get(), get() layer, valueA, valueB := rnd.Uint32()&1, rnd.Uint32()&3, rnd.Uint32()&3 networkA, networkB := genomes[a].Network.Copy(), genomes[b].Network.Copy() layerA, layerB := networkA[layer], networkB[layer] if layer == 1 { for valueA > 2 { valueA = rnd.Uint32() & 3 } for valueB > 2 { valueB = rnd.Uint32() & 3 } } layerA.Weights[valueA], layerB.Weights[valueB] = layerB.Weights[valueB], layerA.Weights[valueA] genomes = append(genomes, Genome{ Network: networkA, }) genomes = append(genomes, Genome{ Network: networkB, }) } for i := 0; i < NumGenomes; i++ { layer, value := rnd.Uint32()&1, rnd.Uint32()&3 network := genomes[i].Network.Copy() l := network[layer] if layer == 1 { for value > 2 { value = 
rnd.Uint32() & 3 } } l.Weights[value] += ((2 * rnd.Float32()) - 1) genomes = append(genomes, Genome{ Network: network, }) } } network := genomes[0].Network misses, total := 0, 0 for _, flower := range datum.Fisher { for k, value := range flower.Measures { inputs[k] = float32(value) } network.Inference(inputs, outputs) max, index := float32(0), 0 for j, output := range outputs { if output > max { max, index = output, j } } if index != iris.Labels[flower.Label] { misses++ } total++ } quality := float64(misses) / float64(total) fmt.Println(genomes[0].Fitness, quality) return quality }
shared.go
0.600657
0.426979
shared.go
starcoder
package multicrc import ( "sync" ) //Params describes the parameters of a CRC. It also contains a table that is calculated on first use, //therefore it is best to share the params as much as possible type Params struct { Len uint Name string Polynomial uint64 ReflectInput bool ReflectOutput bool InitialValue uint64 FinalXOR uint64 tableLock sync.Mutex table interface{} } func makeMask(len uint) uint64 { return (uint64(1)<<(len) - 1) } func polynomialDivision(polynomial uint64, input uint64, len uint) uint64 { mask := makeMask(len) for i := uint(0); i < len; i++ { bitOut := input>>(len-1) > 0 input <<= 1 if bitOut { input ^= polynomial } input &= mask } return input } func (params *Params) makeTable() { params.tableLock.Lock() defer params.tableLock.Unlock() if params.table != nil { return } /* Sadly, no generics :( */ if params.Len == 0 { } else if params.Len <= 8 { table := make([]uint8, 256) for i := 0; i < 256; i++ { table[i] = uint8(polynomialDivision(params.Polynomial, uint64(i), params.Len)) } params.table = table } else if params.Len <= 16 { table := make([]uint16, 256) for i := 0; i < 256; i++ { table[i] = uint16(polynomialDivision(params.Polynomial, uint64(i), params.Len)) } params.table = table } else if params.Len <= 16 { table := make([]uint32, 256) for i := 0; i < 256; i++ { table[i] = uint32(polynomialDivision(params.Polynomial, uint64(i), params.Len)) } params.table = table } else if params.Len <= 64 { table := make([]uint64, 256) for i := 0; i < 256; i++ { table[i] = uint64(polynomialDivision(params.Polynomial, uint64(i), params.Len)) } params.table = table } else { panic("CRC length too long") } } func (params *Params) updateCRC(shiftReg uint64, input []uint8) uint64 { if params.Len == 0 { return 0 } for _, m := range input { if params.ReflectInput { m = reflectByte(m) } var tableIndex uint8 if params.Len >= 8 { tableIndex = uint8(shiftReg>>(params.Len-8)) ^ m } else { tableIndex = uint8(shiftReg<<(8-params.Len)) ^ m } switch t := 
params.table.(type) { case []uint8: shiftReg = uint64(t[tableIndex]) case []uint16: shiftReg = (shiftReg << 8) ^ uint64(t[tableIndex]) case []uint32: shiftReg = (shiftReg << 8) ^ uint64(t[tableIndex]) case []uint64: shiftReg = (shiftReg << 8) ^ t[tableIndex] default: panic("Wrong type of table") } } mask := makeMask(params.Len) return shiftReg & mask } func (params *Params) finalizeCRC(shiftReg uint64) uint64 { if params.Len == 0 { return 0 } if params.ReflectOutput { shiftReg = reflectWithLen(shiftReg, params.Len) } return shiftReg ^ params.FinalXOR }
multicrc/core.go
0.573798
0.404096
core.go
starcoder
package goassessment // SLICES // For those functions that take a slice as input and return a slice, // you can either modify the input slice or make a copy for the return // the tests don't require that the input slice is not modified // write a function that returns the index of an item in a slice func indexOf(a []int, item int) int { return -1 } // write a function that sums the values in a slice func sum(a []int) int { return -1 } // write a function that removes all instances of a value from a slice func remove(a []int, item int) []int { return nil } // write a function that returns the value of the first element in a slice (wihtout removing it) func front(a []int) int { return -1 } // write a function that returns the value of the last element in a slice (wihtout removing it) func back(a []int) int { return -1 } // write a function that adds an item to the end of a slice func pushBack(a []int, item int) []int { return nil } // write a function that removes an item to the end of a slice func popBack(a []int) []int { return nil } // write a function that adds an item to the front of a slice func pushFront(a []int, item int) []int { return nil } // write a function that removes an item from the front of a slice func popFront(a []int) []int { return nil } // write a function that concatenates two slices func concat(a []int, b []int) []int { return nil } // write a function that adds an item to a slice at the specified index func insert(a []int, item int, index int) []int { return nil } // write a function that returns a count of matching items in a slice func count(a []int, item int) int { return -1 } // write a function that finds duplicates in a slice func duplicates(a []int) []int { return nil } // write a function that sqaures all items in a slice func square(a []int) []int { return nil } // write a function that returns all the indices in a slice that matches an item func findAllOccurrences(a []int, item int) []int { return nil }
app/slices.go
0.815196
0.424173
slices.go
starcoder
package wasmlib import ( "encoding/binary" "strconv" ) type ScImmutableAddress struct { objId int32 keyId Key32 } func (o ScImmutableAddress) Exists() bool { return Exists(o.objId, o.keyId, TYPE_ADDRESS) } func (o ScImmutableAddress) String() string { return o.Value().String() } func (o ScImmutableAddress) Value() *ScAddress { return NewScAddressFromBytes(GetBytes(o.objId, o.keyId, TYPE_ADDRESS)) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableAddressArray struct { objId int32 } func (o ScImmutableAddressArray) GetAddress(index int32) ScImmutableAddress { return ScImmutableAddress{objId: o.objId, keyId: Key32(index)} } func (o ScImmutableAddressArray) Length() int32 { return GetLength(o.objId) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableAgentId struct { objId int32 keyId Key32 } func (o ScImmutableAgentId) Exists() bool { return Exists(o.objId, o.keyId, TYPE_AGENT_ID) } func (o ScImmutableAgentId) String() string { return o.Value().String() } func (o ScImmutableAgentId) Value() *ScAgentId { return NewScAgentIdFromBytes(GetBytes(o.objId, o.keyId, TYPE_AGENT_ID)) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableAgentArray struct { objId int32 } func (o ScImmutableAgentArray) GetAgentId(index int32) ScImmutableAgentId { return ScImmutableAgentId{objId: o.objId, keyId: Key32(index)} } func (o ScImmutableAgentArray) Length() int32 { return GetLength(o.objId) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableBytes struct { objId int32 keyId Key32 } func (o ScImmutableBytes) Exists() bool { return Exists(o.objId, o.keyId, TYPE_BYTES) } func (o ScImmutableBytes) String() string { return base58Encode(o.Value()) } func (o ScImmutableBytes) Value() []byte { return GetBytes(o.objId, o.keyId, TYPE_BYTES) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type 
ScImmutableBytesArray struct { objId int32 } func (o ScImmutableBytesArray) GetBytes(index int32) ScImmutableBytes { return ScImmutableBytes{objId: o.objId, keyId: Key32(index)} } func (o ScImmutableBytesArray) Length() int32 { return GetLength(o.objId) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableChainId struct { objId int32 keyId Key32 } func (o ScImmutableChainId) Exists() bool { return Exists(o.objId, o.keyId, TYPE_CHAIN_ID) } func (o ScImmutableChainId) String() string { return o.Value().String() } func (o ScImmutableChainId) Value() *ScChainId { return NewScChainIdFromBytes(GetBytes(o.objId, o.keyId, TYPE_CHAIN_ID)) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableColor struct { objId int32 keyId Key32 } func (o ScImmutableColor) Exists() bool { return Exists(o.objId, o.keyId, TYPE_COLOR) } func (o ScImmutableColor) String() string { return o.Value().String() } func (o ScImmutableColor) Value() *ScColor { return NewScColorFromBytes(GetBytes(o.objId, o.keyId, TYPE_COLOR)) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableColorArray struct { objId int32 } func (o ScImmutableColorArray) GetColor(index int32) ScImmutableColor { return ScImmutableColor{objId: o.objId, keyId: Key32(index)} } func (o ScImmutableColorArray) Length() int32 { return GetLength(o.objId) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableContractId struct { objId int32 keyId Key32 } func (o ScImmutableContractId) Exists() bool { return Exists(o.objId, o.keyId, TYPE_CONTRACT_ID) } func (o ScImmutableContractId) String() string { return o.Value().String() } func (o ScImmutableContractId) Value() *ScContractId { return NewScContractIdFromBytes(GetBytes(o.objId, o.keyId, TYPE_CONTRACT_ID)) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableHash struct { objId 
int32 keyId Key32 } func (o ScImmutableHash) Exists() bool { return Exists(o.objId, o.keyId, TYPE_HASH) } func (o ScImmutableHash) String() string { return o.Value().String() } func (o ScImmutableHash) Value() *ScHash { return NewScHashFromBytes(GetBytes(o.objId, o.keyId, TYPE_HASH)) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableHashArray struct { objId int32 } func (o ScImmutableHashArray) GetHash(index int32) ScImmutableHash { return ScImmutableHash{objId: o.objId, keyId: Key32(index)} } func (o ScImmutableHashArray) Length() int32 { return GetLength(o.objId) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableHname struct { objId int32 keyId Key32 } func (o ScImmutableHname) Exists() bool { return Exists(o.objId, o.keyId, TYPE_HNAME) } func (o ScImmutableHname) String() string { return o.Value().String() } func (o ScImmutableHname) Value() ScHname { return NewScHnameFromBytes(GetBytes(o.objId, o.keyId, TYPE_HNAME)) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableInt struct { objId int32 keyId Key32 } func (o ScImmutableInt) Exists() bool { return Exists(o.objId, o.keyId, TYPE_INT) } func (o ScImmutableInt) String() string { return strconv.FormatInt(o.Value(), 10) } func (o ScImmutableInt) Value() int64 { bytes := GetBytes(o.objId, o.keyId, TYPE_INT) return int64(binary.LittleEndian.Uint64(bytes)) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableIntArray struct { objId int32 } func (o ScImmutableIntArray) GetInt(index int32) ScImmutableInt { return ScImmutableInt{objId: o.objId, keyId: Key32(index)} } func (o ScImmutableIntArray) Length() int32 { return GetLength(o.objId) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableMap struct { objId int32 } func (o ScImmutableMap) GetAddress(key MapKey) ScImmutableAddress { return 
ScImmutableAddress{objId: o.objId, keyId: key.KeyId()} } func (o ScImmutableMap) GetAddressArray(key MapKey) ScImmutableAddressArray { arrId := GetObjectId(o.objId, key.KeyId(), TYPE_ADDRESS|TYPE_ARRAY) return ScImmutableAddressArray{objId: arrId} } func (o ScImmutableMap) GetAgentId(key MapKey) ScImmutableAgentId { return ScImmutableAgentId{objId: o.objId, keyId: key.KeyId()} } func (o ScImmutableMap) GetAgentIdArray(key MapKey) ScImmutableAgentArray { arrId := GetObjectId(o.objId, key.KeyId(), TYPE_AGENT_ID|TYPE_ARRAY) return ScImmutableAgentArray{objId: arrId} } func (o ScImmutableMap) GetBytes(key MapKey) ScImmutableBytes { return ScImmutableBytes{objId: o.objId, keyId: key.KeyId()} } func (o ScImmutableMap) GetBytesArray(key MapKey) ScImmutableBytesArray { arrId := GetObjectId(o.objId, key.KeyId(), TYPE_BYTES|TYPE_ARRAY) return ScImmutableBytesArray{objId: arrId} } func (o ScImmutableMap) GetChainId(key MapKey) ScImmutableChainId { return ScImmutableChainId{objId: o.objId, keyId: key.KeyId()} } func (o ScImmutableMap) GetColor(key MapKey) ScImmutableColor { return ScImmutableColor{objId: o.objId, keyId: key.KeyId()} } func (o ScImmutableMap) GetColorArray(key MapKey) ScImmutableColorArray { arrId := GetObjectId(o.objId, key.KeyId(), TYPE_COLOR|TYPE_ARRAY) return ScImmutableColorArray{objId: arrId} } func (o ScImmutableMap) GetContractId(key MapKey) ScImmutableContractId { return ScImmutableContractId{objId: o.objId, keyId: key.KeyId()} } func (o ScImmutableMap) GetHash(key MapKey) ScImmutableHash { return ScImmutableHash{objId: o.objId, keyId: key.KeyId()} } func (o ScImmutableMap) GetHashArray(key MapKey) ScImmutableHashArray { arrId := GetObjectId(o.objId, key.KeyId(), TYPE_HASH|TYPE_ARRAY) return ScImmutableHashArray{objId: arrId} } func (o ScImmutableMap) GetHname(key MapKey) ScImmutableHname { return ScImmutableHname{objId: o.objId, keyId: key.KeyId()} } func (o ScImmutableMap) GetInt(key MapKey) ScImmutableInt { return ScImmutableInt{objId: o.objId, 
keyId: key.KeyId()} } func (o ScImmutableMap) GetIntArray(key MapKey) ScImmutableIntArray { arrId := GetObjectId(o.objId, key.KeyId(), TYPE_INT|TYPE_ARRAY) return ScImmutableIntArray{objId: arrId} } func (o ScImmutableMap) GetMap(key MapKey) ScImmutableMap { mapId := GetObjectId(o.objId, key.KeyId(), TYPE_MAP) return ScImmutableMap{objId: mapId} } func (o ScImmutableMap) GetMapArray(key MapKey) ScImmutableMapArray { arrId := GetObjectId(o.objId, key.KeyId(), TYPE_MAP|TYPE_ARRAY) return ScImmutableMapArray{objId: arrId} } func (o ScImmutableMap) GetString(key MapKey) ScImmutableString { return ScImmutableString{objId: o.objId, keyId: key.KeyId()} } func (o ScImmutableMap) GetStringArray(key MapKey) ScImmutableStringArray { arrId := GetObjectId(o.objId, key.KeyId(), TYPE_STRING|TYPE_ARRAY) return ScImmutableStringArray{objId: arrId} } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableMapArray struct { objId int32 } func (o ScImmutableMapArray) GetMap(index int32) ScImmutableMap { mapId := GetObjectId(o.objId, Key32(index), TYPE_MAP) return ScImmutableMap{objId: mapId} } func (o ScImmutableMapArray) Length() int32 { return GetLength(o.objId) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableString struct { objId int32 keyId Key32 } func (o ScImmutableString) Exists() bool { return Exists(o.objId, o.keyId, TYPE_STRING) } func (o ScImmutableString) String() string { return o.Value() } func (o ScImmutableString) Value() string { bytes := GetBytes(o.objId, o.keyId, TYPE_STRING) if bytes == nil { return "" } return string(bytes) } // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ // \\ type ScImmutableStringArray struct { objId int32 } func (o ScImmutableStringArray) GetString(index int32) ScImmutableString { return ScImmutableString{objId: o.objId, keyId: Key32(index)} } func (o ScImmutableStringArray) Length() int32 { return GetLength(o.objId) }
packages/vm/wasmlib/immutable.go
0.649245
0.435121
immutable.go
starcoder
package asp import ( "reflect" "strings" ) // FindTarget returns the statement in a BUILD file that corresponds to a target // of the given name (or nil if one does not exist). func FindTarget(statements []*Statement, name string) (target *Statement) { WalkAST(statements, func(stmt *Statement) bool { if arg := FindArgument(stmt, "name"); arg != nil && arg.Value.Val != nil && arg.Value.Val.String != "" && strings.Trim(arg.Value.Val.String, `"`) == name { target = stmt } return false // FindArgument is recursive so we never need to visit more deeply. }) return } // NextStatement finds the statement that follows the given one. // This is often useful to find the extent of a statement in source code. // It will return nil if there is not one following it. func NextStatement(statements []*Statement, statement *Statement) *Statement { for i, s := range statements { if s == statement && i < len(statements)-1 { return statements[i+1] } } return nil } // GetExtents returns the "extents" of a statement, i.e. the lines that it covers in source. // The caller must pass a value for the maximum extent of the file; we can't detect it here // because the AST only contains positions for the beginning of the statements. func GetExtents(statements []*Statement, statement *Statement, max int) (int, int) { next := NextStatement(statements, statement) if next == nil { // Assume it reaches to the end of the file return statement.Pos.Line, max } return statement.Pos.Line, next.Pos.Line - 1 } // FindArgument finds an argument of any one of the given names, or nil if there isn't one. // The statement must be a function call (e.g. as returned by FindTarget). func FindArgument(statement *Statement, args ...string) (argument *CallArgument) { WalkAST([]*Statement{statement}, func(arg *CallArgument) bool { for _, a := range args { if arg.Name == a { argument = arg break } } return false // CallArguments can't contain other arguments so no point recursing further. 
}) return } // WalkAST is a generic function that walks through the ast recursively, // It accepts a sequence of functions to look for a particular grammar object; any matching one will be called on // each instance of that type, and returns a bool - for example // WalkAST(ast, func(expr *Expression) bool { ... }) // If the callback returns true, the node will be further visited; if false it (and // all children) will be skipped. func WalkAST(ast []*Statement, callback ...interface{}) { types := make([]reflect.Type, len(callback)) callbacks := make([]reflect.Value, len(callback)) for i, cb := range callback { v := reflect.ValueOf(cb) types[i] = v.Type().In(0) callbacks[i] = v } for _, node := range ast { walkAST(reflect.ValueOf(node), types, callbacks) } } func walkAST(v reflect.Value, types []reflect.Type, callbacks []reflect.Value) { call := func(v reflect.Value) bool { for i, typ := range types { if v.Type() == typ { vs := callbacks[i].Call([]reflect.Value{v}) return vs[0].Bool() } } return true } if v.Kind() == reflect.Ptr && !v.IsNil() { walkAST(v.Elem(), types, callbacks) } else if v.Kind() == reflect.Slice { for i := 0; i < v.Len(); i++ { walkAST(v.Index(i), types, callbacks) } } else if v.Kind() == reflect.Struct { if call(v.Addr()) { for i := 0; i < v.NumField(); i++ { walkAST(v.Field(i), types, callbacks) } } } } // WithinRange returns true if the input position is within the range of the given positions. func WithinRange(needle, start, end Position) bool { if needle.Line < start.Line || needle.Line > end.Line { return false } else if needle.Line == start.Line && needle.Column < start.Column { return false } else if needle.Line == end.Line && needle.Column > end.Column { return false } return true }
src/parse/asp/util.go
0.707
0.407569
util.go
starcoder
package main import ( "fmt" "log" "sort" ) /* Activity Selection Problem Greedy Algorithm - 1 ------------------------------------------------------- You are given n activities with their start and finish times. Select the maximum number of activities that can be performed by a single person, assuming that a person can only work on a single activity at a time. Example: -------- Input: {(2, 3), (1, 4), (5, 8), (6, 10)} Output: 2 Here, Task1 and Task2 cannot be performed at the same time, since their times are overlapping. Similarly, Task3 and Task4 cannot be performed at the same time. Hence, person can choose at-most 2 tasks consecutively. Possible variations are: (Task1, Task3), (Task2, Task4), (Task2, Task3), (Task1, Task4) */ func activitySelection(tasks [][]int, numTasks int) (int, error) { possibleTasks := 1 if len(tasks) == 0 { return 0, fmt.Errorf("you didn't specify any tasks to perform") } tasks, err := TasksSorting(tasks, numTasks) if err != nil { return 0, err } prevEnd := tasks[0][1] for i := 1; i < numTasks; i++ { currStart := tasks[i][0] currEnd := tasks[i][1] if prevEnd >= currStart { prevEnd = currEnd possibleTasks++ } } return possibleTasks, nil } func main() { var numTasks int fmt.Println("Enter how many tasks you want to perform: ") _, err := fmt.Scanln(&numTasks) if err != nil { log.Fatal(err) } var tasks [][]int var start, end int fmt.Println("Enter the Task Timings ") for i := 0; i < numTasks; i++ { var row []int fmt.Printf("----------\nTask %d starts at: ", i+1) _, _ = fmt.Scanf("%d", &start) row = append(row, start) fmt.Printf("Task %d ends at: ", i+1) _, _ = fmt.Scanf("%d", &end) if end < start { _ = fmt.Errorf("end timings must be greater than start") } row = append(row, end) tasks = append(tasks, row) } fmt.Println(tasks) possibleTasks, _ := activitySelection(tasks, numTasks) fmt.Println("Maximum number of Tasks that can be performed: ", possibleTasks) } func TasksSorting(tasks [][]int, len int) ([][]int, error) { var endTimings []int var 
sortedTasks [][]int for i := 0; i < len; i++ { endTimings = append(endTimings, tasks[i][1]) } sort.Ints(endTimings) for _, end := range endTimings { for j := 0; j < len; j++ { if tasks[j][1] == end { sortedTasks = append(sortedTasks, tasks[j]) } } } return sortedTasks, nil }
Golang/Greedy Algorithms/activitySelection.go
0.589716
0.406567
activitySelection.go
starcoder
package framework import ( "fmt" "github.com/onsi/ginkgo" "github.com/onsi/gomega" ) // ExpectEqual expects the specified two are the same, otherwise an exception raises func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) { gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...) } // ExpectNotEqual expects the specified two are not the same, otherwise an exception raises func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) { gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), explain...) } // ExpectError expects an error happens, otherwise an exception raises func ExpectError(err error, explain ...interface{}) { gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), explain...) } // ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error. func ExpectNoError(err error, explain ...interface{}) { ExpectNoErrorWithOffset(1, err, explain...) } // ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller // (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f"). func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...) } // ExpectConsistOf expects actual contains precisely the extra elements. The ordering of the elements does not matter. func ExpectConsistOf(actual interface{}, extra interface{}, explain ...interface{}) { gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), explain...) } // ExpectHaveKey expects the actual map has the key in the keyset func ExpectHaveKey(actual interface{}, key interface{}, explain ...interface{}) { gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), explain...) 
} // ExpectEmpty expects actual is empty func ExpectEmpty(actual interface{}, explain ...interface{}) { gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), explain...) } // ExpectMatchRegexp expects the string to match the provided regular expression func ExpectMatchRegexp(actual string, regexp string, explain ...interface{}) { gomega.ExpectWithOffset(1, actual).To(gomega.MatchRegexp(regexp), explain...) } // ExpectMatchRegexp expects the string to match the provided regular expression func Failf(format string, args ...interface{}) { ginkgo.Fail(fmt.Sprintf(format, args...)) }
e2e/framework/helper.go
0.745491
0.575916
helper.go
starcoder
package pack import "io" // A Match is the basic unit of LZ77 compression. type Match struct { Unmatched int // the number of unmatched bytes since the previous match Length int // the number of bytes in the matched string; it may be 0 at the end of the input Distance int // how far back in the stream to copy from } // A MatchFinder performs the LZ77 stage of compression, looking for matches. type MatchFinder interface { // FindMatches looks for matches in src, appends them to dst, and returns dst. FindMatches(dst []Match, src []byte) []Match // Reset clears any internal state, preparing the MatchFinder to be used with // a new stream. Reset() } // An Encoder encodes the data in its final format. type Encoder interface { // Encode appends the encoded format of src to dst, using the match // information from matches. Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte // Reset clears any internal state, preparing the Encoder to be used with // a new stream. Reset() } // A Writer uses MatchFinder and Encoder to write compressed data to Dest. type Writer struct { Dest io.Writer MatchFinder MatchFinder Encoder Encoder // BlockSize is the number of bytes to compress at a time. If it is zero, // each Write operation will be treated as one block. BlockSize int err error inBuf []byte outBuf []byte matches []Match } func (w *Writer) Write(p []byte) (n int, err error) { if w.err != nil { return 0, w.err } if w.BlockSize == 0 { return w.writeBlock(p, false) } w.inBuf = append(w.inBuf, p...) 
var pos int for pos = 0; pos+w.BlockSize <= len(w.inBuf) && w.err == nil; pos += w.BlockSize { w.writeBlock(w.inBuf[pos:pos+w.BlockSize], false) } if pos > 0 { n := copy(w.inBuf, w.inBuf[pos:]) w.inBuf = w.inBuf[:n] } return len(p), w.err } func (w *Writer) writeBlock(p []byte, lastBlock bool) (n int, err error) { w.outBuf = w.outBuf[:0] w.matches = w.MatchFinder.FindMatches(w.matches[:0], p) w.outBuf = w.Encoder.Encode(w.outBuf, p, w.matches, lastBlock) _, w.err = w.Dest.Write(w.outBuf) return len(p), w.err } func (w *Writer) Close() error { w.writeBlock(w.inBuf, true) w.inBuf = w.inBuf[:0] return w.err } func (w *Writer) Reset(newDest io.Writer) { w.MatchFinder.Reset() w.Encoder.Reset() w.err = nil w.inBuf = w.inBuf[:0] w.outBuf = w.outBuf[:0] w.matches = w.matches[:0] w.Dest = newDest }
pack.go
0.611266
0.571707
pack.go
starcoder
package transform

import (
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// isEmpty reports whether v is a zero-length slice, map, array, string,
// or channel; values of any other kind are always considered non-empty.
// Note: isEmpty panics if v is nil.
func isEmpty(v interface{}) bool {
	value := reflect.ValueOf(v)
	switch value.Kind() {
	case reflect.Slice, reflect.Map, reflect.Array, reflect.String, reflect.Chan:
		return value.Len() == 0
	}
	return false
}

// convFunc converts a value from one Go representation to another.
type convFunc func(v interface{}) (interface{}, error)

// string -> int64; panics if v is not a string (callers guarantee the kind).
var convStrToInt convFunc = func(v interface{}) (interface{}, error) {
	return strconv.ParseInt(v.(string), 10, 64)
}

// string -> float64; panics if v is not a string.
var convStrToFloat convFunc = func(v interface{}) (interface{}, error) {
	return strconv.ParseFloat(v.(string), 64)
}

// string -> bool; panics if v is not a string.
var convStrToBool convFunc = func(v interface{}) (interface{}, error) {
	return strconv.ParseBool(v.(string))
}

// any signed integer kind -> float64.
var convIntToFloat convFunc = func(v interface{}) (interface{}, error) {
	return float64(reflect.ValueOf(v).Int()), nil
}

// any unsigned integer kind -> float64.
var convUintToFloat convFunc = func(v interface{}) (interface{}, error) {
	return float64(reflect.ValueOf(v).Uint()), nil
}

// any float kind -> int64 (truncates toward zero).
var convFloatToInt convFunc = func(v interface{}) (interface{}, error) {
	return int64(reflect.ValueOf(v).Float()), nil
}

// any value -> its default fmt string representation.
var convToStr convFunc = func(v interface{}) (interface{}, error) {
	return fmt.Sprintf("%v", v), nil
}

var errTypeConversionNotSupported = errors.New("type conversion not supported")

// resultTypeConversion converts v to the requested resultType based on
// v's reflect.Kind. Unsupported kind/resultType combinations return
// errTypeConversionNotSupported.
func resultTypeConversion(v interface{}, resultType resultType) (interface{}, error) {
	switch reflect.ValueOf(v).Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		switch resultType {
		case resultTypeInt:
			return v, nil
		case resultTypeFloat:
			return convIntToFloat(v)
		case resultTypeString:
			return convToStr(v)
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		switch resultType {
		case resultTypeInt:
			// uint -> int keeps the value as-is rather than converting;
			// presumably callers accept a uint where an int was requested.
			return v, nil
		case resultTypeFloat:
			return convUintToFloat(v)
		case resultTypeString:
			return convToStr(v)
		}
	case reflect.Float32, reflect.Float64:
		switch resultType {
		case resultTypeInt:
			return convFloatToInt(v)
		case resultTypeFloat:
			return v, nil
		case resultTypeString:
			return convToStr(v)
		}
	case reflect.Bool:
		switch resultType {
		case resultTypeBoolean:
			return v, nil
		case resultTypeString:
			return convToStr(v)
		}
	case reflect.String:
		switch resultType {
		case resultTypeInt:
			return convStrToInt(v)
		case resultTypeFloat:
			return convStrToFloat(v)
		case resultTypeBoolean:
			return convStrToBool(v)
		case resultTypeString:
			return v, nil
		}
	}
	return nil, errTypeConversionNotSupported
}

// normalizeAndSaveValue trims string values (unless decl.NoTrim), applies
// decl.ResultType conversion when configured, and passes the normalized
// value to save. Empty/nil values are dropped unless decl.KeepEmptyOrNull
// is set. Conversion failures are returned as an error with the decl's
// fully-qualified name for context.
func normalizeAndSaveValue(decl *Decl, v interface{}, save func(interface{})) error {
	vv := reflect.ValueOf(v)
	if vv.Kind() == reflect.String && !decl.NoTrim {
		v = strings.TrimSpace(v.(string))
		vv = reflect.ValueOf(v)
	}
	checkToSave := func(v interface{}) {
		// Non-empty, non-nil values are always saved.
		if v != nil && !isEmpty(v) {
			save(v)
			return
		}
		// Empty or nil: only saved when the decl opts in.
		if !decl.KeepEmptyOrNull {
			return
		}
		// NOTE(review): the two branches below both call save(v), so the
		// string-kind distinction has no effect — possibly a leftover from
		// an earlier version. Kept as-is to preserve behavior.
		if v == nil || vv.Kind() != reflect.String {
			save(v)
			return
		}
		save(v)
		return
	}
	if v == nil || decl.ResultType == nil {
		checkToSave(v)
		return nil
	}
	converted, err := resultTypeConversion(v, *decl.ResultType)
	if err != nil {
		return fmt.Errorf("unable to convert value '%v' to type '%s' on '%s', err: %s",
			v, *decl.ResultType, decl.fqdn, err.Error())
	}
	checkToSave(converted)
	return nil
}

// normalizeAndReturnValue is the value-returning variant of
// normalizeAndSaveValue: it returns the normalized value (or nil when it
// was dropped) instead of invoking a callback.
func normalizeAndReturnValue(decl *Decl, v interface{}) (interface{}, error) {
	var ret interface{}
	err := normalizeAndSaveValue(decl, v, func(normalizedValue interface{}) {
		ret = normalizedValue
	})
	if err != nil {
		return nil, err
	}
	return ret, nil
}
extensions/omniv21/transform/value.go
0.500488
0.447823
value.go
starcoder
package pdf import ( "fmt" "strings" "github.com/jung-kurt/gofpdf" ) var Formats = map[string]gofpdf.SizeType{ "100x70": gofpdf.SizeType{Wd: 707.00, Ht: 1000.00}, "36x90_standard": gofpdf.SizeType{Wd: 914.40, Ht: 2286.00}, "500x700": gofpdf.SizeType{Wd: 500.00, Ht: 700.00}, "50x70": gofpdf.SizeType{Wd: 500.00, Ht: 707.00}, "5R": gofpdf.SizeType{Wd: 119.00, Ht: 170.00}, "5R_CA": gofpdf.SizeType{Wd: 127.00, Ht: 178.00}, "700x1000": gofpdf.SizeType{Wd: 700.00, Ht: 1000.00}, "850x2000_one_time": gofpdf.SizeType{Wd: 850.00, Ht: 2000.00}, "850x2000_standard": gofpdf.SizeType{Wd: 850.00, Ht: 2000.00}, "92x210": gofpdf.SizeType{Wd: 210.00, Ht: 296.00}, "A0": gofpdf.SizeType{Wd: 841.00, Ht: 1189.00}, "A1": gofpdf.SizeType{Wd: 594.00, Ht: 841.00}, "A2": gofpdf.SizeType{Wd: 420.00, Ht: 594.00}, "A3": gofpdf.SizeType{Wd: 297.00, Ht: 420.00}, "A4": gofpdf.SizeType{Wd: 210.00, Ht: 297.00}, "A5": gofpdf.SizeType{Wd: 148.00, Ht: 210.00}, "A6": gofpdf.SizeType{Wd: 105.00, Ht: 148.00}, "A7": gofpdf.SizeType{Wd: 125.40, Ht: 176.20}, "A9": gofpdf.SizeType{Wd: 128.53, Ht: 204.73}, "ArchC": gofpdf.SizeType{Wd: 457.20, Ht: 609.60}, "ArchD": gofpdf.SizeType{Wd: 609.60, Ht: 914.40}, "B5": gofpdf.SizeType{Wd: 176.00, Ht: 250.00}, "BB": gofpdf.SizeType{Wd: 50.00, Ht: 90.00}, "BC": gofpdf.SizeType{Wd: 55.00, Ht: 90.00}, "BD": gofpdf.SizeType{Wd: 55.00, Ht: 85.00}, "BX": gofpdf.SizeType{Wd: 50.81, Ht: 88.91}, "C4": gofpdf.SizeType{Wd: 221.00, Ht: 316.00}, "DL": gofpdf.SizeType{Wd: 99.00, Ht: 210.00}, "DX": gofpdf.SizeType{Wd: 215.90, Ht: 299.30}, "EC4": gofpdf.SizeType{Wd: 221.00, Ht: 316.00}, "LG": gofpdf.SizeType{Wd: 139.71, Ht: 215.91}, "LT": gofpdf.SizeType{Wd: 215.91, Ht: 279.41}, "MA": gofpdf.SizeType{Wd: 210.00, Ht: 279.00}, "SM": gofpdf.SizeType{Wd: 107.96, Ht: 139.71}, "XL": gofpdf.SizeType{Wd: 279.41, Ht: 431.81}, } func GetSize(format string, orientation string, bleed bool) (*gofpdf.SizeType, bool) { size, found := Formats[format] if !found { return nil, false } if orientation == 
"L" { size = gofpdf.SizeType{Wd: size.Ht, Ht: size.Wd} } if bleed { size = gofpdf.SizeType{Wd: size.Wd + 8, Ht: size.Ht + 8} } return &size, true } func GetFormatsHelp() string { res := []string{ "Format name: width height", } for k := range Formats { format := Formats[k] res = append(res, fmt.Sprintf("%s: %.2f %.2f", k, format.Wd, format.Ht)) } return strings.Join(res, "\n") }
internal/pdf/formats.go
0.534612
0.685242
formats.go
starcoder
package preview import ( "bytes" "errors" "fmt" ) // MediaPart has the binary data expected after downloading a PieceRange from the // DownloadPlan. So, usually it's going to be a partial video from a Torrent. type MediaPart struct { torrentID string pieceRange PieceRange data []byte } // NewMediaPart creates a MediaPart func NewMediaPart(torrentID string, pieceRange PieceRange, data []byte) MediaPart { return MediaPart{torrentID: torrentID, pieceRange: pieceRange, data: data} } // PieceRange returns the obvious func (p MediaPart) PieceRange() PieceRange { return p.pieceRange } // Data raw data of the file func (p MediaPart) Data() []byte { return p.data } type pieceRangeCounter struct { pieceRange PieceRange piecesDownloaded int } func newPieceRangeCounter( pieceRange PieceRange, ) *pieceRangeCounter { return &pieceRangeCounter{pieceRange: pieceRange} } func (c *pieceRangeCounter) addOne() { c.piecesDownloaded++ } func (c *pieceRangeCounter) areAllPiecesDownloaded() bool { return c.piecesDownloaded >= c.pieceRange.PieceCount() } // BundlePlan gets a PieceRange which is the definition of a file we want to download, // and a PieceRegistry which is were we have stored individual pieces, // and reads the whole file from the pieces. // We have to remember that a piece might contain multiple files. Once piece is not a file // and each files does not start at the start of a piece. That would be coincidence. // BundlePlan takes care of this logic and returns a MediaPart, which is the closes thing // of a file we're going to have. 
type BundlePlan struct{} // NewBundlePlan creates a BundlePlan func NewBundlePlan() BundlePlan { return BundlePlan{} } // Bundle transform a PieceRange (the file we want) to a MediaPart (the actual file) func (b BundlePlan) Bundle(registry *PieceRegistry, pieceRange PieceRange) (MediaPart, error) { piece := new(bytes.Buffer) for pieceIdx := pieceRange.Start(); pieceIdx <= pieceRange.End(); pieceIdx++ { p, found := registry.GetPiece(pieceIdx) if !found { return MediaPart{}, errors.New("piece not found in the registry. could be ignored but kept to further investigate") } start := pieceRange.StartOffset(pieceIdx) end := pieceRange.EndOffset(pieceIdx) if start > end { return MediaPart{}, fmt.Errorf("start offset %v bigger than end offset %v", start, end) } if start > len(p.data) { return MediaPart{}, fmt.Errorf("start offset %v is bigger than length of slice %v", start, len(p.data)) } if end > len(p.data) { return MediaPart{}, fmt.Errorf("end offset %v is bigger than length of slice %v", start, len(p.data)) } rawData := p.data[start:end] _, err := piece.Write(rawData) if err != nil { return MediaPart{}, err } } return NewMediaPart(pieceRange.Torrent().ID(), pieceRange, piece.Bytes()), nil } // TorrentImages represents all the images of a torrent type TorrentImages struct { images []Image imageName map[string]interface{} } // NewTorrentImages returns a TorrentImages func NewTorrentImages(images []Image) *TorrentImages { imageName := make(map[string]interface{}) for _, img := range images { imageName[img.name] = struct{}{} } return &TorrentImages{images: images, imageName: imageName} } // Images return all the images func (a *TorrentImages) Images() []Image { return a.images } // HaveImage stupid name to check if we already have the filename func (a *TorrentImages) HaveImage(name string) bool { _, found := a.imageName[name] return found }
internal/preview/media.go
0.742328
0.617945
media.go
starcoder
package base import ( r "reflect" ) func KindToCategory(k r.Kind) r.Kind { switch k { case r.Int, r.Int8, r.Int16, r.Int32, r.Int64: return r.Int case r.Uint, r.Uint8, r.Uint16, r.Uint32, r.Uint64, r.Uintptr: return r.Uint case r.Float32, r.Float64: return r.Float64 case r.Complex64, r.Complex128: return r.Complex128 default: return k } } func IsCategory(k r.Kind, categories ...r.Kind) bool { k = KindToCategory(k) for _, c := range categories { if k == c { return true } } return false } // IsOptimizedKind returns true if fast interpreter expects optimized expressions for given Kind func IsOptimizedKind(k r.Kind) bool { switch k { case r.Bool, r.Int, r.Int8, r.Int16, r.Int32, r.Int64, r.Uint, r.Uint8, r.Uint16, r.Uint32, r.Uint64, r.Uintptr, r.Float32, r.Float64, r.Complex64, r.Complex128, r.String: return true } return false } var kindToType = [...]r.Type{ r.Bool: TypeOfBool, r.Int: TypeOfInt, r.Int8: TypeOfInt8, r.Int16: TypeOfInt16, r.Int32: TypeOfInt32, r.Int64: TypeOfInt64, r.Uint: TypeOfUint, r.Uint8: TypeOfUint8, r.Uint16: TypeOfUint16, r.Uint32: TypeOfUint32, r.Uint64: TypeOfUint64, r.Uintptr: TypeOfUintptr, r.Float32: TypeOfFloat32, r.Float64: TypeOfFloat64, r.Complex64: TypeOfComplex64, r.Complex128: TypeOfComplex128, r.String: TypeOfString, } func KindToType(k r.Kind) r.Type { if int(k) < len(kindToType) { return kindToType[k] } return nil } // ConvertValue converts a value to type t and returns the converted value. // extends reflect.Value.Convert(t) by allowing conversions from/to complex numbers. // does not check for overflows or truncation. 
func ConvertValue(v r.Value, to r.Type) r.Value { t := ValueType(v) if t == to { return v } if !t.ConvertibleTo(to) { // reflect.Value does not allow conversions from/to complex types k := v.Kind() kto := to.Kind() if IsCategory(kto, r.Complex128) { if IsCategory(k, r.Int, r.Uint, r.Float64) { temp := v.Convert(TypeOfFloat64).Float() v = r.ValueOf(complex(temp, 0.0)) } } else if IsCategory(k, r.Complex128) { if IsCategory(k, r.Int, r.Uint, r.Float64) { temp := real(v.Complex()) v = r.ValueOf(temp) } } } return v.Convert(to) }
vendor/github.com/cosmos72/gomacro/base/literal.go
0.666171
0.480235
literal.go
starcoder
package wordShuffler import ( "fmt" "sort" ) type GramSequencer struct { // the sequence involved for "word" formation and matching later on Sequence string // the rule implementation on formulation of "words" based on the given sequence shuffleRule AdvanceSuffleRule // the rule implementation on matching words based on a "source" matcherRule MatcherRule // the valid sequences / "words" AFTER matching; need to convert back to []string validSequences map[string]bool validSequencesArray []string } // method to create an instance of GramSequencer func NewGramSequencer(sequence string, minSeqSize, maxSeqSize int, matcherRule MatcherRule, shuffleRule AdvanceSuffleRule) GramSequencer { m := new(GramSequencer) m.Sequence = sequence // shuffeRule if shuffleRule == nil { rule := NewSequenceShufflerRule(minSeqSize, maxSeqSize, sequence) m.shuffleRule = &rule } else { m.shuffleRule = shuffleRule } // matcherRule if matcherRule == nil { rule := NewDictionaryMatcher() m.matcherRule = &rule } else { m.matcherRule = matcherRule } // create slice m.validSequences = make(map[string]bool, 0) return *m } // method to create an instance of GramSequencer func NewGramSequencerSimple(sequence string) GramSequencer { return NewGramSequencer(sequence, -1, -1, nil, nil) } // method to generate "valid" sequences created from the given sequence. 
// Valid or not depends on the implementation of the Matcher func (g *GramSequencer) GenerateValidSequences() error { var newGrams []string // special handling for characters of length of "1" if len(g.Sequence) > 0 && len(g.Sequence) == 1 { newGrams = append(newGrams, g.Sequence) g.populateValidSequenceMap(newGrams) } else if len(g.Sequence) == 2 { newGrams = make([]string, 2) newGrams = append(newGrams, g.Sequence) newGrams = append(newGrams, fmt.Sprintf("%v%v", g.Sequence[1:], g.Sequence[0:1])) g.populateValidSequenceMap(newGrams) } else { newGrams, err := g.shuffleRule.Shuffle(g.Sequence) if err != nil { return err } if len(newGrams) == 0 { return fmt.Errorf("length of the words created after the shuffle should be at least 1~ [%v]", g.Sequence) } err = g.populateValidSequenceMap(newGrams) if err != nil { return err } } return nil } // populate a map from the given words slice / array; de-duplication logic // applied as well. func (g *GramSequencer) populateValidSequenceMap(grams []string) error { needUpdate := false for _, gram := range grams { if g.validSequences[gram] == false { g.validSequences[gram] = true needUpdate = true } } if needUpdate { // sorted uniqueSeqArr := g.convertValidSequenceMapToArray() sort.Strings(uniqueSeqArr) // do matching // reset the validSequencesArray first (0 length) g.validSequencesArray = make([]string, 0) if uniqueSeqArr != nil { for _, seq := range uniqueSeqArr { bMatched, err := g.matcherRule.MatchWord(seq) if err != nil { return err } if bMatched == true { g.validSequencesArray = append(g.validSequencesArray, seq) } } // end -- for (per entry within uniqueSeqArr) } // end -- if (uniqueSeqArr VALID) } return nil } // convert a map back to a string slice / array func (g *GramSequencer) convertValidSequenceMapToArray() []string { arr := make([]string, 0) for key := range g.validSequences { arr = append(arr, key) } return arr } // simple getter func (g *GramSequencer) GetValidSequences() []string { return g.validSequencesArray }
GramSequencer.go
0.720368
0.475118
GramSequencer.go
starcoder
// Package schema defines the GraphQL object types for the stock service.
package schema

import (
	"github.com/graphql-go/graphql"
	"github.com/ob-vss-ss18/ppl-stock/models"
)

// skiType is the GraphQL object for models.Ski. Every resolver follows
// the same pattern: type-assert the source to models.Ski and return the
// matching struct field, or (nil, nil) when the source has another type.
// NOTE(review): the field is named "modell" but resolves ski.Model —
// possibly an intentional German spelling; confirm before renaming, as
// clients depend on the field name. "availability" resolves ski.Status.
var skiType = graphql.NewObject(graphql.ObjectConfig{
	Name:        "Ski",
	Description: "A ski.",
	Fields: graphql.Fields{
		"id": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.Int),
			Description: "The id of the ski",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Id, nil
				}
				return nil, nil
			},
		},
		"usage": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.String),
			Description: "The use case of the ski",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Usage, nil
				}
				return nil, nil
			},
		},
		"category": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.String),
			Description: "The category of the ski",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Category, nil
				}
				return nil, nil
			},
		},
		"usertype": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.String),
			Description: "The usertype of the ski",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Usertype, nil
				}
				return nil, nil
			},
		},
		"gender": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.String),
			Description: "The gender by which the ski is intended to be used.",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Gender, nil
				}
				return nil, nil
			},
		},
		"manufacturer": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.String),
			Description: "The manufacturer of the ski.",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Manufacturer, nil
				}
				return nil, nil
			},
		},
		// Field name "modell" (sic) resolves the Model struct field.
		"modell": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.String),
			Description: "The model of the ski.",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Model, nil
				}
				return nil, nil
			},
		},
		"length": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.Int),
			Description: "The length of the ski.",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Length, nil
				}
				return nil, nil
			},
		},
		"bodyheight": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.Int),
			Description: "The best bodyheight for using this ski.",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Bodyheight, nil
				}
				return nil, nil
			},
		},
		"bodyweight": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.Int),
			Description: "The best bodyweight for using this ski.",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Bodyweight, nil
				}
				return nil, nil
			},
		},
		"color": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.String),
			Description: "The color of the ski.",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Color, nil
				}
				return nil, nil
			},
		},
		"price_new": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.Float),
			Description: "The new price of the ski.",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.PriceNew, nil
				}
				return nil, nil
			},
		},
		"condition": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.String),
			Description: "The condition of the ski.",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Condition, nil
				}
				return nil, nil
			},
		},
		// Field name "availability" resolves the Status struct field.
		"availability": &graphql.Field{
			Type:        graphql.NewNonNull(graphql.String),
			Description: "The status/availability of the ski.",
			Resolve: func(parameter graphql.ResolveParams) (interface{}, error) {
				if ski, ok := parameter.Source.(models.Ski); ok {
					return ski.Status, nil
				}
				return nil, nil
			},
		},
	},
})
schema/ski.go
0.517571
0.468487
ski.go
starcoder
package staticarray import ( "github.com/influxdata/flux/array" "github.com/influxdata/flux/memory" "github.com/influxdata/flux/semantic" ) type floats struct { data []float64 alloc *memory.Allocator } func Float(data []float64) array.Float { return &floats{data: data} } func (a *floats) Type() semantic.Type { return semantic.Float } func (a *floats) IsNull(i int) bool { return false } func (a *floats) IsValid(i int) bool { return i >= 0 && i < len(a.data) } func (a *floats) Len() int { return len(a.data) } func (a *floats) NullN() int { return 0 } func (a *floats) Value(i int) float64 { return a.data[i] } func (a *floats) Copy() array.Base { panic("implement me") } func (a *floats) Free() { if a.alloc != nil { a.alloc.Free(cap(a.data) * float64Size) } a.data = nil } func (a *floats) Slice(start, stop int) array.BaseRef { return a.FloatSlice(start, stop) } func (a *floats) FloatSlice(start, stop int) array.FloatRef { return Float(a.data[start:stop]) } func (a *floats) Float64Values() []float64 { return a.data } func FloatBuilder(a *memory.Allocator) array.FloatBuilder { return &floatBuilder{alloc: a} } type floatBuilder struct { data []float64 alloc *memory.Allocator } func (b *floatBuilder) Type() semantic.Type { return semantic.Float } func (b *floatBuilder) Len() int { if b == nil { return 0 } return len(b.data) } func (b *floatBuilder) Cap() int { return cap(b.data) } func (b *floatBuilder) Reserve(n int) { newCap := len(b.data) + n if newCap := len(b.data) + n; newCap <= cap(b.data) { return } if err := b.alloc.Allocate(newCap * float64Size); err != nil { panic(err) } data := make([]float64, len(b.data), newCap) copy(data, b.data) b.alloc.Free(cap(b.data) * float64Size) b.data = data } func (b *floatBuilder) BuildArray() array.Base { return b.BuildFloatArray() } func (b *floatBuilder) Free() { panic("implement me") } func (b *floatBuilder) Append(v float64) { if len(b.data) == cap(b.data) { // Grow the slice in the same way as built-in append. 
n := len(b.data) if n == 0 { n = 2 } b.Reserve(n) } b.data = append(b.data, v) } func (b *floatBuilder) AppendNull() { // The staticarray does not support nulls so it will do the current behavior of just appending // the zero value. b.Append(0) } func (b *floatBuilder) AppendValues(v []float64, valid ...[]bool) { if newCap := len(b.data) + len(v); newCap > cap(b.data) { b.Reserve(newCap - cap(b.data)) } b.data = append(b.data, v...) } func (b *floatBuilder) BuildFloatArray() array.Float { return &floats{ data: b.data, alloc: b.alloc, } }
internal/staticarray/float.go
0.67822
0.53959
float.go
starcoder
// Demonstrates the Visitor pattern: shapes accept visitors, and each
// visitor (area calculator, middle-coordinate calculator) implements one
// visit method per concrete shape.
package main

import "fmt"

// visitor declares one visit method per concrete shape.
type visitor interface {
	visitForSquare(*square)
	visitForCircle(*circle)
	visitForRectangle(*rectangle)
}

// square is a square with a side length.
type square struct {
	side int
}

// rectangle is a rectangle with sides l and b.
// (The original Russian comment mislabeled it as a triangle.)
type rectangle struct {
	l int
	b int
}

// circle is a circle with a radius.
type circle struct {
	radius int
}

// shape is the element interface: every shape names its type and accepts
// a visitor.
type shape interface {
	getType() string
	accept(visitor)
}

// middleCoordinates is a visitor that computes center coordinates.
type middleCoordinates struct {
	x int
	y int
}

// areaCalculator is a visitor that computes a shape's area.
type areaCalculator struct {
	area int
}

func main() {
	square := &square{side: 2}
	circle := &circle{radius: 3}
	rectangle := &rectangle{l: 2, b: 3}

	areaCalculator := &areaCalculator{}
	square.accept(areaCalculator)
	circle.accept(areaCalculator)
	rectangle.accept(areaCalculator)

	fmt.Println()
	middleCoordinates := &middleCoordinates{}
	square.accept(middleCoordinates)
	circle.accept(middleCoordinates)
	rectangle.accept(middleCoordinates)
}

// accept dispatches the visitor to the square-specific visit method.
func (s *square) accept(v visitor) {
	v.visitForSquare(s)
}

// accept dispatches the visitor to the rectangle-specific visit method.
func (t *rectangle) accept(v visitor) {
	v.visitForRectangle(t)
}

// accept dispatches the visitor to the circle-specific visit method.
func (c *circle) accept(v visitor) {
	v.visitForCircle(c)
}

// getType returns the shape's type name.
func (s *square) getType() string {
	return "Square"
}

// getType returns the shape's type name.
func (t *rectangle) getType() string {
	return "rectangle"
}

// getType returns the shape's type name.
func (c *circle) getType() string {
	return "Circle"
}

// visitForSquare handles the square case for the coordinate visitor.
func (a *middleCoordinates) visitForSquare(s *square) {
	// Calculate middle point coordinates for square.
	// Then assign in to the x and y instance variable.
	fmt.Println("Calculating middle point coordinates for square")
}

// visitForCircle handles the circle case for the coordinate visitor.
func (a *middleCoordinates) visitForCircle(c *circle) {
	fmt.Println("Calculating middle point coordinates for circle")
}

// visitForRectangle handles the rectangle case for the coordinate visitor.
func (a *middleCoordinates) visitForRectangle(t *rectangle) {
	fmt.Println("Calculating middle point coordinates for rectangle")
}

// visitForSquare handles the square case for the area visitor.
func (a *areaCalculator) visitForSquare(s *square) {
	// Calculate area for square.
	// Then assign in to the area instance variable.
	fmt.Println("Calculating area for square")
}

// visitForCircle handles the circle case for the area visitor.
func (a *areaCalculator) visitForCircle(s *circle) {
	fmt.Println("Calculating area for circle")
}

// visitForRectangle handles the rectangle case for the area visitor.
func (a *areaCalculator) visitForRectangle(s *rectangle) {
	fmt.Println("Calculating area for rectangle")
}
pattern/03_visiter.go
0.639173
0.525917
03_visiter.go
starcoder