code
stringlengths
114
1.05M
path
stringlengths
3
312
quality_prob
float64
0.5
0.99
learning_prob
float64
0.2
1
filename
stringlengths
3
168
kind
stringclasses
1 value
package main import ( "fmt" "image" "image/color" "github.com/split-cube-studios/ardent" "github.com/split-cube-studios/ardent/engine" ) const ( paddleWidth, paddleHeight, paddleOffset, velocity = 10, 50, 10, 4.0 ) var ( xv, yv, w, h = velocity, velocity, 600, 400 ) var ( game engine.Game ball engine.Image lpaddle engine.Image rpaddle engine.Image ) func collides(a, b engine.Image) bool { aw, ah := a.Size() bw, bh := b.Size() return a.Position().X < b.Position().X+float64(bw) && a.Position().X+float64(aw) > b.Position().X && a.Position().Y < b.Position().Y+float64(bh) && a.Position().Y+float64(ah) > b.Position().Y } func foo(paddle engine.Image) float64 { return paddle.Position().Y - velocity } func bar(paddle engine.Image) float64 { return paddle.Position().Y + velocity } func checkWallCollisions() { if ball.Position().Y < 0 { ball.Translate(ball.Position().X, 0) yv *= -1 } if ball.Position().Y > float64(h) { ball.Translate(ball.Position().X, float64(h)) yv *= -1 } if ball.Position().X < 0 { ball.Translate(0, ball.Position().Y) xv *= -1 } if ball.Position().X > float64(w) { ball.Translate(float64(w), ball.Position().Y) xv *= -1 } } func checkKeyboardInput() { if game.IsKeyPressed(engine.KeyUp) && foo(lpaddle) > 0 { lpaddle.Translate(lpaddle.Position().X, foo(lpaddle)) } else if game.IsKeyPressed(engine.KeyDown) && float64(paddleHeight)+bar(lpaddle) < float64(h) { lpaddle.Translate(lpaddle.Position().X, bar(lpaddle)) } } func checkPaddleCollisions() { if collides(ball, lpaddle) { ball.Translate(float64(paddleOffset+paddleWidth), ball.Position().Y) xv *= -1 } if collides(ball, rpaddle) { fmt.Println(w - paddleOffset - paddleWidth) ball.Translate(float64(w-paddleOffset-paddleWidth), ball.Position().Y) xv *= -1 } } func main() { game = ardent.NewGame("Square", w, h, engine.FlagResizable, func() { // move the ball along by adding x and y velocity to its position ball.Translate(ball.Position().X+xv, ball.Position().Y+yv) checkWallCollisions() checkKeyboardInput() 
checkPaddleCollisions() }, func(nw int, nh int) (int, int) { w = nw h = nh return nw, nh }, ) renderer := game.NewRenderer() game.AddRenderer(renderer) rpaddle = newRecImage(paddleWidth, paddleHeight) rpaddle.Translate(float64(w-paddleWidth-paddleOffset), float64(h-paddleHeight-paddleOffset)) lpaddle = newRecImage(paddleWidth, paddleHeight) lpaddle.Origin(0, 0) lpaddle.Translate(float64(paddleOffset), float64(paddleOffset)) ball = newRecImage(10, 10) ball.Origin(0, 0) renderer.AddImage(rpaddle) renderer.AddImage(lpaddle) renderer.AddImage(ball) err := game.Run() if err != nil { panic(err) } } func newRecImage(x, y int) engine.Image { image := image.NewNRGBA(image.Rect(0, 0, x, y)) for i := 0; i < x; i++ { for j := 0; j < y; j++ { image.Set(i, j, color.White) } } return game.NewImageFromImage(image) }
examples/pong/main.go
0.626124
0.434581
main.go
starcoder
package validator import ( "github.com/end-r/guardian/typing" "github.com/end-r/guardian/ast" ) func (v *Validator) validateStatement(node ast.Node) { switch n := node.(type) { case *ast.AssignmentStatementNode: v.validateAssignment(n) break case *ast.ForStatementNode: v.validateForStatement(n) break case *ast.IfStatementNode: v.validateIfStatement(n) break case *ast.ReturnStatementNode: v.validateReturnStatement(n) break case *ast.SwitchStatementNode: v.validateSwitchStatement(n) break case *ast.ForEachStatementNode: v.validateForEachStatement(n) break case *ast.ImportStatementNode: v.validateImportStatement(n) return case *ast.PackageStatementNode: v.validatePackageStatement(n) return } v.finishedImports = true } func (v *Validator) validateAssignment(node *ast.AssignmentStatementNode) { for _, l := range node.Left { if l == nil { v.addError(node.Start(), errUnknown) return } else { switch l.Type() { case ast.CallExpression, ast.Literal, ast.MapLiteral, ast.ArrayLiteral, ast.SliceExpression, ast.FuncLiteral: v.addError(l.Start(), errInvalidExpressionLeft) } } } leftTuple := v.ExpressionTuple(node.Left) rightTuple := v.ExpressionTuple(node.Right) if len(leftTuple.Types) > len(rightTuple.Types) && len(rightTuple.Types) == 1 { right := rightTuple.Types[0] for _, left := range leftTuple.Types { if !v.vm.Assignable(v, left, right, node.Right[0]) { v.addError(node.Left[0].Start(), errInvalidAssignment, typing.WriteType(left), typing.WriteType(right)) } } for i, left := range node.Left { if leftTuple.Types[i] == typing.Unknown() { if id, ok := left.(*ast.IdentifierNode); ok { ty := rightTuple.Types[0] id.Resolved = ty id.Resolved.SetModifiers(nil) ignored := "_" if id.Name != ignored { v.declareVar(id.Start(), id.Name, id.Resolved) } } } } } else { if len(leftTuple.Types) == len(rightTuple.Types) { // count helps to handle: a, b, c, d = producesTwo(), 6, 7 // first two rely on the same expression count := 0 remaining := 0 for i, left := range leftTuple.Types { right := 
rightTuple.Types[i] if !v.vm.Assignable(v, left, right, node.Right[count]) { v.addError(node.Start(), errInvalidAssignment, typing.WriteType(leftTuple), typing.WriteType(rightTuple)) break } if remaining == 0 { if node.Right[count] == nil { count++ } else { switch a := node.Right[count].ResolvedType().(type) { case *typing.Tuple: remaining = len(a.Types) - 1 break default: count++ } } } else { remaining-- } } } else { v.addError(node.Start(), errInvalidAssignment, typing.WriteType(leftTuple), typing.WriteType(rightTuple)) } // length of left tuple should always equal length of left // this is because tuples are not first class types // cannot assign to tuple expressions if len(node.Left) == len(rightTuple.Types) { for i, left := range node.Left { if leftTuple.Types[i] == typing.Unknown() { if id, ok := left.(*ast.IdentifierNode); ok { id.Resolved = rightTuple.Types[i] if id.Name != "_" { //fmt.Printf("Declaring %s as %s\n", id.Name, typing.WriteType(rightTuple.Types[i])) v.declareVar(id.Start(), id.Name, id.Resolved) } } } } } } } func (v *Validator) validateIfStatement(node *ast.IfStatementNode) { v.openScope(nil, nil) if node.Init != nil { v.validateAssignment(node.Init.(*ast.AssignmentStatementNode)) } for _, cond := range node.Conditions { // condition must be of type bool v.requireType(cond.Condition.Start(), typing.Boolean(), v.resolveExpression(cond.Condition)) v.validateScope(node, cond.Body) } if node.Else != nil { v.validateScope(node, node.Else) } v.closeScope() } func (v *Validator) validateSwitchStatement(node *ast.SwitchStatementNode) { // no switch expression --> booleans switchType := typing.Boolean() if node.Target != nil { switchType = v.resolveExpression(node.Target) } // target must be matched by all cases for _, node := range node.Cases.Sequence { if node.Type() == ast.CaseStatement { v.validateCaseStatement(switchType, node.(*ast.CaseStatementNode)) } } } func (v *Validator) validateCaseStatement(switchType typing.Type, clause 
*ast.CaseStatementNode) { for _, expr := range clause.Expressions { t := v.resolveExpression(expr) if !v.vm.Assignable(v, switchType, t, expr) { v.addError(clause.Start(), errInvalidSwitchTarget, typing.WriteType(switchType), typing.WriteType(t)) } } v.validateScope(clause, clause.Block) } func (v *Validator) validateReturnStatement(node *ast.ReturnStatementNode) { for c := v.scope; c != nil; c = c.parent { if c.context != nil { switch a := c.context.(type) { case *ast.FuncDeclarationNode: results := a.Resolved.(*typing.Func).Results returned := v.ExpressionTuple(node.Results) if (results == nil || len(results.Types) == 0) && len(returned.Types) > 0 { v.addError(node.Start(), errInvalidReturnFromVoid, typing.WriteType(returned), a.Signature.Identifier) return } if !typing.AssignableTo(results, returned, false) { v.addError(node.Start(), errInvalidReturn, typing.WriteType(returned), a.Signature.Identifier, typing.WriteType(results)) } return case *ast.FuncLiteralNode: results := a.Resolved.(*typing.Func).Results returned := v.ExpressionTuple(node.Results) if (results == nil || len(results.Types) == 0) && len(returned.Types) > 0 { v.addError(node.Start(), errInvalidReturnFromVoid, typing.WriteType(returned), "literal") return } if !typing.AssignableTo(results, returned, false) { v.addError(node.Start(), errInvalidReturn, typing.WriteType(returned), "literal", typing.WriteType(results)) } return } } } v.addError(node.Start(), errInvalidReturnStatementOutsideFunc) } func (v *Validator) validateForEachStatement(node *ast.ForEachStatementNode) { // get type of v.openScope(nil, nil) gen := v.resolveExpression(node.Producer) var req int switch a := gen.(type) { case *typing.Map: // maps must handle k, v in MAP req = 2 if len(node.Variables) != req { v.addError(node.Begin, errInvalidForEachVariables, len(node.Variables), req) } else { v.declareVar(node.Start(), node.Variables[0], a.Key) v.declareVar(node.Start(), node.Variables[1], a.Value) } break case *typing.Array: // 
arrays must handle i, v in ARRAY req = 2 if len(node.Variables) != req { v.addError(node.Start(), errInvalidForEachVariables, len(node.Variables), req) } else { v.declareVar(node.Start(), node.Variables[0], v.LargestNumericType(false)) v.declareVar(node.Start(), node.Variables[1], a.Value) } break default: v.addError(node.Start(), errInvalidForEachType, typing.WriteType(gen)) } v.validateScope(node, node.Block) v.closeScope() } func (v *Validator) validateForStatement(node *ast.ForStatementNode) { v.openScope(nil, nil) if node.Init != nil { v.validateAssignment(node.Init) } // cond statement must be a boolean v.requireType(node.Cond.Start(), typing.Boolean(), v.resolveExpression(node.Cond)) // post statement must be valid if node.Post != nil { v.validateStatement(node.Post) } v.validateScope(node, node.Block) v.closeScope() } func (v *Validator) createPackageType(path string) *typing.Package { scope, errs := ValidatePackage(v.vm, path) if errs != nil { v.errs = append(v.errs, errs...) } pkg := new(typing.Package) pkg.Variables = scope.variables pkg.Types = scope.types return pkg } func trimPath(n string) string { lastSlash := 0 for i := 0; i < len(n); i++ { if n[i] == '/' { lastSlash = i } } return n[lastSlash:] } func (v *Validator) validateImportStatement(node *ast.ImportStatementNode) { if v.finishedImports { v.addError(node.Start(), errFinishedImports) } if node.Alias != "" { v.declareType(node.Start(), node.Alias, v.createPackageType(node.Path)) } else { v.declareType(node.Start(), trimPath(node.Path), v.createPackageType(node.Path)) } } func (v *Validator) validatePackageStatement(node *ast.PackageStatementNode) { if node.Name == "" { v.addError(node.Start(), errInvalidPackageName, node.Name) return } if v.packageName == "" { v.packageName = node.Name } else { if v.packageName != node.Name { v.addError(node.Start(), errDuplicatePackageName, node.Name, v.packageName) } } }
validator/statements.go
0.566978
0.415966
statements.go
starcoder
package models import ( i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization" ) // DomainDnsRecord type DomainDnsRecord struct { Entity // If false, this record must be configured by the customer at the DNS host for Microsoft Online Services to operate correctly with the domain. isOptional *bool // Value used when configuring the name of the DNS record at the DNS host. label *string // Indicates what type of DNS record this entity represents.The value can be one of the following: CName, Mx, Srv, Txt. recordType *string // Microsoft Online Service or feature that has a dependency on this DNS record.Can be one of the following values: null, Email, Sharepoint, EmailInternalRelayOnly, OfficeCommunicationsOnline, SharePointDefaultDomain, FullRedelegation, SharePointPublic, OrgIdAuthentication, Yammer, Intune. supportedService *string // Value to use when configuring the time-to-live (ttl) property of the DNS record at the DNS host. Not nullable. ttl *int32 } // NewDomainDnsRecord instantiates a new domainDnsRecord and sets the default values. 
func NewDomainDnsRecord()(*DomainDnsRecord) { m := &DomainDnsRecord{ Entity: *NewEntity(), } return m } // CreateDomainDnsRecordFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value func CreateDomainDnsRecordFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) { return NewDomainDnsRecord(), nil } // GetFieldDeserializers the deserialization information for the current model func (m *DomainDnsRecord) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) { res := m.Entity.GetFieldDeserializers() res["isOptional"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetBoolValue() if err != nil { return err } if val != nil { m.SetIsOptional(val) } return nil } res["label"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetStringValue() if err != nil { return err } if val != nil { m.SetLabel(val) } return nil } res["recordType"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetStringValue() if err != nil { return err } if val != nil { m.SetRecordType(val) } return nil } res["supportedService"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetStringValue() if err != nil { return err } if val != nil { m.SetSupportedService(val) } return nil } res["ttl"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error { val, err := n.GetInt32Value() if err != nil { return err } if val != nil { m.SetTtl(val) } return nil } return res } // GetIsOptional gets the isOptional property value. 
If false, this record must be configured by the customer at the DNS host for Microsoft Online Services to operate correctly with the domain. func (m *DomainDnsRecord) GetIsOptional()(*bool) { if m == nil { return nil } else { return m.isOptional } } // GetLabel gets the label property value. Value used when configuring the name of the DNS record at the DNS host. func (m *DomainDnsRecord) GetLabel()(*string) { if m == nil { return nil } else { return m.label } } // GetRecordType gets the recordType property value. Indicates what type of DNS record this entity represents.The value can be one of the following: CName, Mx, Srv, Txt. func (m *DomainDnsRecord) GetRecordType()(*string) { if m == nil { return nil } else { return m.recordType } } // GetSupportedService gets the supportedService property value. Microsoft Online Service or feature that has a dependency on this DNS record.Can be one of the following values: null, Email, Sharepoint, EmailInternalRelayOnly, OfficeCommunicationsOnline, SharePointDefaultDomain, FullRedelegation, SharePointPublic, OrgIdAuthentication, Yammer, Intune. func (m *DomainDnsRecord) GetSupportedService()(*string) { if m == nil { return nil } else { return m.supportedService } } // GetTtl gets the ttl property value. Value to use when configuring the time-to-live (ttl) property of the DNS record at the DNS host. Not nullable. 
func (m *DomainDnsRecord) GetTtl()(*int32) { if m == nil { return nil } else { return m.ttl } } // Serialize serializes information the current object func (m *DomainDnsRecord) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) { err := m.Entity.Serialize(writer) if err != nil { return err } { err = writer.WriteBoolValue("isOptional", m.GetIsOptional()) if err != nil { return err } } { err = writer.WriteStringValue("label", m.GetLabel()) if err != nil { return err } } { err = writer.WriteStringValue("recordType", m.GetRecordType()) if err != nil { return err } } { err = writer.WriteStringValue("supportedService", m.GetSupportedService()) if err != nil { return err } } { err = writer.WriteInt32Value("ttl", m.GetTtl()) if err != nil { return err } } return nil } // SetIsOptional sets the isOptional property value. If false, this record must be configured by the customer at the DNS host for Microsoft Online Services to operate correctly with the domain. func (m *DomainDnsRecord) SetIsOptional(value *bool)() { if m != nil { m.isOptional = value } } // SetLabel sets the label property value. Value used when configuring the name of the DNS record at the DNS host. func (m *DomainDnsRecord) SetLabel(value *string)() { if m != nil { m.label = value } } // SetRecordType sets the recordType property value. Indicates what type of DNS record this entity represents.The value can be one of the following: CName, Mx, Srv, Txt. func (m *DomainDnsRecord) SetRecordType(value *string)() { if m != nil { m.recordType = value } } // SetSupportedService sets the supportedService property value. Microsoft Online Service or feature that has a dependency on this DNS record.Can be one of the following values: null, Email, Sharepoint, EmailInternalRelayOnly, OfficeCommunicationsOnline, SharePointDefaultDomain, FullRedelegation, SharePointPublic, OrgIdAuthentication, Yammer, Intune. 
func (m *DomainDnsRecord) SetSupportedService(value *string)() { if m != nil { m.supportedService = value } } // SetTtl sets the ttl property value. Value to use when configuring the time-to-live (ttl) property of the DNS record at the DNS host. Not nullable. func (m *DomainDnsRecord) SetTtl(value *int32)() { if m != nil { m.ttl = value } }
models/domain_dns_record.go
0.829216
0.436982
domain_dns_record.go
starcoder
package util import ( "math" ) // CalcPointsForArc takes an arc angle (less than or equal to Pi), and calculates the // points for a Bezier cubic to describe it on a circle centered // on (0,0) with radius 1. Mid-point of the curve is (1,0) // Error increases for values > Pi/2 func CalcPointsForArc(theta float64) [][]float64 { phi := theta / 2 x0 := math.Cos(phi) y0 := math.Sin(phi) x3 := x0 y3 := -y0 x1 := (4 - x0) / 3 y1 := (1 - x0) * (3 - x0) / (3 * y0) x2 := x1 y2 := -y1 return [][]float64{{x3, y3}, {x2, y2}, {x1, y1}, {x0, y0}} } // Conversion methods for cubic Bezier to CatmullRom and v.v. // From https://pomax.github.io/bezierinfo/#catmullconv // Bezier3ToCatmull converts a cubic bezier to a catmul curve. // p1, c1, c2, p2 => t1, p1, p2, t2 func Bezier3ToCatmull(p1, p2, p3, p4 []float64) []float64 { dx12 := 6 * (p1[0] - p2[0]) dy12 := 6 * (p1[1] - p2[1]) dx43 := 6 * (p4[0] - p3[0]) dy43 := 6 * (p4[1] - p3[1]) return []float64{p4[0] + dx12, p4[1] + dy12, p1[0], p1[1], p4[0], p4[1], p1[0] + dx43, p1[1] + dy43} } // CatmullToBezier3 converts a catmul curve to a cubic bezier. 
// t1, p1, p2, t2 => p1, c1, c2, p2 func CatmullToBezier3(tau float64, p1, p2, p3, p4 []float64) []float64 { tau *= 6 dx31 := (p3[0] - p1[0]) / tau dy31 := (p3[1] - p1[1]) / tau dx42 := (p4[0] - p2[0]) / tau dy42 := (p4[1] - p2[1]) / tau return []float64{p2[0], p2[1], p2[0] + dx31, p2[1] + dy31, p3[0] - dx42, p3[1] - dy42, p3[0], p3[1]} } // Bezier1 (flat curve) {p1, p2} func Bezier1(pts [][]float64, t float64) []float64 { omt := 1 - t return []float64{ omt*pts[0][0] + t*pts[1][0], omt*pts[0][1] + t*pts[1][1]} } // Bezier2 (quad curve) {p1, c1, p2} func Bezier2(pts [][]float64, t float64) []float64 { t2 := t * t omt := 1 - t omt2 := omt * omt omt2t := omt * 2 * t return []float64{ omt2*pts[0][0] + omt2t*pts[1][0] + t2*pts[2][0], omt2*pts[0][1] + omt2t*pts[1][1] + t2*pts[2][1]} } // Bezier3 (cubic curve) {p1, c1, c2, p2} func Bezier3(pts [][]float64, t float64) []float64 { t2 := t * t t3 := t2 * t omt := 1 - t omt2 := omt * omt omt3 := omt2 * omt bc1 := 3 * omt2 * t bc2 := 3 * omt * t2 return []float64{ omt3*pts[0][0] + bc1*pts[1][0] + bc2*pts[2][0] + t3*pts[3][0], omt3*pts[0][1] + bc1*pts[1][1] + bc2*pts[2][1] + t3*pts[3][1]} } // DeCasteljau uses de Casteljau's algorithm for degree n curves and // returns the point and the tangent of the line it's traversing. 
// {p1, c1, c2, c3, ..., p2} func DeCasteljau(pts [][]float64, t float64) []float64 { if len(pts) == 1 { return pts[0] } npts := make([][]float64, len(pts)-1) omt := 1 - t for i := 0; i < len(npts); i++ { npts[i] = []float64{ omt*pts[i][0] + t*pts[i+1][0], omt*pts[i][1] + t*pts[i+1][1], pts[i+1][0] - pts[i][0], pts[i+1][1] - pts[i][1]} } return DeCasteljau(npts, t) } // SplitCurve splits curve at t into two new curves such that the end of the lhs is the // start of the rhs // {p1, c1, c2, c3, ..., p2} func SplitCurve(pts [][]float64, t float64) [][][]float64 { n := len(pts) left := make([][]float64, n) right := make([][]float64, n) splitCurve(pts, n-1, 0, left, right, t) return [][][]float64{left, right} } // Helper function - note flipping of rhs func splitCurve(pts [][]float64, nn, n int, left, right [][]float64, t float64) { np := len(pts) if np == 1 { left[n] = pts[0] right[nn-n] = pts[0] } else { np -= 1 npts := make([][]float64, np) omt := 1 - t for i := 0; i < np; i++ { if i == 0 { left[n] = pts[0] } if i == np-1 { right[nn-n] = pts[np] } npts[i] = []float64{ omt*pts[i][0] + t*pts[i+1][0], omt*pts[i][1] + t*pts[i+1][1], pts[i+1][0] - pts[i][0], pts[i+1][1] - pts[i][1]} } splitCurve(npts, nn, n+1, left, right, t) } } // CalcDerivativeWeights calculates the derivative of the supplied curve. // Bezier gradient (differentiation): Order of curve drops by one and new weights // are the difference of the original weights scaled by the original order func CalcDerivativeWeights(w [][]float64) [][]float64 { n := len(w) res := make([][]float64, n-1) for i := 0; i < n-1; i++ { res[i] = []float64{ float64(n) * (w[i+1][0] - w[i][0]), float64(n) * (w[i+1][1] - w[i][1])} } return res } // CalcNextOrderWeights calculates the weights necessary to represent the supplied curve // at next highest order, i.e. curve promotion b2 to b3. Note, there's no inverse. 
func CalcNextOrderWeights(w [][]float64) [][]float64 { n := len(w) k := n + 1 ki := 1 / float64(k) res := make([][]float64, k) // First weight doesn't change res[0] = []float64{ w[0][0], w[0][1]} for i := 1; i < k; i++ { res[i] = []float64{ ki * (float64(k-i)*w[i][0] + float64(i)*w[i-1][0]), ki * (float64(k-i)*w[i][1] + float64(i)*w[i-1][1])} } return res } // Kappa1 calculates curvature - note curve must have 2nd order derivative. // Radius of curvature at t is 1/kappa(t) func Kappa1(dw, d2w [][]float64, t float64) float64 { dpt := DeCasteljau(dw, t) d2pt := DeCasteljau(d2w, t) return Kappa(dpt, d2pt) } // Kappa from first and second derivatives at a point. func Kappa(dpt, d2pt []float64) float64 { return (dpt[0]*d2pt[1] - d2pt[0]*dpt[1]) / math.Pow(dpt[0]*dpt[0]+dpt[1]*dpt[1], 1.5) } // KappaC estimates kappa from three points by calculating the center of the circumcircle. func KappaC(p1, p2, p3 []float64) float64 { d1 := []float64{p2[0] - p1[0], p2[1] - p1[1]} s1 := []float64{p1[0] + d1[0]/2, p1[1] + d1[1]/2} // mid point p1-p2 d2 := []float64{p3[0] - p2[0], p3[1] - p2[1]} s2 := []float64{p2[0] + d2[0]/2, p2[1] + d2[1]/2} // mid point p2-p3 n1 := []float64{-d1[1], d1[0]} // normal at s1 n2 := []float64{-d2[1], d2[0]} // normal at s2 // intersection of n1 and n2 ts, err := IntersectionTVals(s1[0], s1[1], s1[0]+n1[0], s1[1]+n1[1], s2[0], s2[1], s2[0]+n2[0], s2[1]+n2[1]) if err != nil { // p1, p2 and p3 are coincident return 0 } c := []float64{s1[0] + ts[0]*n1[0], s1[1] + ts[0]*n1[1]} // cc center dx := p1[0] - c[0] dy := p1[1] - c[1] r := math.Sqrt(dx*dx + dy*dy) // distance p1-c if ts[0] < 0 { return -1 / r } return 1 / r } // KappaM calculates Menger curvature: 4*area / (d(p1, p2).d(p2s, p3).d(p3, p1)) // Same result as KappaC but with more square roots... 
func KappaM(p1, p2, p3 []float64) float64 { a := TriArea(p1, p2, p3) denom := DistanceE(p1, p2) * DistanceE(p2, p3) * DistanceE(p3, p1) if Equals(denom, 0) { // p1, p2 and p3 are coincident return 0 } return 4 * a / denom }
util/curves.go
0.842863
0.676473
curves.go
starcoder
package plaid import ( "encoding/json" ) // BankInitiatedReturnRisk The object contains a risk score and a risk tier that evaluate the transaction return risk because an account is overdrawn or because an ineligible account is used. Common return codes in this category include: \"R01\", \"R02\", \"R03\", \"R04\", \"R06\", “R08”, \"R09\", \"R13\", \"R16\", \"R17\", \"R20\", \"R23\". These returns have a turnaround time of 2 banking days. type BankInitiatedReturnRisk struct { // A score from 0-99 that indicates the transaction return risk: a higher risk score suggests a higher return likelihood. Score int32 `json:"score"` // In the `bank_initiated_return_risk` object, there are eight risk tiers corresponding to the scores: 1: Predicted bank-initiated return incidence rate between 0.0% - 0.5% 2: Predicted bank-initiated return incidence rate between 0.5% - 1.5% 3: Predicted bank-initiated return incidence rate between 1.5% - 3% 4: Predicted bank-initiated return incidence rate between 3% - 5% 5: Predicted bank-initiated return incidence rate between 5% - 10% 6: Predicted bank-initiated return incidence rate between 10% - 15% 7: Predicted bank-initiated return incidence rate between 15% and 50% 8: Predicted bank-initiated return incidence rate greater than 50% RiskTier int32 `json:"risk_tier"` } // NewBankInitiatedReturnRisk instantiates a new BankInitiatedReturnRisk object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewBankInitiatedReturnRisk(score int32, riskTier int32) *BankInitiatedReturnRisk { this := BankInitiatedReturnRisk{} this.Score = score this.RiskTier = riskTier return &this } // NewBankInitiatedReturnRiskWithDefaults instantiates a new BankInitiatedReturnRisk object // This constructor will only assign default values to properties that have it defined, // but it doesn't 
guarantee that properties required by API are set func NewBankInitiatedReturnRiskWithDefaults() *BankInitiatedReturnRisk { this := BankInitiatedReturnRisk{} return &this } // GetScore returns the Score field value func (o *BankInitiatedReturnRisk) GetScore() int32 { if o == nil { var ret int32 return ret } return o.Score } // GetScoreOk returns a tuple with the Score field value // and a boolean to check if the value has been set. func (o *BankInitiatedReturnRisk) GetScoreOk() (*int32, bool) { if o == nil { return nil, false } return &o.Score, true } // SetScore sets field value func (o *BankInitiatedReturnRisk) SetScore(v int32) { o.Score = v } // GetRiskTier returns the RiskTier field value func (o *BankInitiatedReturnRisk) GetRiskTier() int32 { if o == nil { var ret int32 return ret } return o.RiskTier } // GetRiskTierOk returns a tuple with the RiskTier field value // and a boolean to check if the value has been set. func (o *BankInitiatedReturnRisk) GetRiskTierOk() (*int32, bool) { if o == nil { return nil, false } return &o.RiskTier, true } // SetRiskTier sets field value func (o *BankInitiatedReturnRisk) SetRiskTier(v int32) { o.RiskTier = v } func (o BankInitiatedReturnRisk) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if true { toSerialize["score"] = o.Score } if true { toSerialize["risk_tier"] = o.RiskTier } return json.Marshal(toSerialize) } type NullableBankInitiatedReturnRisk struct { value *BankInitiatedReturnRisk isSet bool } func (v NullableBankInitiatedReturnRisk) Get() *BankInitiatedReturnRisk { return v.value } func (v *NullableBankInitiatedReturnRisk) Set(val *BankInitiatedReturnRisk) { v.value = val v.isSet = true } func (v NullableBankInitiatedReturnRisk) IsSet() bool { return v.isSet } func (v *NullableBankInitiatedReturnRisk) Unset() { v.value = nil v.isSet = false } func NewNullableBankInitiatedReturnRisk(val *BankInitiatedReturnRisk) *NullableBankInitiatedReturnRisk { return 
&NullableBankInitiatedReturnRisk{value: val, isSet: true} } func (v NullableBankInitiatedReturnRisk) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableBankInitiatedReturnRisk) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
plaid/model_bank_initiated_return_risk.go
0.765769
0.540863
model_bank_initiated_return_risk.go
starcoder
package ecmath import ( "crypto/subtle" "encoding/binary" "encoding/hex" "i10r.io/crypto/ed25519/internal/edwards25519" ) // Scalar is a 256-bit little-endian scalar. type Scalar [32]byte var ( // Zero is the number 0. Zero Scalar // One is the number 1. One = Scalar{1} Cofactor = Scalar{8} // NegOne is the number -1 mod L NegOne = Scalar{ 0xec, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, } // L is the subgroup order: // 2^252 + 27742317777372353535851937790883648493 L = Scalar{ 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58, 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, } ) // SetUint64 sets the scalar to a given integer value. // One-liner: s := (&ecmath.Scalar{}).SetUint64(n) func (s *Scalar) SetUint64(n uint64) *Scalar { *s = Zero binary.LittleEndian.PutUint64(s[:8], n) return s } // SetInt64 sets the scalar to a given integer value. func (s *Scalar) SetInt64(n int64) *Scalar { if n >= 0 { return s.SetUint64(uint64(n)) } return s.SetUint64(uint64(-n)) } // Add computes x+y (mod L) and places the result in z, returning // that. Any or all of x, y, and z may be the same pointer. func (z *Scalar) Add(x, y *Scalar) *Scalar { return z.MulAdd(x, &One, y) } // Mul computes x*y (mod L) and places the result in z, returning // that. Any or all of x, y, and z may be the same pointer. func (z *Scalar) Mul(x, y *Scalar) *Scalar { return z.MulAdd(x, y, &Zero) } // Sub computes x-y (mod L) and places the result in z, returning // that. Any or all of x, y, and z may be the same pointer. func (z *Scalar) Sub(x, y *Scalar) *Scalar { return z.MulAdd(y, &NegOne, x) } // Neg negates x (mod L) and places the result in z, returning that. X // and z may be the same pointer. 
func (z *Scalar) Neg(x *Scalar) *Scalar { return z.MulAdd(x, &NegOne, &Zero) } // MulAdd computes ab+c (mod L) and places the result in z, returning // that. Any or all of the pointers may be the same. func (z *Scalar) MulAdd(a, b, c *Scalar) *Scalar { edwards25519.ScMulAdd((*[32]byte)(z), (*[32]byte)(a), (*[32]byte)(b), (*[32]byte)(c)) return z } func (z *Scalar) Equal(x *Scalar) bool { return subtle.ConstantTimeCompare(x[:], z[:]) == 1 } // Prune performs the pruning operation in-place. func (z *Scalar) Prune() { z[0] &= 248 z[31] &= 127 z[31] |= 64 } // Reduce takes a 512-bit scalar and reduces it mod L, placing the // result in z and returning that. func (z *Scalar) Reduce(x *[64]byte) *Scalar { edwards25519.ScReduce((*[32]byte)(z), x) return z } func (s *Scalar) String() string { return hex.EncodeToString(s[:]) }
crypto/ed25519/ecmath/scalar.go
0.709422
0.472623
scalar.go
starcoder
package cmd

import (
	"fmt"
	"os"

	"github.com/jaredbancroft/aoc2020/pkg/helpers"
	"github.com/spf13/cobra"
)

// day9Cmd represents the day9 command
var day9Cmd = &cobra.Command{
	Use:   "day9",
	Short: "Advent of Code 2020 - Day9: Encoding Error",
	Long: `
Advent of Code 2020
--- Day 9: Encoding Error ---
With your neighbor happily enjoying their video game, you turn your attention to an open data port on the little screen in the seat in front of you.
Though the port is non-standard, you manage to connect it to your computer through the clever use of several paperclips.
Upon connection, the port outputs a series of numbers (your puzzle input).
The data appears to be encrypted with the eXchange-Masking Addition System (XMAS) which, conveniently for you, is an old cypher with an important weakness.
XMAS starts by transmitting a preamble of 25 numbers.
After that, each number you receive should be the sum of any two of the 25 immediately previous numbers.
The two numbers will have different values, and there might be more than one such pair.
For example, suppose your preamble consists of the numbers 1 through 25 in a random order.
To be valid, the next number must be the sum of two of those numbers:
26 would be a valid next number, as it could be 1 plus 25 (or many other pairs, like 2 and 24).
49 would be a valid next number, as it is the sum of 24 and 25.
100 would not be valid; no two of the previous 25 numbers sum to 100.
50 would also not be valid; although 25 appears in the previous 25 numbers, the two numbers in the pair must be different.
Suppose the 26th number is 45, and the first number (no longer an option, as it is more than 25 numbers ago) was 20.
Now, for the next number to be valid, there needs to be some pair of numbers among 1-19, 21-25, or 45 that add up to it:
26 would still be a valid next number, as 1 and 25 are still within the previous 25 numbers.
65 would not be valid, as no two of the available numbers sum to it.
64 and 66 would both be valid, as they are the result of 19+45 and 21+45 respectively.
Here is a larger example which only considers the previous 5 numbers (and has a preamble of length 5):
35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576
In this example, after the 5-number preamble, almost every number is the sum of two of the previous 5 numbers; the only number that does not follow this rule is 127.
The first step of attacking the weakness in the XMAS data is to find the first number in the list (after the preamble) which is not the sum of two of the 25 numbers before it.
What is the first number that does not have this property?
--- Part Two ---
The final step in breaking the XMAS encryption relies on the invalid number you just found:
you must find a contiguous set of at least two numbers in your list which sum to the invalid number from step 1.
Again consider the above example:
35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576
In this list, adding up all of the numbers from 15 through 40 produces the invalid number from step 1, 127.
(Of course, the contiguous set of numbers in your actual list might be much longer.)
To find the encryption weakness, add together the smallest and largest number in this contiguous range; in this example, these are 15 and 47, producing 62.
What is the encryption weakness in your XMAS-encrypted list of numbers?
`,
	RunE: func(cmd *cobra.Command, args []string) error {
		// `input` is the shared --input flag declared elsewhere in this package.
		codes, err := helpers.ReadIntFile(input)
		if err != nil {
			return err
		}

		const preamble = 25
		// weakness is the invalid number found by part 1 for this puzzle input,
		// hard-coded here and fed into part 2.
		const weakness = 1398413738

		// Part 1: print every number that is not a valid two-number sum.
		for i := preamble; i < len(codes); i++ {
			backtrack(i-preamble, i, codes, codes[i])
		}
		// Part 2: find the contiguous run summing to the invalid number.
		hack(codes, weakness)
		return nil
	},
}

func init() {
	rootCmd.AddCommand(day9Cmd)
}

// backtrack prints target if it is NOT the sum of any two distinct numbers
// in codes[start:end] (the XMAS validity rule from part 1).
func backtrack(start int, end int, codes []int, target int) {
	for i := start; i < end; i++ {
		// BUG FIX: the original inner loop also started at `start`, so a
		// number paired with itself (i == j) could wrongly validate 2*codes[i];
		// the puzzle requires two different numbers. Starting at i+1 also
		// avoids the old `good = target` flag, which false-positived when
		// target was 0.
		for j := i + 1; j < end; j++ {
			if codes[i]+codes[j] == target {
				return
			}
		}
	}
	fmt.Println(target)
}

// hack finds a contiguous run of at least two numbers in codes that sums to
// weakness, prints the sum of the run's minimum and maximum, and exits.
func hack(codes []int, weakness int) {
	for i := 0; i < len(codes); i++ {
		// Seed the run with codes[i]; a match is only reported after at
		// least one more element has been added (runs must have length >= 2).
		// BUG FIX: the original re-scanned codes[i:j] for every j (O(n^3))
		// and could fire on a single-element "run".
		sum := codes[i]
		min, max := codes[i], codes[i]
		for j := i + 1; j < len(codes); j++ {
			sum += codes[j]
			if codes[j] < min {
				min = codes[j]
			}
			if codes[j] > max {
				max = codes[j]
			}
			if sum == weakness {
				fmt.Println(min + max)
				os.Exit(0)
			}
		}
	}
}
cmd/day9.go
0.624294
0.521898
day9.go
starcoder
package geo

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
)

// Grid is a rectangular byte grid stored row-major in a single slice.
type Grid struct {
	Data   []byte // row-major cells; index = y*Width + x
	Width  int
	Height int
}

// NewGrid returns a zeroed grid of the given dimensions.
func NewGrid(width, height int) *Grid {
	return &Grid{
		Data:   make([]byte, width*height),
		Width:  width,
		Height: height,
	}
}

// ReadGrid reads a grid from r, one row per line. Every line must have the
// same length; the first line fixes the width.
func ReadGrid(r io.Reader) (*Grid, error) {
	grid := &Grid{}
	buf := bytes.NewBuffer(nil)
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		b := scanner.Bytes()
		if grid.Width == 0 {
			grid.Width = len(b)
		} else if grid.Width != len(b) {
			return nil, fmt.Errorf("bad line width")
		}
		grid.Height++
		buf.Write(b)
	}
	// BUG FIX: a scanner error (e.g. a line exceeding the scanner's buffer,
	// or an underlying read failure) previously went unnoticed and produced
	// a silently truncated grid.
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	grid.Data = buf.Bytes()
	return grid, nil
}

// Reset sets every cell to b and returns the grid for chaining.
func (c *Grid) Reset(b byte) *Grid {
	for i := range c.Data {
		c.Data[i] = b
	}
	return c
}

// Normalize subtracts b from every cell in place (e.g. b = '0' to turn
// ASCII digits into numeric values) and returns the grid for chaining.
func (c *Grid) Normalize(b byte) *Grid {
	for i, a := range c.Data {
		c.Data[i] = a - b
	}
	return c
}

// Len returns the total number of cells in the grid.
func (c *Grid) Len() int {
	return len(c.Data)
}

// Index returns the Data index for pos, or -1 if pos is out of bounds.
func (c *Grid) Index(pos Pos) int {
	if !c.Contains(pos) {
		return -1
	}
	return (pos.Y * c.Width) + pos.X
}

// Indexes maps each position to its Data index (out-of-bounds -> -1).
func (c *Grid) Indexes(positions ...Pos) []int {
	a := make([]int, len(positions))
	for i, pos := range positions {
		a[i] = c.Index(pos)
	}
	return a
}

// Pos converts a Data index back into a position; panics if i is out of range.
func (c *Grid) Pos(i int) Pos {
	if i < 0 || i >= len(c.Data) {
		panic(fmt.Sprintf("index out of bounds: %d", i))
	}
	return Pos{i % c.Width, i / c.Width}
}

// Contains reports whether p lies within the grid.
func (c *Grid) Contains(p Pos) bool {
	return p.X >= 0 && p.X < c.Width && p.Y >= 0 && p.Y < c.Height
}

// Adj returns a slice of all adjascent positions to the given position,
// including diagonals.
func (c *Grid) Adj(p Pos) []Pos {
	// At most 8 neighbours exist; capacity 9 covers the 3x3 window.
	a := make([]Pos, 0, 9)
	for y := 0; y < 3; y++ {
		for x := 0; x < 3; x++ {
			p2 := Pos{
				X: p.X - 1 + x,
				Y: p.Y - 1 + y,
			}
			if p == p2 {
				continue // skip the centre cell itself
			}
			if c.Contains(p2) {
				a = append(a, p2)
			}
		}
	}
	return a
}

// UDLR (up, down, left, right) returns a slice of all adjacent positions to the
// given position, excluding diagonals.
func (c *Grid) UDLR(p Pos) []Pos {
	a := make([]Pos, 0, 4)
	// PosUDLR is the package-level set of the four cardinal offsets —
	// presumably {up, down, left, right}; defined elsewhere in this package.
	for _, dir := range PosUDLR {
		if p2 := p.Add(dir); c.Contains(p2) {
			a = append(a, p2)
		}
	}
	return a
}

// Get returns the value of the cell at p or 0 if it is out of bounds.
func (c *Grid) Get(p Pos) byte {
	i := c.Index(p)
	if i < 0 {
		return 0
	}
	return c.Data[i]
}

// GetWithDefault returns the cell at p, or value if p is out of bounds.
func (c *Grid) GetWithDefault(p Pos, value byte) byte {
	i := c.Index(p)
	if i < 0 {
		return value
	}
	return c.Data[i]
}

// MaybeGet returns the cell at p and true, or (0, false) if p is out of bounds.
func (c *Grid) MaybeGet(p Pos) (b byte, ok bool) {
	i := c.Index(p)
	if i < 0 {
		return
	}
	return c.Data[i], true
}

// MustGet returns the cell at p, panicking if p is out of bounds.
func (c *Grid) MustGet(p Pos) byte {
	i := c.Index(p)
	if i < 0 {
		panic(fmt.Sprintf("position out of bounds: %v", p))
	}
	return c.Data[i]
}

// Set writes b into the cell at p, panicking if p is out of bounds.
func (c *Grid) Set(p Pos, b byte) {
	i := c.Index(p)
	if i < 0 {
		panic(fmt.Sprintf("position out of bounds: %v", p))
	}
	c.Data[i] = b
}

// Format writes the grid to w, one row per line.
// Write errors are ignored (best-effort, mirrors fmt-style formatting).
func (c *Grid) Format(w io.Writer) {
	newline := []byte{'\n'}
	for y := 0; y < c.Height; y++ {
		i := y * c.Width
		w.Write(c.Data[i : i+c.Width])
		w.Write(newline)
	}
}

// String renders the grid as newline-separated rows.
func (c *Grid) String() string {
	b := new(bytes.Buffer)
	c.Format(b)
	return b.String()
}
go/internal/geo/grid.go
0.748076
0.478407
grid.go
starcoder
package indicators

import (
	"errors"

	"github.com/jaybutera/gotrade"
)

// An Exponential Moving Average Indicator (Ema), no storage, for use in other indicators
type EmaWithoutStorage struct {
	*baseIndicatorWithFloatBounds

	// private variables
	periodTotal   float64 // running total of the first timePeriod values (seeds the EMA with an SMA)
	periodCounter int     // starts at -timePeriod; EMA values are emitted once it reaches 0
	multiplier    float64 // smoothing factor 2/(timePeriod+1)
	previousEma   float64 // last emitted EMA value
	timePeriod    int
}

// NewEmaWithoutStorage creates an Exponential Moving Average Indicator (Ema) without storage.
// timePeriod must be in [2, MaximumLookbackPeriod]; valueAvailableAction receives each
// computed EMA value and must not be nil.
func NewEmaWithoutStorage(timePeriod int, valueAvailableAction ValueAvailableActionFloat) (indicator *EmaWithoutStorage, err error) {
	// an indicator without storage MUST have a value available action
	if valueAvailableAction == nil {
		return nil, ErrValueAvailableActionIsNil
	}

	// the minimum timeperiod for this indicator is 2
	if timePeriod < 2 {
		return nil, errors.New("timePeriod is less than the minimum (2)")
	}

	// check the maximum timeperiod
	if timePeriod > MaximumLookbackPeriod {
		return nil, errors.New("timePeriod is greater than the maximum (100000)")
	}

	lookback := timePeriod - 1
	ind := EmaWithoutStorage{
		baseIndicatorWithFloatBounds: newBaseIndicatorWithFloatBounds(lookback, valueAvailableAction),
		periodCounter:                timePeriod * -1,
		multiplier:                   float64(2.0 / float64(timePeriod+1.0)),
		timePeriod:                   timePeriod,
	}
	return &ind, nil
}

// An Exponential Moving Average Indicator (Ema)
type Ema struct {
	*EmaWithoutStorage
	selectData gotrade.DOHLCVDataSelectionFunc // extracts the price component to average from each tick

	// public variables
	Data []float64
}

// NewEma creates an Exponential Moving Average Indicator (Ema) for online usage.
// Computed values are appended to the returned indicator's Data slice.
func NewEma(timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Ema, err error) {
	if selectData == nil {
		return nil, ErrDOHLCVDataSelectFuncIsNil
	}

	ind := Ema{
		selectData: selectData,
	}
	ind.EmaWithoutStorage, err = NewEmaWithoutStorage(timePeriod,
		func(dataItem float64, streamBarIndex int) {
			ind.Data = append(ind.Data, dataItem)
		})
	if err != nil {
		// BUG FIX: previously a partially initialised indicator was
		// returned alongside a non-nil error.
		return nil, err
	}
	return &ind, nil
}

// NewDefaultEma creates an Exponential Moving Average (Ema) for online usage with default parameters
//   - timePeriod: 25
func NewDefaultEma() (indicator *Ema, err error) {
	timePeriod := 25
	return NewEma(timePeriod, gotrade.UseClosePrice)
}

// initStorage pre-allocates ind.Data when sourceLength leaves room for at
// least one value beyond the lookback period.
func initStorage(ind *Ema, sourceLength uint) {
	// BUG FIX: the former expression `sourceLength - lookback > 1` underflowed
	// (both operands unsigned) whenever sourceLength <= lookback, allocating an
	// enormous backing array. Compare before subtracting instead.
	lookback := uint(ind.GetLookbackPeriod())
	if sourceLength > lookback+1 {
		ind.Data = make([]float64, 0, sourceLength-lookback)
	}
}

// NewEmaWithSrcLen creates an Exponential Moving Average (Ema) for offline usage
func NewEmaWithSrcLen(sourceLength uint, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Ema, err error) {
	ind, err := NewEma(timePeriod, selectData)
	if err != nil {
		// BUG FIX: previously ind was used even when the constructor failed.
		return nil, err
	}

	// only initialise the storage if there is enough source data to require it
	initStorage(ind, sourceLength)
	return ind, nil
}

// NewDefaultEmaWithSrcLen creates an Exponential Moving Average (Ema) for offline usage with default parameters
func NewDefaultEmaWithSrcLen(sourceLength uint) (indicator *Ema, err error) {
	ind, err := NewDefaultEma()
	if err != nil {
		return nil, err
	}

	// only initialise the storage if there is enough source data to require it
	initStorage(ind, sourceLength)
	return ind, nil
}

// NewEmaForStream creates an Exponential Moving Average (Ema) for online usage with a source data stream
func NewEmaForStream(priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Ema, err error) {
	ind, err := NewEma(timePeriod, selectData)
	if err != nil {
		// BUG FIX: previously a nil indicator could be subscribed to the stream.
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}

// NewDefaultEmaForStream creates an Exponential Moving Average (Ema) for online usage with a source data stream
func NewDefaultEmaForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *Ema, err error) {
	ind, err := NewDefaultEma()
	if err != nil {
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}

// NewEmaForStreamWithSrcLen creates an Exponential Moving Average (Ema) for offline usage with a source data stream
func NewEmaForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int, selectData gotrade.DOHLCVDataSelectionFunc) (indicator *Ema, err error) {
	ind, err := NewEmaWithSrcLen(sourceLength, timePeriod, selectData)
	if err != nil {
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}

// NewDefaultEmaForStreamWithSrcLen creates an Exponential Moving Average (Ema) for offline usage with a source data stream
func NewDefaultEmaForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *Ema, err error) {
	ind, err := NewDefaultEmaWithSrcLen(sourceLength)
	if err != nil {
		return nil, err
	}
	priceStream.AddTickSubscription(ind)
	return ind, nil
}

// ReceiveDOHLCVTick consumes a source data DOHLCV price tick
func (ind *Ema) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) {
	var selectedData = ind.selectData(tickData)
	ind.ReceiveTick(selectedData, streamBarIndex)
}

// ReceiveTick consumes one raw float value. The first timePeriod values are
// accumulated into an SMA that seeds the EMA; after that each value updates
// the EMA via previous + (value-previous)*multiplier.
func (ind *EmaWithoutStorage) ReceiveTick(tickData float64, streamBarIndex int) {
	ind.periodCounter += 1
	if ind.periodCounter < 0 {
		// still filling the seed window
		ind.periodTotal += tickData
	} else if ind.periodCounter == 0 {
		// seed window complete: emit the simple average as the first EMA
		ind.periodTotal += tickData
		result := ind.periodTotal / float64(ind.timePeriod)
		ind.previousEma = result
		ind.UpdateIndicatorWithNewValue(result, streamBarIndex)
	} else if ind.periodCounter > 0 {
		result := (tickData-ind.previousEma)*ind.multiplier + ind.previousEma
		ind.previousEma = result
		ind.UpdateIndicatorWithNewValue(result, streamBarIndex)
	}
}
indicators/ema.go
0.760295
0.469399
ema.go
starcoder
package throttler

import (
	"fmt"
	"time"
)

// intervalHistory stores a value per interval over time.
// For example, thread_trottler.go stores the number of requests per 1 second
// interval in an intervalHistory instance.
// This data is used by the MaxReplicationLagModule to determine the historic
// average value between two arbitrary points in time e.g. to find out the
// average actual throttler rate between two replication lag measurements.
// In general, the history should reflect only a short period of time (on the
// order of minutes) and is therefore bounded.
type intervalHistory struct {
	// records holds one entry per recorded interval, in chronological order.
	// The `record` type (with `time` and `value` fields) is declared elsewhere
	// in this package.
	records           []record
	interval          time.Duration
	// nextIntervalStart is the earliest time the next add() is allowed to
	// cover; add() panics on overlap.
	nextIntervalStart time.Time
}

// newIntervalHistory returns a history with the given initial capacity and
// per-entry interval length.
func newIntervalHistory(capacity int64, interval time.Duration) *intervalHistory {
	return &intervalHistory{
		records:  make([]record, 0, capacity),
		interval: interval,
	}
}

// add appends a record whose time must be aligned to the interval boundary
// and must not overlap a previously added interval; it panics otherwise.
// It is up to the programmer to ensure that two add() calls do not cover the
// same interval.
func (h *intervalHistory) add(record record) {
	if record.time.Before(h.nextIntervalStart) {
		panic(fmt.Sprintf("BUG: cannot add record because it is already covered by a previous entry. record: %v next expected interval start: %v", record, h.nextIntervalStart))
	}
	if !record.time.Truncate(h.interval).Equal(record.time) {
		panic(fmt.Sprintf("BUG: cannot add record because it does not start at the beginning of the interval. record: %v", record))
	}

	// TODO(mberlin): Bound the list.
	h.records = append(h.records, record)
	h.nextIntervalStart = record.time.Add(h.interval)
}

// average returns the average value across all observations which span
// the range [from, to).
// Partially included observations are accounted by their included fraction.
// Missing observations are assumed with the value zero.
//
// NOTE(review): if no record falls within [start, end], count stays 0 and the
// result is NaN (0/0) — presumably callers guarantee overlap; verify.
func (h *intervalHistory) average(from, to time.Time) float64 {
	// Search only entries whose time of observation is in [start, end).
	// Example: [from, to) = [1.5s, 2.5s) => [start, end) = [1s, 2s)
	start := from.Truncate(h.interval)
	end := to.Truncate(h.interval)

	sum := 0.0
	count := 0.0
	var nextIntervalStart time.Time
	// Walk records newest-to-oldest; records are chronological, so we can
	// skip entries after `end` and stop at the first entry before `start`.
	for i := len(h.records) - 1; i >= 0; i-- {
		t := h.records[i].time

		if t.After(end) {
			continue
		}
		if t.Before(start) {
			break
		}

		// Account for intervals which were not recorded.
		// (Gaps count toward the denominator with an implicit value of zero.)
		if !nextIntervalStart.IsZero() {
			uncoveredRange := nextIntervalStart.Sub(t)
			count += float64(uncoveredRange / h.interval)
		}

		// If an interval is only partially included, count only that fraction.
		durationAfterTo := t.Add(h.interval).Sub(to)
		if durationAfterTo < 0 {
			durationAfterTo = 0
		}
		durationBeforeFrom := from.Sub(t)
		if durationBeforeFrom < 0 {
			durationBeforeFrom = 0
		}
		weight := float64((h.interval - durationBeforeFrom - durationAfterTo).Nanoseconds()) / float64(h.interval.Nanoseconds())

		sum += weight * float64(h.records[i].value)
		count += weight
		// Expected start of the next-older record; used to detect gaps above.
		nextIntervalStart = t.Add(-1 * h.interval)
	}

	return float64(sum) / count
}
go/vt/throttler/interval_history.go
0.693369
0.545286
interval_history.go
starcoder
package vo2solve

import (
	"math"
	"math/cmplx"
)

import (
	"github.com/tflovorn/cmatrix"
	vec "github.com/tflovorn/scExplorer/vector"
)

// Calculate 4x4 electronic Hamiltonian.
// k is in the Cartesian basis, with each component scaled by the corresponding
// lattice constant; i.e. k = (a kx, a ky, c kz) and a kx, a ky, c kz range
// over [-pi, pi) and periodic copies of this interval.
func ElHamiltonian(env *Environment, k vec.Vector) cmatrix.CMatrix {
	// Q = (pi, pi, pi); k.Add(&KQ) accumulates k into KQ in place
	// (assumed semantics of vec.Vector.Add — TODO confirm against the
	// scExplorer vector package).
	KQ := vec.Vector{math.Pi, math.Pi, math.Pi}
	k.Add(&KQ)
	// now KQ = k + Q

	// Dispersion terms at k (and at k+Q where the odd terms couple).
	EpsAE := EpsilonAE(env, k)
	EpsBE := EpsilonBE(env, k)
	EpsBE_KQ := EpsilonBE(env, KQ)
	EpsAO := EpsilonAO(env, k)
	EpsBO := EpsilonBO(env, k)
	// Phase factor from the body-diagonal offset d = (1/2, 1/2, 1/2).
	ikd := complex(0.0, k[0]/2.0+k[1]/2.0+k[2]/2.0)
	mu := complex(env.Mu, 0.0)
	// Diagonal on-site energy: W-weighted mix of the R and M phase values,
	// shifted by the chemical potential.
	ident_part := complex((1.0-env.W)*env.EpsilonR+env.W*env.EpsilonM, 0.0) - mu

	// Fill the 4x4 matrix column by column; off-diagonal blocks carry the
	// body-diagonal phase exp(±i k·d).
	H := cmatrix.InitSliceCMatrix(4, 4)
	H[0][0] = EpsAE + ident_part
	H[1][0] = 2.0 * EpsAO
	H[2][0] = EpsBE * cmplx.Exp(-ikd)
	H[3][0] = -cmplx.Conj(EpsBO) * cmplx.Exp(-ikd)
	H[0][1] = -2.0 * EpsAO
	H[1][1] = -EpsAE + ident_part
	H[2][1] = cmplx.Conj(EpsBO) * cmplx.Exp(-ikd)
	H[3][1] = complex(0.0, 1.0) * EpsBE_KQ * cmplx.Exp(-ikd)
	H[0][2] = EpsBE * cmplx.Exp(ikd)
	H[1][2] = EpsBO * cmplx.Exp(ikd)
	H[2][2] = EpsAE + ident_part
	H[3][2] = 2.0 * EpsAO
	H[0][3] = -EpsBO * cmplx.Exp(ikd)
	H[1][3] = complex(0.0, -1.0) * EpsBE_KQ * cmplx.Exp(ikd)
	H[2][3] = -2.0 * EpsAO
	H[3][3] = -EpsAE + ident_part
	return H
}

// Cubic axes, even symmetry (k, p; k, p)
func EpsilonAE(env *Environment, k vec.Vector) complex128 {
	// Purely real: nearest-neighbour hopping along the cubic axes
	// (Tae in-plane, Tce along z).
	rp := -2.0 * (env.Tae*(math.Cos(k[0])+math.Cos(k[1])) + env.Tce*math.Cos(k[2]))
	return complex(rp, 0.0)
}

// Body diagonal, even symmetry (k, p; k, pbar)
func EpsilonBE(env *Environment, k vec.Vector) complex128 {
	// Purely real: hopping along the 8 body-diagonal neighbours.
	rp := -8.0 * env.Tbe * math.Cos(k[0]/2.0) * math.Cos(k[1]/2.0) * math.Cos(k[2]/2.0)
	return complex(rp, 0.0)
}

// Cubic axes, odd symmetry (k, p; k+Q, p)
func EpsilonAO(env *Environment, k vec.Vector) complex128 {
	// Purely imaginary and proportional to the order parameter M.
	ip := -2.0 * env.M *
		(env.Tao*(math.Sin(k[0])+math.Sin(k[1])) + env.Tco*math.Sin(k[2]))
	return complex(0.0, ip)
}

// Body diagonal, odd symmetry (k, p; k+Q, pbar)
func EpsilonBO(env *Environment, k vec.Vector) complex128 {
	// Complex: cos-product real part, sin-product imaginary part, both
	// proportional to the order parameter M.
	rp := -8.0 * env.M * env.Tbo * math.Cos(k[0]/2.0) * math.Cos(k[1]/2.0) * math.Cos(k[2]/2.0)
	ip := 8.0 * env.M * env.Tbo * math.Sin(k[0]/2.0) * math.Sin(k[1]/2.0) * math.Sin(k[2]/2.0)
	return complex(rp, ip)
}
vo2solve/elHamiltonian.go
0.684159
0.508605
elHamiltonian.go
starcoder
package activation import ( "github.com/TrizlyBear/PWS/math" ) // Maximum pooling layer type MaxPooling struct { max [][][]struct{ y int x int } } func (e *MaxPooling) Forward(in [][][]float64) [][][]float64{ (*e).max = [][][]struct{ y int x int }{} out := [][][]float64{} for l,_ := range in { lout := make([][]float64, len(in[l]) / 2) iout := make([][]struct{ y int x int }, len(in[l]) / 2) for y := 0; y < len(in[l]) - 1; y += 2 { for x := 0; x < len(in[l][y]) - 1; x += 2 { o, ox, oy := math.MaxIndex([][]float64{in[l][y][x:x+2], in[l][y+1][x:x+2]}) lout[y / 2] = append(lout[y / 2], o) iout[y / 2] = append(iout[y / 2], struct { y int x int }{oy + y, ox + x}) } } (*e).max = append((*e).max, iout) out = append(out, lout) } return out } func (e *MaxPooling) Backward(error [][][]float64, lr float64) [][][]float64 { out := [][][]float64{} for l,_ := range error { lout := math.Zeros(len(error[l])*2,len(error[l][0])*2).([][]float64) for y,_ := range error[l] { for x,_ := range error[l][y] { lout[e.max[l][y][x].y][e.max[l][y][x].x] += error[l][y][x] } } out = append(out, lout) } return out } // Maximum pooling forward function /* func (e MaxPooling) Forward(in [][]float64) ([][]float64, error) { if len(in[0]) != len(in) { return [][]float64{{-1}}, errors.New("Input is not a square") } var outsize = (len(in)-e.Ksize)/e.Stride + 1 var out = make([][]float64, outsize) for y, _ := range in { for x, _ := range in[y] { if y%(e.Stride) == 0 && x%(e.Stride) == 0 && x+1 < len(in) && y+1 < len(in[x]) { var all = make([]float64, 0) var q = 1 for q := q; q < e.Ksize+1; q++ { var w = 1 for w := w; w < e.Ksize+1; w++ { all = append(all, in[q+y-1][w+x-1]) } } var av = math.Max(all) out[y/e.Stride] = append(out[y/e.Stride], av) } } } return out, nil } */ // Average pooling layer type AvgPooling struct { Ksize int Stride int } // Average pooling forward function func (e AvgPooling) Forward(in [][][]float64) [][][]float64 { out := [][][]float64{} for l,_ := range in { lout := 
make([][]float64, len(in[l]) / 2) for y := 0; y < len(in[l]); y += 2 { for x := 0; x < len(in[l][y]); x += 2 { lout[y/2] = append(lout[y/2], math.Mean(append(in[l][y][x:x+2], in[l][y+1][x:x+2]...))) } } out = append(out, lout) } return out } func (e AvgPooling) Backward(error [][][]float64, lr float64) [][][]float64 { out := math.Zeros(len(error), len(error[0]) * 2, len(error[0][0]) * 2).([][][]float64) for l,_ := range error { for y,_ := range error[l] { for x,_ := range error[l][y] { dev := error[l][y][x]/4 out[l][y*2][x*2] , out[l][y*2+1][x*2] , out[l][y*2][x*2+1] , out[l][y*2+1][x*2+1] = dev, dev, dev, dev } } } return out }
sequential/activation/pooling.go
0.648911
0.444083
pooling.go
starcoder
package unityai const MAX_OUTPUT_VERTICES = 32 const PLANE_FLAG byte = 0x80 const PLANE_INDEX_MASK = PLANE_FLAG - 1 func DegenerateTriangle(tri Polygon) bool { Assert(len(tri) == 3) ab := tri[1].Sub(tri[0]) ac := tri[2].Sub(tri[0]) n := Cross(ab, ac) areaSq := SqrMagnitude(n) return areaSq == 0 } func IsSafeConvex(vertices []Vector3f) bool { vertexCount := int32(len(vertices)) for i := int32(0); i < vertexCount; i++ { v0 := vertices[PrevIndex(i, vertexCount)] v1 := vertices[i] v2 := vertices[NextIndex(i, vertexCount)] triArea := TriArea2D(v0, v1, v2) if triArea <= 1e-2 { return false } } return true } func FindFurthest(plane Plane, vertices []Vector3f, quantFactor float32) int { bestIndex := -1 bestDist := quantFactor for iv := 0; iv < len(vertices); iv++ { dist := plane.GetDistanceToPoint(vertices[iv]) if dist > bestDist { bestDist = dist bestIndex = iv } } return bestIndex } func PolygonDegenerate(vertexCount int32, indices []uint16, vertices []Vector3f, quantFactor float32) bool { if vertexCount < 3 { return true } area := float32(0.0) maxSideSq := float32(0.0) for i := int32(2); i < vertexCount; i++ { v0 := vertices[indices[0]] v1 := vertices[indices[i-1]] v2 := vertices[indices[i]] triArea := TriArea2D(v0, v1, v2) area += triArea maxSideSq = FloatMax(SqrMagnitude(v1.Sub(v0)), maxSideSq) maxSideSq = FloatMax(SqrMagnitude(v2.Sub(v0)), maxSideSq) } if area <= 0 { return true } safety := 1e-2 * quantFactor return area*area <= safety*safety*maxSideSq } func (this *DynamicMesh) CreatePolygon(vertices Polygon, status PolyStatus) Poly { vertexCount := int32(len(vertices)) Assert(vertexCount <= kNumVerts) Assert(vertexCount > 2) // Ensure neighbour ids are zero'ed newPoly := Poly{} newPoly.m_VertexCount = uint8(vertexCount) newPoly.m_Status = PolyStatus(status) for i := int32(0); i < vertexCount; i++ { vi := this.m_Welder.AddUnique(vertices[i]) Assert(vi < 0xffff) //< vertex overflow newPoly.m_VertexIDs[i] = uint16(vi) } return newPoly } func (this *DynamicMesh) 
RemovePolygonUnordered(i int) { Assert(i < len(this.m_Polygons)) Assert(len(this.m_Data) == len(this.m_Polygons)) this.m_Polygons[i] = this.m_Polygons[len(this.m_Polygons)-1] this.m_Polygons = this.m_Polygons[:len(this.m_Polygons)-1] this.m_Data[i] = this.m_Data[len(this.m_Data)-1] this.m_Data = this.m_Data[:len(this.m_Data)-1] } func (this *DynamicMesh) CollapseEdge(va, vb int) { for i := 0; i < len(this.m_Polygons); i++ { poly := &this.m_Polygons[i] for j := uint8(0); j < poly.m_VertexCount; j++ { if poly.m_VertexIDs[j] == uint16(va) { poly.m_VertexIDs[j] = uint16(vb) } } } } func (this *DynamicMesh) CollapsePolygonUnordered(ip int) { Assert(ip < len(this.m_Polygons)) Assert(len(this.m_Data) == len(this.m_Polygons)) poly := this.m_Polygons[ip] var edgeLengths [kNumVerts]float32 for i := uint8(0); i < poly.m_VertexCount; i++ { j := uint8(0) if i+1 < poly.m_VertexCount { j = i + 1 } va := this.m_Vertices[poly.m_VertexIDs[i]] vb := this.m_Vertices[poly.m_VertexIDs[j]] edgeLengths[i] = SqrMagnitude(va.Sub(vb)) } // Collapse polygon to line, by collapsing the shortest edge at a time. 
for poly.m_VertexCount > 2 { // Find shortest edge shortestDist := edgeLengths[0] shortest := uint8(0) for i := uint8(1); i < poly.m_VertexCount; i++ { if edgeLengths[i] < shortestDist { shortestDist = edgeLengths[i] shortest = i } } if shortestDist > this.m_QuantFactor*this.m_QuantFactor { break } next := uint8(0) if shortest+1 < poly.m_VertexCount { next = shortest + 1 } va := poly.m_VertexIDs[shortest] vb := poly.m_VertexIDs[next] // Collapse edge va->vb if va != vb { this.CollapseEdge(int(va), int(vb)) } for i := shortest; i < poly.m_VertexCount-1; i++ { edgeLengths[i] = edgeLengths[i+1] poly.m_VertexIDs[i] = poly.m_VertexIDs[i+1] } poly.m_VertexCount-- } this.RemovePolygonUnordered(ip) } func SplitPoly(inside *Polygon, poly Polygon, plane Plane, quantFactor float32, usedEdges []byte, ip int32) int32 { vertexCount := len(poly) // Worst case number of vertices is kNumVerts + hull clipping planes Assert(vertexCount < MAX_OUTPUT_VERTICES) var dist [MAX_OUTPUT_VERTICES]float32 // Compute signed distance to plane for each vertex distance := plane.GetDistanceToPoint(poly[0]) if FloatAbs(distance) < quantFactor { distance = 0 } var minDistance, maxDistance float32 minDistance = distance maxDistance = distance dist[0] = distance for iv := 1; iv < vertexCount; iv++ { v := poly[iv] distance = plane.GetDistanceToPoint(v) if FloatAbs(distance) < quantFactor { distance = 0 } minDistance = FloatMin(minDistance, distance) maxDistance = FloatMax(maxDistance, distance) dist[iv] = distance } // all points inside - accept if maxDistance <= 0 { return -1 } // all points outside - reject if minDistance > 0 { return 1 } // single point co-planar - accept if vertexCount == 1 { return -1 } // points are straddling plane - split if usedEdges != nil { SplitPolyAndGetUsedEdges(int32(vertexCount), dist[:], inside, poly, plane, usedEdges, ip) } else { SplitPolyInternal(int32(vertexCount), dist[:], inside, poly, plane) } return 0 } func SplitPolyAndGetUsedEdges(vertexCount int32, dist 
[]float32, inside *Polygon, poly Polygon, plane Plane, usedEdges []byte, ip int32) { Assert(vertexCount == int32(len(poly))) Assert(vertexCount > 1) Assert(byte(ip) < PLANE_FLAG) inside.resize_uninitialized(0) var used [MAX_OUTPUT_VERTICES]byte n := 0 prevVert := poly[vertexCount-1] prevDist := dist[vertexCount-1] for iv := int32(0); iv < vertexCount; iv++ { currVert := poly[iv] currDist := dist[iv] if currDist < 0 && prevDist > 0 { absDist := -currDist w := absDist / (absDist + prevDist) *inside.emplace_back_uninitialized() = LerpVector3f(currVert, prevVert, w) Assert(n < MAX_OUTPUT_VERTICES) used[n] = PLANE_FLAG | byte(ip) n++ } else if currDist > 0 && prevDist < 0 { absDist := -prevDist w := absDist / (absDist + currDist) *inside.emplace_back_uninitialized() = LerpVector3f(prevVert, currVert, w) Assert(n < MAX_OUTPUT_VERTICES) used[n] = usedEdges[iv] n++ } if currDist <= 0 { inside.push_back(currVert) Assert(n < MAX_OUTPUT_VERTICES) if prevDist > 0 && currDist == 0 { used[n] = PLANE_FLAG | byte(ip) n++ } else { used[n] = usedEdges[iv] n++ } } prevVert = currVert prevDist = currDist } Assert(n == len(*inside)) copy(usedEdges[:n], used[:n]) } func SplitPolyInternal(vertexCount int32, dist []float32, inside *Polygon, poly Polygon, plane Plane) { Assert(int(vertexCount) == len(poly)) Assert(vertexCount > 1) inside.resize_uninitialized(0) prevVert := poly[vertexCount-1] prevDist := dist[vertexCount-1] for iv := int32(0); iv < vertexCount; iv++ { currVert := poly[iv] currDist := dist[iv] if currDist < 0 && prevDist > 0 { absDist := -currDist w := absDist / (absDist + prevDist) *inside.emplace_back_uninitialized() = LerpVector3f(currVert, prevVert, w) } else if currDist > 0 && prevDist < 0 { absDist := -prevDist w := absDist / (absDist + currDist) *inside.emplace_back_uninitialized() = LerpVector3f(prevVert, currVert, w) } if currDist <= 0 { inside.push_back(currVert) } prevVert = currVert prevDist = currDist } } func (this *DynamicMesh) Intersection(inside *Polygon, 
carveHull Hull, temp *Polygon, usedEdges []byte) { planeCount := len(carveHull) // Prime the edge references for the outer polygon for i := 0; i < len(*inside); i++ { usedEdges[i] = byte(i) } for ip := 0; ip < planeCount; ip++ { plane := carveHull[ip] result := SplitPoly(temp, *inside, plane, this.m_QuantFactor, usedEdges, int32(ip)) if result == 0 { inside.resize_uninitialized(len(*temp)) copy(*inside, *temp) } else if result == 1 { inside.resize_uninitialized(0) return } } } func (this *DynamicMesh) FromPoly(result *Polygon, poly *Poly) { Assert(poly.m_VertexCount > 2) Assert(poly.m_VertexCount <= kNumVerts) vertexCount := poly.m_VertexCount result.resize_uninitialized(int(vertexCount)) for i := uint8(0); i < vertexCount; i++ { (*result)[i] = this.GetVertex(int(poly.m_VertexIDs[i])) } } func (this *DynamicMesh) BuildEdgeConnections(edges *EdgeList) { polyCount := len(this.m_Polygons) maxEdges := polyCount * kNumVerts Assert(len(*edges) == 0) edges.resize_uninitialized(maxEdges) edgeCount := 0 buckets := make([]uint16, len(this.m_Vertices)) for i := range buckets { buckets[i] = 0xffff } next := make([]uint16, maxEdges) for i := range next { next[i] = 0xffff } // Add edges for polys when previous vertex index is less than current vertex index for ip := 0; ip < polyCount; ip++ { poly := this.m_Polygons[ip] vertexCount := poly.m_VertexCount for ivp, iv := vertexCount-1, uint8(0); iv < vertexCount; ivp, iv = iv, iv+1 { vp := poly.m_VertexIDs[ivp] v := poly.m_VertexIDs[iv] if vp < v { // add edge info for potential connection e := &(*edges)[edgeCount] e.v1 = vp e.v2 = v e.p1 = uint16(ip) e.p2 = 0xffff e.c1 = uint16(ivp) e.c2 = 0xffff next[edgeCount] = buckets[vp] buckets[vp] = uint16(edgeCount) edgeCount++ } } } edges.resize_uninitialized(edgeCount) // Look up matching edge when current vertex index is less than previous vertex index for ip := 0; ip < polyCount; ip++ { poly := this.m_Polygons[ip] vertexCount := poly.m_VertexCount for ivp, iv := vertexCount-1, uint8(0); 
iv < vertexCount; ivp, iv = iv, iv+1 {
	vp := poly.m_VertexIDs[ivp]
	v := poly.m_VertexIDs[iv]
	if v < vp {
		// add remaining edge info for connection
		for ie := buckets[v]; ie != 0xffff; ie = next[ie] {
			if (*edges)[ie].v1 == v && (*edges)[ie].v2 == vp {
				(*edges)[ie].p2 = uint16(ip)
				(*edges)[ie].c2 = uint16(ivp)
				break
			}
		}
	}
}
}
}

// Subtract emits the triangles of outer minus inner into result.
// usedEdges maps each inner edge either to an outer vertex index or (via
// PLANE_FLAG) to a clip plane of hull; tri is scratch space for one triangle.
func (this *DynamicMesh) Subtract(result *PolygonContainer, outer Polygon, inner *Polygon, tri *Polygon, usedEdges []byte, hull Hull) {
	innerVertexCount := len(*inner)
	outerVertexCount := len(outer)
	result.clear()
	tri.resize_uninitialized(3)
	// Mark outer vertices already consumed by an inner edge.
	used := make([]bool, outerVertexCount)
	for i := 0; i < innerVertexCount; i++ {
		if (PLANE_FLAG & usedEdges[i]) != 0 {
			continue
		}
		Assert(usedEdges[i] < byte(outerVertexCount))
		used[usedEdges[i]] = true
	}
	// Degenerate inner (single point): fan out to every free outer edge.
	if innerVertexCount == 1 {
		Assert(outerVertexCount > 0)
		for ov := 0; ov < outerVertexCount; ov++ {
			if used[ov] {
				continue
			}
			ovn := NextIndex(int32(ov), int32(outerVertexCount))
			(*tri)[0] = (*inner)[0]
			(*tri)[1] = outer[ov]
			(*tri)[2] = outer[ovn]
			if DegenerateTriangle(*tri) {
				continue
			}
			result.push_back(tri.clone())
		}
		return
	}
	// ol/oh: per inner vertex, the low/high outer vertex bracketing it.
	ol := make([]int32, innerVertexCount)
	for i := range ol {
		ol[i] = -1
	}
	oh := make([]int32, innerVertexCount)
	for i := range oh {
		oh[i] = -1
	}
	// For inner edges produced by a clip plane, bridge to the furthest outer vertex.
	for ivp, iv := innerVertexCount-1, 0; iv < innerVertexCount; ivp, iv = iv, iv+1 {
		if (PLANE_FLAG & usedEdges[iv]) == 0 {
			continue
		}
		ie := usedEdges[iv] & PLANE_INDEX_MASK
		plane := hull[ie]
		bestOuter := FindFurthest(plane, outer, this.m_QuantFactor)
		if bestOuter == -1 {
			continue
		}
		ol[iv] = int32(bestOuter)
		oh[ivp] = int32(bestOuter)
		(*tri)[0] = (*inner)[iv]
		(*tri)[1] = (*inner)[ivp]
		(*tri)[2] = outer[bestOuter]
		if DegenerateTriangle(*tri) {
			continue
		}
		result.push_back(tri.clone())
	}
	// Walk each bracket in both directions, fanning triangles until blocked.
	for iv := 0; iv < innerVertexCount; iv++ {
		var ov int32
		ov = ol[iv]
		if ov != -1 {
			for ov != oh[iv] {
				ovn := NextIndex(int32(ov), int32(outerVertexCount))
				if used[ovn] {
					break
				}
				(*tri)[0] = (*inner)[iv]
				(*tri)[1] = outer[ov]
				(*tri)[2] = outer[ovn]
				if DegenerateTriangle(*tri) {
					break
				}
				result.push_back(tri.clone())
				used[ovn] = true
				ov = ovn
			}
		}
		ov = oh[iv]
		if ov != -1 {
			for ov != ol[iv] {
				ovp := PrevIndex(ov, int32(outerVertexCount))
				if used[ov] {
					break
				}
				(*tri)[0] = (*inner)[iv]
				(*tri)[1] = outer[ovp]
				(*tri)[2] = outer[ov]
				if DegenerateTriangle(*tri) {
					break
				}
				result.push_back(tri.clone())
				used[ov] = true
				ov = ovp
			}
		}
	}
}

// MergePolygons merges p1 and p2 across a single shared (opposed) edge into
// merged. Returns false when no shared edge exists, the result would exceed
// kNumVerts, or the merged polygon would not be convex.
func (this *DynamicMesh) MergePolygons(merged *Polygon, p1, p2 Polygon) bool {
	merged.resize_uninitialized(0)
	count1 := len(p1)
	count2 := len(p2)
	if count1 < 3 {
		return false
	}
	if count2 < 3 {
		return false
	}
	if (count1 + count2 - 2) > kNumVerts {
		return false
	}
	for iv := 0; iv < count1; iv++ {
		ivn := NextIndex(int32(iv), int32(count1))
		v1 := p1[iv]
		v2 := p1[ivn]
		for jv := 0; jv < count2; jv++ {
			jvn := NextIndex(int32(jv), int32(count2))
			w1 := p2[jv]
			w2 := p2[jvn]
			if (v1 == w2) && (v2 == w1) {
				// Found shared edge
				// Test convexity
				wn := p2[NextIndex(jvn, int32(count2))]
				vp := p1[PrevIndex(int32(iv), int32(count1))]
				if TriArea2D(vp, v1, wn) <= 0 {
					return false
				}
				// Test convexity
				wp := p2[PrevIndex(int32(jv), int32(count2))]
				vn := p1[NextIndex(ivn, int32(count1))]
				if TriArea2D(v2, vn, wp) <= 0 {
					return false
				}
				// Merge two polygon parts
				for k := ivn; k != int32(iv); k = NextIndex(k, int32(count1)) {
					merged.push_back(p1[k])
				}
				for k := jvn; k != int32(jv); k = NextIndex(k, int32(count2)) {
					merged.push_back(p2[k])
				}
				Assert(len(*merged) == count1+count2-2)
				return IsSafeConvex(*merged)
			}
		}
	}
	return false
}

// MergePolygons2 greedily merges this mesh's polygons in place.
func (this *DynamicMesh) MergePolygons2() {
	// Merge list of convex non-overlapping polygons assuming identical data.
	var merged Polygon = make([]Vector3f, kNumVerts)
	var poly Polygon = make([]Vector3f, kNumVerts)
	var poly2 Polygon = make([]Vector3f, kNumVerts)
	for ip := 0; ip < len(this.m_Polygons); ip++ {
		this.FromPoly(&poly, &this.m_Polygons[ip])
		for jp := len(this.m_Polygons) - 1; jp > ip; jp-- {
			dataConforms := this.m_Data[ip] == this.m_Data[jp]
			if !dataConforms {
				continue
			}
			this.FromPoly(&poly2, &this.m_Polygons[jp])
			if this.MergePolygons(&merged, poly, poly2) {
				poly = merged.clone()
				// TODO : consider to remove unordered to avoid memmove here
				this.m_Polygons.erase(jp)
			}
			if len(poly) == kNumVerts {
				break
			}
		}
		this.m_Polygons[ip] = this.CreatePolygon(poly, kGeneratedPolygon)
	}
}

// MergePolygons3 greedily merges a free-standing polygon container in place.
func (this *DynamicMesh) MergePolygons3(polys *PolygonContainer) {
	// Merge list of convex non-overlapping polygons assuming identical data.
	var poly Polygon = make([]Vector3f, kNumVerts)
	var merged Polygon = make([]Vector3f, kNumVerts)
	for ip := 0; ip < len(*polys); ip++ {
		poly = (*polys)[ip]
		for jp := len(*polys) - 1; jp > ip; jp-- {
			if this.MergePolygons(&merged, poly, (*polys)[jp]) {
				poly = merged.clone()
				// TODO : consider to remove unordered to avoid memmove here
				polys.erase(jp)
			}
		}
		(*polys)[ip] = poly
	}
}

// ConnectPolygons fills in m_Neighbours from the shared-edge list.
// Neighbour indices are stored 1-based (0 means "no neighbour").
func (this *DynamicMesh) ConnectPolygons() {
	var edges EdgeList
	this.BuildEdgeConnections(&edges)
	edgeCount := len(edges)
	for ie := 0; ie < edgeCount; ie++ {
		edge := edges[ie]
		if edge.c2 == 0xffff {
			continue // boundary edge, only one polygon
		}
		this.m_Polygons[edge.p1].m_Neighbours[edge.c1] = edge.p2 + 1
		this.m_Polygons[edge.p2].m_Neighbours[edge.c2] = edge.p1 + 1
	}
}

// RemoveDegeneratePolygons collapses polygons that quantize to a point/segment.
func (this *DynamicMesh) RemoveDegeneratePolygons() {
	count := len(this.m_Polygons)
	for ip := 0; ip < count; ip++ {
		if PolygonDegenerate(int32(this.m_Polygons[ip].m_VertexCount), this.m_Polygons[ip].m_VertexIDs[:], this.m_Vertices, this.m_QuantFactor) {
			this.CollapsePolygonUnordered(ip)
			count--
			ip-- // re-test the polygon swapped into slot ip
		}
	}
}

// RemoveDegenerateEdges drops zero-length edges (repeated vertex ids) and
// removes polygons that degenerate below three vertices as a result.
func (this *DynamicMesh) RemoveDegenerateEdges() {
	count := len(this.m_Polygons)
	for ip := 0; ip < count; ip++ {
		poly := &this.m_Polygons[ip]
		for i := uint8(0); i < poly.m_VertexCount; i++ {
			// j is the next vertex index with wrap-around.
			j := uint8(0)
			if i+1 < poly.m_VertexCount {
				j = i + 1
			}
			if poly.m_VertexIDs[i] == poly.m_VertexIDs[j] {
				// Shift rest of the polygon.
				for k := j; k < poly.m_VertexCount-1; k++ {
					poly.m_VertexIDs[k] = poly.m_VertexIDs[k+1]
				}
				poly.m_VertexCount--
				i-- // re-test slot i (uint8 wrap at i==0 is undone by the loop's i++)
			}
		}
		// If polygon got degenerated into a point or line, remove it.
		if poly.m_VertexCount < 3 {
			this.RemovePolygonUnordered(ip)
			count--
			ip--
		}
	}
}

// RemoveUnusedVertices compacts m_Vertices to the set actually referenced,
// remapping every polygon's vertex ids.
func (this *DynamicMesh) RemoveUnusedVertices() {
	var transVertices = make([]int, len(this.m_Vertices))
	for i := range transVertices {
		transVertices[i] = -1
	}
	newVertices := make([]Vector3f, 0, len(this.m_Vertices))
	count := len(this.m_Polygons)
	for ip := 0; ip < count; ip++ {
		for iv := uint8(0); iv < this.m_Polygons[ip].m_VertexCount; iv++ {
			oldVertexID := this.m_Polygons[ip].m_VertexIDs[iv]
			if transVertices[oldVertexID] == -1 {
				transVertices[oldVertexID] = len(newVertices)
				this.m_Polygons[ip].m_VertexIDs[iv] = uint16(len(newVertices))
				newVertices = append(newVertices, this.m_Vertices[oldVertexID])
			} else {
				this.m_Polygons[ip].m_VertexIDs[iv] = uint16(transVertices[oldVertexID])
			}
		}
	}
	this.m_Vertices = newVertices
	// NOTE: m_Welder is now out of sync with m_Vertices.
	// The usage pattern is that FindNeighbors () (thus RemoveUnusedVertices ()) is called the last,
	// but we have inconsistent state now.
}

// FindNeighbors cleans the mesh and rebuilds polygon adjacency. Call last.
func (this *DynamicMesh) FindNeighbors() {
	// Remove degenerate polygons by collapsing them into segments.
	this.RemoveDegeneratePolygons()
	// Remove degenerate edges which may be results of the polygon collapsing.
	this.RemoveDegenerateEdges()
	this.RemoveUnusedVertices()
	this.ConnectPolygons()
}

// AddPolygon appends an original (non-generated) polygon.
func (this *DynamicMesh) AddPolygon(vertices Polygon, data DataType) {
	this.AddPolygon2(vertices, data, kOriginalPolygon)
}

// AddPolygon2 appends a polygon with an explicit status.
func (this *DynamicMesh) AddPolygon2(vertices Polygon, data DataType, status PolyStatus) {
	// Delaying neighbor connections.
	Assert(len(this.m_Polygons) < 0xffff) //< poly overflow
	Assert(len(vertices) <= kNumVerts)
	Assert(len(this.m_Data) == len(this.m_Polygons))
	newPoly := this.CreatePolygon(vertices, status)
	this.m_Polygons = append(this.m_Polygons, newPoly)
	this.m_Data = append(this.m_Data, data)
}

// ClipPolys carves every hull out of the mesh, replacing each clipped polygon
// with its outside remainder. Returns true when anything was clipped.
//
// BUG FIX: every `0` loop/sentinel constant in this function had been
// corrupted to `-2` (`ih := -2`, `first := -2`, `ip := -2`,
// `len(inside) == -2`, `io := -2`, `first != -2`), which would index
// carveHulls[-2] and panic. Restored to `0`, matching the structurally
// identical ClipPolys2 below.
func (this *DynamicMesh) ClipPolys(carveHulls HullContainer) bool {
	hullCount := len(carveHulls)
	clipped := false
	var outsidePolygons PolygonContainer
	var currentPoly Polygon
	var inside Polygon
	var temp Polygon
	// usedEdges describe to which plane or outer edge is this edge colinear
	var usedEdges [MAX_OUTPUT_VERTICES]byte
	for ih := 0; ih < hullCount; ih++ {
		carveHull := carveHulls[ih]
		count := len(this.m_Polygons)
		first := 0
		for ip := 0; ip < count; ip++ {
			this.FromPoly(&inside, &this.m_Polygons[ip])
			this.Intersection(&inside, carveHull, &temp, usedEdges[:])
			if len(inside) == 0 {
				continue // polygon untouched by this hull
			}
			clipped = true
			currentData := this.m_Data[ip]
			this.FromPoly(&currentPoly, &this.m_Polygons[ip])
			this.Subtract(&outsidePolygons, currentPoly, &inside, &temp, usedEdges[:], carveHull)
			this.MergePolygons3(&outsidePolygons)
			// Swap clipped polygons to the front; the prefix is dropped below.
			if ip != first {
				this.m_Polygons[ip] = this.m_Polygons[first]
				this.m_Data[ip] = this.m_Data[first]
			}
			first++
			for io := 0; io < len(outsidePolygons); io++ {
				this.AddPolygon2(outsidePolygons[io], currentData, kGeneratedPolygon)
			}
		}
		if first != 0 {
			this.m_Polygons = this.m_Polygons[first:]
			this.m_Data = this.m_Data[first:]
		}
	}
	return clipped
}

// ClipPolys2 is ClipPolys for detail hulls: a hull only clips polygons whose
// data id is listed in the hull's polysIds.
func (this *DynamicMesh) ClipPolys2(carveHulls DetailHullContainer) bool {
	hullCount := len(carveHulls)
	clipped := false
	var outsidePolygons PolygonContainer
	var currentPoly Polygon
	var inside Polygon
	var temp Polygon
	// usedEdges describe to which plane or outer edge is this edge colinear
	var usedEdges [MAX_OUTPUT_VERTICES]byte
	for ih := 0; ih < hullCount; ih++ {
		carveHull := carveHulls[ih]
		count := len(this.m_Polygons)
		first := 0
		for ip := 0; ip < count; ip++ {
			currentData := this.m_Data[ip]
			// If the polygon does not belong to the carve hull, skip.
			found := false
			for i, ni := 0, len(carveHull.polysIds); i < ni; i++ {
				if carveHull.polysIds[i] == int(currentData) {
					found = true
					break
				}
			}
			if !found {
				continue
			}
			this.FromPoly(&inside, &this.m_Polygons[ip])
			this.Intersection(&inside, carveHull.hull, &temp, usedEdges[:])
			if len(inside) == 0 {
				continue
			}
			clipped = true
			this.FromPoly(&currentPoly, &this.m_Polygons[ip])
			this.Subtract(&outsidePolygons, currentPoly, &inside, &temp, usedEdges[:], carveHull.hull)
			this.MergePolygons3(&outsidePolygons)
			if ip != first {
				this.m_Polygons[ip] = this.m_Polygons[first]
				this.m_Data[ip] = this.m_Data[first]
			}
			first++
			for io := 0; io < len(outsidePolygons); io++ {
				this.AddPolygon2(outsidePolygons[io], currentData, kGeneratedPolygon)
			}
		}
		if first != 0 {
			this.m_Polygons = this.m_Polygons[first:]
			this.m_Data = this.m_Data[first:]
		}
	}
	return clipped
}

// Reserve is a no-op kept for API compatibility with the C++ original.
func (this *DynamicMesh) Reserve(vertexCount int32, polygonCount int32) {
	//this.m_Polygons.reserve(polygonCount);
	//this.m_Data.reserve(polygonCount);
	//this.m_Vertices.reserve(vertexCount);
}

// AddVertex pushes a vertex through the welder (deduplicating storage).
func (this *DynamicMesh) AddVertex(v Vector3f) {
	this.m_Welder.Push(v)
}

// AddPolygon3 appends a polygon from raw vertex ids.
func (this *DynamicMesh) AddPolygon3(vertexIDs []uint16, data DataType, vertexCount int32) {
	// Ensure neighbour ids are zero'ed
	var poly Poly
	poly.m_Status = kOriginalPolygon
	poly.m_VertexCount = uint8(vertexCount)
	for iv := int32(0); iv < vertexCount; iv++ {
		poly.m_VertexIDs[iv] = vertexIDs[iv]
	}
	this.m_Polygons = append(this.m_Polygons, poly)
	this.m_Data = append(this.m_Data, data)
}
dynamic_mesh.cpp.go
0.696681
0.559832
dynamic_mesh.cpp.go
starcoder
package jsonapisdk import ( "github.com/kucjac/jsonapi" "net/http" ) type Endpoint struct { // Type is the endpoint type Type EndpointType // PrecheckPairs are the pairs of jsonapi.Scope and jsonapi.Filter // The scope deines the model from where the preset values should be taken // The second defines the filter field for the target model's scope that would be filled with // the values from the precheckpair scope PrecheckPairs []*jsonapi.PresetPair // PresetPairs are the paris of jsonapi.Scope and jsonapiFilter // The scope defines the model from where the preset values should be taken. It should not be // the same as target model. // The second parameter defines the target model's field and it subfield where the value // should be preset. PresetPairs []*jsonapi.PresetPair // PresetFilters are the filters for the target model's scope // They should be filled with values taken from context with key "jsonapi.PresetFilterValue" // When the values are taken the target model's scope would save the value for the relation // provided in the filterfield. PresetFilters []*jsonapi.PresetFilter // PrecheckFilters are the filters for the target model's scope // They should be filled with values taken from context with key "jsonapi.PrecheckPairFilterValue" // When the values are taken and saved into the precheck filter, the filter is added into the // target model's scope. PrecheckFilters []*jsonapi.PresetFilter // Preset default sorting PresetSort []*jsonapi.SortField // Preset default limit offset PresetPaginate *jsonapi.Pagination // RelationPrecheckPairs are the prechecks for the GetRelated and GetRelationship root RelationPrecheckPairs map[string]*RelationPresetRules Middlewares []MiddlewareFunc // GetModified defines if the result for Patch Should be returned. 
GetModifiedResult bool // CountList is a flag that defines if the List result should include objects count CountList bool // CustomHandlerFunc is a http.HandlerFunc defined for this endpoint CustomHandlerFunc http.HandlerFunc } func (e *Endpoint) HasPrechecks() bool { return len(e.PrecheckFilters) > 0 || len(e.PrecheckFilters) > 0 } func (e *Endpoint) HasPresets() bool { return len(e.PresetPairs) > 0 || len(e.PresetFilters) > 0 } type RelationPresetRules struct { // PrecheckPairs are the pairs of jsonapi.Scope and jsonapi.Filter // The scope deines the model from where the preset values should be taken // The second defines the filter field for the target model's scope that would be filled with // the values from the precheckpair scope PrecheckPairs []*jsonapi.PresetPair // PresetPairs are the paris of jsonapi.Scope and jsonapiFilter // The scope defines the model from where the preset values should be taken. It should not be // the same as target model. // The second parameter defines the target model's field and it subfield where the value // should be preset. PresetPairs []*jsonapi.PresetPair // PresetFilters are the filters for the target model's scope // They should be filled with values taken from context with key "jsonapi.PresetFilterValue" // When the values are taken the target model's scope would save the value for the relation // provided in the filterfield. PresetFilters []*jsonapi.PresetFilter // PrecheckFilters are the filters for the target model's scope // They should be filled with values taken from context with key "jsonapi.PrecheckPairFilterValue" // When the values are taken and saved into the precheck filter, the filter is added into the // target model's scope. PrecheckFilters []*jsonapi.PresetFilter // Preset default sorting PresetSort []*jsonapi.SortField // Preset default limit offset PresetPaginate *jsonapi.Pagination } func (e *Endpoint) String() string { return e.Type.String() }
endpoint.go
0.535584
0.40645
endpoint.go
starcoder
package convert // SliceOfString converts the value into a slice of strings. // It works with interface{}, []interface{}, []string, and string values. // If the passed value cannot be converted, then an empty slice is returned. func SliceOfString(value interface{}) []string { switch value := value.(type) { case string: return []string{value} case []string: return value case []int: result := make([]string, len(value)) for index, v := range value { result[index] = String(v) } return result case []float64: result := make([]string, len(value)) for index, v := range value { result[index] = String(v) } return result case []interface{}: result := make([]string, len(value)) for index, v := range value { result[index] = String(v) } return result } return make([]string, 0) } // SliceOfInt converts the value into a slice of ints. // It works with interface{}, []interface{}, []int, and int values. // If the passed value cannot be converted, then an empty slice is returned. func SliceOfInt(value interface{}) []int { switch value := value.(type) { case []interface{}: result := make([]int, len(value)) for index, v := range value { result[index] = Int(v) } return result case []int: return value case int: return []int{value} } return make([]int, 0) } // SliceOfFloat converts the value into a slice of floats. // It works with interface{}, []interface{}, []float64, and float64 values. // If the passed value cannot be converted, then an empty slice is returned. func SliceOfFloat(value interface{}) []float64 { switch value := value.(type) { case []interface{}: result := make([]float64, len(value)) for index, v := range value { result[index] = Float(v) } return result case []float64: return value case float64: return []float64{value} } return make([]float64, 0) } // SliceOfMap converts the value into a slice of map[string]interface{}. // It works with []interface{}, []map[string]interface{}. // If the passed value cannot be converted, then an empty slice is returned. 
func SliceOfMap(value interface{}) []map[string]interface{} { switch value := value.(type) { case []map[string]interface{}: return value case []interface{}: result := make([]map[string]interface{}, len(value)) for index, v := range value { result[index] = MapOfInterface(v) } return result } return make([]map[string]interface{}, 0) } // SliceOfBool converts the value into a slice of interface{}. // It works with interface{}, []interface{}, []string, []int, []float64, string, int, and float64 values. // If the passed value cannot be converted, then an empty slice is returned. func SliceOfBool(value interface{}) []bool { switch value := value.(type) { case []bool: return value case []interface{}: result := make([]bool, len(value)) for index, v := range value { result[index] = Bool(v) } return result case []string: result := make([]bool, len(value)) for index, v := range value { result[index] = Bool(v) } return result case []int: result := make([]bool, len(value)) for index, v := range value { result[index] = Bool(v) } return result case []float64: result := make([]bool, len(value)) for index, v := range value { result[index] = Bool(v) } return result case string, int, float64: return []bool{Bool(value)} } return make([]bool, 0) } // SliceOfInterface converts the value into a slice of interface{}. // It works with interface{}, []interface{}, []string, []int, []float64, string, int, and float64 values. // If the passed value cannot be converted, then an empty slice is returned. 
func SliceOfInterface(value interface{}) []interface{} {
	switch typed := value.(type) {
	case []interface{}:
		// Already the right shape; return as-is.
		return typed
	case []string:
		out := make([]interface{}, 0, len(typed))
		for _, item := range typed {
			out = append(out, item)
		}
		return out
	case []int:
		out := make([]interface{}, 0, len(typed))
		for _, item := range typed {
			out = append(out, item)
		}
		return out
	case []float64:
		out := make([]interface{}, 0, len(typed))
		for _, item := range typed {
			out = append(out, item)
		}
		return out
	case string, int, float64:
		// Single scalar wraps into a one-element slice.
		return []interface{}{typed}
	}
	// Unsupported type: empty (non-nil) slice.
	return []interface{}{}
}
slice.go
0.80479
0.613208
slice.go
starcoder
package twofive import "github.com/francoispqt/gojay" // Source describes the nature and behavior of the entity that is the source of the bid request // upstream from the exchange. The primary purpose of this object is to define post-auction or upstream // decisioning when the exchange itself does not control the final decision. A common example of this is // header bidding, but it can also apply to upstream server entities such as another RTB exchange, a // mediation platform, or an ad server combines direct campaigns with 3rd party demand in decisioning. type Source struct { Ext SourceExt `json:"ext,omitempty" valid:"optional"` } // SourceExt also for OM SDK extensions to be passed to demand type SourceExt struct { Omidpn string `json:"omidpn,omitempty" valid:"optional"` // identifier of the OM SDK integration, this is the same as the "name" parameter of the OMID Partner object Omidpv string `json:"omidpv,omitempty" valid:"optional"` // (optional) Version of the OM SDK version } // MarshalJSONObject implements MarshalerJSONObject func (s *Source) MarshalJSONObject(enc *gojay.Encoder) { enc.ObjectKeyNullEmpty("ext", &s.Ext) } // IsNil checks if instance is nil func (s *Source) IsNil() bool { return s == nil } // UnmarshalJSONObject implements gojay's UnmarshalerJSONObject func (s *Source) UnmarshalJSONObject(dec *gojay.Decoder, k string) error { switch k { case "ext": return dec.Object(&s.Ext) } return nil } // NKeys returns the number of keys to unmarshal func (s *Source) NKeys() int { return 0 } // MarshalJSONObject implements MarshalerJSONObject func (s *SourceExt) MarshalJSONObject(enc *gojay.Encoder) { enc.StringKeyOmitEmpty("omidpn", s.Omidpn) enc.StringKeyOmitEmpty("omidpv", s.Omidpv) } // IsNil checks if instance is nil func (s *SourceExt) IsNil() bool { return s == nil } // UnmarshalJSONObject implements gojay's UnmarshalerJSONObject func (s *SourceExt) UnmarshalJSONObject(dec *gojay.Decoder, k string) error { switch k { case "omidpn": return 
dec.String(&s.Omidpn) case "omidpv": return dec.String(&s.Omidpv) } return nil } // NKeys returns the number of keys to unmarshal func (s *SourceExt) NKeys() int { return 0 }
go/request/rtb_twofive/source.go
0.715921
0.438665
source.go
starcoder
package main import ( "fmt" //"github.com/hajimehoshi/ebiten/ebitenutil" ) // MMU - Memory management unit. Exposes a read/write interface to some internal memory type MMU struct { internalRAM []uint8 cart *Cartridge statMode uint8 } // Returns an 8-bit value at the given address func (mmu *MMU) read8(address uint16) uint8 { if address == 0xFF00 { // P1 (joy pad info) return 0x0F // Harcoded - no buttons pressed } else if address == 0xFF01 { // Serial transfer data panic("Reads from 0xFF01 unimplemented") } else if address == 0xFF02 { // SC control panic("Reads from 0xFF02 unimplemented") } else if address == 0xFF41 { return mmu.calculateSTAT() } else if address == 0xFF47 { panic("Reads from 0xFF47 unimplemented") } else if (address >= 0xFF00) && (address <= 0xFFFF) { return (mmu.internalRAM)[address] } return (mmu.cart.memory)[address] } // Returns a 16-bit value starting from the given address // The value returned is formed by: <*address> | <*address+1> << 8 func (mmu *MMU) read16(address uint16) uint16 { // TODO: Fix this return uint16(mmu.read8(address)) | uint16(mmu.read8(address+1)) << 8 } // Writes an 8-bit value to the 16-bit address provided. 
// TODO: Check to make sure that data is being written to RAM and not ROM func (mmu *MMU) write8(address uint16, data uint8) { if address == 0xFF01 { // Writing to the serial port; used by the test ROM to give output fmt.Printf("%c", data) // Now printing debug messages properly } else if address == 0xFF02 { // SB - do nothing; will not handle } else if address == 0xFF04 { mmu.internalRAM[0xFF04] = 0 // Increment the DIV (divider register) always resets it to 0 } else if address == 0xFF41 { mmu.internalRAM[0xFF41] = data & 0x78 // Only bits 3-6 are writeable } else if address == 0xFF44 { mmu.internalRAM[0xFF44] = 0 // Incrementing LY (LCDC ycoordinate) always reset it to zero } else if address == 0xFF45 { panic("0xFF45 unimplemented") } else if address == 0xFF46 { //panic("0xFF46 unimplemented") } else if (address >= 0xFF00) && (address <= 0xFFFF) { mmu.internalRAM[address] = data } else { mmu.cart.memory[address] = data } } // Writes a 16-bit value to the 16-bit address provided // The low byte of data is stored at (address) // The high byte of data is stored at (address+1) func (mmu *MMU) write16(address uint16, data uint16) { mmu.write8(address,uint8(data & 0xFF)) mmu.write8(address+1,uint8(data >> 8)) } // incrementDIV - Increment the divider register // This register cannot be written to normally (writing to it resets it) // It is only ever incremented by the Timer and only by 1 func (mmu * MMU) incrementDIV() { mmu.internalRAM[0xFF04]++ } // getTIMA - Returns the value of the 8-bit timer register func (mmu * MMU) getTIMA() uint8{ return mmu.read8(0xFF05) } func (mmu * MMU) setTIMA(newValue uint8) { mmu.write8(0xFF05,newValue) } // getTMA - returns the timer modulator // This is the value that TIMA is set to for every overflow func (mmu * MMU) getTMA() uint8{ return mmu.read8(0xFF06) } // getTAC() - Returns the value inside the timer control register func (mmu * MMU) getTAC() uint8 { return mmu.read8(0xFF07) } // getIF() - Returns the value of the interrupt 
flag func (mmu * MMU) getIF() uint8{ return mmu.read8(0xFF0F) } // setIF() - Sets the interrupt flag to new values // This may trigger an interrupt routine during the next instruction func (mmu * MMU) setIF(newValue uint8){ mmu.write8(0xFF0F,newValue) } // getIE() - Returns the value of the interrupt enabled register func (mmu * MMU) getIE() uint8 { return mmu.read8(0xFFFF) } // showDisplay - Returns true if the display should be shown func (mmu * MMU) showDisplay() bool { return (mmu.read8(0xFF40) >> 7) == 0x1 } // bgTileDataAddress - Returns the address of the given tileNumber // based on which tileData region is selected in LCDC func (mmu * MMU) bgTileDataAddress(tileNumber uint8) uint16 { tileAddress := uint16(0) if ((mmu.read8(0xFF40) >> 4) & 0x1) == 0x1 { tileAddress = 0x8000 } else { tileAddress = 0x8800 } return tileAddress + uint16(tileNumber)*16 } // bgTileMapStartAddress - Returns the start of 1024-byte area which // contains 32x32 tilemap to use func (mmu *MMU) bgTileMapStartAddress() uint16 { if ((mmu.read8(0xFF40) >> 3) & 0x1) == 0x1 { return 0x9C00 } return 0x9800 } func (mmu *MMU) setSTATMode(mode uint8){ mmu.statMode = mode } // Combines the R/W flags from the internalRAM and also the special statMode // flags set by the display.update function func (mmu * MMU) calculateSTAT() uint8 { stat := mmu.internalRAM[0xFF41] | mmu.statMode if mmu.getLY() == mmu.getLYC(){ stat = stat | 0x4 // Set bit 2 (coincident flag) } return stat } func (mmu * MMU) scrollY() uint8 { return mmu.read8(0xFF42) } func (mmu * MMU) scrollX() uint8 { return mmu.read8(0xFF43) } func (mmu * MMU) windowY() uint8 { return mmu.read8(0xFF4A) } func (mmu *MMU) windowX() uint8 { return mmu.read8(0xFF4B) } // getLY - Returns the value of the LY register (LCD Y; aka current scanline) func (mmu * MMU) getLY() uint8 { return mmu.read8(0xFF44) } // getLYC - Returns the value of the LYC (line y compare) register. 
Setting // this value allows for an interrupt whenever LY=LYC func (mmu * MMU) getLYC() uint8 { return mmu.read8(0xFF45) } // incrementLY - Handles incrementing the LCD Y-Register & setting interrupts func (mmu * MMU) incrementLY() { currentScanline := mmu.read8(0xFF44) currentScanline++ if currentScanline == 144 { mmu.setIF(mmu.getIF() | 0x1) // Trigger vblank! mmu.internalRAM[0xFF44] = currentScanline } else if currentScanline > 153 { // Max number of scanlines have been reached mmu.internalRAM[0xFF44] = 0 } else { mmu.internalRAM[0xFF44] = currentScanline } } // backgroundPixelAt(x,y) // x,y are coordinates in the BG tile space. To read the interleaved pixel color, do the following: // 1) Calculate which tile the pixel is in. The tilespace is 32x32 8px tiles in size // 2) Calculate the address where the tile starts in memory // 3) Calculate the exact byte which contains the pixel data // 4) Calculate which bit in the two bytes contain the pixel data // // tileAddress [0L][1L][2L][3L][4L][5L][6L][7L] // Two bytes contain the // +1 [0H][1H][2H][3H][4H][5H][6H][7H] // color data for 8 pixels // ... // +14 [ last two bytes ] // +15 [ last eight pixels ] // TODO: Calculate the color based on the selected palette func (mmu * MMU) backgroundPixelAt(x uint8, y uint8) int { // 32 tiles per row. y>>3 (same as y/8) gets the row. 
x>>3 (x/8) gets the columns tileMapOffset := (uint16(x)>>3) + (uint16(y)>>3)*32 tileSelectionAddress := mmu.bgTileMapStartAddress() + uint16(tileMapOffset) tileNumber := mmu.read8(tileSelectionAddress) // Which one of 256 tiles are to be shown tileDataAddress := mmu.bgTileDataAddress(tileNumber) // Where the 16-bytes of the tile begin tileYOffset := (y & 0x7)*2 // Each row in the tile takes 2 bytes tileXOffset := (x & 0x7) // Each col in the tile is 1 bit pixelByte := tileDataAddress + uint16(tileYOffset) pixLow := (mmu.read8(pixelByte+1) >> (7-tileXOffset)) & 0x1 pixHigh := (mmu.read8(pixelByte) >> (7-tileXOffset)) & 0x1 colorNumber := (pixHigh << 1) | pixLow return GameBoyColorMap[colorNumber] } func createMMU() *MMU { mmu := new(MMU) mmu.internalRAM = make([]uint8, 65536) // Pre-allocate all that beautiful unused memory return mmu }
src/mmu.go
0.654674
0.4133
mmu.go
starcoder
package advent import "github.com/davidparks11/advent2021/internal/coordinate" var _ Problem = &smokeBasin{} type smokeBasin struct { dailyProblem } func NewSmokeBasin() Problem { return &smokeBasin{ dailyProblem{ day: 9, }, } } func (s *smokeBasin) Solve() interface{} { input := asciiNumGridToIntArray(s.GetInputLines()) var results []int results = append(results, s.sumRiskLevels(input)) results = append(results, s.threeLargestBasins(input)) return results } /* These caves seem to be lava tubes. Parts are even still volcanically active; small hydrothermal vents release smoke into the caves that slowly settles like rain. If you can model how the smoke flows through the caves, you might be able to avoid it and be that much safer. The submarine generates a heightmap of the floor of the nearby caves for you (your puzzle input). Smoke flows to the lowest Point of the area it's in. For example, consider the following heightmap: 2199943210 3987894921 9856789892 8767896789 9899965678 Each number corresponds to the height of a particular location, where 9 is the highest and 0 is the lowest a location can be. Your first goal is to find the low points - the locations that are lower than any of its adjacent locations. Most locations have four adjacent locations (up, down, left, and right); locations on the edge or corner of the map have three or two adjacent locations, respectively. (Diagonal locations do not count as adjacent.) In the above example, there are four low points, all highlighted: two are in the first row (a 1 and a 0), one is in the third row (a 5), and one is in the bottom row (also a 5). All other locations on the heightmap have some lower adjacent location, and so are not low points. The risk level of a low Point is 1 plus its height. In the above example, the risk levels of the low points are 2, 1, 6, and 6. The sum of the risk levels of all low points in the heightmap is therefore 15. Find all of the low points on your heightmap. 
What is the sum of the risk levels of all low points on your heightmap? */ func (s *smokeBasin) sumRiskLevels(locationHeights [][]int) int { //trivial solution riskLevels := 0 for y := 0; y < len(locationHeights); y++ { for x := 0; x < len(locationHeights[y]); x++ { if x > 0 && locationHeights[y][x] >= locationHeights[y][x-1] { //left location continue } else if x < len(locationHeights[y])-1 && locationHeights[y][x] >= locationHeights[y][x+1] { //right location continue } else if y > 0 && locationHeights[y][x] >= locationHeights[y-1][x] { //up location continue } else if y < len(locationHeights)-1 && locationHeights[y][x] >= locationHeights[y+1][x] { //down location continue } riskLevels += locationHeights[y][x] + 1 } } return riskLevels } /* Next, you need to find the largest basins so you know what areas are most important to avoid. A basin is all locations that eventually flow downward to a single low Point. Therefore, every low Point has a basin, although some basins are very small. Locations of height 9 do not count as being in any basin, and all other locations will always be part of exactly one basin. The size of a basin is the number of locations within the basin, including the low Point. The example above has four basins. The top-left basin, size 3: 2199943210 3987894921 9856789892 8767896789 9899965678 The top-right basin, size 9: 2199943210 3987894921 9856789892 8767896789 9899965678 The middle basin, size 14: 2199943210 3987894921 9856789892 8767896789 9899965678 The bottom-right basin, size 9: 2199943210 3987894921 9856789892 8767896789 9899965678 Find the three largest basins and multiply their sizes together. In the above example, this is 9 * 14 * 9 = 1134. What do you get if you multiply together the sizes of the three largest basins? 
*/
// threeLargestBasins finds every low point, flood-fills its basin, and
// returns the product of the three largest basin sizes.
func (s *smokeBasin) threeLargestBasins(locationHeights [][]int) int {
	lowPoints := []coordinate.Point{}
	for y := 0; y < len(locationHeights); y++ {
		for x := 0; x < len(locationHeights[y]); x++ {
			if isLowPoint := s.isLowestPoint(locationHeights, x, y); isLowPoint {
				lowPoints = append(lowPoints, coordinate.Point{x, y})
			}
		}
	}

	// Keep a running top-3, shifting the smaller entries down.
	first, second, third := 0, 0, 0
	checkLargest := func(size int) {
		if size > first {
			first, second, third = size, first, second
		} else if size > second {
			second, third = size, second
		} else if size > third {
			third = size
		}
	}

	for _, p := range lowPoints {
		seen := make(map[coordinate.Point]struct{}) // set of points
		checkLargest(s.calcBasinSize(locationHeights, seen, p.X, p.Y))
	}
	return first * second * third
}

// calcBasinSize counts the locations in the basin rooted at (x, y) by
// recursively following strictly-increasing, non-9 neighbours.
func (s *smokeBasin) calcBasinSize(locationHeights [][]int, seen map[coordinate.Point]struct{}, x, y int) int {
	seen[coordinate.Point{X: x, Y: y}] = struct{}{}
	count := 1
	if _, found := seen[coordinate.Point{X: x - 1, Y: y}]; !found && s.inBounds(locationHeights, x-1, y) && locationHeights[y][x] < locationHeights[y][x-1] && locationHeights[y][x-1] != 9 {
		count += s.calcBasinSize(locationHeights, seen, x-1, y)
	}
	if _, found := seen[coordinate.Point{X: x + 1, Y: y}]; !found && s.inBounds(locationHeights, x+1, y) && locationHeights[y][x] < locationHeights[y][x+1] && locationHeights[y][x+1] != 9 {
		count += s.calcBasinSize(locationHeights, seen, x+1, y)
	}
	if _, found := seen[coordinate.Point{X: x, Y: y - 1}]; !found && s.inBounds(locationHeights, x, y-1) && locationHeights[y][x] < locationHeights[y-1][x] && locationHeights[y-1][x] != 9 {
		count += s.calcBasinSize(locationHeights, seen, x, y-1)
	}
	if _, found := seen[coordinate.Point{X: x, Y: y + 1}]; !found && s.inBounds(locationHeights, x, y+1) && locationHeights[y][x] < locationHeights[y+1][x] && locationHeights[y+1][x] != 9 {
		count += s.calcBasinSize(locationHeights, seen, x, y+1)
	}
	return count
}

// inBounds is a bounds check - assumes the grid is non-empty and rectangular.
func (s *smokeBasin) inBounds(grid [][]int, x, y int) bool {
	return x >= 0 && x < len(grid[0]) && y >= 0 && y < len(grid)
}

// isLowestPoint reports whether (x, y) is strictly lower than every existing
// orthogonal neighbour (missing neighbours at the edges count as satisfied).
//
// BUG FIX: the previous version OR-joined "lower than this neighbour" tests,
// so any cell lower than at least ONE neighbour was flagged as a low point.
// A low point must be lower than ALL of its neighbours - matching the
// continue-based logic in sumRiskLevels above.
func (s *smokeBasin) isLowestPoint(grid [][]int, x, y int) bool {
	return (x == 0 || grid[y][x] < grid[y][x-1]) && //left location
		(x == len(grid[y])-1 || grid[y][x] < grid[y][x+1]) && //right location
		(y == 0 || grid[y][x] < grid[y-1][x]) && //up location
		(y == len(grid)-1 || grid[y][x] < grid[y+1][x]) //down location
}
internal/advent/day9.go
0.729231
0.589716
day9.go
starcoder
package config import ( "errors" "io/ioutil" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" ) // ParseAssertionsSchema takes a target configuration and translates it into in-memory structures. func ParseAssertionsSchema(data []byte) (*AssertionSpec, error) { astRoot, err := hcl.ParseBytes(data) if err != nil { return nil, err } if _, ok := astRoot.Node.(*ast.ObjectList); !ok { return nil, errors.New("schema malformed") } var outSpec AssertionSpec err = hcl.DecodeObject(&outSpec, astRoot) if err != nil { return nil, err } normaliseAssertionSpec(&outSpec) err = checkAssertionSpec(&outSpec) if err != nil { return nil, err } return &outSpec, nil } // ParseAssertionsSpecFile parses the assertions file from disk. func ParseAssertionsSpecFile(fpath string) (*AssertionSpec, error) { d, err := ioutil.ReadFile(fpath) if err != nil { return nil, err } return ParseAssertionsSchema(d) } func normaliseAssertionSpec(spec *AssertionSpec) { for i := range spec.Assertions { normaliseAssertion(spec.Assertions[i]) if spec.Assertions[i].Order == 0 { spec.Assertions[i].Order = 1000 } } } func normaliseAssertion(assertion *Assertion) { if len(assertion.Actions) == 0 { assertion.Actions = []*Action{&Action{Kind: ActionFail}} } else { for x := range assertion.Actions { if assertion.Actions[x].Kind == "" { assertion.Actions[x].Kind = ActionFail } else if assertion.Actions[x].Kind == ActionAssert { for _, assertion := range assertion.Actions[x].Assertions { normaliseAssertion(assertion) } } } } } func checkAssertion(a *Assertion) error { switch a.Kind { case FileExistsAssrt: fallthrough case FileNotExistsAssrt: if a.FilePath == "" { return errors.New("file_path must be specified for exists and !exists assertions") } case HashMatchAssrt: if a.Hash == "" || a.FilePath == "" { return errors.New("hash/file_path must be specified for md5_match assertions") } case HashFileAssrt: if a.BasePath == "" || a.FilePath == "" { return errors.New("base_path/file_path must be specified for 
file_match assertions") } case RegexMatchAssrt: if a.Regex == "" { return errors.New("regex must be specified for regex_contents_match assertions") } default: return errors.New("unsupported assertion type/kind: " + a.Kind) } for _, action := range a.Actions { switch action.Kind { case ActionFail: case ActionAssert: if len(action.Assertions) == 0 { return errors.New("at least one assertion must exist for ASSERT actions") } case ActionCopyFile: if action.SourcePath == "" || action.DestinationPath == "" { return errors.New("source_path/destination_path must be specified for COPY actions") } default: return errors.New("unsupported action type/kind: " + action.Kind) } } return nil } func checkAssertionSpec(spec *AssertionSpec) error { for name, a := range spec.Assertions { if name == "" { return errors.New("name must be specified for an assertion") } err := checkAssertion(a) if err != nil { return err } } return nil }
src/machassert/config/assertions_parser.go
0.532425
0.406067
assertions_parser.go
starcoder
package json import "sort" // Contains returns true if a contains b. This implements the @>, <@ operators. // See the Postgres docs for the expected semantics of Contains. // https://www.postgresql.org/docs/10/static/datatype-json.html#JSON-CONTAINMENT // The naive approach to doing array containment would be to do an O(n^2) // nested loop through the arrays to check if one is contained in the // other. We're out of luck when the arrays contain other arrays or // objects (there might actually be something fancy we can do, but there's nothing // obvious). // When the arrays contain scalars however, we can optimize this by // pre-sorting both arrays and iterating through them in lockstep. // To this end, we preprocess the JSON document to sort all of its arrays so // that when we perform contains we can extract the scalars sorted, and then // also the arrays and objects in separate arrays, so that we can do the fast // thing for the subset of the arrays which are scalars. func Contains(a, b JSON) (bool, error) { if a.Type() == ArrayJSONType && b.isScalar() { decoded, err := a.tryDecode() if err != nil { return false, err } ary := decoded.(jsonArray) return checkArrayContainsScalar(ary, b) } preA, err := a.preprocessForContains() if err != nil { return false, err } preB, err := b.preprocessForContains() if err != nil { return false, err } return preA.contains(preB) } // checkArrayContainsScalar performs a unique case of contains (and is // described as such in the Postgres docs) - a top-level array contains a // scalar which is an element of it. This contradicts the general rule of // contains that the contained object must have the same "shape" as the // containing object. func checkArrayContainsScalar(ary jsonArray, s JSON) (bool, error) { for _, j := range ary { cmp, err := j.Compare(s) if err != nil { return false, err } if cmp == 0 { return true, nil } } return false, nil } // containsable is an interface used internally for the implementation of @>. 
type containsable interface { contains(other containsable) (bool, error) } // containsableScalar is a preprocessed JSON scalar. The JSON it holds will // never be a JSON object or a JSON array. type containsableScalar struct{ JSON } // containsableArray is a preprocessed JSON array. // * scalars will always be scalars and will always be sorted, // * arrays will only contain containsableArrays, // * objects will only contain containsableObjects // (the last two are stored interfaces for reuse, though) type containsableArray struct { scalars []containsableScalar arrays []containsable objects []containsable } type containsableKeyValuePair struct { k jsonString v containsable } // containsableObject is a preprocessed JSON object. // Same as a jsonObject, it is stored as a sorted-by-key list of key-value // pairs. type containsableObject []containsableKeyValuePair func (j jsonNull) preprocessForContains() (containsable, error) { return containsableScalar{j}, nil } func (j jsonFalse) preprocessForContains() (containsable, error) { return containsableScalar{j}, nil } func (j jsonTrue) preprocessForContains() (containsable, error) { return containsableScalar{j}, nil } func (j jsonNumber) preprocessForContains() (containsable, error) { return containsableScalar{j}, nil } func (j jsonString) preprocessForContains() (containsable, error) { return containsableScalar{j}, nil } func (j jsonArray) preprocessForContains() (containsable, error) { result := containsableArray{} for _, e := range j { switch e.Type() { case ArrayJSONType: preprocessed, err := e.preprocessForContains() if err != nil { return nil, err } result.arrays = append(result.arrays, preprocessed) case ObjectJSONType: preprocessed, err := e.preprocessForContains() if err != nil { return nil, err } result.objects = append(result.objects, preprocessed) default: preprocessed, err := e.preprocessForContains() if err != nil { return nil, err } result.scalars = append(result.scalars, preprocessed.(containsableScalar)) } 
} var err error sort.Slice(result.scalars, func(i, j int) bool { if err != nil { return false } var c int c, err = result.scalars[i].JSON.Compare(result.scalars[j].JSON) return c == -1 }) if err != nil { return nil, err } return result, nil } func (j jsonObject) preprocessForContains() (containsable, error) { preprocessed := make(containsableObject, len(j)) for i := range preprocessed { preprocessed[i].k = j[i].k v, err := j[i].v.preprocessForContains() if err != nil { return nil, err } preprocessed[i].v = v } return preprocessed, nil } func (j containsableScalar) contains(other containsable) (bool, error) { if o, ok := other.(containsableScalar); ok { result, err := j.JSON.Compare(o.JSON) if err != nil { return false, err } return result == 0, nil } return false, nil } func (j containsableArray) contains(other containsable) (bool, error) { if contained, ok := other.(containsableArray); ok { // Since both slices of scalars are sorted via the preprocessing, we can // step through them together via binary search. remainingScalars := j.scalars[:] for _, val := range contained.scalars { var err error found := sort.Search(len(remainingScalars), func(i int) bool { if err != nil { return false } var result int result, err = remainingScalars[i].JSON.Compare(val.JSON) return result >= 0 }) if found == len(remainingScalars) { return false, nil } result, err := remainingScalars[found].JSON.Compare(val.JSON) if err != nil { return false, err } if result != 0 { return false, nil } remainingScalars = remainingScalars[found:] } // TODO(justin): there's possibly(?) something fancier we can do with the // objects and arrays, but for now just do the quadratic check. 
objectsMatch, err := quadraticJSONArrayContains(j.objects, contained.objects) if err != nil { return false, err } if !objectsMatch { return false, nil } arraysMatch, err := quadraticJSONArrayContains(j.arrays, contained.arrays) if err != nil { return false, err } if !arraysMatch { return false, nil } return true, nil } return false, nil } // quadraticJSONArrayContains does an O(n^2) check to see if every value in // `other` is contained within a value in `container`. `container` and `other` // should not contain scalars. func quadraticJSONArrayContains(container, other []containsable) (bool, error) { for _, otherVal := range other { found := false for _, containerVal := range container { c, err := containerVal.contains(otherVal) if err != nil { return false, err } if c { found = true break } } if !found { return false, nil } } return true, nil } func (j containsableObject) contains(other containsable) (bool, error) { if contained, ok := other.(containsableObject); ok { // We can iterate through the keys of `other` and scan through to find the // corresponding keys in `j` since they're both sorted. objIdx := 0 for _, rightEntry := range contained { for objIdx < len(j) && j[objIdx].k < rightEntry.k { objIdx++ } if objIdx >= len(j) || j[objIdx].k != rightEntry.k { return false, nil } c, err := j[objIdx].v.contains(rightEntry.v) if err != nil { return false, err } if !c { return false, nil } objIdx++ } return true, nil } return false, nil }
pkg/util/json/contains.go
0.655336
0.61757
contains.go
starcoder
package goapi import . `github.com/yak-labs/chirp-lang` import ( bytes `bytes` fmt `fmt` reflect `reflect` strconv `strconv` strings `strings` ) func init() { Roots[`/bytes/Compare`] = FuncRoot{ Func: reflect.ValueOf(bytes.Compare) } Roots[`/bytes/Contains`] = FuncRoot{ Func: reflect.ValueOf(bytes.Contains) } Roots[`/bytes/Count`] = FuncRoot{ Func: reflect.ValueOf(bytes.Count) } Roots[`/bytes/Equal`] = FuncRoot{ Func: reflect.ValueOf(bytes.Equal) } Roots[`/bytes/EqualFold`] = FuncRoot{ Func: reflect.ValueOf(bytes.EqualFold) } Roots[`/bytes/Fields`] = FuncRoot{ Func: reflect.ValueOf(bytes.Fields) } Roots[`/bytes/FieldsFunc`] = FuncRoot{ Func: reflect.ValueOf(bytes.FieldsFunc) } Roots[`/bytes/HasPrefix`] = FuncRoot{ Func: reflect.ValueOf(bytes.HasPrefix) } Roots[`/bytes/HasSuffix`] = FuncRoot{ Func: reflect.ValueOf(bytes.HasSuffix) } Roots[`/bytes/Index`] = FuncRoot{ Func: reflect.ValueOf(bytes.Index) } Roots[`/bytes/IndexAny`] = FuncRoot{ Func: reflect.ValueOf(bytes.IndexAny) } Roots[`/bytes/IndexByte`] = FuncRoot{ Func: reflect.ValueOf(bytes.IndexByte) } Roots[`/bytes/IndexFunc`] = FuncRoot{ Func: reflect.ValueOf(bytes.IndexFunc) } Roots[`/bytes/IndexRune`] = FuncRoot{ Func: reflect.ValueOf(bytes.IndexRune) } Roots[`/bytes/Join`] = FuncRoot{ Func: reflect.ValueOf(bytes.Join) } Roots[`/bytes/LastIndex`] = FuncRoot{ Func: reflect.ValueOf(bytes.LastIndex) } Roots[`/bytes/LastIndexAny`] = FuncRoot{ Func: reflect.ValueOf(bytes.LastIndexAny) } Roots[`/bytes/LastIndexFunc`] = FuncRoot{ Func: reflect.ValueOf(bytes.LastIndexFunc) } Roots[`/bytes/Map`] = FuncRoot{ Func: reflect.ValueOf(bytes.Map) } Roots[`/bytes/NewBuffer`] = FuncRoot{ Func: reflect.ValueOf(bytes.NewBuffer) } Roots[`/bytes/NewBufferString`] = FuncRoot{ Func: reflect.ValueOf(bytes.NewBufferString) } Roots[`/bytes/NewReader`] = FuncRoot{ Func: reflect.ValueOf(bytes.NewReader) } Roots[`/bytes/Repeat`] = FuncRoot{ Func: reflect.ValueOf(bytes.Repeat) } Roots[`/bytes/Replace`] = FuncRoot{ Func: 
reflect.ValueOf(bytes.Replace) } Roots[`/bytes/Runes`] = FuncRoot{ Func: reflect.ValueOf(bytes.Runes) } Roots[`/bytes/Split`] = FuncRoot{ Func: reflect.ValueOf(bytes.Split) } Roots[`/bytes/SplitAfter`] = FuncRoot{ Func: reflect.ValueOf(bytes.SplitAfter) } Roots[`/bytes/SplitAfterN`] = FuncRoot{ Func: reflect.ValueOf(bytes.SplitAfterN) } Roots[`/bytes/SplitN`] = FuncRoot{ Func: reflect.ValueOf(bytes.SplitN) } Roots[`/bytes/Title`] = FuncRoot{ Func: reflect.ValueOf(bytes.Title) } Roots[`/bytes/ToLower`] = FuncRoot{ Func: reflect.ValueOf(bytes.ToLower) } Roots[`/bytes/ToLowerSpecial`] = FuncRoot{ Func: reflect.ValueOf(bytes.ToLowerSpecial) } Roots[`/bytes/ToTitle`] = FuncRoot{ Func: reflect.ValueOf(bytes.ToTitle) } Roots[`/bytes/ToTitleSpecial`] = FuncRoot{ Func: reflect.ValueOf(bytes.ToTitleSpecial) } Roots[`/bytes/ToUpper`] = FuncRoot{ Func: reflect.ValueOf(bytes.ToUpper) } Roots[`/bytes/ToUpperSpecial`] = FuncRoot{ Func: reflect.ValueOf(bytes.ToUpperSpecial) } Roots[`/bytes/Trim`] = FuncRoot{ Func: reflect.ValueOf(bytes.Trim) } Roots[`/bytes/TrimFunc`] = FuncRoot{ Func: reflect.ValueOf(bytes.TrimFunc) } Roots[`/bytes/TrimLeft`] = FuncRoot{ Func: reflect.ValueOf(bytes.TrimLeft) } Roots[`/bytes/TrimLeftFunc`] = FuncRoot{ Func: reflect.ValueOf(bytes.TrimLeftFunc) } Roots[`/bytes/TrimRight`] = FuncRoot{ Func: reflect.ValueOf(bytes.TrimRight) } Roots[`/bytes/TrimRightFunc`] = FuncRoot{ Func: reflect.ValueOf(bytes.TrimRightFunc) } Roots[`/bytes/TrimSpace`] = FuncRoot{ Func: reflect.ValueOf(bytes.TrimSpace) } Roots[`/fmt/Errorf`] = FuncRoot{ Func: reflect.ValueOf(fmt.Errorf) } Roots[`/fmt/Fprint`] = FuncRoot{ Func: reflect.ValueOf(fmt.Fprint) } Roots[`/fmt/Fprintf`] = FuncRoot{ Func: reflect.ValueOf(fmt.Fprintf) } Roots[`/fmt/Fprintln`] = FuncRoot{ Func: reflect.ValueOf(fmt.Fprintln) } Roots[`/fmt/Fscan`] = FuncRoot{ Func: reflect.ValueOf(fmt.Fscan) } Roots[`/fmt/Fscanf`] = FuncRoot{ Func: reflect.ValueOf(fmt.Fscanf) } Roots[`/fmt/Fscanln`] = FuncRoot{ Func: 
reflect.ValueOf(fmt.Fscanln) } Roots[`/fmt/Print`] = FuncRoot{ Func: reflect.ValueOf(fmt.Print) } Roots[`/fmt/Printf`] = FuncRoot{ Func: reflect.ValueOf(fmt.Printf) } Roots[`/fmt/Println`] = FuncRoot{ Func: reflect.ValueOf(fmt.Println) } Roots[`/fmt/Scan`] = FuncRoot{ Func: reflect.ValueOf(fmt.Scan) } Roots[`/fmt/Scanf`] = FuncRoot{ Func: reflect.ValueOf(fmt.Scanf) } Roots[`/fmt/Scanln`] = FuncRoot{ Func: reflect.ValueOf(fmt.Scanln) } Roots[`/fmt/Sprint`] = FuncRoot{ Func: reflect.ValueOf(fmt.Sprint) } Roots[`/fmt/Sprintf`] = FuncRoot{ Func: reflect.ValueOf(fmt.Sprintf) } Roots[`/fmt/Sprintln`] = FuncRoot{ Func: reflect.ValueOf(fmt.Sprintln) } Roots[`/fmt/Sscan`] = FuncRoot{ Func: reflect.ValueOf(fmt.Sscan) } Roots[`/fmt/Sscanf`] = FuncRoot{ Func: reflect.ValueOf(fmt.Sscanf) } Roots[`/fmt/Sscanln`] = FuncRoot{ Func: reflect.ValueOf(fmt.Sscanln) } Roots[`/reflect/Append`] = FuncRoot{ Func: reflect.ValueOf(reflect.Append) } Roots[`/reflect/AppendSlice`] = FuncRoot{ Func: reflect.ValueOf(reflect.AppendSlice) } Roots[`/reflect/Copy`] = FuncRoot{ Func: reflect.ValueOf(reflect.Copy) } Roots[`/reflect/DeepEqual`] = FuncRoot{ Func: reflect.ValueOf(reflect.DeepEqual) } Roots[`/reflect/Indirect`] = FuncRoot{ Func: reflect.ValueOf(reflect.Indirect) } Roots[`/reflect/MakeChan`] = FuncRoot{ Func: reflect.ValueOf(reflect.MakeChan) } Roots[`/reflect/MakeMap`] = FuncRoot{ Func: reflect.ValueOf(reflect.MakeMap) } Roots[`/reflect/MakeSlice`] = FuncRoot{ Func: reflect.ValueOf(reflect.MakeSlice) } Roots[`/reflect/New`] = FuncRoot{ Func: reflect.ValueOf(reflect.New) } Roots[`/reflect/NewAt`] = FuncRoot{ Func: reflect.ValueOf(reflect.NewAt) } Roots[`/reflect/PtrTo`] = FuncRoot{ Func: reflect.ValueOf(reflect.PtrTo) } Roots[`/reflect/TypeOf`] = FuncRoot{ Func: reflect.ValueOf(reflect.TypeOf) } Roots[`/reflect/ValueOf`] = FuncRoot{ Func: reflect.ValueOf(reflect.ValueOf) } Roots[`/reflect/Zero`] = FuncRoot{ Func: reflect.ValueOf(reflect.Zero) } Roots[`/strconv/AppendBool`] = FuncRoot{ Func: 
reflect.ValueOf(strconv.AppendBool) } Roots[`/strconv/AppendFloat`] = FuncRoot{ Func: reflect.ValueOf(strconv.AppendFloat) } Roots[`/strconv/AppendInt`] = FuncRoot{ Func: reflect.ValueOf(strconv.AppendInt) } Roots[`/strconv/AppendQuote`] = FuncRoot{ Func: reflect.ValueOf(strconv.AppendQuote) } Roots[`/strconv/AppendQuoteRune`] = FuncRoot{ Func: reflect.ValueOf(strconv.AppendQuoteRune) } Roots[`/strconv/AppendQuoteRuneToASCII`] = FuncRoot{ Func: reflect.ValueOf(strconv.AppendQuoteRuneToASCII) } Roots[`/strconv/AppendQuoteToASCII`] = FuncRoot{ Func: reflect.ValueOf(strconv.AppendQuoteToASCII) } Roots[`/strconv/AppendUint`] = FuncRoot{ Func: reflect.ValueOf(strconv.AppendUint) } Roots[`/strconv/Atoi`] = FuncRoot{ Func: reflect.ValueOf(strconv.Atoi) } Roots[`/strconv/CanBackquote`] = FuncRoot{ Func: reflect.ValueOf(strconv.CanBackquote) } Roots[`/strconv/FormatBool`] = FuncRoot{ Func: reflect.ValueOf(strconv.FormatBool) } Roots[`/strconv/FormatFloat`] = FuncRoot{ Func: reflect.ValueOf(strconv.FormatFloat) } Roots[`/strconv/FormatInt`] = FuncRoot{ Func: reflect.ValueOf(strconv.FormatInt) } Roots[`/strconv/FormatUint`] = FuncRoot{ Func: reflect.ValueOf(strconv.FormatUint) } Roots[`/strconv/IsPrint`] = FuncRoot{ Func: reflect.ValueOf(strconv.IsPrint) } Roots[`/strconv/Itoa`] = FuncRoot{ Func: reflect.ValueOf(strconv.Itoa) } Roots[`/strconv/ParseBool`] = FuncRoot{ Func: reflect.ValueOf(strconv.ParseBool) } Roots[`/strconv/ParseFloat`] = FuncRoot{ Func: reflect.ValueOf(strconv.ParseFloat) } Roots[`/strconv/ParseInt`] = FuncRoot{ Func: reflect.ValueOf(strconv.ParseInt) } Roots[`/strconv/ParseUint`] = FuncRoot{ Func: reflect.ValueOf(strconv.ParseUint) } Roots[`/strconv/Quote`] = FuncRoot{ Func: reflect.ValueOf(strconv.Quote) } Roots[`/strconv/QuoteRune`] = FuncRoot{ Func: reflect.ValueOf(strconv.QuoteRune) } Roots[`/strconv/QuoteRuneToASCII`] = FuncRoot{ Func: reflect.ValueOf(strconv.QuoteRuneToASCII) } Roots[`/strconv/QuoteToASCII`] = FuncRoot{ Func: 
reflect.ValueOf(strconv.QuoteToASCII) } Roots[`/strconv/Unquote`] = FuncRoot{ Func: reflect.ValueOf(strconv.Unquote) } Roots[`/strconv/UnquoteChar`] = FuncRoot{ Func: reflect.ValueOf(strconv.UnquoteChar) } Roots[`/strings/Contains`] = FuncRoot{ Func: reflect.ValueOf(strings.Contains) } Roots[`/strings/ContainsAny`] = FuncRoot{ Func: reflect.ValueOf(strings.ContainsAny) } Roots[`/strings/ContainsRune`] = FuncRoot{ Func: reflect.ValueOf(strings.ContainsRune) } Roots[`/strings/Count`] = FuncRoot{ Func: reflect.ValueOf(strings.Count) } Roots[`/strings/EqualFold`] = FuncRoot{ Func: reflect.ValueOf(strings.EqualFold) } Roots[`/strings/Fields`] = FuncRoot{ Func: reflect.ValueOf(strings.Fields) } Roots[`/strings/FieldsFunc`] = FuncRoot{ Func: reflect.ValueOf(strings.FieldsFunc) } Roots[`/strings/HasPrefix`] = FuncRoot{ Func: reflect.ValueOf(strings.HasPrefix) } Roots[`/strings/HasSuffix`] = FuncRoot{ Func: reflect.ValueOf(strings.HasSuffix) } Roots[`/strings/Index`] = FuncRoot{ Func: reflect.ValueOf(strings.Index) } Roots[`/strings/IndexAny`] = FuncRoot{ Func: reflect.ValueOf(strings.IndexAny) } Roots[`/strings/IndexFunc`] = FuncRoot{ Func: reflect.ValueOf(strings.IndexFunc) } Roots[`/strings/IndexRune`] = FuncRoot{ Func: reflect.ValueOf(strings.IndexRune) } Roots[`/strings/Join`] = FuncRoot{ Func: reflect.ValueOf(strings.Join) } Roots[`/strings/LastIndex`] = FuncRoot{ Func: reflect.ValueOf(strings.LastIndex) } Roots[`/strings/LastIndexAny`] = FuncRoot{ Func: reflect.ValueOf(strings.LastIndexAny) } Roots[`/strings/LastIndexFunc`] = FuncRoot{ Func: reflect.ValueOf(strings.LastIndexFunc) } Roots[`/strings/Map`] = FuncRoot{ Func: reflect.ValueOf(strings.Map) } Roots[`/strings/NewReader`] = FuncRoot{ Func: reflect.ValueOf(strings.NewReader) } Roots[`/strings/NewReplacer`] = FuncRoot{ Func: reflect.ValueOf(strings.NewReplacer) } Roots[`/strings/Repeat`] = FuncRoot{ Func: reflect.ValueOf(strings.Repeat) } Roots[`/strings/Replace`] = FuncRoot{ Func: 
reflect.ValueOf(strings.Replace) } Roots[`/strings/Split`] = FuncRoot{ Func: reflect.ValueOf(strings.Split) } Roots[`/strings/SplitAfter`] = FuncRoot{ Func: reflect.ValueOf(strings.SplitAfter) } Roots[`/strings/SplitAfterN`] = FuncRoot{ Func: reflect.ValueOf(strings.SplitAfterN) } Roots[`/strings/SplitN`] = FuncRoot{ Func: reflect.ValueOf(strings.SplitN) } Roots[`/strings/Title`] = FuncRoot{ Func: reflect.ValueOf(strings.Title) } Roots[`/strings/ToLower`] = FuncRoot{ Func: reflect.ValueOf(strings.ToLower) } Roots[`/strings/ToLowerSpecial`] = FuncRoot{ Func: reflect.ValueOf(strings.ToLowerSpecial) } Roots[`/strings/ToTitle`] = FuncRoot{ Func: reflect.ValueOf(strings.ToTitle) } Roots[`/strings/ToTitleSpecial`] = FuncRoot{ Func: reflect.ValueOf(strings.ToTitleSpecial) } Roots[`/strings/ToUpper`] = FuncRoot{ Func: reflect.ValueOf(strings.ToUpper) } Roots[`/strings/ToUpperSpecial`] = FuncRoot{ Func: reflect.ValueOf(strings.ToUpperSpecial) } Roots[`/strings/Trim`] = FuncRoot{ Func: reflect.ValueOf(strings.Trim) } Roots[`/strings/TrimFunc`] = FuncRoot{ Func: reflect.ValueOf(strings.TrimFunc) } Roots[`/strings/TrimLeft`] = FuncRoot{ Func: reflect.ValueOf(strings.TrimLeft) } Roots[`/strings/TrimLeftFunc`] = FuncRoot{ Func: reflect.ValueOf(strings.TrimLeftFunc) } Roots[`/strings/TrimRight`] = FuncRoot{ Func: reflect.ValueOf(strings.TrimRight) } Roots[`/strings/TrimRightFunc`] = FuncRoot{ Func: reflect.ValueOf(strings.TrimRightFunc) } Roots[`/strings/TrimSpace`] = FuncRoot{ Func: reflect.ValueOf(strings.TrimSpace) } Roots[`/bytes/ErrTooLarge`] = VarRoot{ Var: reflect.ValueOf(&bytes.ErrTooLarge) } Roots[`/strconv/ErrRange`] = VarRoot{ Var: reflect.ValueOf(&strconv.ErrRange) } Roots[`/strconv/ErrSyntax`] = VarRoot{ Var: reflect.ValueOf(&strconv.ErrSyntax) } { var tmp *bytes.Buffer Roots[`/bytes/Buffer`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *bytes.Reader Roots[`/bytes/Reader`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp 
*reflect.Method Roots[`/reflect/Method`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.Method Roots[`/reflect/Method`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.Method Roots[`/reflect/Method`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.Method Roots[`/reflect/Method`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.Method Roots[`/reflect/Method`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.Method Roots[`/reflect/Method`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.SliceHeader Roots[`/reflect/SliceHeader`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.SliceHeader Roots[`/reflect/SliceHeader`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.SliceHeader Roots[`/reflect/SliceHeader`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.SliceHeader Roots[`/reflect/SliceHeader`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StringHeader Roots[`/reflect/StringHeader`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StringHeader Roots[`/reflect/StringHeader`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StringHeader Roots[`/reflect/StringHeader`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StructField Roots[`/reflect/StructField`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StructField Roots[`/reflect/StructField`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StructField Roots[`/reflect/StructField`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StructField Roots[`/reflect/StructField`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StructField 
Roots[`/reflect/StructField`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StructField Roots[`/reflect/StructField`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StructField Roots[`/reflect/StructField`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.StructField Roots[`/reflect/StructField`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.Value Roots[`/reflect/Value`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.ValueError Roots[`/reflect/ValueError`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.ValueError Roots[`/reflect/ValueError`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *reflect.ValueError Roots[`/reflect/ValueError`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *strconv.NumError Roots[`/strconv/NumError`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *strconv.NumError Roots[`/strconv/NumError`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *strconv.NumError Roots[`/strconv/NumError`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *strconv.NumError Roots[`/strconv/NumError`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *strings.Reader Roots[`/strings/Reader`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } { var tmp *strings.Replacer Roots[`/strings/Replacer`] = TypeRoot{ Type: reflect.ValueOf(tmp).Type().Elem() } } Roots[`/bytes/MinRead`] = ConstRoot{ Const: int64(bytes.MinRead) } Roots[`/reflect/Array`] = ConstRoot{ Const: reflect.Array } Roots[`/reflect/Bool`] = ConstRoot{ Const: reflect.Bool } Roots[`/reflect/BothDir`] = ConstRoot{ Const: reflect.BothDir } Roots[`/reflect/Chan`] = ConstRoot{ Const: reflect.Chan } Roots[`/reflect/Complex128`] = ConstRoot{ Const: reflect.Complex128 } Roots[`/reflect/Complex64`] = ConstRoot{ Const: 
reflect.Complex64 } Roots[`/reflect/Float32`] = ConstRoot{ Const: reflect.Float32 } Roots[`/reflect/Float64`] = ConstRoot{ Const: reflect.Float64 } Roots[`/reflect/Func`] = ConstRoot{ Const: reflect.Func } Roots[`/reflect/Int`] = ConstRoot{ Const: reflect.Int } Roots[`/reflect/Int16`] = ConstRoot{ Const: reflect.Int16 } Roots[`/reflect/Int32`] = ConstRoot{ Const: reflect.Int32 } Roots[`/reflect/Int64`] = ConstRoot{ Const: reflect.Int64 } Roots[`/reflect/Int8`] = ConstRoot{ Const: reflect.Int8 } Roots[`/reflect/Interface`] = ConstRoot{ Const: reflect.Interface } Roots[`/reflect/Invalid`] = ConstRoot{ Const: reflect.Invalid } Roots[`/reflect/Map`] = ConstRoot{ Const: reflect.Map } Roots[`/reflect/Ptr`] = ConstRoot{ Const: reflect.Ptr } Roots[`/reflect/RecvDir`] = ConstRoot{ Const: reflect.RecvDir } Roots[`/reflect/SendDir`] = ConstRoot{ Const: reflect.SendDir } Roots[`/reflect/Slice`] = ConstRoot{ Const: reflect.Slice } Roots[`/reflect/String`] = ConstRoot{ Const: reflect.String } Roots[`/reflect/Struct`] = ConstRoot{ Const: reflect.Struct } Roots[`/reflect/Uint`] = ConstRoot{ Const: reflect.Uint } Roots[`/reflect/Uint16`] = ConstRoot{ Const: reflect.Uint16 } Roots[`/reflect/Uint32`] = ConstRoot{ Const: reflect.Uint32 } Roots[`/reflect/Uint64`] = ConstRoot{ Const: reflect.Uint64 } Roots[`/reflect/Uint8`] = ConstRoot{ Const: reflect.Uint8 } Roots[`/reflect/Uintptr`] = ConstRoot{ Const: reflect.Uintptr } Roots[`/reflect/UnsafePointer`] = ConstRoot{ Const: reflect.UnsafePointer } Roots[`/strconv/IntSize`] = ConstRoot{ Const: int64(strconv.IntSize) } }
goapi/tiny/wrap.go
0.531453
0.617887
wrap.go
starcoder
// Package errors provides error types and function package errors var ( // ErrCreateProperty represents a function to generate an error that the property creation failed. ErrCreateProperty = func(err error) error { return Wrap(err, "failed to create property") } // ErrIndexNotFound represents an error that the index file is not found. ErrIndexNotFound = New("index file not found") // ErrIndexLoadTimeout represents an error that the index loading timeout. ErrIndexLoadTimeout = New("index load timeout") // ErrInvalidDimensionSize represents a function to generate an error that the dimension size is invalid. ErrInvalidDimensionSize = func(current, limit int) error { if limit == 0 { return Errorf("dimension size %d is invalid, the supporting dimension size must be bigger than 2", current) } return Errorf("dimension size %d is invalid, the supporting dimension size must be between 2 ~ %d", current, limit) } // ErrDimensionLimitExceed represents a function to generate an error that the supported dimension limit exceeded. ErrDimensionLimitExceed = func(current, limit int) error { return Errorf("supported dimension limit exceed:\trequired = %d,\tlimit = %d", current, limit) } // ErrIncompatibleDimensionSize represents a function to generate an error that the incompatible dimension size detected. ErrIncompatibleDimensionSize = func(req, dim int) error { return Errorf("incompatible dimension size detected\trequested: %d,\tconfigured: %d", req, dim) } // ErrUnsupportedObjectType represents an error that the object type is unsupported. ErrUnsupportedObjectType = New("unsupported ObjectType") // ErrUnsupportedDistanceType represents an error that the distance type is unsupported. ErrUnsupportedDistanceType = New("unsupported DistanceType") // ErrFailedToSetDistanceType represents a function to generate an error that the set of distance type failed. 
ErrFailedToSetDistanceType = func(err error, distance string) error { return Wrap(err, "failed to set distance type "+distance) } // ErrFailedToSetObjectType represents a function to generate an error that the set of object type failed. ErrFailedToSetObjectType = func(err error, t string) error { return Wrap(err, "failed to set object type "+t) } // ErrFailedToSetDimension represents a function to generate an error that the set of dimension failed. ErrFailedToSetDimension = func(err error) error { return Wrap(err, "failed to set dimension") } // ErrFailedToSetCreationEdgeSize represents a function to generate an error that the set of creation edge size failed. ErrFailedToSetCreationEdgeSize = func(err error) error { return Wrap(err, "failed to set creation edge size") } // ErrFailedToSetSearchEdgeSize represents a function to generate an error that the set of search edge size failed. ErrFailedToSetSearchEdgeSize = func(err error) error { return Wrap(err, "failed to set search edge size") } // ErrUncommittedIndexExists represents a function to generate an error that the uncommitted indexes exist. ErrUncommittedIndexExists = func(num uint64) error { return Errorf("%d indexes are not committed", num) } // ErrUncommittedIndexNotFound represents an error that the uncommitted indexes are not found. ErrUncommittedIndexNotFound = New("uncommitted indexes are not found") // ErrCAPINotImplemented represents an error that the function is not implemented in C API. ErrCAPINotImplemented = New("not implemented in C API") // ErrUUIDAlreadyExists represents a function to generate an error that the uuid already exists. ErrUUIDAlreadyExists = func(uuid string, oid uint) error { return Errorf("ngt uuid %s object id %d already exists ", uuid, oid) } // ErrUUIDNotFound represents a function to generate an error that the uuid is not found. 
ErrUUIDNotFound = func(id uint32) error { if id == 0 { return New("ngt object uuid not found") } return Errorf("ngt object uuid %d's metadata not found", id) } // ErrObjectIDNotFound represents a function to generate an error that the object id is not found. ErrObjectIDNotFound = func(uuid string) error { return Errorf("ngt uuid %s's object id not found", uuid) } // ErrObjectNotFound represents a function to generate an error that the object is not found. ErrObjectNotFound = func(err error, uuid string) error { return Wrapf(err, "ngt uuid %s's object not found", uuid) } // ErrRemoveRequestedBeforeIndexing represents a function to generate an error that the object is not indexed so can not remove it. ErrRemoveRequestedBeforeIndexing = func(oid uint) error { return Errorf("object id %d is not indexed we cannot remove it", oid) } )
internal/errors/ngt.go
0.744192
0.45944
ngt.go
starcoder
package elements import "github.com/fileformats/graphics/jt/model" // Geometric Transform Attribute Element contains a 4x4 homogeneous transformation matrix that positions the associated // LSG node’s coordinate system relative to its parent LSG node. // JT format LSG traversal semantics state that geometric transform attributes accumulate down the LSG through matrix // multiplication as follows: // p’ = pAM // Where p is a point of the model, p’ is the transformed point, M is the current modeling transformation matrix // inherited from ancestor LSG nodes and previous Geometric Transform Attribute Element, and A is the transformation // matrix of this Geometric Transform Attribute Element. type GeometricTransformAttribute struct { BaseAttribute // Version Number is the version identifier for this element VersionNumber uint8 // Stored Values mask is a 16-bit mask where each bit is a flag indicating whether the corresponding element in // the matrix is different from the identity matrix. Only elements which are different from the identity matrix are // actually stored. The bits are assigned to matrix elements as follows // Bit15 Bit14 Bit13 Bit12 // Bit11 Bit10 Bit9 Bit8 // Bit Bit6 Bit5 Bit4 // Bit Bit2 Bit1 Bit0 StoredValueMask uint16 // Element Value specifies a particular matrix element value. 
ElementValue float32 // Computed transformation matrix TransformationMatrix model.Matrix4F32 } func (n GeometricTransformAttribute) GUID() model.GUID { return model.GeometricTransformAttributeElement } func (n *GeometricTransformAttribute) Read(c *model.Context) error { c.LogGroup("GeometricTransformAttribute") defer c.LogGroupEnd() if err := (&n.BaseAttribute).Read(c); err != nil { return err } if c.Version.GreaterEqThan(model.V9) { if c.Version.GreaterEqThan(model.V10) { n.VersionNumber = c.Data.UInt8() } else { n.VersionNumber = uint8(c.Data.Int16()) } } n.StoredValueMask = c.Data.UInt16() tmp := n.StoredValueMask c.Log("StoreValueMask: %d", n.StoredValueMask) n.TransformationMatrix = model.Matrix4F32{ 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, } total := 0 for i := 0; i < 16; i++ { if tmp & 0x8000 != 0 { n.TransformationMatrix[i] = c.Data.Float32() total++ } tmp = tmp << 1 } // TODO: investigate here if f64 matrix is required return c.Data.GetError() } func (n *GeometricTransformAttribute) GetBaseAttribute() *BaseAttribute { return &n.BaseAttribute } func (n *GeometricTransformAttribute) BaseElement() *JTElement { return &n.JTElement }
jt/segments/elements/GeometricTransformAttribute.go
0.710729
0.662339
GeometricTransformAttribute.go
starcoder
package matchers import ( "reflect" "testing" ) type Expectation struct { t *testing.T actual interface{} } func (e *Expectation) ToEqual(expected interface{}) { e.verifyExpectedNotNil(expected) if !reflect.DeepEqual(e.actual, expected) { e.t.Fatalf("Expected\n\t%v\nto equal\n\t%v\n", e.actual, expected) } } func (e *Expectation) NotToEqual(expected interface{}) { e.verifyExpectedNotNil(expected) if reflect.DeepEqual(e.actual, expected) { e.t.Fatalf("Expected\n\t%v\nnot to equal\n\t%v\n", e.actual, expected) } } func (e *Expectation) ToBeNil() { if e.actual != nil { e.t.Fatalf("Expected\n\t%v\nto be nil\n", e.actual) } } func (e *Expectation) NotToBeNil() { if e.actual == nil { e.t.Fatalf("Expected\n\t%v\nnot to be nil\n", e.actual) } } func (e *Expectation) ToBeTrue() { switch a := e.actual.(type) { case bool: if e.actual == false { e.t.Fatalf("Expected\n\t%v\nto be true\n", e.actual) } default: e.t.Fatalf("Cannot check if non-bool value\n\t%v\nis truthy\n", a) } } func (e *Expectation) ToBeFalse() { switch a := e.actual.(type) { case bool: if e.actual == true { e.t.Fatalf("Expected\n\t%v\nto be false\n", e.actual) } default: e.t.Fatalf("Cannot check if non-bool value\n\t%v\nis truthy\n", a) } } func (e *Expectation) ToSucceed() { switch actual := e.actual.(type) { case error: if actual != nil { e.t.Fatalf("Expected error\n\t%v\nto have succeeded\n", actual) } default: e.t.Fatalf("Cannot check if non-error value\n\t%v\nsucceeded\n", actual) } } func (e *Expectation) ToMatchError(expected interface{}) { e.verifyExpectedNotNil(expected) actual, ok := e.actual.(error) if !ok { e.t.Fatalf("Cannot check if non-error value\n\t%v\nmatches error\n", e.actual) } switch expected := expected.(type) { case error: if !reflect.DeepEqual(actual, expected) { e.t.Fatalf("Expected\n\t%v\nto match error\n\t%v\n", actual, expected) } case string: if actual.Error() != expected { e.t.Fatalf("Expected\n\t%v\nto match error\n\t%v\n", actual, expected) } default: e.t.Fatalf("Cannot 
match\n\t%v\nagainst non-error\n\t%v\n", actual, expected) } } func (e *Expectation) verifyExpectedNotNil(expected interface{}) { if expected == nil { e.t.Fatal("Refusing to compare with <nil>. Use `ToBeNil` or `NotToBeNil` instead.") } }
internal/matchers/expectation.go
0.581303
0.670949
expectation.go
starcoder
package solar import ( "math" "time" c "github.com/rtovey/astro-lib/common" o "github.com/rtovey/astro-lib/orbit" ) const ( solarYearDurationDays = 365.242191 solarEclipticLongitudeAtEpoch = 279.403303 solarEclipticLongitudeOfPerigee = 282.768422 solarOrbitEccentricity = 0.016713 ) type SolarPosition struct { Ecliptic o.Ecliptic Debug SolarPositionDebug // Calculation debug values } type SolarPositionDebug struct { D float64 // days N float64 // degrees M float64 // degrees E float64 // radians v float64 // degrees } func Position(date time.Time) SolarPosition { D := daysSinceEpoch(date) N := northPointOFHorizon(D) M := meanAnomaly(N) E := c.SolveKeplersEquation(c.DtoR(M), solarOrbitEccentricity, math.Pow10(-6)) v := trueAnomaly(E) l := v + solarEclipticLongitudeOfPerigee l = c.NormaliseAngle(l) debug := SolarPositionDebug{ D: D, N: N, M: M, E: E, v: v, } return SolarPosition{ Ecliptic: o.Ecliptic{ Latitude: 0, Longitude: l, }, Debug: debug, } } func daysSinceEpoch(date time.Time) float64 { epoch := time.Date(1990, time.January, 1, 0, 0, 0, 0, time.UTC) return (date.Sub(epoch).Hours() + 24.0) / 24.0 } func northPointOFHorizon(D float64) float64 { return c.NormaliseAngle((360.0 / solarYearDurationDays) * D) } func meanAnomaly(N float64) float64 { M := N + solarEclipticLongitudeAtEpoch - solarEclipticLongitudeOfPerigee if M < 0 { M += 360.0 } return M } func trueAnomaly(E float64) float64 { x := math.Pow((1+solarOrbitEccentricity)/(1-solarOrbitEccentricity), 0.5) * math.Tan(E/2.0) return c.RtoD(2 * math.Atan(x)) } /*func equationOfCentreCorrection(date time.Time) float64 { M := MeanAnomaly(date) return (360.0 / math.Pi) * solarOrbitEccentricity * c.Sind(M) } func GeocentricEclipticLongitude(date time.Time) float64 { N := northPointOFHorizon(date) Ec := equationOfCentreCorrection(date) L := N + Ec + solarEclipticLongitudeAtEpoch if L > 360 { L = L - 360.0 } return L }*/
solar/solarPosition.go
0.804214
0.490053
solarPosition.go
starcoder
package cgp import ( "math" "math/rand" "sync" "time" ) // A CGPFunction is a function that is usable in a Genetic Program. It takes // zero or more parameters and outputs a single result. For example // a CGPFunction could implement binary AND or floating point multiplication. type CGPFunction func([]float64) float64 // The EvalFunction takes one Individual and returns its fitness value. type EvalFunction func(Individual) float64 // RndConstFunction takes a PRNG as input and outputs a random number that is // used as a constant in the evolved program. This allows you to set the range // and type (integers vs. floating point) of constants used during evolution. // For example, if you are evolving programs that create RGB images you might // constrain the RndConstFunction to return integer values between 0 and 255. type RndConstFunction func(rand *rand.Rand) float64 // CGPOptions is a struct describing the options of a CGP run. type CGPOptions struct { PopSize int // Population Size NumGenes int // Number of Genes MutationRate float64 // Mutation Rate NumInputs int // The number of Inputs NumOutputs int // The number of Outputs MaxArity int // The maximum Arity of the CGPFunctions in FunctionList FunctionList []CGPFunction // The functions used in evolution RandConst RndConstFunction // The function supplying constants Evaluator EvalFunction // The evaluator that assigns a fitness to an individual Rand *rand.Rand // An instance of rand.Rand that is used throughout cgp to make runs repeatable } type CGP struct { Options CGPOptions Population []Individual NumEvaluations int // The number of evaluations so far } // New takes CGPOptions and returns a new CGP object. It panics when a necessary // precondition is violated, e.g. when the number of genes is negative. 
func New(options CGPOptions) *CGP { if options.PopSize < 2 { panic("Population size must be at least 2.") } if options.NumGenes < 0 { panic("NumGenes can't be negative.") } if options.MutationRate < 0 || options.MutationRate > 1 { panic("Mutation rate must be between 0 and 1.") } if options.NumInputs < 0 { panic("NumInputs can't be negative.") } if options.NumOutputs < 1 { panic("At least one output is necessary.") } if options.MaxArity < 0 { panic("MaxArity can't be negative.") } if len(options.FunctionList) == 0 { panic("At least one function must be provided.") } if options.RandConst == nil { panic("You must supply a RandConst function.") } if options.Evaluator == nil { panic("You must supply an Evaluator function.") } if options.Rand == nil { options.Rand = rand.New(rand.NewSource(time.Now().UnixNano())) } result := &CGP{ Options: options, Population: make([]Individual, 1, options.PopSize), NumEvaluations: 0, } result.Population[0] = NewIndividual(&options) return result } // RunGeneration creates offspring from the current parent via mutation, // evaluates the offspring using the CGP object's Evaluator and selects the new // parent for the following generation. 
func (cgp *CGP) RunGeneration() { // Create offspring cgp.Population = cgp.Population[0:1] for i := 1; i < cgp.Options.PopSize; i++ { cgp.Population = append(cgp.Population, cgp.Population[0].Mutate()) } // Evaluate offspring (in parallel) var wg sync.WaitGroup for i := 1; i < cgp.Options.PopSize; i++ { // If the individual computes the same function as the parent, skip // evaluation and just use the parent's fitness if cgp.Population[i].CacheID() == cgp.Population[0].CacheID() { cgp.Population[i].Fitness = cgp.Population[0].Fitness } else { // Individual is different from parent, compute fitness wg.Add(1) cgp.NumEvaluations += 1 go func(i int) { defer wg.Done() cgp.Population[i].Fitness = cgp.Options.Evaluator(cgp.Population[i]) }(i) } } wg.Wait() // Replace parent with best offspring bestFitness := math.Inf(1) bestIndividual := 0 for i := 1; i < cgp.Options.PopSize; i++ { if cgp.Population[i].Fitness < bestFitness { bestFitness = cgp.Population[i].Fitness bestIndividual = i } } if bestFitness <= cgp.Population[0].Fitness { cgp.Population[0] = cgp.Population[bestIndividual] } }
cgp.go
0.701713
0.642741
cgp.go
starcoder
package kernel import ( "fmt" "math" ) const almostzero = .000001 type _NearestNeighbor struct{} func (_NearestNeighbor) Do(x float64) float64 { if -.5 < x && x <= .5 { return 1 } return 0 } func (_NearestNeighbor) Rad() int { return 0 } func (_NearestNeighbor) String() string { return "NearestNeighbor" } type _Bilinear struct{} func (_Bilinear) Do(x float64) float64 { x = math.Abs(x) if x <= 1 { return 1 - x } return 0 } func (_Bilinear) Rad() int { return 1 } func (_Bilinear) String() string { return "Bilinear" } type _Bell struct{} func (_Bell) Do(x float64) float64 { x = math.Abs(x) if x <= .5 { return 0.75 - x*x } if x <= 1.5 { return .5 * (x - 1.5) * (x - 1.5) } return 0 } func (_Bell) Rad() int { return 2 } func (_Bell) String() string { return "Bell" } type _Hermite struct{} func (_Hermite) Do(x float64) float64 { x = math.Abs(x) if x <= 1 { return 2*x*x*x - 3*x*x + 1 } return 0 } func (_Hermite) Rad() int { return 1 } func (_Hermite) String() string { return "Hermite" } type _Bicubic struct{ A float64 } func (s _Bicubic) Do(x float64) float64 { x = math.Abs(x) if x <= 1 { return (s.A+2)*x*x*x - (s.A+3)*x*x + 1 } if x < 2 { return (s.A * x * x * x) - (5 * s.A * x * x) + (8 * s.A * x) - (4 * s.A) } return 0 } func (_Bicubic) Rad() int { return 2 } func (_Bicubic) String() string { return "Bicubic" } type _Michell struct{ B, C float64 } func makeMichell(b float64) _Michell { return _Michell{B: b, C: (1 - b)/2} } func (s _Michell) Do(x float64) float64 { x = math.Abs(x) if x < 1 { return ((12-9*s.B-6*s.C)*x*x*x + (-18+12*s.B+6*s.C)*x*x + (6 - 2*s.B)) / 6 } if x < 2 { return ((-s.B-6*s.C)*x*x*x + (6*s.B+30*s.C)*x*x + (-12*s.B-48*s.C)*x + (8*s.B + 24*s.C)) / 6 } return 0 } func (_Michell) Rad() int { return 2 } func (s _Michell) String() string { return fmt.Sprintf("Michell(B:%v, C:%v)", s.B, s.C) } type ( _Lanczos2 struct{} _Lanczos3 struct{} ) func (_Lanczos2) Do(x float64) float64 { const a = 2 if x == 0 { return 1 } if -a <= x && x <= a { return (a * 
math.Sin(math.Pi*x) * math.Sin((math.Pi*x)/a)) / (math.Pi * math.Pi * x * x) } return 0 } func (_Lanczos2) Rad() int { return 2 } func (_Lanczos2) String() string { return "Lanczos2" } func (_Lanczos3) Do(x float64) float64 { const a = 3 if x == 0 { return 1 } if -a <= x && x <= a { return (a * math.Sin(math.Pi*x) * math.Sin((math.Pi*x)/a)) / (math.Pi * math.Pi * x * x) } return 0 } func (_Lanczos3) Rad() int { return 3 } func (_Lanczos3) String() string { return "Lanczos3" }
tools/kernel/inplements.go
0.712632
0.468183
inplements.go
starcoder
package main func main() { } type DataCollection struct { data []byte } func NewDataCollection(data []byte) *DataCollection { dc := new(DataCollection) dc.data = data return dc } // ValueAt returns the Nth item. func (c *DataCollection) ValueAt(n int) bool { byteIndex := n / 8 bitIndex := (uint)(n % 8) mask := (byte)(1 << bitIndex) return c.data[byteIndex]&mask != 0 } // Len returns the number of items in the collection func (c *DataCollection) Len() int { return len(c.data) * 8 } // golomb implements the Golomb’s randomness postulate test, as described at // http://cacr.uwaterloo.ca/hac/about/chap5.pdf section 5.4.3 func (c *DataCollection) FirstGolombTest() bool { zeroOneDiff := c.ZeroOneDiff() return -1 <= zeroOneDiff && zeroOneDiff <= 1 } func (c *DataCollection) SecondGolombTest() bool { runLengths := c.CalculateRunLengths() currLen := 1 for expectRunsAtLen := c.Len() / 2; expectRunsAtLen > 1; expectRunsAtLen = expectRunsAtLen / 2 { if runLengths[currLen] < expectRunsAtLen { return false } currLen++ } return true } func (c *DataCollection) ThirdGolombTest() bool { firstAutocorrelation := c.Autocorrelation(1) for k := 2; k < c.Len(); k++ { if firstAutocorrelation != c.Autocorrelation(k) { return false } } return true } func (c *DataCollection) Autocorrelation(shift int) (autocorr int) { for i := 0; i < c.Len(); i++ { // (2s_i - 1)(2s_i+shift - 1) j := (i + shift) % c.Len() if c.ValueAt(i) == c.ValueAt(j) { autocorr++ } else { autocorr-- } } return } func (c *DataCollection) ZeroOneDiff() (difference int) { for i := 0; i < c.Len(); i++ { isBitSet := c.ValueAt(i) if isBitSet { difference++ } else { difference-- } } return } func (c *DataCollection) CalculateRunLengths() (runLengths map[int]int) { runLengths = make(map[int]int) currRunLen := 0 lastBitSet := false first := true for i := 0; i < c.Len(); i++ { isBitSet := c.ValueAt(i) if first || lastBitSet == isBitSet { currRunLen++ } else { runLengths[currRunLen] = runLengths[currRunLen] + 1 currRunLen = 1 } 
lastBitSet = isBitSet first = false } runLengths[currRunLen] = runLengths[currRunLen] + 1 return }
cmd/prng-test/main.go
0.68721
0.412589
main.go
starcoder
package plot // Elements defines a list of elements. type Elements []Element // Add appends an element to the list. func (els *Elements) Add(el Element) { *els = append(*els, el) } // AddGroup appends elements as a single group to the list. func (els *Elements) AddGroup(adds ...Element) { els.Add(Elements(adds)) } // Stats calculates the stats from all elements. func (els Elements) Stats() Stats { return maximalStats(els) } // Draw draws the elements drawn over each other. func (els Elements) Draw(plot *Plot, canvas Canvas) { for _, el := range els { if el == nil { continue } el.Draw(plot, canvas) } } // Margin is a collection which is drawn with a margin. type Margin struct { Amount Rect Elements } // NewMargin creates a elements groups. func NewMargin(amount Rect, els ...Element) *Margin { return &Margin{Amount: amount, Elements: Elements(els)} } // Draw draws the elements drawn over each other. func (margin *Margin) Draw(plot *Plot, canvas Canvas) { bounds := canvas.Bounds().Inset(margin.Amount) margin.Elements.Draw(plot, canvas.Context(bounds)) } // VStack implements vertically stacked elements. type VStack struct { Margin Rect Elements } // NewVStack creates a collection of elements that are vertically stacked. func NewVStack(els ...Element) *VStack { return &VStack{Elements: Elements(els)} } // Draw draws elements vertically stacked equally dividing space. func (stack *VStack) Draw(plot *Plot, canvas Canvas) { if len(stack.Elements) == 0 { return } bounds := canvas.Bounds() for i, el := range stack.Elements { block := bounds.Row(i, len(stack.Elements)) el.Draw(plot, canvas.Context(block.Inset(stack.Margin))) } } // HStack implements horizontally stacked elements. type HStack struct { Margin Rect Elements } // NewHStack creates a collection of elements that are horizontally stacked. func NewHStack(els ...Element) *HStack { return &HStack{Elements: Elements(els)} } // Draw draws elements horizontally stacked equally dividing space. 
func (stack *HStack) Draw(plot *Plot, canvas Canvas) { if len(stack.Elements) == 0 { return } bounds := canvas.Bounds() for i, el := range stack.Elements { block := bounds.Column(i, len(stack.Elements)) el.Draw(plot, canvas.Context(block.Inset(stack.Margin))) } } // HFlex implements horizontally stacked elements with non-equal sizes. type HFlex struct { Margin Rect fixedSize []float64 elements Elements } // NewHFlex creates a horizontally flexing elements. func NewHFlex() *HFlex { return &HFlex{} } // Stats calculates the stats from all elements. func (stack *HFlex) Stats() Stats { return stack.elements.Stats() } // Add adds an element with fixed size. func (stack *HFlex) Add(fixedSize float64, el Element) { stack.elements.Add(el) stack.fixedSize = append(stack.fixedSize, fixedSize) } // AddGroup adds a group of elements with fixed size. func (stack *HFlex) AddGroup(fixedSize float64, adds ...Element) { stack.Add(fixedSize, Elements(adds)) } // Draw draws elements. func (stack *HFlex) Draw(plot *Plot, canvas Canvas) { if len(stack.elements) == 0 { return } fixedSize := 0.0 flexCount := 0.0 for i, size := range stack.fixedSize { fixedSize += size if stack.elements[i] == nil { continue } if size == 0 { flexCount++ } } bounds := canvas.Bounds() size := bounds.Size() flexWidth := (bounds.Size().X - fixedSize) / flexCount min := bounds.Min for i, el := range stack.elements { elsize := stack.fixedSize[i] if el == nil { min.X += elsize continue } if elsize == 0 { elsize = flexWidth } block := Rect{ min, min.Add(Point{elsize, size.Y}), } min.X = block.Max.X el.Draw(plot, canvas.Context(block.Inset(stack.Margin))) } } // VFlex implements horizontally stacked elements with non-equal sizes. type VFlex struct { Margin Rect fixedSize []float64 elements Elements } // NewVFlex creates a vertically flexing elements. func NewVFlex() *VFlex { return &VFlex{} } // Stats calculates the stats from all elements. 
func (stack *VFlex) Stats() Stats { return stack.elements.Stats() } // Add adds an element with fixed size. func (stack *VFlex) Add(fixedSize float64, el Element) { stack.elements.Add(el) stack.fixedSize = append(stack.fixedSize, fixedSize) } // AddGroup adds a group of elements with fixed size. func (stack *VFlex) AddGroup(fixedSize float64, adds ...Element) { stack.Add(fixedSize, Elements(adds)) } // Draw draws elements. func (stack *VFlex) Draw(plot *Plot, canvas Canvas) { if len(stack.elements) == 0 { return } fixedSize := 0.0 flexCount := 0.0 for i, size := range stack.fixedSize { fixedSize += size if stack.elements[i] == nil { continue } if size == 0 { flexCount++ } } bounds := canvas.Bounds() size := bounds.Size() flexWidth := (bounds.Size().Y - fixedSize) / flexCount min := bounds.Min for i, el := range stack.elements { elsize := stack.fixedSize[i] if el == nil { min.Y += elsize continue } if elsize == 0 { elsize = flexWidth } block := Rect{ min, min.Add(Point{size.X, elsize}), } min.Y = block.Max.Y el.Draw(plot, canvas.Context(block.Inset(stack.Margin))) } }
elements.go
0.906896
0.61757
elements.go
starcoder
package toms import ( "github.com/dreading/gospecfunc/machine" "github.com/dreading/gospecfunc/utils" "math" ) // SYNCH1 calculates the synchrotron radiation function // x ∫ 0 to infinity { K(5/3)(t) } dt // where K(5/3) is a modified Bessel function of order 5/3. // The code uses Chebyshev expansions with the coefficients // given to 20 decimal places func SYNCH1(XVALUE float64) float64 { const ( ZERO = 0.0e0 HALF = 0.5e0 ONE = 1.0e0 THREE = 3.0e0 FOUR = 4.0e0 EIGHT = 8.0e0 TWELVE = 12.0e0 ONEHUN = 100.0e0 CONLOW = 2.14952824153447863671e0 PIBRT3 = 1.81379936423421785059e0 LNRTP2 = 0.22579135264472743236e0 ) var NTERM1, NTERM2, NTERM3 int var CHEB1, CHEB2, RET, T, X, XHIGH1, XHIGH2, XLOW, XPOWTH float64 var ASYNC1 = []float64{ 30.36468298250107627340e0, 17.07939527740839457449e0, 4.56013213354507288887e0, 0.54928124673041997963e0, 0.3729760750693011724e-1, 0.161362430201041242e-2, 0.4819167721203707e-4, 0.105124252889384e-5, 0.1746385046697e-7, 0.22815486544e-9, 0.240443082e-11, 0.2086588e-13, 0.15167e-15, 0.94e-18} var ASYNC2 = []float64{ 0.44907216235326608443e0, 0.8983536779941872179e-1, 0.810445737721512894e-2, 0.42617169910891619e-3, 0.1476096312707460e-4, 0.36286336153998e-6, 0.666348074984e-8, 0.9490771655e-10, 0.107912491e-11, 0.1002201e-13, 0.7745e-16, 0.51e-18} var ASYNCA = []float64{ 2.13293051613550009848e0, 0.7413528649542002401e-1, 0.869680999099641978e-2, 0.117038262487756921e-2, 0.16451057986191915e-3, 0.2402010214206403e-4, 0.358277563893885e-5, 0.54477476269837e-6, 0.8388028561957e-7, 0.1306988268416e-7, 0.205309907144e-8, 0.32518753688e-9, 0.5179140412e-10, 0.830029881e-11, 0.133527277e-11, 0.21591498e-12, 0.3499673e-13, 0.569942e-14, 0.92906e-15, 0.15222e-15, 0.2491e-16, 0.411e-17, 0.67e-18, 0.11e-18, 0.2e-19} // Start calculation X = XVALUE if X < ZERO { return ZERO } // Compute the machine-dependent constants. 
CHEB1 = machine.D1MACH[3] T = CHEB1 / ONEHUN if X <= FOUR { NTERM1 = 13 NTERM2 = 11 XLOW = math.Sqrt(EIGHT * CHEB1) } else { NTERM3 = 24 XHIGH2 = math.Log(machine.D1MACH[1]) XHIGH1 = -EIGHT * XHIGH2 / (EIGHT - ONE) } // Code for 0 <= x <= 4 if X <= FOUR { XPOWTH = math.Pow(X, ONE/THREE) if X < XLOW { RET = CONLOW * XPOWTH } else { T = (X*X/EIGHT - HALF) - HALF CHEB1 = utils.Cheval(NTERM1, ASYNC1, T) CHEB2 = utils.Cheval(NTERM2, ASYNC2, T) T = XPOWTH*CHEB1 - math.Pow(XPOWTH, 11)*CHEB2 RET = T - PIBRT3*X } } else { if X > XHIGH1 { RET = ZERO } else { T = (TWELVE - X) / (X + FOUR) CHEB1 = utils.Cheval(NTERM3, ASYNCA, T) T = LNRTP2 - X + math.Log(math.Sqrt(X)*CHEB1) if T < XHIGH2 { RET = ZERO } else { RET = math.Exp(T) } } } return RET } // SYNCH2 calculates the synchrotron radiation function // x * K(2/3)(x) // where K(2/3) is a modified Bessel function of order 2/3. // The code uses Chebyshev expansions with the coefficients // given to 20 decimal places func SYNCH2(XVALUE float64) float64 { const ( ZERO = 0.0e0 HALF = 0.5e0 ONE = 1.0e0 TWO = 2.0e0 THREE = 3.0e0 FOUR = 4.0e0 EIGHT = 8.0e0 TEN = 10.0e0 ONEHUN = 100.0e0 CONLOW = 1.07476412076723931836e0 LNRTP2 = 0.22579135264472743236e0 ) var NTERM1, NTERM2, NTERM3 int var CHEB1, CHEB2, RET, T, X, XHIGH1, XHIGH2, XLOW, XPOWTH float64 var ASYN21 = []float64{ 38.61783992384308548014e0, 23.03771559496373459697e0, 5.38024998683357059676e0, 0.61567938069957107760e0, 0.4066880046688955843e-1, 0.172962745526484141e-2, 0.5106125883657699e-4, 0.110459595022012e-5, 0.1823553020649e-7, 0.23707698034e-9, 0.248872963e-11, 0.2152868e-13, 0.15607e-15, 0.96e-18, 0.1e-19} var ASYN22 = []float64{ 7.90631482706608042875e0, 3.13534636128534256841e0, 0.48548794774537145380e0, 0.3948166758272372337e-1, 0.196616223348088022e-2, 0.6590789322930420e-4, 0.158575613498559e-5, 0.2868653011233e-7, 0.40412023595e-9, 0.455684443e-11, 0.4204590e-13, 0.32326e-15, 0.210e-17, 0.1e-19} var ASYN2A = []float64{ 2.02033709417071360032e0, 
0.1095623712180740443e-1, 0.85423847301146755e-3, 0.7234302421328222e-4, 0.631244279626992e-5, 0.56481931411744e-6, 0.5128324801375e-7, 0.471965329145e-8, 0.43807442143e-9, 0.4102681493e-10, 0.386230721e-11, 0.36613228e-12, 0.3480232e-13, 0.333010e-14, 0.31856e-15, 0.3074e-16, 0.295e-17, 0.29e-18, 0.3e-19} X = XVALUE if X < ZERO { return ZERO } // Compute the machine-dependent constants. CHEB1 = machine.D1MACH[3] T = CHEB1 / ONEHUN if X <= FOUR { NTERM1 = 14 NTERM2 = 13 XLOW = math.Sqrt(EIGHT * CHEB1) } else { NTERM3 = 18 XHIGH2 = math.Log(machine.D1MACH[1]) XHIGH1 = -EIGHT * XHIGH2 / (EIGHT - ONE) } // Code for 0 <= x <= 4 if X <= FOUR { XPOWTH = math.Pow(X, ONE/THREE) if X < XLOW { RET = CONLOW * XPOWTH } else { T = (X*X/EIGHT - HALF) - HALF CHEB1 = utils.Cheval(NTERM1, ASYN21, T) CHEB2 = utils.Cheval(NTERM2, ASYN22, T) RET = XPOWTH*CHEB1 - math.Pow(XPOWTH, 5)*CHEB2 } } else { if X > XHIGH1 { RET = ZERO } else { T = (TEN - X) / (X + TWO) CHEB1 = utils.Cheval(NTERM3, ASYN2A, T) T = LNRTP2 - X + math.Log(math.Sqrt(X)*CHEB1) if T < XHIGH2 { RET = ZERO } else { RET = math.Exp(T) } } } return RET }
integrals/internal/toms/synchrotron.go
0.568775
0.429071
synchrotron.go
starcoder
package storetestcases import ( "context" "io/ioutil" "log" "sync/atomic" "testing" "github.com/stratumn/go-chainscript" "github.com/stratumn/go-chainscript/chainscripttest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // TestGetSegment tests what happens when you get a segment. func (f Factory) TestGetSegment(t *testing.T) { a := f.initAdapter(t) defer f.freeAdapter(a) link := chainscripttest.RandomLink(t) linkHash, _ := a.CreateLink(context.Background(), link) link2 := chainscripttest.NewLinkBuilder(t).From(t, link).WithData(t, chainscripttest.RandomString(24)).Build() linkHash2, _ := a.CreateLink(context.Background(), link2) t.Run("Getting an existing segment should work", func(t *testing.T) { ctx := context.Background() s, err := a.GetSegment(ctx, linkHash) assert.NoError(t, err) require.NotNil(t, s, "Segment should be found") chainscripttest.LinksEqual(t, link, s.Link) gotHash, err := s.Link.Hash() assert.NoError(t, err, "Hash should be computed") assert.EqualValues(t, linkHash, gotHash, "Invalid linkHash") }) t.Run("Getting an updated segment should work", func(t *testing.T) { ctx := context.Background() got, err := a.GetSegment(ctx, linkHash2) assert.NoError(t, err) require.NotNil(t, got, "Segment should be found") chainscripttest.LinksEqual(t, link2, got.Link) gotHash, err := got.Link.Hash() assert.NoError(t, err, "Hash should be computed") assert.EqualValues(t, linkHash2, gotHash, "Invalid linkHash") }) t.Run("Getting an unknown segment should return nil", func(t *testing.T) { ctx := context.Background() s, err := a.GetSegment(ctx, chainscripttest.RandomHash()) assert.NoError(t, err) assert.Nil(t, s) }) t.Run("Getting a segment should return its evidences", func(t *testing.T) { ctx := context.Background() e1, _ := chainscript.NewEvidence("1.0.0", "TMPop", "1", chainscripttest.RandomBytes(6)) e2, _ := chainscript.NewEvidence("1.0.0", "dummy", "2", chainscripttest.RandomBytes(6)) e3, _ := chainscript.NewEvidence("1.0.0", 
"batch", "3", chainscripttest.RandomBytes(6)) e4, _ := chainscript.NewEvidence("1.0.0", "bcbatch", "4", chainscripttest.RandomBytes(6)) e5, _ := chainscript.NewEvidence("1.0.0", "generic", "5", chainscripttest.RandomBytes(6)) evidences := []*chainscript.Evidence{e1, e2, e3, e4, e5} for _, e := range evidences { err := a.AddEvidence(ctx, linkHash2, e) assert.NoError(t, err, "a.AddEvidence()") } got, err := a.GetSegment(ctx, linkHash2) assert.NoError(t, err, "a.GetSegment()") require.NotNil(t, got) assert.Len(t, got.Meta.Evidences, 5, "Invalid number of evidences") }) } // BenchmarkGetSegment benchmarks getting existing segments. func (f Factory) BenchmarkGetSegment(b *testing.B) { a := f.initAdapterB(b) defer f.freeAdapter(a) linkHashes := make([]chainscript.LinkHash, b.N) for i := 0; i < b.N; i++ { l := RandomLink(b, b.N, i) linkHash, _ := a.CreateLink(context.Background(), l) linkHashes[i] = linkHash } b.ResetTimer() log.SetOutput(ioutil.Discard) for i := 0; i < b.N; i++ { if s, err := a.GetSegment(context.Background(), linkHashes[i]); err != nil { b.Fatal(err) } else if s == nil { b.Error("s = nil want *chainscript.Segment") } } } // BenchmarkGetSegmentParallel benchmarks getting existing segments in parallel. func (f Factory) BenchmarkGetSegmentParallel(b *testing.B) { a := f.initAdapterB(b) defer f.freeAdapter(a) linkHashes := make([]chainscript.LinkHash, b.N) for i := 0; i < b.N; i++ { l := RandomLink(b, b.N, i) linkHash, _ := a.CreateLink(context.Background(), l) linkHashes[i] = linkHash } var counter uint64 b.ResetTimer() log.SetOutput(ioutil.Discard) b.RunParallel(func(pb *testing.PB) { for pb.Next() { i := atomic.AddUint64(&counter, 1) - 1 if s, err := a.GetSegment(context.Background(), linkHashes[i]); err != nil { b.Error(err) } else if s == nil { b.Error("s = nil want *chainscript.Segment") } } }) }
store/storetestcases/getsegment.go
0.51879
0.547162
getsegment.go
starcoder
package schnorr import ( "math/big" "github.com/emmyzkp/crypto/common" ) // BlindedTrans represents a blinded transcript. type BlindedTrans struct { A *big.Int B *big.Int Hash *big.Int ZAlpha *big.Int } func NewBlindedTrans(a, b, hash, zAlpha *big.Int) *BlindedTrans { return &BlindedTrans{ A: a, B: b, Hash: hash, ZAlpha: zAlpha, } } // Verifies that the blinded transcript is valid. That means the knowledge of log_g1(t1), log_G2(T2) // and log_g1(t1) = log_G2(T2). Note that G2 = g2^gamma, T2 = t2^gamma where gamma was chosen // by verifier. func (t *BlindedTrans) Verify(group *Group, g1, t1, G2, T2 *big.Int) bool { // BlindedTrans should be in the following form: [alpha1, beta1, hash(alpha1, beta1), z+alpha] // check hash: hashNum := common.Hash(t.A, t.B) if hashNum.Cmp(t.Hash) != 0 { return false } // We need to verify (note that c-beta = hash(alpha1, beta1)) // g1^(z+alpha) = alpha1 * t1^(c-beta) // G2^(z+alpha) = beta1 * T2^(c-beta) left1 := group.Exp(g1, t.ZAlpha) right1 := group.Exp(t1, t.Hash) right1 = group.Mul(t.A, right1) left2 := group.Exp(G2, t.ZAlpha) right2 := group.Exp(T2, t.Hash) right2 = group.Mul(t.B, right2) if left1.Cmp(right1) == 0 && left2.Cmp(right2) == 0 { return true } else { return false } } type BTEqualityProver struct { Group *Group r *big.Int secret *big.Int g1 *big.Int g2 *big.Int } func NewBTEqualityProver(group *Group) *BTEqualityProver { prover := BTEqualityProver{ Group: group, } return &prover } // Prove that you know dlog_g1(h1), dlog_g2(h2) and that dlog_g1(h1) = dlog_g2(h2). func (p *BTEqualityProver) GetProofRandomData(secret, g1, g2 *big.Int) (*big.Int, *big.Int) { // Set the values that are needed before the protocol can be run. // The protocol proves the knowledge of log_g1(t1), log_g2(t2) and // that log_g1(t1) = log_g2(t2). 
p.secret = secret p.g1 = g1 p.g2 = g2 r := common.GetRandomInt(p.Group.Q) p.r = r x1 := p.Group.Exp(p.g1, r) x2 := p.Group.Exp(p.g2, r) return x1, x2 } func (p *BTEqualityProver) GetProofData(challenge *big.Int) *big.Int { // z = r + challenge * secret z := new(big.Int) z.Mul(challenge, p.secret) z.Add(z, p.r) z.Mod(z, p.Group.Q) return z } type BTEqualityVerifier struct { Group *Group gamma *big.Int challenge *big.Int g1 *big.Int g2 *big.Int x1 *big.Int x2 *big.Int t1 *big.Int t2 *big.Int alpha *big.Int transcript *BlindedTrans } func NewBTEqualityVerifier(group *Group, gamma *big.Int) *BTEqualityVerifier { if gamma == nil { gamma = common.GetRandomInt(group.Q) } verifier := BTEqualityVerifier{ Group: group, gamma: gamma, } return &verifier } func (v *BTEqualityVerifier) GetChallenge(g1, g2, t1, t2, x1, x2 *big.Int) *big.Int { // Set the values that are needed before the protocol can be run. // The protocol proves the knowledge of log_g1(t1), log_g2(t2) and // that log_g1(t1) = log_g2(t2). v.g1 = g1 v.g2 = g2 v.t1 = t1 v.t2 = t2 // Set the values g1^r1 and g2^r2. v.x1 = x1 v.x2 = x2 alpha := common.GetRandomInt(v.Group.Q) beta := common.GetRandomInt(v.Group.Q) // alpha1 = g1^r * g1^alpha * t1^beta // beta1 = (g2^r * g2^alpha * t2^beta)^gamma alpha1 := v.Group.Exp(v.g1, alpha) alpha1 = v.Group.Mul(v.x1, alpha1) tmp := v.Group.Exp(v.t1, beta) alpha1 = v.Group.Mul(alpha1, tmp) beta1 := v.Group.Exp(v.g2, alpha) beta1 = v.Group.Mul(v.x2, beta1) tmp = v.Group.Exp(v.t2, beta) beta1 = v.Group.Mul(beta1, tmp) beta1 = v.Group.Exp(beta1, v.gamma) // c = hash(alpha1, beta) + beta mod q hashNum := common.Hash(alpha1, beta1) challenge := new(big.Int).Add(hashNum, beta) challenge.Mod(challenge, v.Group.Q) v.challenge = challenge v.transcript = NewBlindedTrans(alpha1, beta1, hashNum, nil) v.alpha = alpha return challenge } // It receives z = r + secret * challenge. //It returns true if g1^z = g1^r * (g1^secret) ^ challenge and g2^z = g2^r * (g2^secret) ^ challenge. 
func (v *BTEqualityVerifier) Verify(z *big.Int) (bool, *BlindedTrans, *big.Int, *big.Int) { left1 := v.Group.Exp(v.g1, z) left2 := v.Group.Exp(v.g2, z) r11 := v.Group.Exp(v.t1, v.challenge) r12 := v.Group.Exp(v.t2, v.challenge) right1 := v.Group.Mul(r11, v.x1) right2 := v.Group.Mul(r12, v.x2) // transcript [(alpha1, beta1), hash(alpha1, beta1), z+alpha] // however, we are actually returning [alpha1, beta1, hash(alpha1, beta1), z+alpha] z1 := new(big.Int).Add(z, v.alpha) v.transcript.ZAlpha = z1 G2 := v.Group.Exp(v.g2, v.gamma) T2 := v.Group.Exp(v.t2, v.gamma) if left1.Cmp(right1) == 0 && left2.Cmp(right2) == 0 { return true, v.transcript, G2, T2 } else { return false, nil, nil, nil } }
schnorr/dlog_equality_bt.go
0.714329
0.455622
dlog_equality_bt.go
starcoder
package tag import ( "fmt" "github.com/square/metrics/api" "github.com/square/metrics/function" ) // dropTagSeries returns a copy of the timeseries where the given `dropTag` has been removed from its TagSet. func dropTagSeries(series api.Timeseries, dropTag string) api.Timeseries { tagSet := series.TagSet.Clone() delete(tagSet, dropTag) series.TagSet = tagSet return series } // DropTag returns a copy of the series list where the given `tag` has been removed from all timeseries. func DropTag(list api.SeriesList, tag string) (api.SeriesList, error) { if tag == "" { return api.SeriesList{}, fmt.Errorf("tag.drop given empty string for tag") } series := make([]api.Timeseries, len(list.Series)) for i := range series { series[i] = dropTagSeries(list.Series[i], tag) } return api.SeriesList{ Series: series, }, nil } // setTagSeries returns a copy of the timeseries where the given `newTag` has been set to `newValue`, or added if it wasn't present. func setTagSeries(series api.Timeseries, newTag string, newValue string) api.Timeseries { tagSet := api.NewTagSet() for tag, val := range series.TagSet { tagSet[tag] = val } tagSet[newTag] = newValue series.TagSet = tagSet return series } // SetTag returns a copy of the series list where `tag` has been assigned to `value` for every timeseries in the list. func SetTag(list api.SeriesList, tag string, value string) (api.SeriesList, error) { if tag == "" { return api.SeriesList{}, fmt.Errorf("tag.set given empty string for tag") } if value == "" { return api.SeriesList{}, fmt.Errorf("tag.set given empty string for value") } series := make([]api.Timeseries, len(list.Series)) for i := range series { series[i] = setTagSeries(list.Series[i], tag, value) } return api.SeriesList{ Series: series, }, nil } // copyTagSeries copies the value of one tag to another. 
func copyTagSeries(series api.Timeseries, target string, source string) api.Timeseries { tagSet := series.TagSet.Clone() // it's okay to mutate tagSet because this reference to it is unique. if val, ok := tagSet[source]; ok { tagSet[target] = val } else { delete(tagSet, target) } series.TagSet = tagSet return series } // CopyTag returns a copy of the series list where `target` is replaced by `source`'s value in each timeseries in the list. func CopyTag(list api.SeriesList, target string, source string) (api.SeriesList, error) { if target == "" { return api.SeriesList{}, fmt.Errorf("tag.copy given empty string for target tag") } if source == "" { return api.SeriesList{}, fmt.Errorf("tag.copy given empty string for source tag") } series := make([]api.Timeseries, len(list.Series)) for i := range series { series[i] = copyTagSeries(list.Series[i], target, source) } return api.SeriesList{ Series: series, }, nil } // DropFunction wraps up DropTag into a Function called "tag.drop" var DropFunction = function.MakeFunction("tag.drop", DropTag) // SetFunction wraps up SetTag into a Function called "tag.set" var SetFunction = function.MakeFunction("tag.set", SetTag) // CopyFunction wraps up CopyTag into a Function called "tag.copy" var CopyFunction = function.MakeFunction("tag.copy", CopyTag)
function/builtin/tag/tag.go
0.837021
0.40439
tag.go
starcoder
package klog import ( "cloud.google.com/go/civil" "errors" "fmt" "regexp" "strconv" gotime "time" ) // Time represents a wall clock time. It can be shifted to the adjacent dates. type Time interface { Hour() int Minute() int // MidnightOffset returns the duration since (positive) or until (negative) midnight. MidnightOffset() Duration // IsYesterday checks whether the time is shifted to the previous day. IsYesterday() bool // IsTomorrow checks whether the time is shifted to the next day. IsTomorrow() bool // IsToday checks whether the time is not shifted. IsToday() bool IsEqualTo(Time) bool IsAfterOrEqual(Time) bool // Plus returns a time, where the specified duration was added. It doesn’t modify // the original object. If the resulting time would be shifted by more than one // day, it returns an error. Plus(Duration) (Time, error) // ToString serialises the time, e.g. `8:00` or `23:00>` ToString() string // ToStringWithFormat serialises the date according to the given format. ToStringWithFormat(TimeFormat) string // Format returns the current formatting. Format() TimeFormat } // TimeFormat contains the formatting options for the Time. type TimeFormat struct { Use24HourClock bool } type time struct { hour int minute int dayShift int format TimeFormat } func newTime(hour int, minute int, dayShift int, is24HourClock bool) (Time, error) { if hour == 24 && minute == 00 && dayShift <= 0 { // Accept a time of 24:00 (today), and interpret it as 0:00 (tomorrow). // Accept a time of 24:00 (yesterday), and interpret it as 0:00 (today). // This case is not supported for 24:00 (tomorrow), since that couldn’t be represented. 
hour = 0 dayShift += 1 } ct := civil.Time{Hour: hour, Minute: minute} if !ct.IsValid() { return nil, errors.New("INVALID_TIME") } return &time{ hour: ct.Hour, minute: ct.Minute, dayShift: dayShift, format: TimeFormat{Use24HourClock: is24HourClock}, }, nil } func NewTime(hour int, minute int) (Time, error) { return newTime(hour, minute, 0, true) } func NewTimeYesterday(hour int, minute int) (Time, error) { return newTime(hour, minute, -1, true) } func NewTimeTomorrow(hour int, minute int) (Time, error) { return newTime(hour, minute, +1, true) } var timePattern = regexp.MustCompile(`^(<)?(\d{1,2}):(\d{2})(am|pm)?(>)?$`) func NewTimeFromString(hhmm string) (Time, error) { match := timePattern.FindStringSubmatch(hhmm) if len(match) != 6 || (match[1] == "<" && match[5] == ">") { return nil, errors.New("MALFORMED_TIME") } hour, _ := strconv.Atoi(match[2]) minute, _ := strconv.Atoi(match[3]) is24HourClock := true if match[4] == "am" || match[4] == "pm" { if hour < 1 || hour > 12 { return nil, errors.New("INVALID_TIME") } is24HourClock = false if match[4] == "am" && hour == 12 { hour = 0 } else if match[4] == "pm" && hour < 12 { hour += 12 } } dayShift := 0 if match[1] == "<" { dayShift = -1 } else if match[5] == ">" { dayShift = +1 } return newTime(hour, minute, dayShift, is24HourClock) } func NewTimeFromGo(t gotime.Time) Time { time, err := NewTime(t.Hour(), t.Minute()) if err != nil { // This can/should never occur panic("ILLEGAL_TIME") } return time } func (t *time) Hour() int { return t.hour } func (t *time) Minute() int { return t.minute } func (t *time) MidnightOffset() Duration { if t.IsYesterday() { return NewDuration(-23+t.Hour(), -60+t.Minute()) } else if t.IsTomorrow() { return NewDuration(24+t.Hour(), t.Minute()) } return NewDuration(t.Hour(), t.Minute()) } func (t *time) IsToday() bool { return t.dayShift == 0 } func (t *time) IsYesterday() bool { return t.dayShift < 0 } func (t *time) IsTomorrow() bool { return t.dayShift > 0 } func (t *time) 
IsEqualTo(otherTime Time) bool { return t.MidnightOffset().InMinutes() == otherTime.MidnightOffset().InMinutes() } func (t *time) IsAfterOrEqual(otherTime Time) bool { first := t.MidnightOffset() second := otherTime.MidnightOffset() return first.InMinutes() >= second.InMinutes() } func (t *time) Plus(d Duration) (Time, error) { ONE_DAY := 24 * 60 mins := t.MidnightOffset().Plus(d).InMinutes() if mins >= 2*ONE_DAY || mins < ONE_DAY*-1 { return nil, errors.New("IMPOSSIBLE_OPERATION") } dayShift := 0 if mins < 0 { dayShift = -1 mins = ONE_DAY + mins } else if mins > ONE_DAY { dayShift = 1 mins = mins - ONE_DAY } result := &time{ hour: mins / 60, minute: mins % 60, dayShift: dayShift, format: t.format, } return result, nil } func (t *time) ToString() string { yesterdayPrefix := "" if t.IsYesterday() { yesterdayPrefix = "<" } tomorrowSuffix := "" if t.IsTomorrow() { tomorrowSuffix = ">" } hour, amPmSuffix := func() (int, string) { if t.format.Use24HourClock { return t.hour, "" } if t.hour == 12 { return 12, "pm" } if t.hour > 12 { return t.hour - 12, "pm" } if t.hour == 0 { return 12, "am" } return t.hour, "am" }() return fmt.Sprintf("%s%d:%02d%s%s", yesterdayPrefix, hour, t.minute, amPmSuffix, tomorrowSuffix) } func (t *time) ToStringWithFormat(f TimeFormat) string { c := *t c.format = f return c.ToString() } func (t *time) Format() TimeFormat { return t.format }
src/time.go
0.70202
0.428174
time.go
starcoder
package steganography import ( "bytes" "errors" "image/color" "github.com/stegoer/server/pkg/util" ) // ChannelType represents a pixel color channel. type ChannelType byte // PixelData represents data of one particular pixel of an image. type PixelData struct { Width int Height int Channels []ChannelType Color *color.NRGBA } const ( // RedChannel represents the red ChannelType. RedChannel ChannelType = iota // GreenChannel represents the red ChannelType. GreenChannel // BlueChannel represents the red ChannelType. BlueChannel channelTypeSliceCapacity = 3 ) // IsRed returns whether the ChannelType represents a RedChannel. func (ct ChannelType) IsRed() bool { return ct == RedChannel } // IsGreen returns whether the ChannelType represents a GreenChannel. func (ct ChannelType) IsGreen() bool { return ct == GreenChannel } // IsBlue returns whether the ChannelType represents a BlueChannel. func (ct ChannelType) IsBlue() bool { return ct == BlueChannel } // GetRed returns the underlying value of the RedChannel of the PixelData. func (pd PixelData) GetRed() byte { return pd.Color.R } // GetGreen returns the underlying value of the GreenChannel of the PixelData. func (pd PixelData) GetGreen() byte { return pd.Color.G } // GetBlue returns the underlying value of the BlueChannel of the PixelData. func (pd PixelData) GetBlue() byte { return pd.Color.B } // GetChannelValue returns the value of the ChannelType of the PixelData. func (pd PixelData) GetChannelValue(channel ChannelType) byte { switch { case channel.IsRed(): return pd.GetRed() case channel.IsGreen(): return pd.GetGreen() case channel.IsBlue(): return pd.GetBlue() default: // should be unreachable return 0 } } // SetRed sets the RedChannel of the PixelData. func (pd *PixelData) SetRed(value byte) { pd.Color.R = value } // SetGreen sets the GreenChannel of the PixelData. func (pd *PixelData) SetGreen(value byte) { pd.Color.G = value } // SetBlue sets the BlueChannel of the PixelData. 
func (pd *PixelData) SetBlue(value byte) { pd.Color.B = value } // SetChannelValue sets the value of ChannelType of the PixelData on lsbPos. func (pd PixelData) SetChannelValue( channel ChannelType, value byte, lsbPos byte, ) { switch { case channel.IsRed(): pd.SetRed(util.GetUpdatedByte(value, pd.GetRed(), lsbPos)) case channel.IsGreen(): pd.SetGreen(util.GetUpdatedByte(value, pd.GetGreen(), lsbPos)) case channel.IsBlue(): pd.SetBlue(util.GetUpdatedByte(value, pd.GetBlue(), lsbPos)) } } // NRGBAPixels sends PixelData of util.ImageData based on given parameters. func NRGBAPixels( data util.ImageData, pixelOffset int, channel util.Channel, distributionDivisor int, resultChan chan PixelData, ) { var pixelCount int red := channel.IncludesRed() green := channel.IncludesGreen() blue := channel.IncludesBlue() for width := 0; width < data.Width; width++ { for height := 0; height < data.Height; height++ { pixelCount++ if pixelCount <= pixelOffset || pixelCount%distributionDivisor != 0 { continue } channels := make([]ChannelType, 0, channelTypeSliceCapacity) nrgbaColor := data.NRGBA.NRGBAAt(width, height) if red { channels = append(channels, RedChannel) } if green { channels = append(channels, GreenChannel) } if blue { channels = append(channels, BlueChannel) } resultChan <- PixelData{ Width: width, Height: height, Channels: channels, Color: &nrgbaColor, } } } close(resultChan) } // SetNRGBAValues sets ChannelType values into util.ImageData based on params. 
func SetNRGBAValues( imageData util.ImageData, encodeData []byte, pixelOffset int, lsbUsed byte, channel util.Channel, distributionDivisor int, ) { bitChannel := make(chan byte) go util.ByteArrToBits(encodeData, bitChannel) pixelDataChannel := make(chan PixelData) go NRGBAPixels( imageData, pixelOffset, channel, distributionDivisor, pixelDataChannel, ) lsbSlice := LSBSlice(lsbUsed) hasBits := true for pixelData := range pixelDataChannel { channelIterator: for _, pixelChannel := range pixelData.Channels { for _, lsbPos := range lsbSlice { dataBit, ok := <-bitChannel if !ok { hasBits = false break channelIterator } pixelData.SetChannelValue(pixelChannel, dataBit, lsbPos) } } imageData.NRGBA.SetNRGBA(pixelData.Width, pixelData.Height, *pixelData.Color) if !hasBits { return } } } // GetNRGBAValues returns bytes.Buffer with ChannelType values. func GetNRGBAValues( imageData util.ImageData, pixelOffset int, lsbUsed byte, channel util.Channel, distributionDivisor int, bufferLength int, ) (*bytes.Buffer, error) { var binaryBuffer bytes.Buffer pixelDataChannel := make(chan PixelData) go NRGBAPixels( imageData, pixelOffset, channel, distributionDivisor, pixelDataChannel, ) lsbSlice := LSBSlice(lsbUsed) for pixelData := range pixelDataChannel { for _, pixelChannel := range pixelData.Channels { for _, lsbPos := range lsbSlice { hasBit := util.HasBit( pixelData.GetChannelValue(pixelChannel), lsbPos, ) binaryBuffer.WriteRune(util.BoolToRune(hasBit)) if binaryBuffer.Len() == bufferLength { return &binaryBuffer, nil } } } } return nil, errors.New("malformed image data") }
pkg/steganography/pixels.go
0.868827
0.412471
pixels.go
starcoder
package local import ( "github.com/ready-steady/adapt/algorithm" "github.com/ready-steady/adapt/algorithm/internal" "github.com/ready-steady/adapt/basis" "github.com/ready-steady/adapt/grid" ) // Algorithm is the interpolation algorithm. type Algorithm struct { ni uint no uint grid Grid basis Basis } // Basis is an interpolation basis. type Basis interface { basis.Computer basis.Integrator } // Grid is an interpolation grid. type Grid interface { grid.Computer } // New creates an interpolator. func New(inputs, outputs uint, grid Grid, basis Basis) *Algorithm { return &Algorithm{ ni: inputs, no: outputs, grid: grid, basis: basis, } } // Compute constructs an interpolant for a function. func (self *Algorithm) Compute(target algorithm.Target, strategy algorithm.Strategy) *algorithm.Surrogate { ni, no := self.ni, self.no surrogate := algorithm.NewSurrogate(ni, no) for s := strategy.First(surrogate); s != nil; s = strategy.Next(s, surrogate) { s.Volumes = internal.Measure(self.basis, s.Indices, ni) s.Nodes = self.grid.Compute(s.Indices) s.Values = algorithm.Invoke(target, s.Nodes, ni, no) s.Estimates = internal.Estimate(self.basis, surrogate.Indices, surrogate.Surpluses, s.Nodes, ni, no) s.Surpluses = internal.Subtract(s.Values, s.Estimates) s.Scores = score(strategy, s, ni, no) surrogate.Push(s.Indices, s.Surpluses, s.Volumes) } return surrogate } // Evaluate computes the values of an interpolant at a set of points. 
func (self *Algorithm) Evaluate(surrogate *algorithm.Surrogate, points []float64) []float64 { return internal.Estimate(self.basis, surrogate.Indices, surrogate.Surpluses, points, surrogate.Inputs, surrogate.Outputs) } func score(strategy algorithm.Strategy, state *algorithm.State, ni, no uint) []float64 { nn := uint(len(state.Indices)) / ni scores := make([]float64, nn) for i := uint(0); i < nn; i++ { scores[i] = strategy.Score(&algorithm.Element{ Index: state.Indices[i*ni : (i+1)*ni], Node: state.Nodes[i*ni : (i+1)*ni], Volume: state.Volumes[i], Value: state.Values[i*no : (i+1)*no], Surplus: state.Surpluses[i*no : (i+1)*no], }) } return scores }
algorithm/local/main.go
0.734691
0.405213
main.go
starcoder
Implements a basic perceptron neural network - Earliest network model proposed by Rosenblatt in late 1950s. */ package algo import ( "math" "math/rand" "time" ) var EPSILON float64 = 0.00000001 //Soure : //https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ func FloatEquals(a, b float64) bool { // Calculate the difference. var diff float64 = math.Abs(a - b) a = math.Abs(a) b = math.Abs(b) // Find the largest var largest float64 if b > a { largest = b } else { largest = a } if diff <= largest*EPSILON { return true } return false } /* Perceptron Type */ type Perceptron struct { Weights []float64 //The input weights Threshold float64 Bias float64 LearningRate float64 Epoch int } func TrainedPerceptron( trainingData []*TrainingData, threshold float64, bias float64, lrate float64, epoch int) *Perceptron { p := &Perceptron{} trDataLen := len(trainingData) numberOfWeights := len(trainingData[0].Inputs) + 1 //The added slot is for the bias r := rand.New(rand.NewSource(time.Now().UnixNano())) var weights = make([]float64, numberOfWeights, numberOfWeights) //Initialize weights to random numbers for i := 0; i < numberOfWeights; i++ { //r.Float64 return psudo-random number between [0.0,1.0] weights[i] = r.Float64() } p.Weights = weights //Set the rest of the properties p.Bias = bias p.Threshold = threshold p.LearningRate = lrate p.Epoch = epoch //Train the model. 
//Cycle through the training data a set amount of times (epoch times) //One cycle => one epoch for i := 0; i < epoch; i++ { var globalError int64 globalError = 0 for j := 0; j < trDataLen; j++ { var estimatedOutput int64 = p.CalculateOutput(trainingData[j].Inputs) var expectdOutput int64 = trainingData[j].Output var localError int64 = expectdOutput - estimatedOutput globalError += localError //Update weights for k := 0; k < numberOfWeights-1; k++ { var delta float64 delta = lrate * trainingData[j].Inputs[k] * float64(localError) p.Weights[k] += delta } //Update bias' weight p.Weights[numberOfWeights-1] += lrate * float64(localError) } if globalError == 0 { break } } return p } //Multiplies each input by its corresponding weight. func (p *Perceptron) CalculateOutput(inputs []float64) int64 { sum := 0.0 //Sum inputs * weights for i, input := range inputs { sum += input * p.Weights[i] } //Add Bias sum += p.Bias * p.Weights[len(p.Weights)-1] //Activation if FloatEquals(sum, p.Threshold) { return 1 } else if sum > p.Threshold { return 1 } else { return 0 } } /* TrainingData Type */ type TrainingData struct { Inputs []float64 Output int64 } func NewTrainingData(inputs []float64, output int64) *TrainingData { return &TrainingData{ Inputs: inputs, Output: output, } }
src/go/src/github.com/redsofa/perceptron/algo/algo.go
0.716913
0.618982
algo.go
starcoder
package iota_mnemonic import ( "fmt" "github.com/iotaledger/giota" "github.com/tyler-smith/go-bip39" ) // Generates IOTA seed trits from a 64 bytes seed // The algorithm was adapted from // https://github.com/iota-trezor/trezor-mcu/blob/25292640b560a644ebf88d0dae848e8928e68127/firmware/iota.c#L70 // Absorb 4 times using sliding window: // Divide 64 byte bip39 seed in 4 sections of 48 bytes. // 1: [123.] first 48 bytes // 2: [.123] last 48 bytes // 3: [3.12] last 32 bytes + first 16 bytes // 4: [23.1] last 16 bytes + first 32 bytes func ByteSeedToTrits(seed []byte) (giota.Trits, error) { seedLength := len(seed) if seedLength != 64 { return nil, fmt.Errorf("Seed must have a length of 64 bytes!") } bytes := make([]byte, giota.ByteLength) sponge := giota.NewKerl() if sponge == nil { return nil, fmt.Errorf("Could not initialize Kerl instance.") } // Step 1 for j := 0; j < giota.ByteLength; j++ { // 48 bytes[j] = seed[j] } ti, err := giota.BytesToTrits(bytes) if err != nil { return nil, err } err = sponge.Absorb(ti) if err != nil { return nil, err } // Step 2 offset := seedLength - giota.ByteLength //64 - 48 = 16 for j := 0; j < giota.ByteLength; j++ { bytes[j] = seed[j+offset] } ti, err = giota.BytesToTrits(bytes) if err != nil { return nil, err } err = sponge.Absorb(ti) if err != nil { return nil, err } // Step 3 offset = seedLength / 2 // 64 / 2 = 32 for j := 0; j < offset; j++ { bytes[j] = seed[j+offset] } for j := offset; j < giota.ByteLength; j++ { bytes[j] = seed[j-offset] } ti, err = giota.BytesToTrits(bytes) if err != nil { return nil, err } err = sponge.Absorb(ti) if err != nil { return nil, err } // Step 4 offset = seedLength - giota.ByteLength //64 - 48 = 16 for j := 0; j < offset; j++ { bytes[j] = seed[j+seedLength-offset] } for j := offset; j < giota.ByteLength; j++ { bytes[j] = seed[j-offset] } ti, err = giota.BytesToTrits(bytes) if err != nil { return nil, err } err = sponge.Absorb(ti) if err != nil { return nil, err } // Squeeze out the seed s_trits, err 
:= sponge.Squeeze(giota.TritHashLength) if err != nil { return nil, err } return s_trits, nil } // Generates IOTA seed trits from a BIP0039 space delimited word list func ToTrits(mnemonic string, passphrase string) (giota.Trits, error) { seed, err := bip39.NewSeedWithErrorChecking(mnemonic, passphrase) if err != nil { return nil, err } return ByteSeedToTrits(seed) } // Generates IOTA seed trytes from a BIP0039 space delimited word list func ToTrytes(mnemonic string, passphrase string) (giota.Trytes, error) { t, err := ToTrits(mnemonic, passphrase) if err != nil { return giota.Trytes(""), err } return t.Trytes(), err } // Generates IOTA seed string from a BIP0039 space delimited word list func ToSeed(mnemonic string, passphrase string) (string, error) { t, err := ToTrytes(mnemonic, passphrase) return string(t), err }
iota_mnemonic/iota_mnemonic.go
0.720172
0.422028
iota_mnemonic.go
starcoder
package godash import ( "math" ) // Creates an array of elements splits into groups the length of size. If array // can't be split evenly, the final chunk will be the remaining elements. //go:generate make generate FILE=chunk FUNC=chunk TYPE=int,int8,int16,int32,int64,uint,uint8,uint16,uint32,uint64,string,float32,float64,bool func ChunkInt(array []int, size int) [][]int { length := len(array) if length == 0 || size < 1 { return [][]int{} } index := 0 resIndex := 0 result := make([][]int, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkInt8(array []int8, size int) [][]int8 { length := len(array) if length == 0 || size < 1 { return [][]int8{} } index := 0 resIndex := 0 result := make([][]int8, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkInt16(array []int16, size int) [][]int16 { length := len(array) if length == 0 || size < 1 { return [][]int16{} } index := 0 resIndex := 0 result := make([][]int16, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkInt32(array []int32, size int) [][]int32 { length := len(array) if length == 0 || size < 1 { return [][]int32{} } index := 0 resIndex := 0 result := make([][]int32, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkInt64(array []int64, size int) [][]int64 { length := len(array) if length == 0 || size < 1 { return [][]int64{} } index := 0 resIndex := 0 result := 
make([][]int64, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkUint(array []uint, size int) [][]uint { length := len(array) if length == 0 || size < 1 { return [][]uint{} } index := 0 resIndex := 0 result := make([][]uint, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkUint8(array []uint8, size int) [][]uint8 { length := len(array) if length == 0 || size < 1 { return [][]uint8{} } index := 0 resIndex := 0 result := make([][]uint8, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkUint16(array []uint16, size int) [][]uint16 { length := len(array) if length == 0 || size < 1 { return [][]uint16{} } index := 0 resIndex := 0 result := make([][]uint16, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkUint32(array []uint32, size int) [][]uint32 { length := len(array) if length == 0 || size < 1 { return [][]uint32{} } index := 0 resIndex := 0 result := make([][]uint32, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkUint64(array []uint64, size int) [][]uint64 { length := len(array) if length == 0 || size < 1 { return [][]uint64{} } index := 0 resIndex := 0 result := make([][]uint64, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := 
int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkString(array []string, size int) [][]string { length := len(array) if length == 0 || size < 1 { return [][]string{} } index := 0 resIndex := 0 result := make([][]string, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkFloat32(array []float32, size int) [][]float32 { length := len(array) if length == 0 || size < 1 { return [][]float32{} } index := 0 resIndex := 0 result := make([][]float32, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkFloat64(array []float64, size int) [][]float64 { length := len(array) if length == 0 || size < 1 { return [][]float64{} } index := 0 resIndex := 0 result := make([][]float64, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result } func ChunkBool(array []bool, size int) [][]bool { length := len(array) if length == 0 || size < 1 { return [][]bool{} } index := 0 resIndex := 0 result := make([][]bool, int(math.Ceil(float64(length)/float64(size)))) for index < length { end := int(math.Min(float64(index+size), float64(length))) result[resIndex] = array[index:end] index = end resIndex++ } return result }
chunk.go
0.532668
0.596316
chunk.go
starcoder
package list // ToArray transforms a list into an array of values. // It only handles non-cyclic lists. If a cycle is detected, it will panic. func ToArray(l *Node) []int { result := make([]int, 0) seenNodes := make(map[*Node]bool) for cursor := l; cursor != nil; cursor = cursor.Next { if ok := seenNodes[cursor]; ok { panic("cycle detected") } seenNodes[cursor] = true result = append(result, cursor.Data) } return result } // DeepCopy makes a depp copy of a list. // It handles both non-cyclic and cyclic lists. func DeepCopy(l *Node) *Node { if l == nil { return nil } copiedNodes := make(map[*Node]*Node) dummyHead := &Node{} cursorCopy := dummyHead for cursor := l; cursor != nil; cursor = cursor.Next { if copy, ok := copiedNodes[cursor]; ok { cursorCopy.Next = copy // cycle detected - break the loop break } newNode := Node{ Data: cursor.Data, } cursorCopy.Next = &newNode copiedNodes[cursor] = &newNode cursorCopy = cursorCopy.Next } return dummyHead.Next } // DeepCopyDoubleLinked makes a depp copy of a doubly-linked list. // It handles linear as well as branching lists. func DeepCopyDoubleLinked(l *DoublyLinkedNode) *DoublyLinkedNode { if l == nil { return nil } nodes := make([]*DoublyLinkedNode, 0) copiedNodes := make(map[*DoublyLinkedNode]*DoublyLinkedNode) nodes = append(nodes, l) for currentIdx := 0; currentIdx < len(nodes); currentIdx++ { node := nodes[currentIdx] if _, ok := copiedNodes[node]; !ok { newNode := DoublyLinkedNode{ Data: node.Data, } copiedNodes[node] = &newNode if node.Prev != nil { nodes = append(nodes, node.Prev) } if node.Next != nil { nodes = append(nodes, node.Next) } } } for currentIdx := 0; currentIdx < len(nodes); currentIdx++ { node := nodes[currentIdx] copy := copiedNodes[node] if node.Prev != nil { copy.Prev = copiedNodes[node.Prev] } if node.Next != nil { copy.Next = copiedNodes[node.Next] } } return copiedNodes[l] } // DoublyLinkedNodeFromSlice transforms an array of values into a doubly linked list. 
// The result is non-cyclic and without any branches. func DoublyLinkedNodeFromSlice(allData []int) *DoublyLinkedNode { dummyHead := &DoublyLinkedNode{} current := dummyHead for _, d := range allData { current.Next = &DoublyLinkedNode{ Data: d, } current.Next.Prev = current current = current.Next } if dummyHead.Next != nil { dummyHead.Next.Prev = nil } return dummyHead.Next } // DoublyLinkedNodeToSlice transforms a doubly linked list into a slice of values. // It only handles non-cyclic lists. If a cycle is detected, it will panic. // It only guarantees to include all the values if there are no branches and the given // node pointer points to the beginning of the list. func DoublyLinkedNodeToSlice(l *DoublyLinkedNode) []int { result := make([]int, 0) seenNodes := make(map[*DoublyLinkedNode]bool) for cursor := l; cursor != nil; cursor = cursor.Next { if ok := seenNodes[cursor]; ok { panic("cycle detected") } seenNodes[cursor] = true result = append(result, cursor.Data) } return result }
data_structures/list/utils.go
0.803675
0.458409
utils.go
starcoder
package collect

import (
	"fmt"
)

// SliceCollection wraps a slice and offers a fluent, chainable API over the
// package-level slice helpers (Each, Filter, Map, ...). Most chainable
// methods mutate the wrapped slice in place and return the receiver.
type SliceCollection[T ~[]E, E any] struct {
	z T // the wrapped slice
}

// UseSlice wraps items in a new SliceCollection.
func UseSlice[T ~[]E, E any](items T) *SliceCollection[T, E] {
	return &SliceCollection[T, E]{items}
}

// All returns the underlying slice.
func (s *SliceCollection[T, E]) All() T {
	return s.z
}

// New returns a fresh SliceCollection wrapping items.
func (s *SliceCollection[T, E]) New(items T) *SliceCollection[T, E] {
	return &SliceCollection[T, E]{items}
}

// Len returns the number of elements.
func (s *SliceCollection[T, E]) Len() int {
	return len(s.z)
}

// Empty reports whether the collection has no elements.
func (s *SliceCollection[T, E]) Empty() bool {
	return len(s.z) == 0
}

// Print writes the underlying slice to stdout and returns the receiver.
func (s *SliceCollection[T, E]) Print() *SliceCollection[T, E] {
	fmt.Println(s.z)
	return s
}

// Each invokes callback for every element with its index.
func (s *SliceCollection[T, E]) Each(callback func(value E, index int)) *SliceCollection[T, E] {
	Each[T, E](s.z, callback)
	return s
}

// Same reports whether the collection equals target element-wise.
func (s *SliceCollection[T, E]) Same(target T) bool {
	return Same[T, E](s.z, target)
}

// First returns the first element; the bool is false when the collection is empty.
func (s *SliceCollection[T, E]) First() (E, bool) {
	return First[T, E](s.z)
}

// Last returns the last element; the bool is false when the collection is empty.
func (s *SliceCollection[T, E]) Last() (E, bool) {
	return Last[T, E](s.z)
}

// Index returns the position of value, as defined by the package-level Index helper.
func (s *SliceCollection[T, E]) Index(value E) int {
	return Index(s.z, value)
}

// Contains reports whether value is present.
func (s *SliceCollection[T, E]) Contains(value E) bool {
	return Contains(s.z, value)
}

// Diff keeps only elements not present in target (mutates in place).
func (s *SliceCollection[T, E]) Diff(target T) *SliceCollection[T, E] {
	s.z = Diff[T, E](s.z, target)
	return s
}

// Filter keeps only elements for which callback returns true (mutates in place).
func (s *SliceCollection[T, E]) Filter(callback func(value E, index int) bool) *SliceCollection[T, E] {
	s.z = Filter(s.z, callback)
	return s
}

// Map replaces each element with callback's result (mutates in place).
func (s *SliceCollection[T, E]) Map(callback func(value E, index int) E) *SliceCollection[T, E] {
	s.z = Map(s.z, callback)
	return s
}

// Unique removes duplicate elements (mutates in place).
func (s *SliceCollection[T, E]) Unique() *SliceCollection[T, E] {
	s.z = Unique[T, E](s.z)
	return s
}

// Duplicates returns the duplicated elements keyed by index as a MapCollection.
func (s *SliceCollection[T, E]) Duplicates() *MapCollection[map[int]E, int, E] {
	return UseMap[map[int]E, int, E](Duplicates[T, E](s.z))
}

// Merge appends the target slices (mutates in place).
func (s *SliceCollection[T, E]) Merge(targets ...T) *SliceCollection[T, E] {
	s.z = Merge[T, E](s.z, targets...)
	return s
}

// Random returns a random element; the bool is false when the collection is empty.
func (s *SliceCollection[T, E]) Random() (E, bool) {
	return Random[T, E](s.z)
}

// Reverse reverses element order (mutates in place).
func (s *SliceCollection[T, E]) Reverse() *SliceCollection[T, E] {
	s.z = Reverse[T, E](s.z)
	return s
}

// Shuffle randomizes element order (mutates in place).
func (s *SliceCollection[T, E]) Shuffle() *SliceCollection[T, E] {
	s.z = Shuffle[T, E](s.z)
	return s
}

// Slice narrows the collection to a sub-slice starting at offset with an
// optional length (mutates in place).
func (s *SliceCollection[T, E]) Slice(offset int, length ...int) *SliceCollection[T, E] {
	s.z = Slice[T, E](s.z, offset, length...)
	return s
}

// Split partitions the elements into groups of the given amount.
func (s *SliceCollection[T, E]) Split(amount int) []T {
	return Split[T, E](s.z, amount)
}

// Splice removes (and optionally replaces) elements starting at offset,
// mutating the receiver and returning the removed part as a new collection.
func (s *SliceCollection[T, E]) Splice(offset int, args ...any) *SliceCollection[T, E] {
	return s.New(Splice[T, E](&s.z, offset, args...))
}

// Reduce folds the elements into a single value, starting from initial.
func (s *SliceCollection[T, E]) Reduce(initial E, callback func(carry E, value E, key int) E) E {
	return Reduce[T, E](s.z, initial, callback)
}

// Pop removes and returns the last element; the bool is false when empty.
func (s *SliceCollection[T, E]) Pop() (E, bool) {
	return Pop[T, E](&s.z)
}

// Push appends item to the collection.
func (s *SliceCollection[T, E]) Push(item E) *SliceCollection[T, E] {
	Push[T, E](&s.z, item)
	return s
}

// Where filters elements by the package-level Where condition (mutates in place).
func (s *SliceCollection[T, E]) Where(args ...any) *SliceCollection[T, E] {
	s.z = Where[T, E](s.z, args...)
	return s
}

// WhereIn keeps elements matching the given membership condition (mutates in place).
func (s *SliceCollection[T, E]) WhereIn(args ...any) *SliceCollection[T, E] {
	s.z = WhereIn[T, E](s.z, args...)
	return s
}

// WhereNotIn drops elements matching the given membership condition (mutates in place).
func (s *SliceCollection[T, E]) WhereNotIn(args ...any) *SliceCollection[T, E] {
	s.z = WhereNotIn[T, E](s.z, args...)
	return s
}
slice.go
0.65368
0.505188
slice.go
starcoder
package lstm

import (
	"github.com/gorgonia/parser"
	G "gorgonia.org/gorgonia"
	"gorgonia.org/tensor"
)

// Model holds the raw weight and bias backings of the LSTM as plain
// float32 slices, independent of any expression graph. A Model can be
// instantiated into a runnable cell with newLSTM.
type Model struct {
	wi    []float32 // input gate: input weights
	ui    []float32 // input gate: recurrent weights
	biasI []float32 // input gate: bias
	wf    []float32 // forget gate: input weights
	uf    []float32 // forget gate: recurrent weights
	biasF []float32 // forget gate: bias
	wo    []float32 // output gate: input weights
	uo    []float32 // output gate: recurrent weights
	biasO []float32 // output gate: bias
	wc    []float32 // cell write: input weights
	uc    []float32 // cell write: recurrent weights
	biasC []float32 // cell write: bias
	wy    []float32 // output projection weights
	biasY []float32 // output projection bias
	inputSize  int
	outputSize int
	hiddenSize int
}

// lstm represent a single cell of the RNN
// each LSTM owns its own ExprGraph
type lstm struct {
	g     *G.ExprGraph
	wi    *G.Node
	ui    *G.Node
	biasI *G.Node
	wf    *G.Node
	uf    *G.Node
	biasF *G.Node
	wo    *G.Node
	uo    *G.Node
	biasO *G.Node
	wc    *G.Node
	uc    *G.Node
	biasC *G.Node
	wy    *G.Node
	biasY *G.Node
	parser     *parser.Parser
	inputSize  int
	outputSize int
	hiddenSize int
	//inputVector *G.Node
	prevHidden *G.Node // hₜ₋₁, the previous hidden state
	prevCell   *G.Node // Cₜ₋₁, the previous cell state
}

// newLSTM builds a fresh expression graph for one LSTM cell from the
// model's weight backings. hiddenT and cellT provide the initial
// ("previous") hidden and cell state values. Each node is also registered
// with the equation parser under its symbolic name.
func (m *Model) newLSTM(hiddenT, cellT tensor.Tensor) *lstm {
	lstm := new(lstm)
	g := G.NewGraph()
	lstm.g = g
	p := parser.NewParser(g)
	lstm.parser = p
	lstm.hiddenSize = m.hiddenSize
	lstm.inputSize = m.inputSize
	lstm.outputSize = m.outputSize
	prevSize := m.inputSize
	hiddenSize := m.hiddenSize
	outputSize := m.outputSize
	// Create the tensor first
	// input gate weights
	wiT := tensor.New(tensor.WithShape(hiddenSize, prevSize), tensor.WithBacking(m.wi))
	uiT := tensor.New(tensor.WithShape(hiddenSize, hiddenSize), tensor.WithBacking(m.ui))
	biasIT := tensor.New(tensor.WithBacking(m.biasI), tensor.WithShape(hiddenSize))
	// output gate weights
	woT := tensor.New(tensor.WithShape(hiddenSize, prevSize), tensor.WithBacking(m.wo))
	uoT := tensor.New(tensor.WithShape(hiddenSize, hiddenSize), tensor.WithBacking(m.uo))
	biasOT := tensor.New(tensor.WithBacking(m.biasO), tensor.WithShape(hiddenSize))
	// forget gate weights
	wfT := tensor.New(tensor.WithShape(hiddenSize, prevSize), tensor.WithBacking(m.wf))
	ufT := tensor.New(tensor.WithShape(hiddenSize, hiddenSize), tensor.WithBacking(m.uf))
	biasFT := tensor.New(tensor.WithBacking(m.biasF), tensor.WithShape(hiddenSize))
	// cell write
	wcT := tensor.New(tensor.WithShape(hiddenSize, prevSize), tensor.WithBacking(m.wc))
	ucT := tensor.New(tensor.WithShape(hiddenSize, hiddenSize), tensor.WithBacking(m.uc))
	biasCT := tensor.New(tensor.WithBacking(m.biasC), tensor.WithShape(hiddenSize))
	// Output vector
	wyT := tensor.New(tensor.WithShape(outputSize, hiddenSize), tensor.WithBacking(m.wy))
	biasYT := tensor.New(tensor.WithBacking(m.biasY), tensor.WithShape(outputSize))
	// input gate weights
	lstm.wi = G.NewMatrix(g, tensor.Float32, G.WithName("Wᵢ"), G.WithShape(hiddenSize, prevSize), G.WithValue(wiT))
	lstm.ui = G.NewMatrix(g, tensor.Float32, G.WithName("Uᵢ"), G.WithShape(hiddenSize, hiddenSize), G.WithValue(uiT))
	lstm.biasI = G.NewVector(g, tensor.Float32, G.WithName("Bᵢ"), G.WithShape(hiddenSize), G.WithValue(biasIT))
	p.Set(`Wᵢ`, lstm.wi)
	p.Set(`Uᵢ`, lstm.ui)
	p.Set(`Bᵢ`, lstm.biasI)
	// output gate weights
	lstm.wo = G.NewMatrix(g, tensor.Float32, G.WithName("Wₒ"), G.WithShape(hiddenSize, prevSize), G.WithValue(woT))
	lstm.uo = G.NewMatrix(g, tensor.Float32, G.WithName("Uₒ"), G.WithShape(hiddenSize, hiddenSize), G.WithValue(uoT))
	lstm.biasO = G.NewVector(g, tensor.Float32, G.WithName("Bₒ"), G.WithShape(hiddenSize), G.WithValue(biasOT))
	p.Set(`Wₒ`, lstm.wo)
	p.Set(`Uₒ`, lstm.uo)
	p.Set(`Bₒ`, lstm.biasO)
	// forget gate weights
	lstm.wf = G.NewMatrix(g, tensor.Float32, G.WithName("Wf"), G.WithShape(hiddenSize, prevSize), G.WithValue(wfT))
	lstm.uf = G.NewMatrix(g, tensor.Float32, G.WithName("Uf"), G.WithShape(hiddenSize, hiddenSize), G.WithValue(ufT))
	lstm.biasF = G.NewVector(g, tensor.Float32, G.WithName("Bf"), G.WithShape(hiddenSize), G.WithValue(biasFT))
	p.Set(`Wf`, lstm.wf)
	p.Set(`Uf`, lstm.uf)
	p.Set(`Bf`, lstm.biasF)
	// cell write
	// NOTE(review): the node names below ("bc", "by") are lower-case while
	// the parser keys (`Bc`, `By`) are capitalized, unlike the gates above
	// where name and key match exactly — looks unintentional; confirm which
	// casing downstream equations expect.
	lstm.wc = G.NewMatrix(g, tensor.Float32, G.WithName("Wc"), G.WithShape(hiddenSize, prevSize), G.WithValue(wcT))
	lstm.uc = G.NewMatrix(g, tensor.Float32, G.WithName("Uc"), G.WithShape(hiddenSize, hiddenSize), G.WithValue(ucT))
	lstm.biasC = G.NewVector(g, tensor.Float32, G.WithName("bc"), G.WithShape(hiddenSize), G.WithValue(biasCT))
	p.Set(`Wc`, lstm.wc)
	p.Set(`Uc`, lstm.uc)
	p.Set(`Bc`, lstm.biasC)
	// Output vector
	lstm.wy = G.NewMatrix(g, tensor.Float32, G.WithName("Wy"), G.WithShape(outputSize, hiddenSize), G.WithValue(wyT))
	lstm.biasY = G.NewVector(g, tensor.Float32, G.WithName("by"), G.WithShape(outputSize), G.WithValue(biasYT))
	p.Set(`Wy`, lstm.wy)
	p.Set(`By`, lstm.biasY)
	// this is to simulate a default "previous" state
	lstm.prevHidden = G.NewVector(g, tensor.Float32, G.WithName("hₜ₋₁"), G.WithShape(hiddenSize), G.WithValue(hiddenT))
	lstm.prevCell = G.NewVector(g, tensor.Float32, G.WithName("Cₜ₋₁"), G.WithShape(hiddenSize), G.WithValue(cellT))
	return lstm
}

// newModelFromBackends copies the weight slices and dimensions from a
// deserialized/initialized backends value into a Model.
func newModelFromBackends(back *backends) *Model {
	m := new(Model)
	m.hiddenSize = back.HiddenSize
	m.inputSize = back.InputSize
	m.outputSize = back.OutputSize
	// input gate weights
	m.wi = back.Wi
	m.ui = back.Ui
	m.biasI = back.BiasI
	// output gate weights
	m.wo = back.Wo
	m.uo = back.Uo
	m.biasO = back.BiasO
	// forget gate weights
	m.wf = back.Wf
	m.uf = back.Uf
	m.biasF = back.BiasF
	// cell write
	m.wc = back.Wc
	m.uc = back.Uc
	m.biasC = back.BiasC
	// Output vector
	m.wy = back.Wy
	m.biasY = back.BiasY
	return m
}

// NewModel creates a new model with freshly initialized backends for the
// given input, output and hidden dimensions.
func NewModel(inputSize, outputSize int, hiddenSize int) *Model {
	return newModelFromBackends(initBackends(inputSize, outputSize, hiddenSize))
}
model.go
0.691602
0.608827
model.go
starcoder
package fsm

// A simple state machine framework, after
// https://venilnoronha.io/a-simple-state-machine-framework-in-go

import (
	"context"
	"errors"
	"sync"
)

// StateType represents an extensible state type in the state machine.
type StateType string

// EventType represents an extensible event type in the state machine.
type EventType string

// Action represents the action to be executed in a given state.
type Action interface {
	Execute(ctx context.Context) EventType
}

// Events represents a mapping of events and states.
type Events map[EventType]StateType

// State binds a state with an action and a set of events it can handle.
type State struct {
	Action
	Events
}

// States represents a mapping of states and their implementations.
type States map[StateType]State

// Machine represents the state machine.
type Machine struct {
	// mu ensures that only one event is processed by the state machine
	// at any given time.
	mu sync.Mutex
	// prev is the state the machine was in before the latest transition.
	prev StateType
	// curr is the state the machine is currently in.
	curr StateType
	// states holds the configuration of states and events handled by the
	// state machine.
	states States
	// OnTransition, when non-nil, is invoked on every transition with the
	// current and next state.
	OnTransition func(curr, next StateType)
}

// New create new finite-state Machine with initial StateType and States mapping.
func New(curr StateType, states States) *Machine {
	return &Machine{curr: curr, states: states}
}

// nextState resolves the event against the current state and returns the
// destination StateType and its State. It errors when the event is not
// accepted here ("rejected event") or when the destination is missing or
// has no Action ("invalid state").
func (m *Machine) nextState(event EventType) (StateType, *State, error) {
	current, ok := m.states[m.curr]
	if !ok || len(current.Events) == 0 {
		return "", nil, errors.New("fsm: rejected event")
	}
	next, ok := current.Events[event]
	if !ok {
		return "", nil, errors.New("fsm: rejected event")
	}
	dest, ok := m.states[next]
	if !ok || dest.Action == nil {
		return next, nil, errors.New("fsm: invalid state")
	}
	return next, &dest, nil
}

// SendEvent sends an event to the state machine. It keeps transitioning and
// executing state actions until an action returns the empty event.
func (m *Machine) SendEvent(ctx context.Context, event EventType) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	for {
		// Determine the next state for the event given the machine's
		// current state.
		next, dest, err := m.nextState(event)
		if err != nil {
			return err
		}
		// Transition over to the next state when the event is valid.
		if m.OnTransition != nil {
			m.OnTransition(m.curr, next)
		}
		m.prev, m.curr = m.curr, next
		// Run the destination state's action; an empty event means no
		// follow-up and ends the loop.
		event = dest.Action.Execute(ctx)
		if event == "" {
			return nil
		}
	}
}

// GetStates tuple of previous and current state
func (m *Machine) GetStates() (prev, curr StateType) {
	return m.prev, m.curr
}
fsm/fsm.go
0.695752
0.668759
fsm.go
starcoder
package stats

import (
	"fmt"
	"math"

	"github.com/jamestunnell/go-dsp/util/floatslice"
)

// CrossCorrelation determines the normalized cross-correlation of a feature with an image.
// Normalization is from -1 to +1, where +1 is high correlation, -1 is high
// correlation of the inverse, and 0 is no correlation.
// For autocorrelation, just cross-correlate a signal with itself.
// Image is the values which are actually received/measured.
// Feature is the values to be searched for in the image. Its size must not be
// greater than the size of the image.
// ZeroPadding is the number of zeros to surround the image with (both sides
// will be padded).
// Returns a slice of one correlation coefficient per sliding-window position,
// and a non-nil error in case of failure.
func CrossCorrelation(image, feature []float64, zeroPadding int) ([]float64, error) {
	if len(feature) > len(image) {
		err := fmt.Errorf("feature size %d is > image size %d", len(feature), len(image))
		return []float64{}, err
	}
	// Surround the image with zeroPadding zeros on each side so the feature
	// can be matched partially off either end of the original image.
	if zeroPadding > 0 {
		newImage := make([]float64, len(image)+2*zeroPadding)
		for i := 0; i < len(image); i++ {
			newImage[i+zeroPadding] = image[i]
		}
		image = newImage
	}
	// Mean-center the feature once; its squared deviation sum (sx) is
	// reused for every window.
	featureMean := Mean(feature)
	featureDiff := floatslice.Map(feature, func(x float64) float64 {
		return x - featureMean
	})
	sx := floatslice.Accumulate(featureDiff, func(x float64) float64 {
		return x * x
	})
	data := []float64{}
	nImage := len(image)
	nFeature := len(feature)
	// Slide the feature across every possible window of the (padded) image.
	for i := 0; i < (1 + nImage - nFeature); i++ {
		region := image[i:(i + nFeature)]
		regionMean := Mean(region)
		regionDiff := floatslice.Map(region, func(x float64) float64 {
			return x - regionMean
		})
		sy := floatslice.Accumulate(regionDiff, func(x float64) float64 {
			return x * x
		})
		// Guard the zero-variance cases to avoid dividing by zero:
		// two constant signals are treated as perfectly correlated (1.0),
		// one constant signal as uncorrelated (0.0).
		if sx == 0 || sy == 0 {
			if sx == 0 && sy == 0 {
				data = append(data, 1.0)
			} else {
				data = append(data, 0.0)
			}
			continue
		}
		// Pearson correlation coefficient for this window.
		denom := math.Sqrt(sx * sy)
		sum := 0.0
		for j := 0; j < nFeature; j++ {
			sum += (regionDiff[j] * featureDiff[j])
		}
		r := sum / denom
		data = append(data, r)
	}
	return data, nil
}
stats/crosscorrelation.go
0.773345
0.58951
crosscorrelation.go
starcoder
package mlpack /* #cgo CFLAGS: -I./capi -Wall #cgo LDFLAGS: -L. -lmlpack_go_kde #include <capi/kde.h> #include <stdlib.h> */ import "C" import "gonum.org/v1/gonum/mat" type KdeOptionalParam struct { AbsError float64 Algorithm string Bandwidth float64 InitialSampleSize int InputModel *kdeModel Kernel string McBreakCoef float64 McEntryCoef float64 McProbability float64 MonteCarlo bool Query *mat.Dense Reference *mat.Dense RelError float64 Tree string Verbose bool } func KdeOptions() *KdeOptionalParam { return &KdeOptionalParam{ AbsError: 0, Algorithm: "dual-tree", Bandwidth: 1, InitialSampleSize: 100, InputModel: nil, Kernel: "gaussian", McBreakCoef: 0.4, McEntryCoef: 3, McProbability: 0.95, MonteCarlo: false, Query: nil, Reference: nil, RelError: 0.05, Tree: "kd-tree", Verbose: false, } } /* This program performs a Kernel Density Estimation. KDE is a non-parametric way of estimating probability density function. For each query point the program will estimate its probability density by applying a kernel function to each reference point. The computational complexity of this is O(N^2) where there are N query points and N reference points, but this implementation will typically see better performance as it uses an approximate dual or single tree algorithm for acceleration. Dual or single tree optimization avoids many barely relevant calculations (as kernel function values decrease with distance), so it is an approximate computation. You can specify the maximum relative error tolerance for each query value with "RelError" as well as the maximum absolute error tolerance with the parameter "AbsError". This program runs using an Euclidean metric. Kernel function can be selected using the "Kernel" option. You can also choose what which type of tree to use for the dual-tree algorithm with "Tree". It is also possible to select whether to use dual-tree algorithm or single-tree algorithm using the "Algorithm" option. 
Monte Carlo estimations can be used to accelerate the KDE estimate when the Gaussian Kernel is used. This provides a probabilistic guarantee on the the error of the resulting KDE instead of an absolute guarantee.To enable Monte Carlo estimations, the "MonteCarlo" flag can be used, and success probability can be set with the "McProbability" option. It is possible to set the initial sample size for the Monte Carlo estimation using "InitialSampleSize". This implementation will only consider a node, as a candidate for the Monte Carlo estimation, if its number of descendant nodes is bigger than the initial sample size. This can be controlled using a coefficient that will multiply the initial sample size and can be set using "McEntryCoef". To avoid using the same amount of computations an exact approach would take, this program recurses the tree whenever a fraction of the amount of the node's descendant points have already been computed. This fraction is set using "McBreakCoef". For example, the following will run KDE using the data in ref_data for training and the data in qu_data as query data. It will apply an Epanechnikov kernel with a 0.2 bandwidth to each reference point and use a KD-Tree for the dual-tree optimization. The returned predictions will be within 5% of the real KDE value for each query point. // Initialize optional parameters for Kde(). param := mlpack.KdeOptions() param.Reference = ref_data param.Query = qu_data param.Bandwidth = 0.2 param.Kernel = "epanechnikov" param.Tree = "kd-tree" param.RelError = 0.05 _, out_data := mlpack.Kde(param) the predicted density estimations will be stored in out_data. If no "Query" is provided, then KDE will be computed on the "Reference" dataset. It is possible to select either a reference dataset or an input model but not both at the same time. If an input model is selected and parameter values are not set (e.g. "Bandwidth") then default parameter values will be used. 
In addition to the last program call, it is also possible to activate Monte
  Carlo estimations if a Gaussian kernel is used. This can provide faster
  results, but the KDE will only have a probabilistic guarantee of meeting the
  desired error bound (instead of an absolute guarantee). The following example
  will run KDE using a Monte Carlo estimation when possible. The results will
  be within a 5% of the real KDE value with a 95% probability. Initial sample
  size for the Monte Carlo estimation will be 200 points and a node will be a
  candidate for the estimation only when it contains 700 (i.e. 3.5*200) points.
  If a node contains 700 points and 420 (i.e. 0.6*700) have already been
  sampled, then the algorithm will recurse instead of keep sampling.

  // Initialize optional parameters for Kde().
  param := mlpack.KdeOptions()
  param.Reference = ref_data
  param.Query = qu_data
  param.Bandwidth = 0.2
  param.Kernel = "gaussian"
  param.Tree = "kd-tree"
  param.RelError = 0.05
  param.MonteCarlo = true
  param.McProbability = 0.95
  param.InitialSampleSize = 200
  param.McEntryCoef = 3.5
  param.McBreakCoef = 0.6

  _, out_data := mlpack.Kde(param)

  Input parameters:

   - AbsError (float64): Absolute error tolerance for the prediction.
        Default value 0.
   - Algorithm (string): Algorithm to use for the prediction.('dual-tree',
        'single-tree'). Default value 'dual-tree'.
   - Bandwidth (float64): Bandwidth of the kernel. Default value 1.
   - InitialSampleSize (int): Initial sample size for Monte Carlo
        estimations. Default value 100.
   - InputModel (kdeModel): Contains pre-trained KDE model.
   - Kernel (string): Kernel to use for the prediction.('gaussian',
        'epanechnikov', 'laplacian', 'spherical', 'triangular'). Default value
        'gaussian'.
   - McBreakCoef (float64): Controls what fraction of the amount of node's
        descendants is the limit for the sample size before it recurses.
        Default value 0.4.
- McEntryCoef (float64): Controls how much larger does the amount of node descendants has to be compared to the initial sample size in order to be a candidate for Monte Carlo estimations. Default value 3. - McProbability (float64): Probability of the estimation being bounded by relative error when using Monte Carlo estimations. Default value 0.95. - MonteCarlo (bool): Whether to use Monte Carlo estimations when possible. - Query (mat.Dense): Query dataset to KDE on. - Reference (mat.Dense): Input reference dataset use for KDE. - RelError (float64): Relative error tolerance for the prediction. Default value 0.05. - Tree (string): Tree to use for the prediction.('kd-tree', 'ball-tree', 'cover-tree', 'octree', 'r-tree'). Default value 'kd-tree'. - Verbose (bool): Display informational messages and the full list of parameters and timers at the end of execution. Output parameters: - outputModel (kdeModel): If specified, the KDE model will be saved here. - predictions (mat.Dense): Vector to store density predictions. */ func Kde(param *KdeOptionalParam) (kdeModel, *mat.Dense) { resetTimers() enableTimers() disableBacktrace() disableVerbose() restoreSettings("Kernel Density Estimation") // Detect if the parameter was passed; set if so. if param.AbsError != 0 { setParamDouble("abs_error", param.AbsError) setPassed("abs_error") } // Detect if the parameter was passed; set if so. if param.Algorithm != "dual-tree" { setParamString("algorithm", param.Algorithm) setPassed("algorithm") } // Detect if the parameter was passed; set if so. if param.Bandwidth != 1 { setParamDouble("bandwidth", param.Bandwidth) setPassed("bandwidth") } // Detect if the parameter was passed; set if so. if param.InitialSampleSize != 100 { setParamInt("initial_sample_size", param.InitialSampleSize) setPassed("initial_sample_size") } // Detect if the parameter was passed; set if so. 
if param.InputModel != nil { setKDEModel("input_model", param.InputModel) setPassed("input_model") } // Detect if the parameter was passed; set if so. if param.Kernel != "gaussian" { setParamString("kernel", param.Kernel) setPassed("kernel") } // Detect if the parameter was passed; set if so. if param.McBreakCoef != 0.4 { setParamDouble("mc_break_coef", param.McBreakCoef) setPassed("mc_break_coef") } // Detect if the parameter was passed; set if so. if param.McEntryCoef != 3 { setParamDouble("mc_entry_coef", param.McEntryCoef) setPassed("mc_entry_coef") } // Detect if the parameter was passed; set if so. if param.McProbability != 0.95 { setParamDouble("mc_probability", param.McProbability) setPassed("mc_probability") } // Detect if the parameter was passed; set if so. if param.MonteCarlo != false { setParamBool("monte_carlo", param.MonteCarlo) setPassed("monte_carlo") } // Detect if the parameter was passed; set if so. if param.Query != nil { gonumToArmaMat("query", param.Query) setPassed("query") } // Detect if the parameter was passed; set if so. if param.Reference != nil { gonumToArmaMat("reference", param.Reference) setPassed("reference") } // Detect if the parameter was passed; set if so. if param.RelError != 0.05 { setParamDouble("rel_error", param.RelError) setPassed("rel_error") } // Detect if the parameter was passed; set if so. if param.Tree != "kd-tree" { setParamString("tree", param.Tree) setPassed("tree") } // Detect if the parameter was passed; set if so. if param.Verbose != false { setParamBool("verbose", param.Verbose) setPassed("verbose") enableVerbose() } // Mark all output options as passed. setPassed("output_model") setPassed("predictions") // Call the mlpack program. C.mlpackKde() // Initialize result variable and get output. var outputModel kdeModel outputModel.getKDEModel("output_model") var predictionsPtr mlpackArma predictions := predictionsPtr.armaToGonumCol("predictions") // Clear settings. clearSettings() // Return output(s). 
return outputModel, predictions }
kde.go
0.76454
0.528716
kde.go
starcoder
package retry

import (
	"math"
	"math/rand"
	"time"
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

// BackoffStrategy is function that calculates for how long should delay be
// before sending next request, given the repeat attempt number n.
type BackoffStrategy func(n int) time.Duration

// ConstantBackoff returns a strategy that always yields the same delay,
// regardless of the attempt number.
func ConstantBackoff(duration time.Duration) BackoffStrategy {
	return func(int) time.Duration {
		return duration
	}
}

// LinearBackoff returns a strategy whose delay grows by step with each
// attempt, capped at max.
func LinearBackoff(step time.Duration, max time.Duration) BackoffStrategy {
	return func(attempt int) time.Duration {
		return minDuration(calcLinear(step, attempt), max)
	}
}

// LinearJitterBackoff returns a strategy whose delay grows linearly with the
// attempt number, with random jitter of up to one step added, capped at max.
func LinearJitterBackoff(step time.Duration, max time.Duration) BackoffStrategy {
	return func(attempt int) time.Duration {
		jittered := addJitter(calcLinear(step, attempt), step)
		return minDuration(jittered, max)
	}
}

// ExponentialBackoff returns a strategy whose delay grows as
// min * factor^attempt, capped at max.
func ExponentialBackoff(min, max time.Duration, factor float64) BackoffStrategy {
	return func(attempt int) time.Duration {
		return calcExponential(min, max, factor, attempt)
	}
}

// ExponentialJitterBackoff returns a strategy whose delay grows exponentially
// with the attempt number and has random jitter added.
func ExponentialJitterBackoff(min, max time.Duration, factor float64) BackoffStrategy {
	return func(attempt int) time.Duration {
		return addJitter(calcExponential(min, max, factor, attempt), min)
	}
}

// calcExponential computes min * factor^attempt, capped at max.
func calcExponential(min, max time.Duration, factor float64, attempt int) time.Duration {
	grown := time.Duration(float64(min) * math.Pow(factor, float64(attempt)))
	return minDuration(grown, max)
}

// calcLinear computes attempt * step.
func calcLinear(step time.Duration, attempt int) time.Duration {
	return time.Duration(attempt) * step
}

// minDuration returns the smaller of the two durations.
func minDuration(first time.Duration, second time.Duration) time.Duration {
	if first > second {
		return second
	}
	return first
}

// addJitter spreads next uniformly over [next-min, next+min).
func addJitter(next, min time.Duration) time.Duration {
	jitter := rand.Float64() * float64(2*min)
	return time.Duration(jitter + float64(next-min))
}
middlewares/retry/backoff.go
0.868172
0.460107
backoff.go
starcoder
package modules

import (
	"fmt"
)

// The six screen positions, in declaration order.
const (
	// SixWordsPositionTopLeft is a SixWordsPosition of type TopLeft.
	SixWordsPositionTopLeft SixWordsPosition = iota
	// SixWordsPositionMiddleLeft is a SixWordsPosition of type MiddleLeft.
	SixWordsPositionMiddleLeft
	// SixWordsPositionBottomLeft is a SixWordsPosition of type BottomLeft.
	SixWordsPositionBottomLeft
	// SixWordsPositionTopRight is a SixWordsPosition of type TopRight.
	SixWordsPositionTopRight
	// SixWordsPositionMiddleRight is a SixWordsPosition of type MiddleRight.
	SixWordsPositionMiddleRight
	// SixWordsPositionBottomRight is a SixWordsPosition of type BottomRight.
	SixWordsPositionBottomRight
)

// _SixWordsPositionName concatenates every position name in enum order.
const _SixWordsPositionName = "topLeftmiddleLeftbottomLefttopRightmiddleRightbottomRight"

// _SixWordsPositionMap maps each enum value to its display name.
var _SixWordsPositionMap = map[SixWordsPosition]string{
	SixWordsPositionTopLeft:     "topLeft",
	SixWordsPositionMiddleLeft:  "middleLeft",
	SixWordsPositionBottomLeft:  "bottomLeft",
	SixWordsPositionTopRight:    "topRight",
	SixWordsPositionMiddleRight: "middleRight",
	SixWordsPositionBottomRight: "bottomRight",
}

// String implements the Stringer interface.
func (x SixWordsPosition) String() string { if str, ok := _SixWordsPositionMap[x]; ok { return str } return fmt.Sprintf("SixWordsPosition(%d)", x) } var _SixWordsPositionValue = map[string]SixWordsPosition{ _SixWordsPositionName[0:7]: SixWordsPositionTopLeft, _SixWordsPositionName[7:17]: SixWordsPositionMiddleLeft, _SixWordsPositionName[17:27]: SixWordsPositionBottomLeft, _SixWordsPositionName[27:35]: SixWordsPositionTopRight, _SixWordsPositionName[35:46]: SixWordsPositionMiddleRight, _SixWordsPositionName[46:57]: SixWordsPositionBottomRight, } // ParseSixWordsPosition attempts to convert a string to a SixWordsPosition func ParseSixWordsPosition(name string) (SixWordsPosition, error) { if x, ok := _SixWordsPositionValue[name]; ok { return x, nil } return SixWordsPosition(0), fmt.Errorf("%s is not a valid SixWordsPosition", name) } // MarshalText implements the text marshaller method func (x SixWordsPosition) MarshalText() ([]byte, error) { return []byte(x.String()), nil } // UnmarshalText implements the text unmarshaller method func (x *SixWordsPosition) UnmarshalText(text []byte) error { name := string(text) tmp, err := ParseSixWordsPosition(name) if err != nil { return err } *x = tmp return nil }
modules/six_words_enum.go
0.615781
0.432303
six_words_enum.go
starcoder
package posev

import (
	"github.com/mitsuse/matrix-go"
	"github.com/mitsuse/matrix-go/dense"
	//~ "log"
	"math"
	"math/rand"
)

// matrix.Matrix.Scalar() mutates the matrix, it is not always handy.
// ScalarMult returns s*m as a freshly allocated matrix, leaving m untouched.
func ScalarMult(m matrix.Matrix, s float64) matrix.Matrix {
	r := dense.Zeros(m.Shape())
	c := m.All()
	for c.HasNext() {
		v, i, j := c.Get()
		r.Update(i, j, v*s)
	}
	return r
}

// VecNorm returns the Euclidean (Frobenius) norm of m: sqrt of the sum of
// squares over all entries.
func VecNorm(m matrix.Matrix) float64 {
	n := float64(0)
	c := m.All()
	for c.HasNext() {
		v, _, _ := c.Get()
		n += v * v
	}
	return math.Sqrt(n)
}

// VecNormalize returns m scaled to unit Euclidean norm.
// NOTE(review): no guard for a zero-norm input (would produce Inf entries).
func VecNormalize(m matrix.Matrix) matrix.Matrix {
	n := VecNorm(m)
	return ScalarMult(m, float64(1)/n)
}

// randUnitVec returns a random n x 1 unit vector with normally distributed
// direction.
func randUnitVec(n int) matrix.Matrix {
	r := dense.Zeros(n, 1)
	for i := 0; i < n; i++ {
		r.Update(i, 0, rand.NormFloat64())
	}
	return VecNormalize(r)
}

// PowerTopEigen estimates the dominant eigenpair of a by power iteration.
// It stops when the relative change of the eigenvalue estimate drops below
// eps, and returns (nil, 0) if that does not happen within maxIters.
// NOTE(review): the convergence test divides by e; an eigenvalue estimate of
// exactly 0 would yield NaN — confirm inputs make that impossible.
func PowerTopEigen(a matrix.Matrix, maxIters int, eps float64) (v matrix.Matrix, e float64) {
	v = randUnitVec(a.Columns())
	o := float64(0)
	for i := 0; i < maxIters; i++ {
		z := a.Multiply(v)
		v = VecNormalize(z)
		// Rayleigh quotient v^T (a v) of the current unit iterate.
		e = v.Transpose().Multiply(z).Get(0, 0)
		if math.Abs((e-o)/e) < eps {
			return
		}
		o = e
	}
	return nil, 0
}

// DeltaEigen returns the residual norm ||(e*I - a) v|| for a candidate
// eigenpair (e, v); small values indicate a good pair.
func DeltaEigen(e float64, v, a matrix.Matrix) float64 {
	dim := v.Rows()
	id := dense.Zeros(dim, dim)
	for i := 0; i < dim; i++ {
		id.Update(i, i, e)
	}
	z := id.Subtract(a).Multiply(v)
	return VecNorm(z)
}

// PowerTopSingular estimates the top singular triple (u, v, s) of a by
// alternating power iteration, starting from direction r. Iteration stops
// when v moves by less than eps (max-abs componentwise) from the previous r;
// (nil, nil, nil, 0) is returned on non-convergence.
// NOTE(review): w is set to r - v while the step is still large (> 10*eps);
// the caller (PowerTopKSingular) uses it as a warm-start direction for the
// next triple — confirm that interpretation.
func PowerTopSingular(a, r matrix.Matrix, maxIters int, eps float64) (u, v, w matrix.Matrix, s float64) {
	n := a.Columns()
	b := a.Transpose()
	for i := 0; i < maxIters; i++ {
		u = VecNormalize(a.Multiply(r))
		v = b.Multiply(u)
		s = VecNorm(v)
		// Scalar mutates v in place (see note on ScalarMult above).
		v.Scalar(float64(1) / s)
		// d = max_j |r_j - v_j|: how far the right vector moved this step.
		d := float64(0)
		for j := 0; j < n; j++ {
			c := math.Abs(r.Get(j, 0) - v.Get(j, 0))
			if d < c {
				d = c
			}
		}
		if d > 10*eps {
			w = r.Subtract(v)
		}
		if d < eps {
			return
		}
		r = v
	}
	return nil, nil, nil, 0
}

// HotellingDeflation returns b = a - s * l * r^T, removing the contribution
// of the singular triple (l, r, s) from a.
func HotellingDeflation(a, l, r matrix.Matrix, s float64) (b matrix.Matrix) {
	b = dense.Zeros(a.Shape())
	for i := 0; i < l.Rows(); i++ {
		for j := 0; j < r.Rows(); j++ {
			b.Update(i, j, a.Get(i, j)-s*l.Get(i, 0)*r.Get(j, 0))
		}
	}
	return
}

// PowerTopKSingular computes the top k singular triples of a by repeatedly
// extracting the dominant triple and deflating. Columns i of u and v hold the
// i-th singular vectors; s holds the singular values. The function returns
// early (with remaining entries zero) once a singular value becomes
// negligible relative to the first one.
func PowerTopKSingular(a matrix.Matrix, k, maxIters int, eps float64) (u, v matrix.Matrix, s []float64) {
	tol := 0.000000001
	m := a.Rows()
	n := a.Columns()
	u = dense.Zeros(m, k)
	v = dense.Zeros(n, k)
	s = make([]float64, k)
	r := randUnitVec(n)
	for i := 0; i < k; i++ {
		ui, vi, w, si := PowerTopSingular(a, r, maxIters, eps)
		// Singular values are too small to continue.
		// NOTE(review): on the first pass s[0] is still 0, so this reduces to
		// si < tol*tol — an absolute (not relative) cutoff for i == 0.
		if si/(s[0]+tol) < tol {
			return
		}
		s[i] = si
		for j := 0; j < m; j++ {
			u.Update(j, i, ui.Get(j, 0))
		}
		for j := 0; j < n; j++ {
			v.Update(j, i, vi.Get(j, 0))
		}
		a = HotellingDeflation(a, ui, vi, si)
		// Warm-start the next iteration from the leftover direction when
		// PowerTopSingular provided one; otherwise restart randomly.
		if w == nil {
			r = randUnitVec(n)
		} else {
			r = VecNormalize(w)
		}
	}
	return
}

// DeltaSingular returns the residual norm ||a v - s u|| for a candidate
// singular triple; small values indicate a good triple.
func DeltaSingular(s float64, u, v, a matrix.Matrix) float64 {
	z := a.Multiply(v).Subtract(ScalarMult(u, s))
	return VecNorm(z)
}
posev.go
0.702326
0.496582
posev.go
starcoder
Package date provides time.Time derivatives that conform to the Swagger.io
(https://swagger.io/) defined date formats: Date and DateTime.
Both types may, in most cases, be used in lieu of time.Time types. And both convert
to time.Time through a ToTime method.
*/
package date

import (
	"fmt"
	"time"
)

const (
	fullDate     = "2006-01-02"
	fullDateJSON = `"2006-01-02"`
	dateFormat   = "%04d-%02d-%02d"
	jsonFormat   = `"%04d-%02d-%02d"`
)

// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
// 2006-01-02).
type Date struct {
	time.Time
}

// ParseDate create a new Date from the passed string.
func ParseDate(date string) (d Date, err error) {
	return parseDate(date, fullDate)
}

// parseDate parses date using the given reference layout; on failure the zero
// Date is returned alongside the parse error.
func parseDate(date string, format string) (Date, error) {
	d, err := time.Parse(format, date)
	return Date{Time: d}, err
}

// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalBinary() ([]byte, error) {
	return d.MarshalText()
}

// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalBinary(data []byte) error {
	return d.UnmarshalText(data)
}

// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalJSON() (json []byte, err error) {
	return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
}

// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalJSON(data []byte) (err error) {
	d.Time, err = time.Parse(fullDateJSON, string(data))
	return err
}

// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d Date) MarshalText() (text []byte, err error) {
	// Delegate to String, which emits the same RFC3339 full-date layout.
	return []byte(d.String()), nil
}

// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
// 2006-01-02).
func (d *Date) UnmarshalText(data []byte) (err error) {
	parsed, parseErr := time.Parse(fullDate, string(data))
	d.Time = parsed
	return parseErr
}

// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
func (d Date) String() string {
	return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())
}

// ToTime returns a Date as a time.Time
func (d Date) ToTime() time.Time {
	return d.Time
}
vendor/github.com/Azure/go-autorest/autorest/date/date.go
0.868297
0.580471
date.go
starcoder
package evaluator

import (
	"bytes"
	"fmt"
	"github.com/radlinskii/interpreter/ast"
	"github.com/radlinskii/interpreter/object"
)

var (
	// TRUE is a single object that all the appearances of boolean nodes with value "true" will point to.
	TRUE = &object.Boolean{Value: true}
	// FALSE is a single object that all the appearances of boolean nodes with value "false" will point to.
	FALSE = &object.Boolean{Value: false}
	// NULL is a single object that all the appearances of nodes without a value will point to.
	NULL = &object.Null{}
	// VOID is a single object returned for `return` statements that carry no value.
	VOID = &object.Void{}
)

// programOutput accumulates the Inspect() output of a program evaluation.
// NOTE(review): package-level shared state — concurrent EvalProgram calls
// would interleave output; confirm single-threaded use.
var programOutput bytes.Buffer

// eval evaluates a single AST node in env and returns its object value.
// Error objects produced by sub-evaluations are propagated immediately;
// unknown node types evaluate to nil.
func eval(node ast.Node, env *object.Environment) object.Object {
	switch node := node.(type) {
	// Statements
	case *ast.BlockStatement:
		return evalBlockStatement(node, env)
	case *ast.ExpressionStatement:
		return eval(node.Expression, env)
	case *ast.IfStatement:
		return evalIfStatement(node, env)
	case *ast.ReturnStatement:
		return evalReturnStatement(node, env)
	case *ast.ConstStatement:
		return evalConstStatement(node, env)
	// Expressions
	case *ast.IntegerLiteral:
		return &object.Integer{Value: node.Value}
	case *ast.BooleanLiteral:
		return evalBoolToBooleanObjectReference(node.Value)
	case *ast.StringLiteral:
		return &object.String{Value: node.Value}
	case *ast.PrefixExpression:
		right := eval(node.Right, env)
		if isError(right) {
			return right
		}
		return evalPrefixExpression(node.Operator, right)
	case *ast.InfixExpression:
		left := eval(node.Left, env)
		if isError(left) {
			return left
		}
		right := eval(node.Right, env)
		if isError(right) {
			return right
		}
		return evalInfixExpression(node.Operator, left, right)
	case *ast.Identifier:
		return evalIdentifier(node, env)
	case *ast.FunctionLiteral:
		// Functions capture their defining environment (closures).
		params := node.Parameters
		body := node.Body
		return &object.Function{Parameters: params, Env: env, Body: body}
	case *ast.CallExpression:
		fun := eval(node.Function, env)
		if isError(fun) {
			return fun
		}
		// evalExpressions signals an argument error as a 1-element slice.
		args := evalExpressions(node.Arguments, env)
		if len(args) == 1 && isError(args[0]) {
			return args[0]
		}
		return applyFunction(fun, args)
	case *ast.ArrayLiteral:
		elements := evalExpressions(node.Elements, env)
		if len(elements) == 1 && isError(elements[0]) {
			return elements[0]
		}
		return &object.Array{Elements: elements}
	case *ast.IndexExpression:
		left := eval(node.Left, env)
		if isError(left) {
			return left
		}
		right := eval(node.Right, env)
		if isError(right) {
			return right
		}
		return evalIndexExpression(left, right)
	case *ast.HashLiteral:
		return evalHashLiteral(node, env)
	default:
		return nil
	}
}

// evalProgram evaluates the program's top-level statements in order and
// returns the last result. A top-level `return` is rejected as an error;
// any Error object stops evaluation immediately.
func evalProgram(program *ast.Program, env *object.Environment) object.Object {
	var result object.Object
	for _, stmnt := range program.Statements {
		result = eval(stmnt, env)
		switch result := result.(type) {
		case *object.Return:
			return newError("return statement not permitted outside function body")
		case *object.Error:
			return result
		}
	}
	return result
}

// EvalProgram starts evaluation of the AST.
func EvalProgram(program *ast.Program, env *object.Environment) string {
	evaluated := evalProgram(program, env)
	programOutput.WriteString(evaluated.Inspect())
	// Snapshot then clear the shared buffer so the next run starts fresh.
	retStr := programOutput.String()
	programOutput.Reset()
	return retStr
}

// evalBlockStatement evaluates a block in a fresh enclosed environment and
// returns the last result, bailing out early on Return or Error objects so
// they propagate to the enclosing scope unwrapped.
func evalBlockStatement(block *ast.BlockStatement, env *object.Environment) object.Object {
	var result object.Object
	blockEnv := object.NewEnclosedEnvironment(env)
	for _, stmnt := range block.Statements {
		result = eval(stmnt, blockEnv)
		if result != nil {
			rt := result.Type()
			if rt == object.RETURN || rt == object.ERROR {
				return result
			}
		}
	}
	return result
}

// evalPrefixExpression dispatches a prefix operator ("!" or "-") on right.
func evalPrefixExpression(operator string, right object.Object) object.Object {
	switch operator {
	case "!":
		return evalBangOperatorExpression(right)
	case "-":
		return evalMinusPrefixOperatorExpression(right)
	default:
		return newError("unknown operator: %s%s", operator, right.Type())
	}
}

// evalBangOperatorExpression negates a boolean; non-boolean operands are a
// type error (no implicit truthiness).
func evalBangOperatorExpression(right object.Object) object.Object {
	switch right {
	case TRUE:
		return FALSE
	case FALSE:
		return TRUE
	default:
		return newError("expected BOOLEAN in negation expression, got: %s", right.Type())
	}
}

// evalMinusPrefixOperatorExpression negates an integer operand.
func evalMinusPrefixOperatorExpression(right object.Object) object.Object {
	if right.Type() != object.INTEGER {
		return newError("unknown operator: -%s", right.Type())
	}
	value := right.(*object.Integer).Value
	return &object.Integer{Value: -value}
}

// evalInfixExpression dispatches an infix operator on two operands of equal
// type; "=="/"!=" on other types compare object identity (valid for the
// singleton TRUE/FALSE references).
func evalInfixExpression(operator string, left, right object.Object) object.Object {
	switch {
	case left.Type() != right.Type():
		// handling type mismatch error first
		return newError("type mismatch: %s %s %s", left.Type(), operator, right.Type())
	case left.Type() == object.INTEGER:
		return evalIntegerInfixExpression(operator, left, right)
	case left.Type() == object.STRING:
		return evalStringInfixExpression(operator, left, right)
	case operator == "==":
		return evalBoolToBooleanObjectReference(left == right)
	case operator == "!=":
		return evalBoolToBooleanObjectReference(left != right)
	default:
		return newError("unknown operator: %s %s %s",
			left.Type(), operator, right.Type())
	}
}

// evalIntegerInfixExpression applies arithmetic and comparison operators to
// two integers.
// NOTE(review): "/" is integer division and will panic on a zero divisor —
// confirm whether a runtime error object is preferred.
func evalIntegerInfixExpression(operator string, left, right object.Object) object.Object {
	leftVal := left.(*object.Integer).Value
	rightVal := right.(*object.Integer).Value
	switch operator {
	case "+":
		return &object.Integer{Value: leftVal + rightVal}
	case "-":
		return &object.Integer{Value: leftVal - rightVal}
	case "*":
		return &object.Integer{Value: leftVal * rightVal}
	case "/":
		return &object.Integer{Value: leftVal / rightVal}
	case "<":
		return evalBoolToBooleanObjectReference(leftVal < rightVal)
	case ">":
		return evalBoolToBooleanObjectReference(leftVal > rightVal)
	case "==":
		return evalBoolToBooleanObjectReference(leftVal == rightVal)
	case "!=":
		return evalBoolToBooleanObjectReference(leftVal != rightVal)
	case "<=":
		return evalBoolToBooleanObjectReference(leftVal <= rightVal)
	case ">=":
		return evalBoolToBooleanObjectReference(leftVal >= rightVal)
	default:
		return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	}
}

// evalStringInfixExpression supports concatenation and (in)equality on strings.
func evalStringInfixExpression(operator string, left, right object.Object) object.Object {
	leftVal := left.(*object.String).Value
	rightVal := right.(*object.String).Value
	switch operator {
	case "+":
		return &object.String{Value: leftVal + rightVal}
	case "==":
		return evalBoolToBooleanObjectReference(leftVal == rightVal)
	case "!=":
		return evalBoolToBooleanObjectReference(leftVal != rightVal)
	default:
		return newError("unknown operator: %s %s %s", left.Type(), operator, right.Type())
	}
}

// evalBoolToBooleanObjectReference maps a Go bool onto the shared TRUE/FALSE
// singletons.
func evalBoolToBooleanObjectReference(val bool) object.Object {
	if val {
		return TRUE
	}
	return FALSE
}

// evalIfStatement evaluates the condition (which must be a boolean object)
// and then the consequence or alternative; NULL is produced when the
// condition is false and no alternative exists.
func evalIfStatement(ie *ast.IfStatement, env *object.Environment) object.Object {
	condition := eval(ie.Condition, env)
	if isError(condition) {
		return condition
	}
	isConditionTrue, ok := isTruthy(condition)
	if !ok {
		return newError("expected BOOLEAN as condition in if-statement got: %s", condition.Type())
	}
	if isConditionTrue {
		return eval(ie.Consequence, env)
	} else if ie.Alternative != nil {
		return eval(ie.Alternative, env)
	}
	return NULL
}

// isTruthy reports (value, ok): ok is false for any object other than the
// TRUE/FALSE singletons, i.e. there is no implicit truthiness.
func isTruthy(obj object.Object) (val, ok bool) {
	switch obj {
	case FALSE:
		val = false
		ok = true
	case TRUE:
		val = true
		ok = true
	default:
		val = false
		ok = false
	}
	return
}

// evalIdentifier resolves an identifier in env first, then in the builtin
// table; anything else is an unknown-identifier error.
func evalIdentifier(i *ast.Identifier, env *object.Environment) object.Object {
	if val, ok := env.Get(i.Value); ok {
		return val
	}
	if builtin, ok := builtins[i.Value]; ok {
		return builtin
	}
	return newError("unknown identifier: %s", i.Value)
}

// evalIndexExpression dispatches indexing on arrays (integer index) and
// hashes (hashable key).
func evalIndexExpression(left, right object.Object) object.Object {
	switch {
	case left.Type() == object.ARRAY && right.Type() == object.INTEGER:
		return evalArrayIndexExpression(left, right)
	case left.Type() == object.HASH:
		return evalHashIndexExpression(left, right)
	default:
		return newError("index operator not supported: %s[%s]", left.Type(), right.Type())
	}
}

// evalArrayIndexExpression bounds-checks and returns the indexed element.
func evalArrayIndexExpression(array, index object.Object) object.Object {
	arrayObject := array.(*object.Array)
	i := index.(*object.Integer).Value
	max := int64(len(arrayObject.Elements) - 1)
	if i < 0 || i > max {
		return newError("index out of boundaries")
	}
	return arrayObject.Elements[i]
}

// evalHashIndexExpression looks up a hashable key; missing keys and
// unhashable key types are errors.
func evalHashIndexExpression(hash, index object.Object) object.Object {
	hashObject := hash.(*object.Hash)
	key, ok := index.(object.Hashable)
	if !ok {
		return newError("index operator not supported: %s[%s]", hash.Type(), index.Type())
	}
	pair, ok := hashObject.Pairs[key.HashKey()]
	if !ok {
		return newError("No hash pair in %q with key %q", hash.Inspect(), index.Inspect())
	}
	return pair.Value
}

// evalHashLiteral evaluates every key/value pair; keys must be Hashable.
func evalHashLiteral(node *ast.HashLiteral, env *object.Environment) object.Object {
	pairs := make(map[object.HashKey]object.HashPair)
	for keyNode, valueNode := range node.Pairs {
		key := eval(keyNode, env)
		if isError(key) {
			return key
		}
		hashKey, ok := key.(object.Hashable)
		if !ok {
			return newError("%s can't be used as hash key", key.Type())
		}
		value := eval(valueNode, env)
		if isError(value) {
			return value
		}
		hashed := hashKey.HashKey()
		pairs[hashed] = object.HashPair{Key: key, Value: value}
	}
	return &object.Hash{Pairs: pairs}
}

// evalExpressions evaluates a list of expressions left to right; on the first
// error it returns a 1-element slice holding only that error (callers test
// len == 1 && isError).
func evalExpressions(exps []ast.Expression, env *object.Environment) []object.Object {
	var result []object.Object
	for _, e := range exps {
		evaluated := eval(e, env)
		if isError(evaluated) {
			return []object.Object{evaluated}
		}
		result = append(result, evaluated)
	}
	return result
}

// evalReturnStatement wraps the returned value in a Return object; a bare
// `return` yields the VOID singleton.
func evalReturnStatement(rs *ast.ReturnStatement, env *object.Environment) object.Object {
	if rs.ReturnValue == nil {
		return VOID
	}
	val := eval(rs.ReturnValue, env)
	if isError(val) {
		return val
	}
	return &object.Return{Value: val}
}

// evalConstStatement binds a new constant, rejecting redeclaration within the
// same (shallow) scope.
func evalConstStatement(cs *ast.ConstStatement, env *object.Environment) object.Object {
	if _, ok := env.ShallowGet(cs.Name.Value); ok {
		return newError("redeclared constant: %q in one block", cs.Name.Value)
	}
	val := eval(cs.Value, env)
	if isError(val) {
		return val
	}
	return env.Set(cs.Name.Value, val)
}

// applyFunction invokes a user-defined function (with a fresh environment
// binding its parameters) or a builtin; anything else is a type error.
func applyFunction(fun object.Object, args []object.Object) object.Object {
	switch function := fun.(type) {
	case *object.Function:
		extendedEnv := extendedFunctionEnv(function, args)
		evaluated := evalFunctionBody(function.Body, extendedEnv)
		if isError(evaluated) {
			return evaluated
		}
		return unwrapReturnValue(evaluated)
	case *object.Builtin:
		return function.Fn(args...)
	default:
		return newError("not a function: %s", function.Type())
	}
}

// evalFunctionBody evaluates a function body and requires it to terminate via
// `return` (possibly VOID); falling off the end is an error.
func evalFunctionBody(body *ast.BlockStatement, env *object.Environment) object.Object {
	var result object.Object
	for _, stmnt := range body.Statements {
		result = eval(stmnt, env)
		if result != nil {
			rt := result.Type()
			if result == VOID || rt == object.RETURN || rt == object.ERROR {
				return result
			}
		}
	}
	return newError("missing return at the end of function body")
}

// extendedFunctionEnv builds the call environment: the closure environment
// extended with parameter-to-argument bindings.
// NOTE(review): no arity check — fewer args than params would panic on index;
// confirm the parser/callers guarantee matching arity.
func extendedFunctionEnv(fun *object.Function, args []object.Object) *object.Environment {
	env := object.NewEnclosedEnvironment(fun.Env)
	for paramIdx, param := range fun.Parameters {
		env.Set(param.Value, args[paramIdx])
	}
	return env
}

// unwrapReturnValue strips the Return wrapper so the value does not keep
// unwinding past the call boundary.
func unwrapReturnValue(obj object.Object) object.Object {
	if rtrn, ok := obj.(*object.Return); ok {
		return rtrn.Value
	}
	return obj
}

// newError formats an Error object.
func newError(format string, a ...interface{}) *object.Error {
	return &object.Error{Message: fmt.Sprintf(format, a...)}
}

// isError reports whether obj is a non-nil Error object.
func isError(obj object.Object) bool {
	if obj != nil {
		return obj.Type() == object.ERROR
	}
	return false
}
evaluator/evaluator.go
0.62498
0.463323
evaluator.go
starcoder
package generaltree

import (
	"fmt"
	"math"

	"github.com/MehdiEidi/gods/queue/linkedqueue"
	"github.com/MehdiEidi/gods/stack/linkedstack"
)

// GeneralTree is an n-ary tree; Size tracks the total number of nodes.
type GeneralTree[T any] struct {
	Root *Node[T]
	Size int
}

// New constructs and returns an empty general tree.
func New[T any]() *GeneralTree[T] {
	return &GeneralTree[T]{}
}

// Parent returns the parent of the given node.
func (gt *GeneralTree[T]) Parent(n *Node[T]) *Node[T] {
	return n.Parent
}

// Children returns a slice of the children of the given node.
func (gt *GeneralTree[T]) Children(n *Node[T]) []*Node[T] {
	return n.Children
}

// ChildrenCount returns the count of the children of the given node.
func (gt *GeneralTree[T]) ChildrenCount(n *Node[T]) int {
	return len(n.Children)
}

// IsInternal returns true if the given node has one or more children (its an internal node).
func (gt *GeneralTree[T]) IsInternal(n *Node[T]) bool {
	return gt.ChildrenCount(n) != 0
}

// IsExternal returns true if the given node doesn't have any children (its an external node).
func (gt *GeneralTree[T]) IsExternal(n *Node[T]) bool {
	return gt.ChildrenCount(n) == 0
}

// IsRoot checks if the given node is the root of the tree.
func (gt *GeneralTree[T]) IsRoot(n *Node[T]) bool {
	return gt.Root == n
}

// IsEmpty returns true if the tree is empty (doesn't have any node).
func (gt *GeneralTree[T]) IsEmpty() bool {
	return gt.Size == 0
}

// Depth recursively finds the depth of the given node. Depth is based on the root of the tree (root has depth 0).
// NOTE(review): a node not attached to this tree never reaches the root and
// would recurse onto a nil parent — confirm callers only pass attached nodes.
func (gt *GeneralTree[T]) Depth(n *Node[T]) int {
	if gt.IsRoot(n) {
		return 0
	}
	return 1 + gt.Depth(n.Parent)
}

// Height recursively finds the height of the given node. Height is based on the bottom of the tree (a leaf has height 0).
func (gt *GeneralTree[T]) Height(n *Node[T]) (h int) {
	for _, c := range n.Children {
		h = int(math.Max(float64(h), float64(1+gt.Height(c))))
	}
	return
}

// AddRoot constructs a node out of the given data and makes it the root of the tree. It returns
// TreeNotEmptyErr if tree wasn't empty before.
func (gt *GeneralTree[T]) AddRoot(data T) (*Node[T], error) {
	if !gt.IsEmpty() {
		return nil, TreeNotEmptyErr
	}
	gt.Root = &Node[T]{Data: data}
	gt.Size++
	return gt.Root, nil
}

// AddChildTo constructs a node out of the given data and adds it to the children of the given node.
// It returns the newly constructed node.
func (gt *GeneralTree[T]) AddChildTo(parent *Node[T], data T) *Node[T] {
	child := &Node[T]{Data: data, Parent: parent}
	parent.Children = append(parent.Children, child)
	gt.Size++
	return child
}

// Set changes the data of the given node. It returns the previously stored data.
func (gt *GeneralTree[T]) Set(n *Node[T], data T) T {
	val := n.Data
	n.Data = data
	return val
}

// PreOrder returns a slice of the nodes of the tree in pre-order.
func (gt *GeneralTree[T]) PreOrder() (list []*Node[T]) {
	if !gt.IsEmpty() {
		gt.preOrderUtil(gt.Root, &list)
	}
	return
}

// preOrderUtil walks the tree in pre-order recursively and appends visited nodes to the slice.
func (gt *GeneralTree[T]) preOrderUtil(n *Node[T], list *[]*Node[T]) {
	*list = append(*list, n)
	for _, c := range gt.Children(n) {
		gt.preOrderUtil(c, list)
	}
}

// String returns a string representation of the tree in pre-order.
func (gt *GeneralTree[T]) String() string {
	list := gt.PreOrder()
	str := "[ "
	for _, n := range list {
		str += fmt.Sprint(n.Data) + " "
	}
	str += "]"
	return str
}

// PostOrder returns a slice of the nodes of the tree in post-order.
func (gt *GeneralTree[T]) PostOrder() (list []*Node[T]) {
	if !gt.IsEmpty() {
		gt.postOrderUtil(gt.Root, &list)
	}
	return
}

// postOrderUtil walks the tree in post-order recursively and appends visited nodes to the slice.
func (gt *GeneralTree[T]) postOrderUtil(n *Node[T], list *[]*Node[T]) {
	for _, c := range gt.Children(n) {
		gt.postOrderUtil(c, list)
	}
	*list = append(*list, n)
}

// BFS returns a slice of the nodes of the tree in breadth-first order.
func (gt *GeneralTree[T]) BFS() (list []*Node[T]) { if gt.IsEmpty() { return } queue := linkedqueue.New[*Node[T]]() queue.Enqueue(gt.Root) for !queue.IsEmpty() { n, _ := queue.Dequeue() list = append(list, n) for _, c := range gt.Children(n) { queue.Enqueue(c) } } return } // DFS returns a slice of the nodes of the tree in depth-first order. func (gt *GeneralTree[T]) DFS() (list []*Node[T]) { if gt.IsEmpty() { return } stack := linkedstack.New[*Node[T]]() stack.Push(gt.Root) for !stack.IsEmpty() { n, _ := stack.Pop() list = append(list, n) for _, c := range gt.Children(n) { stack.Push(c) } } return } // EulerTour returns a slice of the nodes of the tree visited in the euler tour. func (gt *GeneralTree[T]) EulerTour() (list []*Node[T]) { if !gt.IsEmpty() { gt.eulerTourUtil(gt.Root, &list) } return } // eulerTourUtil walks the tree in euler-tour recursively and appends visited nodes to the slice. func (gt *GeneralTree[T]) eulerTourUtil(n *Node[T], list *[]*Node[T]) { *list = append(*list, n) for _, c := range gt.Children(n) { gt.eulerTourUtil(c, list) } *list = append(*list, n) }
tree/generaltree/general_tree.go
0.810516
0.557123
general_tree.go
starcoder
package fn

// Special math functions.

import (
	"math"
)

const π = float64(math.Pi)
const ln2 = math.Ln2
const lnSqrt2π = 0.918938533204672741780329736406 // log(sqrt(2*pi))
const min64 = math.SmallestNonzeroFloat64         // DBL_MIN
const eps64 = 1.1102230246251565e-16              // DBL_EPSILON
const maxExp = 1024.0                             // DBL_MAX_EXP
const sqrt2 = math.Sqrt2
const lnSqrtπd2 = 0.225791352644727432363097614947 // log(sqrt(pi/2)) M_LN_SQRT_PId2

var nan = math.NaN()
var fZero float64 = float64(0.0)
var fOne float64 = float64(1.0)
var iZero int64 = int64(0)
var iOne int64 = int64(1)
var negInf float64 = math.Inf(-1)
var posInf float64 = math.Inf(+1)

// Functions imported from "math"
var abs func(float64) float64 = math.Abs
var floor func(float64) float64 = math.Floor
var ceil func(float64) float64 = math.Ceil
var log func(float64) float64 = math.Log
var log1p func(float64) float64 = math.Log1p
var exp func(float64) float64 = math.Exp
var sqrt func(float64) float64 = math.Sqrt
var pow func(float64, float64) float64 = math.Pow
var atan func(float64) float64 = math.Atan
var tan func(float64) float64 = math.Tan
var sin func(float64) float64 = math.Sin
var trunc func(float64) float64 = math.Trunc
var erf func(float64) float64 = math.Erf
var erfc func(float64) float64 = math.Erfc
var isNaN func(float64) bool = math.IsNaN
var isInf func(float64, int) bool = math.IsInf
var fmod func(float64, float64) float64 = math.Mod
var Γ = math.Gamma
var GammaF = math.Gamma
var sqrt2pi = math.Sqrt(2 * math.Pi)
var logsqrt2pi = math.Log(math.Sqrt(2 * math.Pi))
var LnΓp = LnGammaP
var LnΓpRatio = LnGammaPRatio

// lanczos_coef holds the Lanczos approximation coefficients (g=7, n=9).
var lanczos_coef []float64 = []float64{
	0.99999999999980993,
	676.5203681218851,
	-1259.1392167224028,
	771.32342877765313,
	-176.61502916214059,
	12.507343278686905,
	-0.13857109526572012,
	9.9843695780195716e-6,
	1.5056327351493116e-7}

// isOdd reports whether k represents an odd integer.
func isOdd(k float64) bool {
	return k != 2*floor(k/2.0)
}

// isInt reports whether x is within 1e-7 of an integer.
func isInt(x float64) bool {
	return abs(x-floor(x+0.5)) <= 1e-7
}

// Round to nearest integer (ties round up, matching the original behavior;
// note this differs from math.Round, which rounds ties away from zero).
func Round(x float64) float64 {
	var i float64
	f := math.Floor(x)
	c := math.Ceil(x)
	if x-f < c-x {
		i = f
	} else {
		i = c
	}
	return i
}

// Arithmetic mean
func ArithMean(data *Vector) float64 {
	n := data.L
	sum := 0.0
	for i := 0; i < n; i++ {
		sum += data.Get(i)
	}
	return sum / float64(n)
}

// Geometric mean
func GeomMean(data *Vector) float64 {
	n := data.L
	sum := 0.0
	for i := 0; i < n; i++ {
		sum += math.Log(data.Get(i))
	}
	return math.Exp(sum / float64(n))
}

// Harmonic mean
func HarmonicMean(data *Vector) float64 {
	n := data.L
	sum := 0.0
	for i := 0; i < n; i++ {
		sum += 1.0 / data.Get(i)
	}
	return float64(n) / sum
}

// Generalized mean
func GenMean(data *Vector, p float64) float64 {
	n := data.L
	sum := 0.0
	for i := 0; i < n; i++ {
		sum += math.Pow(data.Get(i), p)
	}
	return math.Pow(sum/float64(n), 1/p)
}

// Bn returns the n-th Bernoulli number using the Akiyama–Tanigawa algorithm
// (this algorithm yields the convention B1 = +1/2).
func Bn(n int64) float64 {
	var m int64
	// The algorithm fills a[0..n] inclusive, so n+1 slots are required.
	// The previous make([]float64, n) made a[m] go out of range at m == n,
	// panicking on every call (including n == 0).
	a := make([]float64, n+1)
	for m = 0; m <= n; m++ {
		a[m] = 1 / float64(m+1)
		for j := m; j >= 1; j-- {
			a[j-1] = float64(j) * (a[j-1] - a[j])
		}
	}
	return a[0] // (which is Bn)
}

// H returns the generalized harmonic number of order n of m.
// NOTE(review): this sums i^m as written (not i^-m); callers appear to pass
// negative exponents — confirm intended sign convention.
func H(n int64, m float64) float64 {
	var i int64
	h := 0.0
	for i = 1; i <= n; i++ {
		h += math.Pow(float64(i), m)
	}
	return h
}

// H2 returns the generalized harmonic number: sum over i of (i+q)^-s.
func H2(n int64, q, s float64) float64 {
	var i int64
	h := 0.0
	for i = 1; i <= n; i++ {
		h += math.Pow((float64(i) + q), -s)
	}
	return h
}
go/src/code.google.com/p/go-fn/fn/fn.go
0.638835
0.488283
fn.go
starcoder
package main

// NOTE(review): this file mirrors SourceMod's Protobuf natives as Go-style
// declarations. The functions have no bodies and use non-Go type names
// (float, char, Handle, Vec3) declared elsewhere; it reads as a generated
// API stub listing rather than compilable standard Go — confirm intended use.

// PB_FIELD_NOT_REPEATED is the index value selecting the non-repeated form
// of a field accessor.
const PB_FIELD_NOT_REPEATED = -1

// Protobuf is a handle to a protobuf message.
type Protobuf Handle

// Read accessors: index selects an element of a repeated field
// (presumably PB_FIELD_NOT_REPEATED for scalar fields — confirm against
// the SourceMod Protobuf documentation).
func (Protobuf) ReadInt(field string, index int) int
func (Protobuf) ReadInt64(field string, value *[2]int, index int)
func (Protobuf) ReadFloat(field string, index int) float
func (Protobuf) ReadBool(field string, index int) bool
func (Protobuf) ReadString(field string, buffer []char, maxlen int, index int)
func (Protobuf) ReadColor(field string, buffer *[4]int, index int)
func (Protobuf) ReadAngle(field string, buffer *Vec3, index int)
func (Protobuf) ReadVector(field string, buffer *Vec3, index int)
func (Protobuf) ReadVector2D(field string, buffer *[2]float, index int)

func (Protobuf) GetRepeatedFieldCount(field string) int
func (Protobuf) HasField(field string) bool

// Set accessors overwrite an existing value.
func (Protobuf) SetInt(field string, value int, index int)
func (Protobuf) SetInt64(field string, value [2]int, index int)
func (Protobuf) SetFloat(field string, value float, index int)
func (Protobuf) SetBool(field string, value bool, index int)
func (Protobuf) SetString(field, value string, index int)
func (Protobuf) SetColor(field string, color [4]int, index int)
func (Protobuf) SetAngle(field string, vec Vec3, index int)
func (Protobuf) SetVector(field string, vec Vec3, index int)
func (Protobuf) SetVector2D(field string, vec [2]float, index int)

// Add accessors append a new element to a repeated field.
func (Protobuf) AddInt(field string, value int)
func (Protobuf) AddInt64(field string, value [2]int)
func (Protobuf) AddFloat(field string, value float)
func (Protobuf) AddBool(field string, value bool)
func (Protobuf) AddString(field, value string)
func (Protobuf) AddColor(field string, color [4]int)
func (Protobuf) AddAngle(field string, vec Vec3)
func (Protobuf) AddVector(field string, vec Vec3)
func (Protobuf) AddVector2D(field string, vec [2]float)

func (Protobuf) RemoveRepeatedFieldValue(field string, index int)

// Sub-message accessors return a new Protobuf handle.
func (Protobuf) ReadMessage(field string) Protobuf
func (Protobuf) ReadRepeatedMessage(field string, index int) Protobuf
func (Protobuf) AddMessage(field string) Protobuf
func PbReadInt(pb Protobuf, field string, index int) int func PbReadFloat(pb Protobuf, field string, index int) float func PbReadBool(pb Protobuf, field string, index int) bool func PbReadString(pb Protobuf, field string, buffer []char, maxlen int, index int) func PbReadColor(pb Protobuf, field string, buffer *[4]int, index int) func PbReadAngle(pb Protobuf, field string, buffer *Vec3, index int) func PbReadVector(pb Protobuf, field string, buffer *Vec3, index int) func PbReadVector2D(pb Protobuf, field string, buffer *[2]float, index int) func PbGetRepeatedFieldCount(pb Protobuf, field string) int func PbHasField(pb Protobuf, field string) bool func PbSetInt(pb Protobuf, field string, value int, index int) func PbSetFloat(pb Protobuf, field string, value float, index int) func PbSetBool(pb Protobuf, field string, value bool, index int) func PbSetString(field, value string, index int) func PbSetColor(pb Protobuf, field string, color [4]int, index int) func PbSetAngle(pb Protobuf, field string, vec Vec3, index int) func PbSetVector(pb Protobuf, field string, vec Vec3, index int) func PbSetVector2D(pb Protobuf, field string, vec [2]float, index int) func PbAddInt(pb Protobuf, field string, value int) func PbAddFloat(pb Protobuf, field string, value float) func PbAddBool(pb Protobuf, field string, value bool) func PbAddString(field, value string) func PbAddColor(pb Protobuf, field string, color [4]int) func PbAddAngle(pb Protobuf, field string, vec Vec3) func PbAddVector(pb Protobuf, field string, vec Vec3) func PbAddVector2D(pb Protobuf, field string, vec [2]float) func PbRemoveRepeatedFieldValue(pb Protobuf, field string, index int) func PbReadMessage(pb Protobuf, field string) Protobuf func PbReadRepeatedMessage(pb Protobuf, field string, index int) Protobuf func PbAddMessage(pb Protobuf, field string) Protobuf
sourcemod/protobuf.go
0.855157
0.65972
protobuf.go
starcoder
package api import ( . "github.com/gocircuit/circuit/gocircuit.org/render" ) func RenderProcessPage() string { figs := A{ "FigMkProc": RenderFigurePngSvg("Process elements execute OS processes on behalf of the user.", "mkproc", "600px"), } return RenderHtml("Using processes", Render(processBody, figs)) } const processBody = ` <h2>Using processes</h2> <p>You can start an OS process on any host in your cluster by creating a new <em>process element</em> at an anchor of your choosing that is a descendant of the host's server anchor. The created process element becomes your interface to the underlying OS process. <h3>Creating a process</h3> <p>Suppose the variable <code>anchor</code> holds an <code>Anchor</code> object, corresponding to a path in the anchor hierarchy that has no element attached to it. For instance, say we obtained <code>anchor</code> like this: <pre> anchor := root.Walk([]string{"Xe2ac4c8c83976ce6", "job", "demo"}) </pre> <p>This anchor corresponds to the path <code>/Xe2ac4c8c83976ce6/job/demo</code>. (Read more on <a href="api-anchor.html">navigating anchors here</a>.) <p>To create a new process element and attach it to <code>anchor</code>, we use the anchor's <code>MakeProc</code> method: <pre> MakeProc(cmd Cmd) (Proc, error) </pre> <p><code>MakeProc</code> will start a new process on the host <code>/Xe2ac4c8c83976ce6</code>, as specified by the command parameter <code>cmd</code>. If successful, it will create a corresponding process element and attach it to the anchor. <code>MakeProc</code> returns the newly created process element (of type <code>Proc</code>) as well as an <a href="api.html#errors">application error</a> (of type <code>error</code>), or it panics if a <a href="api.html#errors">system error</a> occurs. <p>An application error can occur in one of two cases. 
Either the anchor already has another element attached to it, or the process execution was rejected by the host OS (due to a missing binary or insufficient permissions, for example). <p><code>MakeProc</code> never blocks. <p>The command parameter, of type <code>Cmd</code>, specifies the standard POSIX-level execution parameters and an additional parameter called <code>Scrub</code>: <pre> type Cmd struct { Env []string Dir string Path string Args []string Scrub bool } </pre> <p>If <code>Scrub</code> is set, the process element will automatically be detached from the anchor and discarded, as soon as the underlying OS process exits. If <code>Scrub</code> is not set, the process element will remain attached to the anchor even after the underlying OS process dies. The latter regime is useful when one wants to start a job and return at a later time to check if the job has already completed and what was its exit status. Furthermore, removing process elements explicitly (rather than automatically) is a way of explicit accounting on the user's side. Thus this regime is particularly well suited for applications that control circuit processes programmatically (as opposed to manually). <p>Regardless of the setting of the <code>Scrub</code> parameter, the user can use the <code>Scrub</code> method to discard the process element at any point: <pre> Scrub() </pre> <p>A call to <code>Scrub</code> will detach the process element from its anchor and discard it, thereby freeing the anchor to attach other elements. If the underlying OS process is still running, ‘scrubbing’ will not terminate the process. (If OS process termination is desired, the user must explicitly send a kill signal to the process, using a <code>Signal</code> which is described later.) 
<h4>Example</h4> <p>For instance, the following code executes the GNU list command: <pre> proc, err := a.MakeProc( cli.Cmd{ Env: []string{"TERM=xterm"}, Dir: "/", Path: "/bin/ls", Args: []string{"-l", "/"}, Scrub: true, }, ) </pre> <p>The following picture tries to illustrate the relationship between the process element and the underlying OS process itself. {{.FigMkProc}} <h3>Controlling the standard file descriptors of a process</h3> <p>After its invocation, <code>MakeProc</code> returns immediately, while the underlying OS process is executing on the host machine. <p>After a successful execution the user is obligated, by the POSIX standard, to take care of the standard input, output and error streams of the underlying process. (For instance, if the standard input is not written to or closed, or if the output is not read from, some programs will pause in waiting.) <p>The standard streams of the executed process can be retrieved with the following methods of the process element: <pre> Stdin() io.WriteCloser Stdout() io.ReadCloser Stderr() io.ReadCloser </pre> <p>It is allowed to close the standard output and error at any point into the stream. This will result in discarding all remaining data in the stream, without blocking the underlying process. <p>Eventually, the user is responsible for closing all standard streams otherwise the underlying process will block and not exit. 
<h3>Sending signals and killing processes</h3> <p>You can send a POSIX signal to the underlying process at any point (asynchronously) using: <pre> Signal(sig string) error </pre> <p>The <code>sig</code> string must be one of the following recognized signal names: <code>ABRT</code>, <code>ALRM</code>, <code>BUS</code>, <code>CHLD</code>, <code>CONT</code>, <code>FPE</code>, <code>HUP</code>, <code>ILL</code>, <code>INT</code>, <code>IO</code>, <code>IOT</code>, <code>KILL</code>, <code>PIPE</code>, <code>PROF</code>, <code>QUIT</code>, <code>SEGV</code>, <code>STOP</code>, <code>SYS</code>, <code>TERM</code>, <code>TRAP</code>, <code>TSTP</code>, <code>TTIN</code>, <code>TTOU</code>, <code>URG</code>, <code>USR1</code>, <code>USR2</code>, <code>VTALRM</code>, <code>WINCH</code>, <code>XCPU</code>, <code>XFSZ</code>. <h3>Querying the status of a process asynchronously</h3> <p>You can query the status of a process asynchronously, using: <pre> Peek() ProcStat </pre> <p>The returned structure includes the command that started the process, a phase string describing the state of the process and, in the event that the process has exited, an exit error value or <code>nil</code> on successful exit. <pre> type ProcStat struct { Cmd Cmd Exit error Phase string } </pre> <p>The phase string takes on one of the following values: <code>running</code>, <code>exited</code>, <code>stopped</code>, <code>signaled</code>, <code>continued</code>. <h3>Waiting until a process exits</h3> <p>Finally, you can call <code>Wait</code> asynchronously to block until the process ends: <pre> Wait() (ProcStat, error) </pre> <p>If you call <code>Wait</code> before the process has exited, the invocation will block until exit occurs. Otherwise, it will return immediately. In both cases, a process status structure (described earlier) is returned, which captures the exit state (successful or not) of the underlying OS process. 
<p><code>Wait</code> can return an application error only in the event that it is interrupted by a concurring call to <code>Scrub</code>. `
gocircuit.org/api/process.go
0.729809
0.476762
process.go
starcoder
package eedid import ( "bytes" "encoding/binary" "fmt" ) type DisplayID struct { Version byte VariableDataBlockLength byte DisplayPrimaryUsecase DisplayPrimaryUsecase NumberOfExtensions byte DataBlocks []byte DetailedTimingDataBlock DetailedTimingDataBlock } type DisplayPrimaryUsecase byte const ( ExtensionSection DisplayPrimaryUsecase = 0x00 TestStructure DisplayPrimaryUsecase = 0x01 Generic DisplayPrimaryUsecase = 0x02 Television DisplayPrimaryUsecase = 0x03 Productivity DisplayPrimaryUsecase = 0x04 Gaming DisplayPrimaryUsecase = 0x05 Presentation DisplayPrimaryUsecase = 0x06 VirtualReality DisplayPrimaryUsecase = 0x07 AugmentedReality DisplayPrimaryUsecase = 0x08 ) func (dpu DisplayPrimaryUsecase) String() string { if dpu > 8 { return "Unknown" } displayPrimaryUsecaseStrings := [...]string{ "Extension section", "Test structure", "Generic", "Television", "Productivity", "Gaming", "Presentation", "Virtual reality", "Augmented reality", } return displayPrimaryUsecaseStrings[dpu] } func (s DisplayPrimaryUsecase) MarshalJSON() ([]byte, error) { buffer := bytes.NewBufferString(`"`) buffer.WriteString(s.String()) buffer.WriteString(`"`) return buffer.Bytes(), nil } type DataBlockType byte const ( ProductIdentification DataBlockType = 0x00 DisplayParameters DataBlockType = 0x01 ColorCharacteristics DataBlockType = 0x02 TypeITiming DataBlockType = 0x03 TypeIITiming DataBlockType = 0x04 TypeIIITiming DataBlockType = 0x05 TypeIVTiming DataBlockType = 0x06 VESATimingStandard DataBlockType = 0x07 CEATimingStandard DataBlockType = 0x08 VideoTimingRange DataBlockType = 0x09 ProductSerialNumber DataBlockType = 0x0A GeneralPurposeASCIIString DataBlockType = 0x0B DisplayDeviceData DataBlockType = 0x0C InterfacePowerSequencing DataBlockType = 0x0D TransferCharacteristics DataBlockType = 0x0E DisplayInterfaceData DataBlockType = 0x0F StereoDisplayInterface DataBlockType = 0x10 TypeVTiming DataBlockType = 0x11 TypeVITiming DataBlockType = 0x13 ProductIdentification2 DataBlockType = 
0x20 DisplayParameters2 DataBlockType = 0x21 TypeVIIDetailedTiming DataBlockType = 0x22 TypeVIIIEnumeratedTiming DataBlockType = 0x23 TypeIXFormulaTiming DataBlockType = 0x24 DynamicVideoTimingRangeLimits DataBlockType = 0x25 DisplayInterfaceFeatures DataBlockType = 0x26 StereoDisplayInterface2 DataBlockType = 0x27 TiledDisplayTopology DataBlockType = 0x28 ContainerID DataBlockType = 0x29 ) func (dbt DataBlockType) String() string { dataBlockTypeStrings := [...]string{ "Product Identification", // 0x00 "Display Parameters", // 0x01 "Color Characteristics", // 0x02 "Type I Timing - Detailed", // 0x03 "Type II Timing - Detailed", // 0x04 "Type III Timing - Short", // 0x05 "Type IV Timing - DMT ID Code", // 0x06 "VESA Timing Standard", // 0x07 "CEA Timing Standard", // 0x08 "Video Timing Range", // 0x09 "Product Serial Number", // 0x0A "General Purpose ASCII String", // 0x0B "Display Device Data", // 0x0C "Interface Power Sequencing", // 0x0D "Transfer Characteristics", // 0x0E "Display Interface Data", // 0x0F "Stereo Display Interface", // 0x10 "Type V Timing - Short", // 0x11 "UNKNOWN", // 0x12 "Type VI Timing - Detailed", // 0x13 "UNKNOWN", // 0x14 "UNKNOWN", // 0x15 "UNKNOWN", // 0x16 "UNKNOWN", // 0x17 "UNKNOWN", // 0x18 "UNKNOWN", // 0x19 "UNKNOWN", // 0x1A "UNKNOWN", // 0x1B "UNKNOWN", // 0x1C "UNKNOWN", // 0x1D "UNKNOWN", // 0x1E "UNKNOWN", // 0x1F "Product Identification", // 0x20 "Display Parameters", // 0x21 "Type VII - Detailed Timing", // 0x22 "Type VIII - Enumerated Timing Code", // 0x23 "Type IX - Formula-based Timing", // 0x24 "Dynamic Video Timing Range Limits", // 0x25 "Display Interface Features", // 0x26 "Stereo Display Interface", // 0x27 "Tiled Display Topology", // 0x28 "ContainerID", // 0x29 } switch dbt { case 0xD0: return "Interface Power Sequencing" case 0x7F: return "Vendor specific" case 0x81: return "CTA DisplayID" default: if dbt > 0x29 { return "UNKNOWN" } else { return dataBlockTypeStrings[dbt] } } } func (s DataBlockType) 
MarshalJSON() ([]byte, error) { buffer := bytes.NewBufferString(`"`) buffer.WriteString(s.String()) buffer.WriteString(`"`) return buffer.Bytes(), nil } type DetailedTimingDataBlock struct { DataBlockType DataBlockType `json:",test"` BlockRevision byte NumberOfBytes byte DetailedTimings []DetailedTimingDescriptor } //https://github.com/pkorobeinikov/golang-example/blob/master/math/gcd.go // GCDEuclidean calculates GCD by Euclidian algorithm. func GCDEuclidean(a, b int) int { for a != b { if a > b { a -= b } else { b -= a } } return a } func DecodeDTDVII(dtdBytes []byte) DetailedTimingDescriptor { dtd := new(DetailedTimingDescriptor) dtd.PixelClockKHz = (uint32(dtdBytes[2]) << 16) | (uint32(dtdBytes[1]) << 8) | uint32(dtdBytes[0]) dtd.HorizontalActive = binary.LittleEndian.Uint16(dtdBytes[4:]) dtd.HorizontalBlanking = binary.LittleEndian.Uint16(dtdBytes[6:]) dtd.HorizontalFrontPorch = binary.LittleEndian.Uint16(dtdBytes[8:]) & 0x7FFF dtd.HorizontalSyncPolarity = binary.LittleEndian.Uint16(dtdBytes[8:])&0x8000 > 1 dtd.HorizontalSyncPulseWidth = binary.LittleEndian.Uint16(dtdBytes[10:]) dtd.VerticalActive = binary.LittleEndian.Uint16(dtdBytes[12:]) dtd.VerticalBlanking = binary.LittleEndian.Uint16(dtdBytes[14:]) dtd.VerticalFrontPorch = binary.LittleEndian.Uint16(dtdBytes[16:]) & 0x7FFF dtd.VerticalSyncPolarity = binary.LittleEndian.Uint16(dtdBytes[16:])&0x8000 > 1 dtd.VerticalSyncPulseWidth = binary.LittleEndian.Uint16(dtdBytes[18:]) // Do this last because we may need hAct and vAct for calculation switch dtdBytes[3] & 0x7 { case 0: dtd.AspectRatio = "1:1" case 1: dtd.AspectRatio = "5:4" case 2: dtd.AspectRatio = "4:3" case 3: dtd.AspectRatio = "15:9" case 4: dtd.AspectRatio = "16:9" case 5: dtd.AspectRatio = "16:10" case 6: dtd.AspectRatio = "64:27" case 7: dtd.AspectRatio = "256:135" case 8: gcd := GCDEuclidean(int(dtd.HorizontalActive+1), int(dtd.VerticalActive+1)) hor := int(dtd.HorizontalActive+1) / gcd vert := int(dtd.VerticalActive+1) / gcd dtd.AspectRatio = 
string(hor) + ":" + string(vert) default: dtd.AspectRatio = "RESERVED" } // StereoMode is wrong for displayID // TODO: fix this dtd.Stereo = StereoMode(dtdBytes[3] & 0x61) dtd.Interlaced = (dtdBytes[3] & 0x10) > 0 return *dtd } func (dtd *DetailedTimingDescriptor) EncodeDTDVII() [18]byte { // TODO: Finish this encode var returnBytes [18]byte returnBytes[0] = byte(dtd.PixelClockKHz & 0xFF) returnBytes[1] = byte(dtd.PixelClockKHz >> 8) returnBytes[2] = byte(dtd.HorizontalActive & 0xFF) returnBytes[3] = byte(dtd.HorizontalBlanking & 0xFF) returnBytes[5] = byte(dtd.VerticalActive & 0xFF) returnBytes[6] = byte(dtd.VerticalBlanking & 0xFF) returnBytes[4] = byte(dtd.HorizontalBlanking&0xF000>>12 | dtd.HorizontalActive&0x0F00>>8) returnBytes[7] = byte(dtd.VerticalBlanking&0xF000>>12 | dtd.VerticalActive&0x0F00>>8) returnBytes[8] = byte(dtd.HorizontalFrontPorch) returnBytes[9] = byte(dtd.HorizontalSyncPulseWidth) returnBytes[10] = byte(dtd.VerticalSyncPulseWidth&0xF) | byte((dtd.VerticalFrontPorch&0xF)<<4) returnBytes[11] = byte(dtd.VerticalSyncPulseWidth & 0x30 >> 4) returnBytes[11] = returnBytes[11] | byte(dtd.VerticalFrontPorch&0x30)>>2 returnBytes[11] = returnBytes[11] | byte(dtd.HorizontalSyncPulseWidth&0x30) returnBytes[11] = returnBytes[11] | byte(dtd.HorizontalFrontPorch&0x30)<<2 returnBytes[12] = byte(dtd.HorizontalImageSize) returnBytes[13] = byte(dtd.VerticalImageSize) returnBytes[14] = byte(dtd.HorizontalImageSize>>4) | byte(dtd.VerticalImageSize>>8) returnBytes[15] = dtd.HorizontalBorder returnBytes[16] = dtd.VerticalBorder if dtd.Interlaced { returnBytes[17] = returnBytes[17] | 0x80 } returnBytes[17] |= byte(dtd.Stereo) // force Digital sync., separate if dtd.VerticalSyncPolarity == SYNC_ON_POSITIVE { returnBytes[17] |= 0x4 } if dtd.HorizontalSyncPolarity == SYNC_ON_POSITIVE { returnBytes[17] |= 0x2 } return dtd.data } func GetDetailedTimingDataBlock(dtdblockBytes []byte) DetailedTimingDataBlock { dttdb := new(DetailedTimingDataBlock) // Header 
dttdb.DataBlockType = DataBlockType(dtdblockBytes[0]) dttdb.BlockRevision = dtdblockBytes[1] & 0x7 dttdb.NumberOfBytes = dtdblockBytes[2] // timing blocks for i := byte(3); i < dttdb.NumberOfBytes; i += 20 { // each block is 20 dtd := DecodeDTDVII(dtdblockBytes[i : i+20]) dttdb.DetailedTimings = append(dttdb.DetailedTimings, dtd) } return *dttdb } func DecodeDisplayID(didBytes []byte) DisplayID { did := new(DisplayID) did.Version = didBytes[1] did.VariableDataBlockLength = didBytes[2] did.DisplayPrimaryUsecase = DisplayPrimaryUsecase(didBytes[3]) did.NumberOfExtensions = didBytes[4] for i := byte(5); i < did.VariableDataBlockLength; i++ { blockType := DataBlockType(didBytes[i]) // Catch data blocks without number of bytes set numBytes := didBytes[i+2] if numBytes < 1 { fmt.Println("Block does not have byte count", blockType) } else { fmt.Println(blockType) } switch blockType { case ProductIdentification: // fmt.Println(blockType) case DisplayInterfaceFeatures: iffBlock := GetDisplayInterfaceFeatures(didBytes[i : i+3+numBytes]) fmt.Println(iffBlock) case TypeITiming: dtviitb := GetDetailedTimingDataBlock(didBytes[i : i+3+numBytes]) did.DetailedTimingDataBlock = dtviitb i += numBytes + 2 } } return *did } type DisplayInterfaceFeaturesBlock struct { DataBlockType DataBlockType BlockRevision byte NumberOfBytes byte RGBBitDepth byte YCbCr444BitDepth byte YCbCr422BitDepth byte YCbCr420BitDepth byte YCbCr420MinPixRate byte AudioCapability byte ColourSpace byte EOTFBytes byte } func GetDisplayInterfaceFeatures(iffBytes []byte) DisplayInterfaceFeaturesBlock { iffdb := new(DisplayInterfaceFeaturesBlock) // Header iffdb.DataBlockType = DataBlockType(iffBytes[0]) iffdb.BlockRevision = iffBytes[1] & 0x7 iffdb.NumberOfBytes = iffBytes[2] // 9 bytes iffdb.RGBBitDepth = iffBytes[3] iffdb.YCbCr444BitDepth = iffBytes[4] iffdb.YCbCr422BitDepth = iffBytes[5] iffdb.YCbCr420BitDepth = iffBytes[6] iffdb.YCbCr420MinPixRate = iffBytes[7] iffdb.AudioCapability = iffBytes[8] 
iffdb.ColourSpace = iffBytes[9] iffdb.EOTFBytes = iffBytes[11] if int(iffdb.EOTFBytes) > len(iffBytes)+3 { // Parse EOTF here } return *iffdb }
pkg/eedid/DisplayID.go
0.567937
0.400398
DisplayID.go
starcoder
package mathg import "math" type Vec2 struct { X float64 Y float64 } func (v *Vec2) IsZero() bool { return math.Abs(v.X) < epsilon && math.Abs(v.Y) < epsilon } func (v *Vec2) IsEqual(v2 *Vec2) bool { return math.Abs(v.X-v2.X) < epsilon && math.Abs(v.Y-v2.Y) < epsilon } func (v *Vec2) Zero() { v.X = 0.0 v.Y = 0.0 } func (v *Vec2) One() { v.X = 1.0 v.Y = 1.0 } func (v *Vec2) Add(v2 *Vec2) *Vec2 { return &Vec2{v.X + v2.X, v.Y + v2.Y} } func (v *Vec2) AddScalar(scalar float64) *Vec2 { return &Vec2{v.X + scalar, v.Y + scalar} } func (v *Vec2) Subtract(v2 *Vec2) *Vec2 { return &Vec2{v.X - v2.X, v.Y - v2.Y} } func (v *Vec2) SubtractScalar(scalar float64) *Vec2 { return &Vec2{v.X - scalar, v.Y - scalar} } func (v *Vec2) Multiply(v2 *Vec2) *Vec2 { return &Vec2{v.X * v2.X, v.Y * v2.Y} } func (v *Vec2) MultiplyScalar(scalar float64) *Vec2 { return &Vec2{v.X * scalar, v.Y * scalar} } func (v *Vec2) MultiplyMat2(m *Mat2) *Vec2 { return &Vec2{m.M11*v.X + m.M12*v.Y, m.M21*v.X + m.M22*v.Y} } func (v *Vec2) Divide(v1 *Vec2) *Vec2 { return &Vec2{v.X / v1.X, v.Y / v1.Y} } func (v *Vec2) DivideScalar(scalar float64) *Vec2 { return &Vec2{v.X / scalar, v.Y / scalar} } func (v *Vec2) Snap(v1 *Vec2) *Vec2 { return &Vec2{math.Floor(v.X/v1.X) * v1.X, math.Floor(v.Y/v1.Y) * v1.Y} } func (v *Vec2) Snapf(f float64) *Vec2 { return &Vec2{math.Floor(v.X/f) * f, math.Floor(v.Y/f) * f} } func (v *Vec2) Negative() *Vec2 { return &Vec2{-1 * v.X, -1 * v.Y} } func (v *Vec2) Magnitude() float64 { return math.Sqrt(math.Pow(v.X, 2) + math.Pow(v.Y, 2)) } func (v *Vec2) Dot(v2 *Vec2) float64 { return v.X*v2.X + v.Y*v2.Y } func (v *Vec2) Cross(v2 *Vec2) float64 { return (v.X * v2.Y) - (v.Y * v2.X) } func (v *Vec2) Abs() *Vec2 { return &Vec2{math.Abs(v.X), math.Abs(v.Y)} } func (v *Vec2) Floor() *Vec2 { return &Vec2{math.Floor(v.X), math.Floor(v.Y)} } func (v *Vec2) Ceil() *Vec2 { return &Vec2{math.Ceil(v.X), math.Ceil(v.Y)} } func (v *Vec2) Round() *Vec2 { return &Vec2{math.Round(v.X), math.Round(v.Y)} } 
func (v *Vec2) Max(v2 *Vec2) *Vec2 { return &Vec2{math.Max(v.X, v2.X), math.Max(v.Y, v2.Y)} } func (v *Vec2) Min(v2 *Vec2) *Vec2 { return &Vec2{math.Min(v.X, v2.X), math.Min(v.Y, v2.Y)} } func (v *Vec2) Normalize() *Vec2 { m := v.Magnitude() return &Vec2{v.X / m, v.Y / m} } func (v *Vec2) Clamp(min *Vec2, max *Vec2) *Vec2 { return &Vec2{Clamp(v.X, min.X, max.X), Clamp(v.Y, min.Y, max.Y)} } func (v *Vec2) Project(v1 *Vec2) *Vec2 { d := v1.Dot(v1) s := v.Dot(v1) / d return &Vec2{v1.X * s, v1.Y * s} } func (v *Vec2) Slide(normal *Vec2) *Vec2 { d := v.Dot(normal) return &Vec2{v.X - normal.X*d, v.Y - normal.Y*d} } func (v *Vec2) Reflect(normal *Vec2) *Vec2 { d := 2. * v.Dot(normal) return &Vec2{normal.X*d - v.X, normal.Y*d - v.Y} } func (v *Vec2) Tangent() *Vec2 { return &Vec2{v.Y, -1 * v.X} } func (v *Vec2) Rotate(radians float64) *Vec2 { cs := math.Cos(radians) sn := math.Sin(radians) return &Vec2{v.X*cs - v.Y*sn, v.X*sn + v.Y*cs} } func (v *Vec2) Lerp(v1 *Vec2, percent float64) *Vec2 { return &Vec2{v.X + (v1.X-v.X)*percent, v.Y + (v1.Y-v.Y)*percent} } func (v *Vec2) Bezier3(v1 *Vec2, v2 *Vec2, percent float64) *Vec2 { t0 := v.Lerp(v1, percent) t1 := v1.Lerp(v2, percent) return t0.Lerp(t1, percent) } func (v *Vec2) Bezier4(v1 *Vec2, v2 *Vec2, v3 *Vec2, percent float64) *Vec2 { t0 := v.Lerp(v1, percent) t1 := v1.Lerp(v2, percent) t2 := v2.Lerp(v3, percent) t3 := t0.Lerp(t1, percent) t4 := t1.Lerp(t2, percent) return t3.Lerp(t4, percent) } func (v *Vec2) Angle() float64 { return math.Atan2(v.Y, v.X) } func (v *Vec2) Distance(v1 *Vec2) float64 { return math.Sqrt(math.Pow((v.X-v1.X), 2) + math.Pow((v.Y-v1.Y), 2)) } func (v *Vec2) LinearIndependent(v1 *Vec2) bool { return (v.X*v1.Y - v1.X*v.Y) != 0 }
vec2.go
0.91686
0.480966
vec2.go
starcoder
package goprometheus import ( metricCollector "github.com/afex/hystrix-go/hystrix/metric_collector" "github.com/gin-gonic/gin" p "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promhttp" "net/http" ) type IHystrixPrometheus interface { Middleware(name string) metricCollector.MetricCollector } type IGinPrometheus interface { Middleware(c *gin.Context) GetEngine() *gin.Engine } type IGoPrometheus interface { AddSummaryVector(name, desc string, labels []string) AddCounterVector(name, desc string, labels []string) AddGaugeVector(name, desc string, labels []string) AddHistogramVector(name, desc string, labels []string) } type GoPrometheus struct { Vectors *Vectors gin IGinPrometheus hystrix IHystrixPrometheus } type Vectors struct { SummaryVectors map[string]*SummaryVector CounterVectors map[string]*CounterVector GaugeVectors map[string]*GaugeVector HistogramVectors map[string]*HistogramVector } type SummaryVector struct { Vector *p.SummaryVec Labels []string } type CounterVector struct { Vector *p.CounterVec Labels []string } type GaugeVector struct { Vector *p.GaugeVec Labels []string } type HistogramVector struct { Vector *p.HistogramVec Labels []string } func New() *GoPrometheus { vectors := &Vectors{ SummaryVectors: make(map[string]*SummaryVector), CounterVectors: make(map[string]*CounterVector), GaugeVectors: make(map[string]*GaugeVector), HistogramVectors: make(map[string]*HistogramVector), } return &GoPrometheus{ Vectors: vectors, } } func (gp *GoPrometheus) UseGin(gin IGinPrometheus) { gp.gin = gin } func (gp *GoPrometheus) UseHystrix(hystrix IHystrixPrometheus) { gp.hystrix = hystrix } func (gp *GoPrometheus) AddSummaryVector(name, desc string, labels []string) { if _, isPresent := gp.Vectors.SummaryVectors[name]; isPresent { return } gp.Vectors.SummaryVectors[name] = &SummaryVector{ Vector: promauto.NewSummaryVec(p.SummaryOpts{ Name: name, Help: desc, }, 
labels), Labels: labels, } } func (gp *GoPrometheus) AddCounterVector(name, desc string, labels []string) { if _, isPresent := gp.Vectors.CounterVectors[name]; isPresent { return } gp.Vectors.CounterVectors[name] = &CounterVector{ Vector: promauto.NewCounterVec(p.CounterOpts{ Name: name, Help: desc, }, labels), Labels: labels, } } func (gp *GoPrometheus) AddGaugeVector(name, desc string, labels []string) { if _, isPresent := gp.Vectors.GaugeVectors[name]; isPresent { return } gp.Vectors.GaugeVectors[name] = &GaugeVector{ Vector: promauto.NewGaugeVec(p.GaugeOpts{ Name: name, Help: desc, }, labels), Labels: labels, } } func (gp *GoPrometheus) AddHistogramVector(name, desc string, labels []string) { if _, isPresent := gp.Vectors.HistogramVectors[name]; isPresent { return } gp.Vectors.HistogramVectors[name] = &HistogramVector{ Vector: promauto.NewHistogramVec(p.HistogramOpts{ Name: name, Help: desc, }, labels), Labels: labels, } } func (v *SummaryVector) AddMetric(value float64, labelValues ...string) { v.Vector.WithLabelValues(labelValues...).Observe(value) } func (v *CounterVector) AddMetric(value float64, labelValues ...string) { v.Vector.WithLabelValues(labelValues...).Add(value) } func (v *GaugeVector) AddMetric(value float64, labelValues ...string) { v.Vector.WithLabelValues(labelValues...).Add(value) } func (v *HistogramVector) AddMetric(value float64, labelValues ...string) { v.Vector.WithLabelValues(labelValues...).Observe(value) } func (gp *GoPrometheus) Run() { http.Handle("/metrics", promhttp.Handler()) if gp.hystrix != nil { metricCollector.Registry.Register(gp.hystrix.Middleware) } if gp.gin != nil { gp.gin.GetEngine().Use(gp.gin.Middleware) } }
go-prometheus.go
0.689619
0.442275
go-prometheus.go
starcoder
package dei import ( "math" "math/rand" "sort" ) type BRA struct { alpha, beta, betastart, betastep float64 // Parameters of the algorithm maxiter int // Maximum number of iterations solution Solution value, delay int } // Constructor func BiasedRandomised (alpha, betastart, betastep float64, maxiter... int) *BRA{ var iter int = 3000 if len(maxiter) > 0 { iter = maxiter[0] } return &BRA{alpha, betastart, betastart,betastep, iter, nil, 0, 0} } // Get the solution found func (self *BRA) getSolution () (Solution, int, int) { return self.solution, self.value, self.delay } // A Biased randomised selection based on a quasi-geometric function func (self *BRA) biased_random_selection (size int) int { return int(math.Log(rand.Float64()) / math.Log(1 - self.alpha)) % size } // Build a new solution func (self *BRA) buildSolution (path []*Node, dists [][]int, value int, cnode *Node) (Solution, int, int){ // Init the part of the solution that remains the same index := int((1 - self.beta) * float64(len(path))) sol := path[:index] val, del := evaluate(sol, dists, value, cnode) // Init options and current node var currentNode Node = *cnode var newNode Node options := path[index:] // Execute a greedy construction of the new solution for len(options) > 0 { // Sort options for increasing cost (i.e., arrival + delay) sort.Slice(options, func (i int, j int) bool { return options[i].Cost(currentNode, dists, val) < options[j].Cost(currentNode, dists, val) }) // Pick a new node using a quasi-geometric pos := self.biased_random_selection(len(options)) newNode = *options[pos] // Pick the best and make it current node val = int(math.Max(float64(val + dists[currentNode.id][newNode.id]), float64(newNode.open))) del += int(math.Max(0, float64(val - newNode.close))) sol = append(sol, options[pos]) currentNode = newNode options = append(options[:pos], options[pos + 1:]...) 
} // Return the new solution return sol, val, del } // Execution func (self *BRA) execute (path []*Node, dists [][]int, value int, cnode *Node) { // Init starting solution sol, val, del := self.buildSolution(path, dists, value, cnode) for i := 0; i < self.maxiter; i++ { // Build new solution newsol, newval, newdel := self.buildSolution(sol, dists, value, cnode) // eventually update best solution found so far if newval + newdel < val + del { sol, val, del = newsol, newval, newdel self.beta = self.betastart } else { self.beta += self.betastep self.beta = math.Min(1.0, self.beta) } } self.solution, self.value, self.delay = sol, val, del }
dei/bra.go
0.615088
0.443299
bra.go
starcoder
package circuits import ( "fmt" "math/big" "github.com/consensys/gnark-crypto/ecc" "github.com/consensys/gnark/backend/hint" "github.com/consensys/gnark/frontend" ) type hintCircuit struct { A, B frontend.Variable } func (circuit *hintCircuit) Define(api frontend.API) error { res, err := api.NewHint(mulBy7, circuit.A) if err != nil { return fmt.Errorf("mulBy7: %w", err) } a7 := res[0] _a7 := api.Mul(circuit.A, 7) api.AssertIsEqual(a7, _a7) api.AssertIsEqual(a7, circuit.B) res, err = api.NewHint(make3) if err != nil { return fmt.Errorf("make3: %w", err) } c := res[0] c = api.Mul(c, c) api.AssertIsEqual(c, 9) return nil } type vectorDoubleCircuit struct { A []frontend.Variable B []frontend.Variable } func (c *vectorDoubleCircuit) Define(api frontend.API) error { res, err := api.NewHint(dvHint, c.A...) if err != nil { return fmt.Errorf("double newhint: %w", err) } if len(res) != len(c.B) { return fmt.Errorf("expected len %d, got %d", len(c.B), len(res)) } for i := range res { api.AssertIsEqual(api.Mul(2, c.A[i]), c.B[i]) api.AssertIsEqual(res[i], c.B[i]) } return nil } func init() { { good := []frontend.Circuit{ &hintCircuit{ A: 42, B: 42 * 7, }, } bad := []frontend.Circuit{ &hintCircuit{ A: 42, B: 42, }, } addNewEntry("hint", &hintCircuit{}, good, bad, ecc.Implemented(), mulBy7, make3) } { good := []frontend.Circuit{ &vectorDoubleCircuit{ A: []frontend.Variable{ 1, 2, 3, 4, 5, 6, 7, 8, }, B: []frontend.Variable{ 2, 4, 6, 8, 10, 12, 14, 16, }, }, } bad := []frontend.Circuit{ &vectorDoubleCircuit{ A: []frontend.Variable{ 1, 2, 3, 4, 5, 6, 7, 8, }, B: []frontend.Variable{ 1, 2, 3, 4, 5, 6, 7, 8, }, }, } addNewEntry("multi-output-hint", &vectorDoubleCircuit{A: make([]frontend.Variable, 8), B: make([]frontend.Variable, 8)}, good, bad, ecc.Implemented(), dvHint) } } var mulBy7 = hint.NewStaticHint(func(curveID ecc.ID, inputs []*big.Int, result []*big.Int) error { result[0].Mul(inputs[0], big.NewInt(7)).Mod(result[0], curveID.Info().Fr.Modulus()) return nil }, 1, 1) var 
make3 = hint.NewStaticHint(func(curveID ecc.ID, inputs []*big.Int, result []*big.Int) error { result[0].SetUint64(3) return nil }, 0, 1) var dvHint = &doubleVector{} type doubleVector struct{} func (dv *doubleVector) UUID() hint.ID { return hint.UUID(dv.Call) } func (dv *doubleVector) Call(curveID ecc.ID, inputs []*big.Int, res []*big.Int) error { two := big.NewInt(2) for i := range inputs { res[i].Mul(two, inputs[i]) } return nil } func (dv *doubleVector) NbOutputs(curveID ecc.ID, nInputs int) (nOutputs int) { return nInputs } func (dv *doubleVector) String() string { return "double" }
internal/backend/circuits/hint.go
0.661923
0.404919
hint.go
starcoder
package blocks import ( "fmt" "strings" v1 "github.com/authzed/authzed-go/proto/authzed/api/v1" yamlv3 "gopkg.in/yaml.v3" "github.com/authzed/spicedb/pkg/commonerrors" "github.com/authzed/spicedb/pkg/tuple" ) // Assertions represents assertions defined in the validation file. type Assertions struct { // AssertTrue is the set of relationships to assert true. AssertTrue []Assertion `yaml:"assertTrue"` // AssertFalse is the set of relationships to assert false. AssertFalse []Assertion `yaml:"assertFalse"` // SourcePosition is the position of the assertions in the file. SourcePosition commonerrors.SourcePosition } // Assertion is a parsed assertion. type Assertion struct { // RelationshipString is the string form of the assertion. RelationshipString string // Relationship is the parsed relationship on which the assertion is being // run. Relationship *v1.Relationship // SourcePosition is the position of the assertion in the file. SourcePosition commonerrors.SourcePosition } type internalAssertions struct { // AssertTrue is the set of relationships to assert true. AssertTrue []Assertion `yaml:"assertTrue"` // AssertFalse is the set of relationships to assert false. AssertFalse []Assertion `yaml:"assertFalse"` } // UnmarshalYAML is a custom unmarshaller. func (a *Assertions) UnmarshalYAML(node *yamlv3.Node) error { ia := internalAssertions{} if err := node.Decode(&ia); err != nil { return convertYamlError(err) } a.AssertTrue = ia.AssertTrue a.AssertFalse = ia.AssertFalse a.SourcePosition = commonerrors.SourcePosition{LineNumber: node.Line, ColumnPosition: node.Column} return nil } // UnmarshalYAML is a custom unmarshaller. 
func (a *Assertion) UnmarshalYAML(node *yamlv3.Node) error { if err := node.Decode(&a.RelationshipString); err != nil { return convertYamlError(err) } trimmed := strings.TrimSpace(a.RelationshipString) tpl := tuple.Parse(trimmed) if tpl == nil { return commonerrors.NewErrorWithSource( fmt.Errorf("error parsing relationship `%s`", trimmed), trimmed, uint64(node.Line), uint64(node.Column), ) } a.Relationship = tuple.MustToRelationship(tpl) a.SourcePosition = commonerrors.SourcePosition{LineNumber: node.Line, ColumnPosition: node.Column} return nil } // ParseAssertionsBlock parses the given contents as an assertions block. func ParseAssertionsBlock(contents []byte) (*Assertions, error) { a := internalAssertions{} if err := yamlv3.Unmarshal(contents, &a); err != nil { return nil, convertYamlError(err) } return &Assertions{ AssertTrue: a.AssertTrue, AssertFalse: a.AssertFalse, }, nil }
pkg/validationfile/blocks/assertions.go
0.694821
0.531209
assertions.go
starcoder
package renderer import ( "github.com/abieberbach/goplanemp" "github.com/abieberbach/goplane/xplm/dataAccess" "github.com/abieberbach/goplane/extra" ) type dataType int const ( gear_ratio dataType = 0 flap_ratio dataType = 1 spoiler_ratio dataType = 2 speedbreak_ratio dataType = 3 slat_ratio dataType = 4 wing_sweep_ratio dataType = 5 thrust dataType = 6 pitch dataType = 7 heading dataType = 8 roll dataType = 9 landing_light_on dataType = 10 beacon_light_on dataType = 11 strobe_light_on dataType = 12 nav_light_on dataType = 13 taxi_light_on dataType = 14 ) var currentAircraft *goplanemp.Plane func getFloatValue(ref interface{}) float32 { if currentAircraft == nil { return 0.0 } switch ref.(dataType) { case gear_ratio: return currentAircraft.SurfacesData.GearPosition case flap_ratio: return currentAircraft.SurfacesData.FlapRatio case spoiler_ratio: return currentAircraft.SurfacesData.SpoilerRatio case speedbreak_ratio: return currentAircraft.SurfacesData.SpeedBrakeRatio case slat_ratio: return currentAircraft.SurfacesData.SlatRatio case wing_sweep_ratio: return currentAircraft.SurfacesData.WingSweep case thrust: return currentAircraft.SurfacesData.Thrust case pitch: return currentAircraft.SurfacesData.YokePitch case heading: return currentAircraft.SurfacesData.YokeHeading case roll: return currentAircraft.SurfacesData.YokeRoll case landing_light_on: if currentAircraft.SurfacesData.Lights.LandingLights { return 1.0 } return 0.0 case beacon_light_on: if currentAircraft.SurfacesData.Lights.BeaconLights { return 1.0 } return 0.0 case strobe_light_on: if currentAircraft.SurfacesData.Lights.StrobeLights { return 1.0 } return 0.0 case nav_light_on: if currentAircraft.SurfacesData.Lights.NavLights { return 1.0 } return 0.0 case taxi_light_on: if currentAircraft.SurfacesData.Lights.TaxiLights { return 1.0 } return 0.0 default: return 0.0 } } func registerMultiplayerDataRefs() { accessors := dataAccess.DataRefAccessors{} accessors.ReadFloat = getFloatValue 
dataAccess.RegisterDataAccessor("goplanemp/controls/gear_ratio", dataAccess.TypeFloat, false, accessors, gear_ratio, nil) dataAccess.RegisterDataAccessor("goplanemp/controls/flap_ratio", dataAccess.TypeFloat, false, accessors, flap_ratio, nil) dataAccess.RegisterDataAccessor("goplanemp/controls/spoiler_ratio", dataAccess.TypeFloat, false, accessors, spoiler_ratio, nil) dataAccess.RegisterDataAccessor("goplanemp/controls/speed_brake_ratio", dataAccess.TypeFloat, false, accessors, speedbreak_ratio, nil) dataAccess.RegisterDataAccessor("goplanemp/controls/slat_ratio", dataAccess.TypeFloat, false, accessors, slat_ratio, nil) dataAccess.RegisterDataAccessor("goplanemp/controls/wing_sweep_ratio", dataAccess.TypeFloat, false, accessors, wing_sweep_ratio, nil) dataAccess.RegisterDataAccessor("goplanemp/controls/thrust_ratio", dataAccess.TypeFloat, false, accessors, thrust, nil) dataAccess.RegisterDataAccessor("goplanemp/controls/yoke_pitch_ratio", dataAccess.TypeFloat, false, accessors, pitch, nil) dataAccess.RegisterDataAccessor("goplanemp/controls/yoke_heading_ratio", dataAccess.TypeFloat, false, accessors, heading, nil) dataAccess.RegisterDataAccessor("goplanemp/controls/yoke_roll_ratio", dataAccess.TypeFloat, false, accessors, roll, nil) dataAccess.RegisterDataAccessor("goplanemp/lights/landing_lights_on", dataAccess.TypeFloat, false, accessors, landing_light_on, nil) dataAccess.RegisterDataAccessor("goplanemp/lights/beacon_lights_on", dataAccess.TypeFloat, false, accessors, beacon_light_on, nil) dataAccess.RegisterDataAccessor("goplanemp/lights/strobe_lights_on", dataAccess.TypeFloat, false, accessors, strobe_light_on, nil) dataAccess.RegisterDataAccessor("goplanemp/lights/nav_lights_on", dataAccess.TypeFloat, false, accessors, nav_light_on, nil) dataAccess.RegisterDataAccessor("goplanemp/lights/taxi_lights_on", dataAccess.TypeFloat, false, accessors, taxi_light_on, nil) extra.RegisterDataRefToDataRefEditor("goplanemp/controls/gear_ratio", 
"goplanemp/controls/flap_ratio", "goplanemp/controls/spoiler_ratio", "goplanemp/controls/speed_brake_ratio", "goplanemp/controls/slat_ratio", "goplanemp/controls/wing_sweep_ratio", "goplanemp/controls/thrust_ratio", "goplanemp/controls/yoke_pitch_ratio", "goplanemp/controls/yoke_heading_ratio", "goplanemp/controls/yoke_roll_ratio", "goplanemp/lights/landing_lights_on", "goplanemp/lights/beacon_lights_on", "goplanemp/lights/strobe_lights_on", "goplanemp/lights/nav_lights_on", "goplanemp/lights/taxi_lights_on") }
renderer/dataAccess.go
0.555435
0.429071
dataAccess.go
starcoder
package analyze import ( "fmt" "github.com/calebcase/sla/uow" "github.com/gonum/stat" "github.com/stripe/veneur/tdigest" ) type Circular struct { Current int Data []float64 } func NewCircular(size int) *Circular { circular := Circular{ Data: make([]float64, size), } return &circular } func (c *Circular) Add(data float64) { c.Data[c.Current] = data c.Current += 1 if c.Current >= len(c.Data) { c.Current = 0 } } func (c *Circular) Quantile(quantile float64) float64 { data := make([]float64, len(c.Data)) copy(data, c.Data) stat.SortWeighted(data, nil) return stat.Quantile(quantile, stat.Empirical, data, nil) } type Record struct { DNS *tdigest.MergingDigest Connection *tdigest.MergingDigest TLS *tdigest.MergingDigest Request *tdigest.MergingDigest Delay *tdigest.MergingDigest Response *tdigest.MergingDigest Duration *tdigest.MergingDigest Trailing *Circular } func NewRecord() *Record { record := &Record{ DNS: tdigest.NewMerging(100, false), Connection: tdigest.NewMerging(100, false), TLS: tdigest.NewMerging(100, false), Request: tdigest.NewMerging(100, false), Delay: tdigest.NewMerging(100, false), Response: tdigest.NewMerging(100, false), Duration: tdigest.NewMerging(100, false), Trailing: NewCircular(10), } return record } func (r *Record) AddRound(round *uow.Round) { r.DNS.Add(round.Timing.DNS.Seconds(), 1.0) r.Connection.Add(round.Timing.Connection.Seconds(), 1.0) r.TLS.Add(round.Timing.TLS.Seconds(), 1.0) r.Request.Add(round.Timing.Request.Seconds(), 1.0) r.Delay.Add(round.Timing.Delay.Seconds(), 1.0) r.Response.Add(round.Timing.Response.Seconds(), 1.0) r.Duration.Add(round.Timing.Duration.Seconds(), 1.0) r.Trailing.Add(round.Timing.Duration.Seconds()) } func (r *Record) Header() []string { headers := []string{ "DNS", "Connection", "TLS", "Request", "Delay", "Response", "Duration", } return headers } func (r *Record) Quantiles(quantile float64) []float64 { results := []float64{ r.DNS.Quantile(quantile), r.Connection.Quantile(quantile), r.TLS.Quantile(quantile), 
r.Request.Quantile(quantile), r.Delay.Quantile(quantile), r.Response.Quantile(quantile), r.Duration.Quantile(quantile), } return results } func (r *Record) String() string { return fmt.Sprintf("%v", r.Quantiles(0.95)) }
analyze/record.go
0.717507
0.493592
record.go
starcoder
package onshape import ( "encoding/json" ) // BTMParameterQuantity147 struct for BTMParameterQuantity147 type BTMParameterQuantity147 struct { BTMParameter1 BtType *string `json:"btType,omitempty"` Expression *string `json:"expression,omitempty"` IsInteger *bool `json:"isInteger,omitempty"` Units *string `json:"units,omitempty"` Value *float64 `json:"value,omitempty"` } // NewBTMParameterQuantity147 instantiates a new BTMParameterQuantity147 object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewBTMParameterQuantity147() *BTMParameterQuantity147 { this := BTMParameterQuantity147{} return &this } // NewBTMParameterQuantity147WithDefaults instantiates a new BTMParameterQuantity147 object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewBTMParameterQuantity147WithDefaults() *BTMParameterQuantity147 { this := BTMParameterQuantity147{} return &this } // GetBtType returns the BtType field value if set, zero value otherwise. func (o *BTMParameterQuantity147) GetBtType() string { if o == nil || o.BtType == nil { var ret string return ret } return *o.BtType } // GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTMParameterQuantity147) GetBtTypeOk() (*string, bool) { if o == nil || o.BtType == nil { return nil, false } return o.BtType, true } // HasBtType returns a boolean if a field has been set. func (o *BTMParameterQuantity147) HasBtType() bool { if o != nil && o.BtType != nil { return true } return false } // SetBtType gets a reference to the given string and assigns it to the BtType field. 
func (o *BTMParameterQuantity147) SetBtType(v string) { o.BtType = &v } // GetExpression returns the Expression field value if set, zero value otherwise. func (o *BTMParameterQuantity147) GetExpression() string { if o == nil || o.Expression == nil { var ret string return ret } return *o.Expression } // GetExpressionOk returns a tuple with the Expression field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTMParameterQuantity147) GetExpressionOk() (*string, bool) { if o == nil || o.Expression == nil { return nil, false } return o.Expression, true } // HasExpression returns a boolean if a field has been set. func (o *BTMParameterQuantity147) HasExpression() bool { if o != nil && o.Expression != nil { return true } return false } // SetExpression gets a reference to the given string and assigns it to the Expression field. func (o *BTMParameterQuantity147) SetExpression(v string) { o.Expression = &v } // GetIsInteger returns the IsInteger field value if set, zero value otherwise. func (o *BTMParameterQuantity147) GetIsInteger() bool { if o == nil || o.IsInteger == nil { var ret bool return ret } return *o.IsInteger } // GetIsIntegerOk returns a tuple with the IsInteger field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTMParameterQuantity147) GetIsIntegerOk() (*bool, bool) { if o == nil || o.IsInteger == nil { return nil, false } return o.IsInteger, true } // HasIsInteger returns a boolean if a field has been set. func (o *BTMParameterQuantity147) HasIsInteger() bool { if o != nil && o.IsInteger != nil { return true } return false } // SetIsInteger gets a reference to the given bool and assigns it to the IsInteger field. func (o *BTMParameterQuantity147) SetIsInteger(v bool) { o.IsInteger = &v } // GetUnits returns the Units field value if set, zero value otherwise. 
func (o *BTMParameterQuantity147) GetUnits() string { if o == nil || o.Units == nil { var ret string return ret } return *o.Units } // GetUnitsOk returns a tuple with the Units field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTMParameterQuantity147) GetUnitsOk() (*string, bool) { if o == nil || o.Units == nil { return nil, false } return o.Units, true } // HasUnits returns a boolean if a field has been set. func (o *BTMParameterQuantity147) HasUnits() bool { if o != nil && o.Units != nil { return true } return false } // SetUnits gets a reference to the given string and assigns it to the Units field. func (o *BTMParameterQuantity147) SetUnits(v string) { o.Units = &v } // GetValue returns the Value field value if set, zero value otherwise. func (o *BTMParameterQuantity147) GetValue() float64 { if o == nil || o.Value == nil { var ret float64 return ret } return *o.Value } // GetValueOk returns a tuple with the Value field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *BTMParameterQuantity147) GetValueOk() (*float64, bool) { if o == nil || o.Value == nil { return nil, false } return o.Value, true } // HasValue returns a boolean if a field has been set. func (o *BTMParameterQuantity147) HasValue() bool { if o != nil && o.Value != nil { return true } return false } // SetValue gets a reference to the given float64 and assigns it to the Value field. 
func (o *BTMParameterQuantity147) SetValue(v float64) { o.Value = &v } func (o BTMParameterQuantity147) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} serializedBTMParameter1, errBTMParameter1 := json.Marshal(o.BTMParameter1) if errBTMParameter1 != nil { return []byte{}, errBTMParameter1 } errBTMParameter1 = json.Unmarshal([]byte(serializedBTMParameter1), &toSerialize) if errBTMParameter1 != nil { return []byte{}, errBTMParameter1 } if o.BtType != nil { toSerialize["btType"] = o.BtType } if o.Expression != nil { toSerialize["expression"] = o.Expression } if o.IsInteger != nil { toSerialize["isInteger"] = o.IsInteger } if o.Units != nil { toSerialize["units"] = o.Units } if o.Value != nil { toSerialize["value"] = o.Value } return json.Marshal(toSerialize) } type NullableBTMParameterQuantity147 struct { value *BTMParameterQuantity147 isSet bool } func (v NullableBTMParameterQuantity147) Get() *BTMParameterQuantity147 { return v.value } func (v *NullableBTMParameterQuantity147) Set(val *BTMParameterQuantity147) { v.value = val v.isSet = true } func (v NullableBTMParameterQuantity147) IsSet() bool { return v.isSet } func (v *NullableBTMParameterQuantity147) Unset() { v.value = nil v.isSet = false } func NewNullableBTMParameterQuantity147(val *BTMParameterQuantity147) *NullableBTMParameterQuantity147 { return &NullableBTMParameterQuantity147{value: val, isSet: true} } func (v NullableBTMParameterQuantity147) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableBTMParameterQuantity147) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
onshape/model_btm_parameter_quantity_147.go
0.749821
0.415966
model_btm_parameter_quantity_147.go
starcoder
package sales // Decimal defines model for Decimal. type Decimal string // Error defines model for Error. type Error struct { // An error code that identifies the type of error that occured. Code string `json:"code"` // Additional details that can help the caller understand or fix the issue. Details *string `json:"details,omitempty"` // A message that describes the error condition in a human-readable form. Message string `json:"message"` } // ErrorList defines model for ErrorList. type ErrorList []Error // GetOrderMetricsResponse defines model for GetOrderMetricsResponse. type GetOrderMetricsResponse struct { // A list of error responses returned when a request is unsuccessful. Errors *ErrorList `json:"errors,omitempty"` // A set of order metrics, each scoped to a particular time interval. Payload *OrderMetricsList `json:"payload,omitempty"` } // Money defines model for Money. type Money struct { // A decimal number with no loss of precision. Useful when precision loss is unacceptable, as with currencies. Follows RFC7159 for number representation. <br>**Pattern** : `^-?(0|([1-9]\d*))(\.\d+)?([eE][+-]?\d+)?$`. Amount Decimal `json:"amount"` // Three-digit currency code. In ISO 4217 format. CurrencyCode string `json:"currencyCode"` } // OrderMetricsInterval defines model for OrderMetricsInterval. type OrderMetricsInterval struct { // The currency type and the amount. AverageUnitPrice Money `json:"averageUnitPrice"` // The interval of time based on requested granularity (ex. Hour, Day, etc.) If this is the first or the last interval from the list, it might contain incomplete data if the requested interval doesn't align with the requested granularity (ex. request interval 2018-09-01T02:00:00Z--2018-09-04T19:00:00Z and granularity day will result in Sept 1st UTC day and Sept 4th UTC days having partial data). Interval string `json:"interval"` // The number of orders based on the specified filters. 
OrderCount int `json:"orderCount"` // The number of order items based on the specified filters. OrderItemCount int `json:"orderItemCount"` // The currency type and the amount. TotalSales Money `json:"totalSales"` // The number of units in orders based on the specified filters. UnitCount int `json:"unitCount"` } // OrderMetricsList defines model for OrderMetricsList. type OrderMetricsList []OrderMetricsInterval // GetOrderMetricsParams defines parameters for GetOrderMetrics. type GetOrderMetricsParams struct { // A list of marketplace identifiers. Example: ATVPDKIKX0DER indicates the US marketplace. MarketplaceIds []string `json:"marketplaceIds"` // A time interval used for selecting order metrics. This takes the form of two dates separated by two hyphens (first date is inclusive; second date is exclusive). Dates are in ISO8601 format and must represent absolute time (either Z notation or offset notation). Example: 2018-09-01T00:00:00-07:00--2018-09-04T00:00:00-07:00 requests order metrics for Sept 1st, 2nd and 3rd in the -07:00 zone. Interval string `json:"interval"` // An IANA-compatible time zone for determining the day boundary. Required when specifying a granularity value greater than Hour. The granularityTimeZone value must align with the offset of the specified interval value. For example, if the interval value uses Z notation, then granularityTimeZone must be UTC. If the interval value uses an offset, then granularityTimeZone must be an IANA-compatible time zone that matches the offset. Example: US/Pacific to compute day boundaries, accounting for daylight time savings, for US/Pacific zone. GranularityTimeZone *string `json:"granularityTimeZone,omitempty"` // The granularity of the grouping of order metrics, based on a unit of time. Specifying granularity=Hour results in a successful request only if the interval specified is less than or equal to 30 days from now. For all other granularities, the interval specified must be less or equal to 2 years from now. 
Specifying granularity=Total results in order metrics that are aggregated over the entire interval that you specify. If the interval start and end date don’t align with the specified granularity, the head and tail end of the response interval will contain partial data. Example: Day to get a daily breakdown of the request interval, where the day boundary is defined by the granularityTimeZone. Granularity string `json:"granularity"` // Filters the results by the buyer type that you specify, B2B (business to business) or B2C (business to customer). Example: B2B, if you want the response to include order metrics for only B2B buyers. BuyerType *string `json:"buyerType,omitempty"` // Filters the results by the fulfillment network that you specify, MFN (merchant fulfillment network) or AFN (Amazon fulfillment network). Do not include this filter if you want the response to include order metrics for all fulfillment networks. Example: AFN, if you want the response to include order metrics for only Amazon fulfillment network. FulfillmentNetwork *string `json:"fulfillmentNetwork,omitempty"` // Specifies the day that the week starts on when granularity=Week, either Monday or Sunday. Default: Monday. Example: Sunday, if you want the week to start on a Sunday. FirstDayOfWeek *string `json:"firstDayOfWeek,omitempty"` // Filters the results by the ASIN that you specify. Specifying both ASIN and SKU returns an error. Do not include this filter if you want the response to include order metrics for all ASINs. Example: B0792R1RSN, if you want the response to include order metrics for only ASIN B0792R1RSN. Asin *string `json:"asin,omitempty"` // Filters the results by the SKU that you specify. Specifying both ASIN and SKU returns an error. Do not include this filter if you want the response to include order metrics for all SKUs. Example: TestSKU, if you want the response to include order metrics for only SKU TestSKU. Sku *string `json:"sku,omitempty"` }
sales/types.gen.go
0.869687
0.489992
types.gen.go
starcoder
package allure // Label is the implementation of the label. // A label is an entity used by Allure to make metrics and grouping of tests. type Label struct { Name string `json:"name"` // Label's name Value string `json:"value"` // Label's value } // NewLabel - builds and returns a new allure.Label. The label key depends on the passed labelType. func NewLabel(labelType LabelType, value string) Label { return Label{ Name: labelType.ToString(), Value: value, } } type LabelType string // LabelType constants const ( Epic LabelType = "epic" Feature LabelType = "feature" Story LabelType = "story" ID LabelType = "as_id" Severity LabelType = "severity" ParentSuite LabelType = "parentSuite" Suite LabelType = "suite" SubSuite LabelType = "subSuite" Package LabelType = "package" Thread LabelType = "thread" Host LabelType = "host" Tag LabelType = "tag" Framework LabelType = "framework" Language LabelType = "language" Owner LabelType = "owner" Lead LabelType = "lead" AllureID LabelType = "ALLURE_ID" ) func (l LabelType) ToString() string { return string(l) } type SeverityType string // SeverityType constants const ( BLOCKER SeverityType = "blocker" CRITICAL SeverityType = "critical" NORMAL SeverityType = "normal" MINOR SeverityType = "minor" TRIVIAL SeverityType = "trivial" ) // ToString casts SeverityType to string func (s SeverityType) ToString() string { return string(s) } // LanguageLabel returns Language Label func LanguageLabel(language string) Label { return NewLabel(Language, language) } // FrameWorkLabel returns Framework Label func FrameWorkLabel(framework string) Label { return NewLabel(Framework, framework) } // IDLabel returns ID Label func IDLabel(testID string) Label { return NewLabel(ID, testID) } // TagLabel returns Tag Label func TagLabel(tag string) Label { return NewLabel(Tag, tag) } // TagLabels returns array of Tag Label func TagLabels(tags ...string) []Label { var result []Label for _, tag := range tags { result = append(result, TagLabel(tag)) } return 
result } // HostLabel returns Host Label func HostLabel(host string) Label { return NewLabel(Host, host) } // ThreadLabel returns Thread Label func ThreadLabel(thread string) Label { return NewLabel(Thread, thread) } // SeverityLabel returns Severity Label func SeverityLabel(severity SeverityType) Label { return NewLabel(Severity, severity.ToString()) } // SubSuiteLabel returns SubSuite Label func SubSuiteLabel(subSuite string) Label { return NewLabel(SubSuite, subSuite) } // EpicLabel returns Epic Label func EpicLabel(epic string) Label { return NewLabel(Epic, epic) } // StoryLabel returns Story Label func StoryLabel(story string) Label { return NewLabel(Story, story) } // FeatureLabel returns Feature Label func FeatureLabel(feature string) Label { return NewLabel(Feature, feature) } // ParentSuiteLabel returns ParentSuite Label func ParentSuiteLabel(parent string) Label { return NewLabel(ParentSuite, parent) } // SuiteLabel returns Suite Label func SuiteLabel(suite string) Label { return NewLabel(Suite, suite) } // PackageLabel returns Package Label func PackageLabel(packageName string) Label { return NewLabel(Package, packageName) } // OwnerLabel returns Owner Label func OwnerLabel(ownerName string) Label { return NewLabel(Owner, ownerName) } // LeadLabel returns Lead Label func LeadLabel(leadName string) Label { return NewLabel(Lead, leadName) } // IDAllureLabel returns AllureID Label func IDAllureLabel(allureID string) Label { return NewLabel(AllureID, allureID) }
pkg/allure/label.go
0.807726
0.529446
label.go
starcoder
package coordconv import ( "errors" "math" "github.com/golang/geo/s1" "github.com/golang/geo/s2" ) // PolarStereographic provides conversions between geodetic (latitude and // longitude) coordinates and Polar Stereographic (easting and northing) // coordinates. type PolarStereographic struct { semiMajorAxis float64 flattening float64 es float64 // Eccentricity of ellipsoid esOverTwo float64 // es / 2.0 isSouthernHemisphere bool // Flag variable polarTC float64 polarK90 float64 polaraMc float64 // Polar_a * mc twoPolarA float64 // 2.0 * Polar_a // Polar Stereographic projection Parameters polarStandardParallel float64 // Latitude of origin in radians polarCentralMeridian float64 // Longitude of origin in radians polarFalseEasting float64 // False easting in meters polarFalseNorthing float64 // False northing in meters // Maximum variance for easting and northing values for WGS 84. polarDeltaEasting float64 polarDeltaNorthing float64 polarScaleFactor float64 } // NewPolarStereographic receives the ellipsoid parameters and Polar // Stereograpic (Standard Parallel) projection parameters as inputs, and sets // the corresponding state variables. 
func NewPolarStereographic(ellipsoidSemiMajorAxis, ellipsoidFlattening, centralMeridian, standardParallel, falseEasting, falseNorthing float64) (*PolarStereographic, error) { p := &PolarStereographic{ es: (0.08181919084262188000), esOverTwo: (.040909595421311), isSouthernHemisphere: (false), polarTC: (1.0), polarK90: (1.0033565552493), polaraMc: (6378137.0), twoPolarA: (12756274.0), polarCentralMeridian: (0.0), polarStandardParallel: ((math.Pi * 90) / 180), polarFalseEasting: (0.0), polarFalseNorthing: (0.0), polarScaleFactor: (1.0), polarDeltaEasting: (12713601.0), polarDeltaNorthing: (12713601.0), } invF := 1 / ellipsoidFlattening if ellipsoidSemiMajorAxis <= 0.0 { return nil, errors.New("Semi-major axis must be greater than zero") } if (invF < 250) || (invF > 350) { return nil, errors.New("Inverse flattening must be between 250 and 350") } if (standardParallel < -math.Pi/2) || (standardParallel > math.Pi/2) { return nil, errors.New("Origin Latitude out of range") } if (centralMeridian < -math.Pi) || (centralMeridian > 2*math.Pi) { return nil, errors.New("Origin Longitude out of range") } p.semiMajorAxis = ellipsoidSemiMajorAxis p.flattening = ellipsoidFlattening p.twoPolarA = 2.0 * p.semiMajorAxis if centralMeridian > math.Pi { centralMeridian -= 2 * math.Pi } if standardParallel < 0 { p.isSouthernHemisphere = true p.polarStandardParallel = -standardParallel p.polarCentralMeridian = -centralMeridian } else { p.isSouthernHemisphere = false p.polarStandardParallel = standardParallel p.polarCentralMeridian = centralMeridian } p.polarFalseEasting = falseEasting p.polarFalseNorthing = falseNorthing es2 := 2*p.flattening - p.flattening*p.flattening p.es = math.Sqrt(es2) p.esOverTwo = p.es / 2.0 if math.Abs(math.Abs(p.polarStandardParallel)-math.Pi/2) > 1.0e-10 { sinolat := math.Sin(p.polarStandardParallel) essin := p.es * sinolat powEs := p.polarPow(essin) cosolat := math.Cos(p.polarStandardParallel) mc := cosolat / math.Sqrt(1.0-essin*essin) p.polaraMc = 
p.semiMajorAxis * mc p.polarTC = math.Tan(math.Pi/4-p.polarStandardParallel/2.0) / powEs } onePlusEs := 1.0 + p.es oneMinusEs := 1.0 - p.es p.polarK90 = math.Sqrt(math.Pow(onePlusEs, onePlusEs) * math.Pow(oneMinusEs, oneMinusEs)) slat := math.Sin(math.Abs(standardParallel)) onePlusEsSinoLat := 1.0 + p.es*slat oneMinusEsSinoLat := 1.0 - p.es*slat p.polarScaleFactor = ((1 + slat) / 2) * (p.polarK90 / math.Sqrt(math.Pow(onePlusEsSinoLat, onePlusEs)* math.Pow(oneMinusEsSinoLat, oneMinusEs))) // Calculate Radius tempGeodeticCoordinates := s2.LatLng{Lng: s1.Angle(centralMeridian), Lat: 0} tempCoordinates, err := p.ConvertFromGeodetic(tempGeodeticCoordinates) if err != nil { return nil, err } p.polarDeltaNorthing = tempCoordinates.Northing if p.polarFalseNorthing != 0 { p.polarDeltaNorthing -= p.polarFalseNorthing } if p.polarDeltaNorthing < 0 { p.polarDeltaNorthing = -p.polarDeltaNorthing } p.polarDeltaNorthing *= 1.01 p.polarDeltaEasting = p.polarDeltaNorthing return p, nil } // NewPolarStereographicScaleFactor ellipsoid parameters and Polar Stereograpic // (Scale Factor) projection parameters as inputs, and sets the corresponding // state variables. 
func NewPolarStereographicScaleFactor(ellipsoidSemiMajorAxis, ellipsoidFlattening, centralMeridian, scaleFactor float64, hemisphere Hemisphere, falseEasting, falseNorthing float64) (*PolarStereographic, error) { p := &PolarStereographic{ // coordinateType: (CoordinateType::polarStereographicScaleFactor), es: (0.08181919084262188000), esOverTwo: (.040909595421311), isSouthernHemisphere: false, polarTC: (1.0), polarK90: (1.0033565552493), polaraMc: (6378137.0), twoPolarA: (12756274.0), polarCentralMeridian: (0.0), polarStandardParallel: ((math.Pi * 90) / 180), polarFalseEasting: (0.0), polarFalseNorthing: (0.0), polarScaleFactor: (1.0), polarDeltaEasting: (12713601.0), polarDeltaNorthing: (12713601.0), } tolerance := 1.0e-15 count := 30 invF := 1 / ellipsoidFlattening const minScaleFactor = 0.1 const maxScaleFactor = 3.0 if ellipsoidSemiMajorAxis <= 0.0 { return nil, errors.New("Semi-major axis must be greater than zero") } if (invF < 250) || (invF > 350) { return nil, errors.New("Inverse flattening must be between 250 and 350") } if (scaleFactor < minScaleFactor) || (scaleFactor > maxScaleFactor) { return nil, errors.New("Scale factor out of range") } if (centralMeridian < -math.Pi) || (centralMeridian > 2*math.Pi) { return nil, errors.New("Origin Longitude out of range") } if (hemisphere != HemisphereNorth) && (hemisphere != HemisphereSouth) { return nil, errors.New("Hemisphere out of range") } p.semiMajorAxis = ellipsoidSemiMajorAxis p.flattening = ellipsoidFlattening p.polarScaleFactor = scaleFactor p.polarFalseEasting = falseEasting p.polarFalseNorthing = falseNorthing p.twoPolarA = 2.0 * p.semiMajorAxis es2 := 2*p.flattening - p.flattening*p.flattening p.es = math.Sqrt(es2) p.esOverTwo = p.es / 2.0 onePlusEs := 1.0 + p.es oneMinusEs := 1.0 - p.es p.polarK90 = math.Sqrt(math.Pow(onePlusEs, onePlusEs) * math.Pow(oneMinusEs, oneMinusEs)) sk := 0.0 skPlus1 := -1 + 2*p.polarScaleFactor for math.Abs(skPlus1-sk) > tolerance && count != 0 { sk = skPlus1 onePlusEsSk := 
1.0 + p.es*sk oneMinusEsSk := 1.0 - p.es*sk skPlus1 = ((2 * p.polarScaleFactor * math.Sqrt(math.Pow(onePlusEsSk, onePlusEs)* math.Pow(oneMinusEsSk, oneMinusEs))) / p.polarK90) - 1 count-- } if count == 0 { return nil, errors.New("origin latitude error") } standardParallel := 0.0 if skPlus1 >= -1.0 && skPlus1 <= 1.0 { standardParallel = math.Asin(skPlus1) } else { return nil, errors.New("origin latitude error") } if hemisphere == HemisphereSouth { standardParallel *= -1.0 } if centralMeridian > math.Pi { centralMeridian -= 2 * math.Pi } if standardParallel < 0 { p.isSouthernHemisphere = true p.polarStandardParallel = -standardParallel p.polarCentralMeridian = -centralMeridian } else { p.isSouthernHemisphere = false p.polarStandardParallel = standardParallel p.polarCentralMeridian = centralMeridian } sinolat := math.Sin(p.polarStandardParallel) if math.Abs(math.Abs(p.polarStandardParallel)-math.Pi/2) > 1.0e-10 { essin := p.es * sinolat powEs := p.polarPow(essin) cosolat := math.Cos(p.polarStandardParallel) mc := cosolat / math.Sqrt(1.0-essin*essin) p.polaraMc = p.semiMajorAxis * mc p.polarTC = math.Tan(math.Pi/4-p.polarStandardParallel/2.0) / powEs } // Calculate Radius tempGeodeticCoordinates := s2.LatLng{Lng: s1.Angle(centralMeridian), Lat: 0} tempCoordinates, err := p.ConvertFromGeodetic(tempGeodeticCoordinates) if err != nil { return nil, err } p.polarDeltaNorthing = tempCoordinates.Northing if p.polarFalseNorthing != 0 { p.polarDeltaNorthing -= p.polarFalseNorthing } if p.polarDeltaNorthing < 0 { p.polarDeltaNorthing = -p.polarDeltaNorthing } p.polarDeltaNorthing *= 1.01 p.polarDeltaEasting = p.polarDeltaNorthing return p, nil } // ConvertFromGeodetic converts geodetic coordinates (latitude and longitude) to // Polar Stereographic coordinates (easting and northing), according to the // current ellipsoid and Polar Stereographic projection parameters. 
func (p *PolarStereographic) ConvertFromGeodetic(geodeticCoordinates s2.LatLng) (MapCoords, error) { longitude := geodeticCoordinates.Lng.Radians() latitude := geodeticCoordinates.Lat.Radians() if (latitude < -math.Pi/2) || (latitude > math.Pi/2) { return MapCoords{}, errors.New("latitide out of range") } else if (latitude < 0) && (!p.isSouthernHemisphere) { return MapCoords{}, errors.New("latitude and Origin Latitude in different hemispheres") } else if (latitude > 0) && (p.isSouthernHemisphere) { return MapCoords{}, errors.New("latitude and Origin Latitude in different hemispheres") } if (longitude < -math.Pi) || (longitude > 2*math.Pi) { return MapCoords{}, errors.New("longitude out of range") } var easting, northing float64 if math.Abs(math.Abs(latitude)-math.Pi/2) < 1.0e-10 { easting = p.polarFalseEasting northing = p.polarFalseNorthing } else { if p.isSouthernHemisphere { longitude *= -1.0 latitude *= -1.0 } dlam := longitude - p.polarCentralMeridian if dlam > math.Pi { dlam -= 2 * math.Pi } if dlam < -math.Pi { dlam += 2 * math.Pi } slat := math.Sin(latitude) essin := p.es * slat powEs := p.polarPow(essin) t := math.Tan(math.Pi/4-latitude/2.0) / powEs var rho float64 if math.Abs(math.Abs(p.polarStandardParallel)-math.Pi/2) > 1.0e-10 { rho = p.polaraMc * t / p.polarTC } else { rho = p.twoPolarA * t / p.polarK90 } if p.isSouthernHemisphere { easting = -(rho*math.Sin(dlam) - p.polarFalseEasting) northing = rho*math.Cos(dlam) + p.polarFalseNorthing } else { easting = rho*math.Sin(dlam) + p.polarFalseEasting northing = -rho*math.Cos(dlam) + p.polarFalseNorthing } } return MapCoords{Easting: easting, Northing: northing}, nil } // ConvertToGeodetic converts Polar Stereographic coordinates (easting and // northing) to geodetic coordinates (latitude and longitude) according to the // current ellipsoid and Polar Stereographic projection Parameters. 
func (p *PolarStereographic) ConvertToGeodetic(mapProjectionCoordinates MapCoords) (s2.LatLng, error) { easting := mapProjectionCoordinates.Easting northing := mapProjectionCoordinates.Northing minEasting := p.polarFalseEasting - p.polarDeltaEasting maxEasting := p.polarFalseEasting + p.polarDeltaEasting minNorthing := p.polarFalseNorthing - p.polarDeltaNorthing maxNorthing := p.polarFalseNorthing + p.polarDeltaNorthing if easting > maxEasting || easting < minEasting { return s2.LatLng{}, errors.New("easting out of range") } if northing > maxNorthing || northing < minNorthing { return s2.LatLng{}, errors.New("northing out of range") } dy := northing - p.polarFalseNorthing dx := easting - p.polarFalseEasting // Radius of point with origin of false easting, false northing rho := math.Sqrt(dx*dx + dy*dy) deltaRadius := math.Sqrt(p.polarDeltaEasting*p.polarDeltaEasting + p.polarDeltaNorthing*p.polarDeltaNorthing) if rho > deltaRadius { return s2.LatLng{}, errors.New("Point is outside of projection area") } var latitude, longitude float64 if (dy == 0.0) && (dx == 0.0) { latitude = math.Pi / 2 longitude = p.polarCentralMeridian } else { if p.isSouthernHemisphere { dy *= -1.0 dx *= -1.0 } var t float64 if math.Abs(math.Abs(p.polarStandardParallel)-math.Pi/2) > 1.0e-10 { t = rho * p.polarTC / (p.polaraMc) } else { t = rho * p.polarK90 / (p.twoPolarA) } PHI := math.Pi/2 - 2.0*math.Atan(t) tempPHI := 0.0 for math.Abs(PHI-tempPHI) > 1.0e-10 { tempPHI = PHI sinPhi := math.Sin(PHI) essin := p.es * sinPhi powEs := p.polarPow(essin) PHI = math.Pi/2 - 2.0*math.Atan(t*powEs) } latitude = PHI longitude = p.polarCentralMeridian + math.Atan2(dx, -dy) if longitude > math.Pi { longitude -= 2 * math.Pi } else if longitude < -math.Pi { longitude += 2 * math.Pi } if latitude > math.Pi/2 { // force distorted values to 90, -90 degrees latitude = math.Pi / 2 } else if latitude < -math.Pi/2 { latitude = -math.Pi / 2 } if longitude > math.Pi { // force distorted values to 180, -180 degrees 
longitude = math.Pi } else if longitude < -math.Pi { longitude = -math.Pi } } if p.isSouthernHemisphere { latitude *= -1.0 longitude *= -1.0 } return s2.LatLng{Lat: s1.Angle(latitude), Lng: s1.Angle(longitude)}, nil } func (p *PolarStereographic) polarPow(esSin float64) float64 { return math.Pow((1.0-esSin)/(1.0+esSin), p.esOverTwo) }
polarstereographic.go
0.687
0.674466
polarstereographic.go
starcoder
package main import ( //"log" ) type QuadTreeNode struct { //Parent *QuadTreeNode //SideLength int Leaf *Paper Q0, Q1, Q2, Q3 *QuadTreeNode } type QuadTree struct { MinX, MinY, MaxX, MaxY, MaxR int Root *QuadTreeNode } func QuadTreeInsertPaper(parent *QuadTreeNode, q **QuadTreeNode, paper *Paper, MinX, MinY, MaxX, MaxY int, insertErrors *int) { if *q == nil { // hit an empty node; create a new leaf cell and put this paper in it *q = new(QuadTreeNode) //(*q).Parent = parent //(*q).SideLength = MaxX - MinX (*q).Leaf = paper } else if (*q).Leaf != nil { // hit a leaf; turn it into an internal node and re-insert the papers oldPaper := (*q).Leaf (*q).Leaf = nil (*q).Q0 = nil (*q).Q1 = nil (*q).Q2 = nil (*q).Q3 = nil QuadTreeInsertPaper(parent, q, oldPaper, MinX, MinY, MaxX, MaxY, insertErrors) QuadTreeInsertPaper(parent, q, paper, MinX, MinY, MaxX, MaxY, insertErrors) } else { // hit an internal node // check cell size didn't get too small if (MaxX <= MinX + 1 || MaxY <= MinY + 1) { *insertErrors += 1 return } // compute the dividing x and y positions MidX := (MinX + MaxX) / 2 MidY := (MinY + MaxY) / 2 // insert the new paper in the correct cell if ((paper.y) < MidY) { if ((paper.x) < MidX) { QuadTreeInsertPaper(*q, &(*q).Q0, paper, MinX, MinY, MidX, MidY, insertErrors) } else { QuadTreeInsertPaper(*q, &(*q).Q1, paper, MidX, MinY, MaxX, MidY, insertErrors) } } else { if ((paper.x) < MidX) { QuadTreeInsertPaper(*q, &(*q).Q2, paper, MinX, MidY, MidX, MaxY, insertErrors) } else { QuadTreeInsertPaper(*q, &(*q).Q3, paper, MidX, MidY, MaxX, MaxY, insertErrors) } } } } func (q *QuadTreeNode) ApplyIfWithin(MinX, MinY, MaxX, MaxY int, x, y, rx, ry int, f func(paper *Paper)) { if q == nil { } else if q.Leaf != nil { rx += q.Leaf.radius ry += q.Leaf.radius if x - rx <= q.Leaf.x && q.Leaf.x <= x + rx && y - ry <= q.Leaf.y && q.Leaf.y <= y + ry { f(q.Leaf) } } else if ((MinX <= x - rx && x - rx <= MaxX) || (MinX <= x + rx && x + rx <= MaxX) || (x - rx <= MinX && x + rx >= MaxX)) && 
((MinY <= y - ry && y - ry <= MaxY) || (MinY <= y + ry && y + ry <= MaxY) || (y - ry <= MinY && y + ry >= MaxY)) { MidX := (MinX + MaxX) / 2 MidY := (MinY + MaxY) / 2 q.Q0.ApplyIfWithin(MinX, MinY, MidX, MidY, x, y, rx, ry, f) q.Q1.ApplyIfWithin(MidX, MinY, MaxX, MidY, x, y, rx, ry, f) q.Q2.ApplyIfWithin(MinX, MidY, MidX, MaxY, x, y, rx, ry, f) q.Q3.ApplyIfWithin(MidX, MidY, MaxX, MaxY, x, y, rx, ry, f) } } func (qt *QuadTree) ApplyIfWithin(x, y, rx, ry int, f func(paper *Paper)) { qt.Root.ApplyIfWithin(qt.MinX, qt.MinY, qt.MaxX, qt.MaxY, x, y, rx, ry, f) }
tiles/quadtree.go
0.644784
0.48249
quadtree.go
starcoder
package layers import ( "log" "math" "math/rand" "gitlab.com/akita/dnn/layers" "gitlab.com/akita/dnn/tensor" "gitlab.com/akita/mgpusim/driver" ) // FullyConnectedLayer represents a fully-connected layer. type FullyConnectedLayer struct { InputSize, OutputSize int GPUDriver *driver.Driver GPUCtx *driver.Context TensorOperator *TensorOperator verifyForward bool verifyBackward bool cpuLayer *layers.FullyConnectedLayer parameters *Vector weight *Vector bias *Vector gradients *Vector weightGradients *Vector biasGradients *Vector forwardInput driver.GPUPtr } // NewFullyConnectedLayer creates a new fully connected layer. func NewFullyConnectedLayer( inputSize, outputSize int, driver *driver.Driver, ctx *driver.Context, operator *TensorOperator, ) *FullyConnectedLayer { return &FullyConnectedLayer{ InputSize: inputSize, OutputSize: outputSize, GPUDriver: driver, GPUCtx: ctx, TensorOperator: operator, } } // EnableVerification runs a CPU pass for every forward and backward propagation // though the fully connected layer to make sure the simulator is correct. func (f *FullyConnectedLayer) EnableVerification() { f.verifyForward = true f.verifyBackward = true f.cpuLayer = layers.NewFullyConnectedLayer(f.InputSize, f.OutputSize) } // Randomize initialized the parameters randomly. 
func (f *FullyConnectedLayer) Randomize() { f.allocateMemory() f.initWeights() f.initBias() } func (f *FullyConnectedLayer) initBias() { initBias := make([]float32, f.numBias()) for i := 0; i < f.numBias(); i++ { initBias[i] = rand.Float32()*2 - 1 } f.GPUDriver.MemCopyH2D(f.GPUCtx, f.bias.ptr, initBias) } func (f *FullyConnectedLayer) initWeights() { initWeights := make([]float32, f.numWeights()) for i := 0; i < f.numWeights(); i++ { initWeights[i] = (rand.Float32() - 0.5) / float32(f.OutputSize) * 2 } f.GPUDriver.MemCopyH2D(f.GPUCtx, f.weight.ptr, initWeights) } func (f *FullyConnectedLayer) allocateMemory() { f.allocateParams() f.allocateGradients() } func (f *FullyConnectedLayer) allocateGradients() { sizeOfFloat := 4 gradientsPtr := f.GPUDriver.AllocateMemory( f.GPUCtx, uint64(f.numParameters()*sizeOfFloat)) f.gradients = &Vector{ size: f.numParameters(), ptr: gradientsPtr, GPUDriver: f.GPUDriver, GPUCtx: f.GPUCtx, } f.weightGradients = &Vector{ size: f.numWeights(), ptr: gradientsPtr, GPUDriver: f.GPUDriver, GPUCtx: f.GPUCtx, } f.biasGradients = &Vector{ size: f.numBias(), ptr: gradientsPtr + driver.GPUPtr(f.numWeights()*4), GPUDriver: f.GPUDriver, GPUCtx: f.GPUCtx, } } func (f *FullyConnectedLayer) allocateParams() { sizeOfFloat := 4 parametersPtr := f.GPUDriver.AllocateMemory( f.GPUCtx, uint64(f.numParameters()*sizeOfFloat)) f.parameters = &Vector{ size: f.numParameters(), ptr: parametersPtr, GPUDriver: f.GPUDriver, GPUCtx: f.GPUCtx, } f.weight = &Vector{ size: f.numWeights(), ptr: parametersPtr, GPUDriver: f.GPUDriver, GPUCtx: f.GPUCtx, } f.bias = &Vector{ size: f.numBias(), ptr: parametersPtr + driver.GPUPtr(f.numWeights()*4), GPUDriver: f.GPUDriver, GPUCtx: f.GPUCtx, } } func (f FullyConnectedLayer) numParameters() int { numParameters := f.numWeights() + f.numBias() return numParameters } func (f FullyConnectedLayer) numBias() int { numBias := f.OutputSize return numBias } func (f FullyConnectedLayer) numWeights() int { numWeights := f.InputSize * 
f.OutputSize return numWeights } // Forward performs the forward propagation algorithm. func (f *FullyConnectedLayer) Forward(inputT tensor.Tensor) tensor.Tensor { input := inputT.(*Tensor) output := &Tensor{ driver: f.GPUDriver, ctx: f.GPUCtx, size: []int{input.Size()[0], f.OutputSize}, ptr: f.GPUDriver.AllocateMemory(f.GPUCtx, uint64(input.Size()[0]*f.OutputSize*4)), } f.saveInput(input) weightM := f.weight.AsMatrix(f.InputSize, f.OutputSize) biasM := f.TensorOperator.CreateTensor( []int{inputT.Size()[0], f.OutputSize}) biasData := make([]float32, f.OutputSize) f.GPUDriver.MemCopyD2H(f.GPUCtx, biasData, f.bias.ptr) for i := 0; i < inputT.Size()[0]; i++ { ptr := driver.GPUPtr(uint64(biasM.ptr) + uint64(i*f.OutputSize*4)) f.GPUDriver.MemCopyH2D(f.GPUCtx, ptr, biasData) } f.TensorOperator.Gemm(false, false, inputT.Size()[0], f.OutputSize, f.InputSize, 1.0, 1.0, input, weightM, biasM, output) f.TensorOperator.Free(biasM) f.verifyForwardPass(input, output) return output } func (f *FullyConnectedLayer) verifyForwardPass(input, output *Tensor) { if !f.verifyForward { return } params := f.Parameters().Raw() copy(f.cpuLayer.Parameters().Raw(), params) inputV := input.Vector() cpuInput := &tensor.SimpleTensor{} cpuInput.Init(inputV, input.Size()) cpuOut := f.cpuLayer.Forward(cpuInput).Vector() gpuOut := output.Vector() for i := 0; i < len(cpuOut); i++ { diff := math.Abs(gpuOut[i] - cpuOut[i]) if diff > 1e-5 { log.Panicf("Mismatch at %d, expected %f, but get %f.", i, cpuOut[i], gpuOut[i]) } } log.Printf("Fully connected forward verification passed!") } func (f *FullyConnectedLayer) saveInput(input *Tensor) { if f.forwardInput != 0 { f.GPUDriver.FreeMemory(f.GPUCtx, f.forwardInput) } numElement := input.Size()[0] * input.Size()[1] f.forwardInput = f.GPUDriver.AllocateMemory(f.GPUCtx, uint64(numElement*4)) temp := make([]float32, numElement) f.GPUDriver.MemCopyD2H(f.GPUCtx, temp, input.ptr) f.GPUDriver.MemCopyH2D(f.GPUCtx, f.forwardInput, temp) } // Backward performs the 
backward propagation operation. func (f *FullyConnectedLayer) Backward(input tensor.Tensor) tensor.Tensor { f.resetGradients() f.calculateWeightGradients(input.(*Tensor)) f.calculateBiasGradients(input.(*Tensor)) output := f.calculateInputGradients(input.(*Tensor)) f.verifyBackPass(input.(*Tensor), output) return output } func (f *FullyConnectedLayer) resetGradients() { data := make([]float32, f.numParameters()) f.GPUDriver.MemCopyH2D(f.GPUCtx, f.gradients.ptr, data) } func (f *FullyConnectedLayer) verifyBackPass(input, output *Tensor) { if !f.verifyBackward { return } params := f.Parameters().Raw() copy(f.cpuLayer.Parameters().Raw(), params) inputV := input.Vector() cpuInput := &tensor.SimpleTensor{} cpuInput.Init(inputV, input.Size()) cpuOut := f.cpuLayer.Backward(cpuInput).Vector() gpuOut := output.Vector() for i := 0; i < len(cpuOut); i++ { diff := math.Abs(gpuOut[i] - cpuOut[i]) if diff > 1e-5 { log.Panicf("Mismatch at %d, expected %f, but get %f.", i, cpuOut[i], gpuOut[i]) } } cpuGradient := f.cpuLayer.Gradients().Raw() gpuGradient := f.Gradients().Raw() for i := 0; i < len(cpuGradient); i++ { diff := math.Abs(gpuGradient[i] - cpuGradient[i]) if diff > 1e-3 { log.Panicf("Mismatch at %d, expected %f, but get %f.", i, cpuGradient[i], gpuGradient[i]) } } log.Printf("Fully connected backward verification passed!") } func (f *FullyConnectedLayer) calculateBiasGradients(input tensor.Tensor) { inputV := input.Vector() biasV := f.biasGradients.Raw() for i := 0; i < input.Size()[0]; i++ { for j := 0; j < input.Size()[1]; j++ { index := i*input.Size()[1] + j biasV[j] += inputV[index] } } tempData := make([]float32, f.OutputSize) for i, value := range biasV { tempData[i] = float32(value) } f.GPUDriver.MemCopyH2D(f.GPUCtx, f.biasGradients.ptr, tempData) } func (f *FullyConnectedLayer) calculateWeightGradients(input *Tensor) { size := input.Size() forwardMatrix := &Tensor{ size: []int{size[0], f.InputSize}, ptr: f.forwardInput, } forwardMatrixTrans := 
f.TensorOperator.CreateTensor( []int{f.InputSize, size[0]}) f.TensorOperator.TransposeMatrix(forwardMatrix, forwardMatrixTrans) zeroMatrix := f.TensorOperator.CreateTensor( []int{f.InputSize, f.OutputSize}) f.TensorOperator.Gemm(false, false, f.InputSize, f.OutputSize, size[0], 1.0, 1.0, forwardMatrixTrans, input, zeroMatrix, f.weightGradients.AsMatrix(f.InputSize, f.OutputSize), ) } func (f FullyConnectedLayer) calculateInputGradients(input *Tensor) *Tensor { size := input.Size() output := NewTensor(f.GPUDriver, f.GPUCtx) output.Init( make([]float64, size[0]*f.InputSize), []int{size[0], f.InputSize}) weightMatrix := f.weight.AsMatrix(f.InputSize, f.OutputSize) weightMatrixTrans := f.TensorOperator.CreateTensor( []int{f.OutputSize, f.InputSize}) f.TensorOperator.TransposeMatrix(weightMatrix, weightMatrixTrans) zeroMatrix := NewTensor(f.GPUDriver, f.GPUCtx) zeroMatrix.Init( make([]float64, size[0]*f.InputSize), []int{size[0], f.InputSize}, ) f.TensorOperator.Gemm(false, false, size[0], f.InputSize, f.OutputSize, 1.0, 1.0, input, weightMatrixTrans, zeroMatrix, output) f.TensorOperator.Free(weightMatrixTrans) f.TensorOperator.Free(zeroMatrix) return output } // Parameters returns the parameters of the layer. func (f FullyConnectedLayer) Parameters() tensor.Vector { return f.parameters } // Gradients returns the gradients calculated by the last backward propagation. func (f FullyConnectedLayer) Gradients() tensor.Vector { return f.gradients }
benchmarks/dnn/layers/fullyconnected.go
0.720368
0.487246
fullyconnected.go
starcoder
package secio import ( "unsafe" mol "github.com/driftluo/tentacle-go/secio/mol" ) // intoBytes convert to molecule bytes func intoBytes(b []byte) mol.Bytes { tmp := intoByteslice(b) return mol.NewBytesBuilder().Set(tmp).Build() } // intoString convert to molecule string func intoString(s string) mol.String { b := Str2bytes(s) tmp := intoByteslice(b) return mol.NewStringBuilder().Set(tmp).Build() } // intoByteslice convert to molecule byte slice func intoByteslice(b []byte) []mol.Byte { tmp := make([]mol.Byte, len(b)) for i, v := range b { tmp[i] = mol.NewByte(v) } return tmp } // Str2bytes convert to bytes in place // https://www.cnblogs.com/shuiyuejiangnan/p/9707066.html func Str2bytes(s string) []byte { x := (*[2]uintptr)(unsafe.Pointer(&s)) h := [3]uintptr{x[0], x[1], x[1]} return *(*[]byte)(unsafe.Pointer(&h)) } // Bytes2str convert to string in place func Bytes2str(b []byte) string { return *(*string)(unsafe.Pointer(&b)) } // propose is handshake propose context type propose struct { rand []byte pubkey []byte exchange string ciphers string hashes string } func (p propose) encode() []byte { rand := intoBytes(p.rand) pubkey := intoBytes(p.pubkey) exchange := intoString(p.exchange) ciphers := intoString(p.ciphers) hashes := intoString(p.hashes) pr := mol.NewProposeBuilder().Rand(rand).Pubkey(pubkey).Exchanges(exchange).Ciphers(ciphers).Hashes(hashes).Build() return pr.AsSlice() } func decodeToPropose(b []byte) (*propose, error) { pr, err := mol.ProposeFromSlice(b, true) if err != nil { return nil, err } propose := new(propose) propose.rand = pr.Rand().RawData() propose.pubkey = pr.Pubkey().RawData() propose.exchange = Bytes2str(pr.Exchanges().RawData()) propose.ciphers = Bytes2str(pr.Ciphers().RawData()) propose.hashes = Bytes2str(pr.Hashes().RawData()) return propose, nil } type exchange struct { epubkey []byte signature []byte } func (e exchange) encode() []byte { epub := intoBytes(e.epubkey) sig := intoBytes(e.signature) ex := 
mol.NewExchangeBuilder().Epubkey(epub).Signature(sig).Build() return ex.AsSlice() } func decodeToExchange(b []byte) (*exchange, error) { ex, err := mol.ExchangeFromSlice(b, true) if err != nil { return nil, err } exchange := new(exchange) exchange.epubkey = ex.Epubkey().RawData() exchange.signature = ex.Signature().RawData() return exchange, nil }
secio/handshake_struct.go
0.592077
0.423398
handshake_struct.go
starcoder
package mdp // Model is the model parameters of Markov Descision Processes of your problem. type Model struct { StateOf map[int]*State states []State actions []Action transitions []Transition } // NumStates returns a number of states in MDP. func (m *Model) NumStates() int { return len(m.states) } // NumActions returns a number of actions in MDP. func (m *Model) NumActions() int { return len(m.actions) } // UpdateReward update the reward of all actions. func (m *Model) UpdateReward(reward []float64) bool { if len(reward) != m.NumActions() { return false } for i := range m.actions { if m.actions[i].transition == nil { continue } m.actions[i].transition.r = reward[i] } return true } // State represents a state of Model. type State struct { id int index int actions []*Action transitions []*Transition } // Index returns the array index of the state. func (s *State) Index() int { return s.index } // Action represents an action of Model. type Action struct { index int state *State transition *Transition } // Index returns the array index of the action. func (a *Action) Index() int { return a.index } // Transition represents a action-state transition of Model. // Since the Model is deterministic, an action corresponds to the state one-to-one. // If stochastic Model, Action has multiple transitions and their probability. type Transition struct { action *Action state *State r float64 // reward } // StateTransition is a stete-state transition in a deterministic Model. type StateTransition struct { FromID, ToID int Reward float64 // reward } // NewModel constructs a Model instance and returns a pointer to it. 
func NewModel(stateIDs []int, stateTransitions []StateTransition) *Model { // construct stateID => stateIdx Map StateOf := make(map[int]*State) states := make([]State, len(stateIDs)) for i, id := range stateIDs { states[i] = State{ id: id, index : i, actions: make([]*Action, 0), transitions: make([]*Transition, 0), } StateOf[id] = &states[i] } actions := make([]Action, len(stateTransitions)) transitions := make([]Transition, len(stateTransitions)) for i, st := range stateTransitions { toState, ok := StateOf[st.ToID] if !ok { continue } state, ok := StateOf[st.FromID] if !ok { continue } actions[i].state = state actions[i].index = i actions[i].transition = &transitions[i] transitions[i].state = toState transitions[i].action = &actions[i] transitions[i].r = st.Reward state.actions = append(state.actions, &actions[i]) toState.transitions = append(toState.transitions, &transitions[i]) } m := &Model{ states: states, actions: actions, transitions: transitions, StateOf: StateOf, } return m } // ActionByID returns the action satisfied with a given state transition. func (m *Model) ActionByID(fromStateID, toStateID int) (a *Action, ok bool) { fromState, ok := m.StateOf[fromStateID] if !ok { return } toState, ok := m.StateOf[toStateID] if !ok { return } for _, action := range fromState.actions { if action.transition.state == toState { a = action ok = true return } } return }
mdp/model.go
0.859443
0.581541
model.go
starcoder
package mocks import ( "github.com/sasalatart/batcoms/domain/wikiactors" ) // WikiFaction returns a faction instance of wikiactors.Actor that may be used for testing purposes func WikiFaction() wikiactors.Actor { return wikiactors.Actor{ Kind: wikiactors.FactionKind, ID: 21418258, URL: "https://en.wikipedia.org/wiki/French_First_Empire", Name: "First French Empire", Description: "Empire of Napoleon I of France between 1804–1815", Extract: "The First French Empire, officially the French Empire or the Napoleonic Empire, was the empire of Napoleon Bonaparte of France and the dominant power in much of continental Europe at the beginning of the 19th century. Although France had already established an overseas colonial empire beginning in the 17th century, the French state had remained a kingdom under the Bourbons and a republic after the French Revolution. Historians refer to Napoleon's regime as the First Empire to distinguish it from the restorationist Second Empire (1852–1870) ruled by his nephew Napoleon III.", } } // WikiFaction2 returns a faction instance of wikiactors.Actor that may be used for testing purposes func WikiFaction2() wikiactors.Actor { return wikiactors.Actor{ Kind: wikiactors.FactionKind, ID: 20611504, URL: "https://en.wikipedia.org/wiki/Imperial_Russia", Name: "Russian Empire", Description: "Empire in Eurasia and North America", Extract: "The Russian Empire was an empire that extended across Eurasia and North America from 1721, following the end of the Great Northern War, until the Republic was proclaimed by the Provisional Government that took power after the February Revolution of 1917. The third-largest empire in history, at its greatest extent stretching over three continents, Europe, Asia, and North America, the Russian Empire was surpassed in size only by the British and Mongol empires. 
The rise of the Russian Empire coincided with the decline of neighboring rival powers: the Swedish Empire, the Polish–Lithuanian Commonwealth, Persia and the Ottoman Empire. It played a major role in 1812–1814 in defeating Napoleon's ambitions to control Europe and expanded to the west and south.", } } // WikiFaction3 returns a faction instance of wikiactors.Actor that may be used for testing purposes func WikiFaction3() wikiactors.Actor { return wikiactors.Actor{ Kind: wikiactors.FactionKind, ID: 266894, URL: "https://en.wikipedia.org/wiki/Austrian_Empire", Name: "Austrian Empire", Description: "monarchy in Central Europe between 1804 and 1867", Extract: "The Austrian Empire was a Central European multinational great power from 1804 to 1867, created by proclamation out of the realms of the Habsburgs. During its existence, it was the third most populous empire after the Russian Empire and the United Kingdom in Europe. Along with Prussia, it was one of the two major powers of the German Confederation. Geographically, it was the third largest empire in Europe after the Russian Empire and the First French Empire. Proclaimed in response to the First French Empire, it partially overlapped with the Holy Roman Empire until the latter's dissolution in 1806.", } } // WikiCommander returns a commander instance of wikiactors.Actor that may be used for testing purposes func WikiCommander() wikiactors.Actor { return wikiactors.Actor{ Kind: wikiactors.CommanderKind, ID: 69880, URL: "https://en.wikipedia.org/wiki/Emperor_Napoleon_I", Name: "Napoleon", Description: "19th century French military leader, strategist, and politician", Extract: "<NAME>, born <NAME>, was a French statesman and military leader who became famous as an artillery commander during the French Revolution. He led many successful campaigns during the French Revolutionary Wars and was Emperor of the French as Napoleon I from 1804 until 1814 and again briefly in 1815 during the Hundred Days. 
Napoleon dominated European and global affairs for more than a decade while leading France against a series of coalitions during the Napoleonic Wars. He won many of these wars and a vast majority of his battles, building a large empire that ruled over much of continental Europe before its final collapse in 1815. He is considered one of the greatest commanders in history, and his wars and campaigns are studied at military schools worldwide. Napoleon's political and cultural legacy has made him one of the most celebrated and controversial leaders in human history.", } } // WikiCommander2 returns a commander instance of wikiactors.Actor that may be used for testing purposes func WikiCommander2() wikiactors.Actor { return wikiactors.Actor{ Kind: wikiactors.CommanderKind, ID: 27126603, URL: "https://en.wikipedia.org/wiki/Alexander_I_of_Russia", Name: "<NAME>", Description: "Emperor of Russia", Extract: "<NAME> was the Emperor of Russia (Tsar) between 1801 and 1825. He was the eldest son of <NAME> and <NAME>rttemberg. Alexander was the first king of Congress Poland, reigning from 1815 to 1825, as well as the first Russian Grand Duke of Finland, reigning from 1809 to 1825.", } } // WikiCommander3 returns a commander instance of wikiactors.Actor that may be used for testing purposes func WikiCommander3() wikiactors.Actor { return wikiactors.Actor{ Kind: wikiactors.CommanderKind, ID: 251000, URL: "https://en.wikipedia.org/wiki/Mikhail_Illarionovich_Kutuzov", Name: "<NAME>", Description: "Field Marshal of the Russian Empire", Extract: "<NAME> a Field Marshal of the Russian Empire. He served as one of the finest military officers and diplomats of Russia under the reign of three Romanov Tsars: <NAME>, <NAME> and <NAME>. His military career was closely associated with the rising period of Russia from the end of the 18th century to the beginning of the 19th century. 
Kutuzov is considered to have been one of the best Russian generals.", } } // WikiCommander4 returns a commander instance of wikiactors.Actor that may be used for testing purposes func WikiCommander4() wikiactors.Actor { return wikiactors.Actor{ Kind: wikiactors.CommanderKind, ID: 11551, URL: "https://en.wikipedia.org/wiki/Francis_II,_Holy_Roman_Emperor", Name: "<NAME>, Holy Roman Emperor", Description: "The last Holy Roman Emperor and first Emperor of Austria", Extract: "Francis II was the last Holy Roman Emperor, ruling from 1792 until 6 August 1806, when he dissolved the Holy Roman Empire after the decisive defeat at the hands of the First French Empire led by Napoleon at the Battle of Austerlitz. In 1804, he had founded the Austrian Empire and became Francis I, the first Emperor of Austria, ruling from 1804 to 1835, so later he was named the first Doppelkaiser in history.. For the two years between 1804 and 1806, Francis used the title and style by the Grace of God elected Roman Emperor, ever Augustus, hereditary Emperor of Austria and he was called the Emperor of both the Holy Roman Empire and Austria. He was also Apostolic King of Hungary, Croatia and Bohemia as Francis I. He also served as the first president of the German Confederation following its establishment in 1815.", } } // WikiCommander5 returns a commander instance of wikiactors.Actor that may be used for testing purposes func WikiCommander5() wikiactors.Actor { return wikiactors.Actor{ Kind: wikiactors.CommanderKind, ID: 14092123, URL: "https://en.wikipedia.org/wiki/Franz_von_Weyrother", Name: "<NAME>", Description: "Austrian general", Extract: "<NAME> was an Austrian staff officer and general who fought during the French Revolutionary Wars and the Napoleonic Wars. He drew up the plans for the disastrous defeats at the Battle of Rivoli, Battle of Hohenlinden and the Battle of Austerlitz, in which the Austrian army was defeated by <NAME> twice and Jean Moreau once.", } }
mocks/wikiactors.go
0.526099
0.559832
wikiactors.go
starcoder
package kata import "sort" // Converts number into digits. // Weight of digit i is i**10. func number2digits(n int64) []int8 { d := make([]int8, 0, 20) if n < 0 { panic("negative number") } for n > 0 { d = append(d, int8(n%10)) n /= 10 } return d } // Converts digits into number value. func digits2number(digits []int8) (accu int64) { for i := len(digits) - 1; i >= 0; i-- { accu = accu*10 + int64(digits[i]) } return } type changedNumber struct { digit []int8 // changed number src int // cut index in the array digit dst int // paste index in the array digit } // Checks if the change results in a new minimum. // An empty len(digit)==0 mimimum as input is allowed. func checkChange(old changedNumber, digit []int8, src, dst int) changedNumber { test := changedNumber{digit: make([]int8, len(digit)), src: src, dst: dst} var i, j int for { if i == src { i++ } if j == dst { j++ } if i == len(digit) || j == len(digit) { break } test.digit[j] = digit[i] i++ j++ } test.digit[dst] = digit[src] if len(old.digit) == 0 { // No other number to compare return test } // compare the two changes by number value and index value var delta int8 for i := len(digit) - 1; i >= 0 && delta == 0; i-- { delta = test.digit[i] - old.digit[i] } if delta < 0 { return test } if delta > 0 { return old } if test.src > old.src { return test } if old.src > test.src { return old } if test.dst > old.dst { return test } return old } // Find the smallest on the reduced number. // The smallest number will start with min, the smallest digit. // The returned src, dst are the indices in the array d to cut and insert. func smallest(digit []int8, min int8) changedNumber { topD := len(digit) - 1 var minimum changedNumber // Check option to cut top digit if topD > 0 && digit[topD-1] == min { dst := topD - 1 for dst > 0 && digit[dst-1] <= digit[topD] { dst-- } minimum = checkChange(minimum, digit, topD, dst) } // Check all options to cut a digit with minimal value and insert it at top. 
for idx := 0; idx < topD; idx++ { if digit[idx] == min { minimum = checkChange(minimum, digit, idx, topD) } } return minimum } func Smallest(n int64) []int64 { // d = digits in the order of the given number d := number2digits(n) lastD := len(d) - 1 // s = digits in order of smallest possible number by permutations s := make([]int8, len(d)) copy(s, d) sort.Slice(s, func(i, j int) bool { return s[i] > s[j] }) // strip the topmost digits already which are already minimal top := lastD for top > 0 && d[top] == s[top] { top-- } if top == 0 { // the number is alread minimal return []int64{n, 0, 0} } // minimise the reduced number optimum := smallest(d[:top+1], s[top]) copy(d[:top+1], optimum.digit) // Optimise the destination index without changing the number // Same topmost digits could be used to insert on higher index for optimum.dst < lastD && d[optimum.dst+1] == d[optimum.dst] { optimum.dst++ } return []int64{digits2number(d), int64(lastD - optimum.src), int64(lastD - optimum.dst)} }
5_kyu/Find_the_smallest.go
0.616243
0.497009
Find_the_smallest.go
starcoder
package ilm import ( "strconv" ) const ( // rule ID field column width in table output idColumnWidth int = 16 // rule prefix field column width in table output prefixColumnWidth int = 16 // StatusColumnWidth column width in table output statusColumnWidth int = 12 // ExpiryColumnWidth column width in table output expiryColumnWidth int = 8 // ExpiryDatesColumnWidth column width in table output expiryDatesColumnWidth int = 14 // TagsColumnWidth column width in table output tagsColumnWidth int = 18 // TransitionColumnWidth column width in table output transitionColumnWidth int = 14 // TransitionDateColumnWidth column width in table output transitionDateColumnWidth int = 18 // StorageClassColumnWidth column width in table output storageClassColumnWidth int = 18 ) const ( leftAlign int = 1 centerAlign int = 2 rightAlign int = 3 ) // Labels used for display. const ( idLabel string = "ID" prefixLabel string = "Prefix" statusLabel string = "Enabled " expiryLabel string = "Expiry" expiryDatesLabel string = "Date/Days " tagLabel string = "Tags" transitionLabel string = "Transition" transitionDateLabel string = "Date/Days " storageClassLabel string = "Storage-Class " ) // Keys to be used in map structure which stores the columns to be displayed. const ( statusLabelKey string = "Enabled" storageClassLabelKey string = "Storage-Class" expiryDatesLabelKey string = "Expiry-Dates" transitionDatesLabelKey string = "Transition-Date" ) // Some cell values const ( tickCell string = "\u2713 " crossTickCell string = "\u2717 " blankCell string = " " ) // Used in tags. Ex: --tags "key1=value1&key2=value2&key3=value3" const ( tagSeperator string = "&" keyValSeperator string = "=" ) // Represents information going into a single cell in the table. 
type tableCellInfo struct {
	label       string   // single-value cell text
	multLabels  []string // multi-value cell text; one extra row per value (used for tags)
	labelKey    string   // key into the rowCheck column-index map
	columnWidth int
	align       int // leftAlign / centerAlign / rightAlign
}

// Determines what columns need to be shown
type showDetails struct {
	allAvailable bool
	expiry       bool
	transition   bool
	minimum      bool
}

// PopulateILMDataForDisplay based on showDetails determined by user input, populate the ILM display
// table with information. Table is constructed row-by-row. Headers are first, then the rest of the rows.
func PopulateILMDataForDisplay(ilmCfg LifecycleConfiguration, rowCheck *map[string]int, alignedHdrLabels *[]string,
	cellDataNoTags *[][]string, cellDataWithTags *[][]string, tagRows *map[string][]string,
	showAll, showMin, showExpiry, showTransition bool) {
	// We need the different column headers and their respective column index
	// where they appear in a map data-structure format.
	// [Column Label] -> [Column Number]
	*rowCheck = make(map[string]int)
	// For rows with tags only tags are shown. Rest of the cells are empty (blanks in full cell length)
	*tagRows = make(map[string][]string)

	showOpts := showDetails{
		allAvailable: showAll,
		minimum:      showMin,
		expiry:       showExpiry,
		transition:   showTransition,
	}

	getColumns(ilmCfg, *rowCheck, alignedHdrLabels, showOpts)
	getILMShowDataWithoutTags(cellDataNoTags, *rowCheck, ilmCfg, showOpts)
	getILMShowDataWithTags(cellDataWithTags, *tagRows, *rowCheck, ilmCfg, showOpts)
}

// Text inside the table cell, padded/aligned to columnWidth.
// An unknown align value yields a single blank cell.
func getAlignedText(label string, align int, columnWidth int) string {
	cellLabel := blankCell
	switch align {
	case leftAlign:
		cellLabel = getLeftAligned(label, columnWidth)
	case centerAlign:
		cellLabel = getCenterAligned(label, columnWidth)
	case rightAlign:
		cellLabel = getRightAligned(label, columnWidth)
	}
	return cellLabel
}

// GetColumnWidthTable We will use this map of Header Labels -> Column width
func getILMColumnWidthTable() map[string]int {
	colWidth := make(map[string]int)

	colWidth[idLabel] = idColumnWidth
	colWidth[prefixLabel] = prefixColumnWidth
	colWidth[statusLabelKey] = statusColumnWidth
	colWidth[expiryLabel] = expiryColumnWidth
	colWidth[expiryDatesLabelKey] = expiryDatesColumnWidth
	colWidth[transitionLabel] = transitionColumnWidth
	colWidth[transitionDatesLabelKey] = transitionDateColumnWidth
	colWidth[storageClassLabelKey] = storageClassColumnWidth
	colWidth[tagLabel] = tagsColumnWidth

	return colWidth
}

// checkAddTableCellRows multiple rows created by filling up each cell of the table.
// Multiple rows are required for display of data with tags.
// Each 'key:value' pair is shown in 1 row and the rest of it is cells populated with blanks.
func checkAddTableCellRows(rowArr *[]string, rowCheck map[string]int, showOpts showDetails,
	cellInfo tableCellInfo, ruleID string, newRows map[string][]string) {
	var cellLabel string
	if showOpts.minimum {
		return
	}
	multLth := len(cellInfo.multLabels)
	// No multi-values to render: blank the cell and stop.
	if cellInfo.label != "" || multLth == 0 {
		if colIdx, ok := rowCheck[cellInfo.labelKey]; ok {
			(*rowArr)[colIdx] = getCenterAligned(blankCell, cellInfo.columnWidth)
		}
		return
	}
	colWidth := getILMColumnWidthTable()
	// First value goes into the current row.
	if colIdx, ok := rowCheck[cellInfo.labelKey]; ok {
		// NOTE(review): this := shadows the outer cellLabel declared above;
		// harmless here, but easy to misread.
		cellLabel := cellInfo.multLabels[0]
		if len(cellInfo.multLabels[0]) > (cellInfo.columnWidth - 3) { // 2 dots & 1 space for left-alignment
			cellLabel = cellLabel[:(cellInfo.columnWidth-5)] + ".."
		}
		(*rowArr)[colIdx] = getLeftAligned(cellLabel, cellInfo.columnWidth)
	}
	// Remaining values each get their own blank-padded row, keyed by ruleID+index.
	for index := 1; index < multLth; index++ {
		row := make([]string, len(rowCheck))
		for k, v := range rowCheck {
			if k == cellInfo.labelKey {
				cellLabel = cellInfo.multLabels[index]
				if len(cellInfo.multLabels[index]) > (cellInfo.columnWidth - 3) {
					cellLabel = cellLabel[:(cellInfo.columnWidth-5)] + ".."
				}
				row[v] = getLeftAligned(cellLabel, cellInfo.columnWidth)
			} else {
				var width int
				var ok bool
				// Fallback width 4 for columns missing from the width table.
				if width, ok = colWidth[k]; !ok {
					width = 4
				}
				row[v] = getCenterAligned(blankCell, width)
			}
		}
		newRows[ruleID+strconv.Itoa(index-1)] = row
	}
}

// The right kind of tick is returned. Cross-tick if expiry is not set.
func getExpiryTick(rule LifecycleRule) string {
	expiryTick := crossTickCell
	expiryDateSet := rule.Expiration != nil && rule.Expiration.ExpirationDate != nil && !rule.Expiration.ExpirationDate.IsZero()
	expirySet := rule.Expiration != nil && (expiryDateSet || rule.Expiration.ExpirationInDays > 0)
	if expirySet {
		expiryTick = tickCell
	}
	return expiryTick
}

// The right kind of tick is returned. Cross-tick if status is 'Disabled' & tick if status is 'Enabled'.
func getStatusTick(rule LifecycleRule) string {
	statusTick := crossTickCell
	// statusLabelKey == "Enabled"; any other Status string yields a cross.
	if rule.Status == statusLabelKey {
		statusTick = tickCell
	}
	return statusTick
}

// Expiry value for display: either the date as "2 Jan 2006" (day, 3-letter
// month, year) or "<n> day(s)". Blank when no expiry is configured.
func getExpiryDateVal(rule LifecycleRule) string {
	expiryDate := blankCell
	expirySet := (rule.Expiration != nil)
	expiryDateSet := expirySet && rule.Expiration.ExpirationDate != nil && !rule.Expiration.ExpirationDate.IsZero()
	if expiryDateSet {
		expiryDate = strconv.Itoa(rule.Expiration.ExpirationDate.Day()) + " " +
			rule.Expiration.ExpirationDate.Month().String()[0:3] + " " +
			strconv.Itoa(rule.Expiration.ExpirationDate.Year())
	} else if expirySet && rule.Expiration.ExpirationInDays > 0 {
		expiryDate = strconv.Itoa(rule.Expiration.ExpirationInDays) + " day(s)"
	}
	return expiryDate
}

// Cross-tick if Transition is not set.
func getTransitionTick(rule LifecycleRule) string {
	transitionSet := rule.Transition != nil
	transitionDateSet := transitionSet && ((rule.Transition.TransitionDate != nil &&
		!rule.Transition.TransitionDate.IsZero()) ||
		rule.Transition.TransitionInDays > 0)
	if !transitionSet || !transitionDateSet {
		return crossTickCell
	}
	return tickCell
}

// Transition value for display: either the date as "2 Jan 2006" or "<n> day(s)".
func getTransitionDate(rule LifecycleRule) string {
	transitionDate := blankCell
	transitionSet := (rule.Transition != nil)
	transitionDateSet := transitionSet && (rule.Transition.TransitionDate != nil &&
		!rule.Transition.TransitionDate.IsZero())
	transitionDaySet := transitionSet && (rule.Transition.TransitionInDays > 0)
	if transitionDateSet {
		transitionDate = strconv.Itoa(rule.Transition.TransitionDate.Day()) + " " +
			rule.Transition.TransitionDate.Month().String()[0:3] + " " +
			strconv.Itoa(rule.Transition.TransitionDate.Year())
	} else if transitionDaySet {
		transitionDate = strconv.Itoa(rule.Transition.TransitionInDays) + " day(s)"
	}
	return transitionDate
}

// Storage class name for transition. Blank when no transition storage class is set.
func getStorageClassName(rule LifecycleRule) string {
	storageClass := blankCell
	transitionSet := (rule.Transition != nil)
	storageClassAvail := transitionSet && (rule.Transition.StorageClass != "")
	if storageClassAvail {
		storageClass = rule.Transition.StorageClass
	}
	return storageClass
}

// Array of Tag strings, each in key:value format.
// Tags come from rule.TagFilters, falling back to RuleFilter.And.Tags.
func getTagArr(rule LifecycleRule) []string {
	tagArr := rule.TagFilters
	tagLth := len(rule.TagFilters)
	if len(rule.TagFilters) == 0 && rule.RuleFilter != nil && rule.RuleFilter.And != nil {
		tagLth = len(rule.RuleFilter.And.Tags)
		tagArr = rule.RuleFilter.And.Tags
	}
	tagCellArr := make([]string, len(tagArr))
	for tagIdx := 0; tagIdx < tagLth; tagIdx++ {
		tagCellArr[tagIdx] = (tagArr[tagIdx].Key + ":" + tagArr[tagIdx].Value)
	}
	return tagCellArr
}

// Add single row table cell - non-header.
func checkAddTableCell(rowArr *[]string, rowCheck map[string]int, cellInfo tableCellInfo) {
	if rowArr == nil {
		return
	}
	// Lazily size the row to the number of visible columns.
	if len(*rowArr) == 0 && len(rowCheck) > 0 {
		*rowArr = make([]string, len(rowCheck))
	}
	if colIdx, ok := rowCheck[cellInfo.labelKey]; ok {
		// Pad odd-length labels by one space (keeps center alignment symmetric).
		if len(cellInfo.label)%2 != 0 && len(cellInfo.label) < cellInfo.columnWidth {
			cellInfo.label += " "
		} else if len(cellInfo.label) > (cellInfo.columnWidth - 2) {
			// 2 dots to indicate text longer than column width
			// NOTE(review): slices to width-6 here but width-5 in
			// checkAddTableCellRows — confirm which truncation is intended.
			cellInfo.label = cellInfo.label[:(cellInfo.columnWidth-6)] + ".."
		}
		(*rowArr)[colIdx] = getAlignedText(cellInfo.label, cellInfo.align, cellInfo.columnWidth)
	}
}

// GetILMShowDataWithoutTags - Without tags
// Builds one table row per rule that has no tags, honoring the expiry /
// transition filters in showOpts.
func getILMShowDataWithoutTags(cellInfo *[][]string, rowCheck map[string]int, info LifecycleConfiguration, showOpts showDetails) {
	*cellInfo = make([][]string, 0)
	count := 0
	for index := 0; index < len(info.Rules); index++ {
		rule := info.Rules[index]
		showExpiry := (rule.Expiration != nil) &&
			((rule.Expiration.ExpirationDate != nil && !rule.Expiration.ExpirationDate.IsZero()) ||
				rule.Expiration.ExpirationInDays > 0)
		transitionSet := (rule.Transition != nil) &&
			((rule.Transition.TransitionDate != nil && !rule.Transition.TransitionDate.IsZero()) ||
				rule.Transition.TransitionInDays > 0)
		skipExpTran := (showOpts.expiry && !showExpiry) || (showOpts.transition && !transitionSet)
		if skipExpTran {
			continue
		}
		// Rules with tags are rendered by getILMShowDataWithTags instead.
		tagPresent := (rule.RuleFilter != nil) && (rule.RuleFilter.And != nil)
		if len(rule.TagFilters) > 0 || (tagPresent && len(rule.RuleFilter.And.Tags) > 0) {
			continue
		}
		*cellInfo = append(*cellInfo, make([]string, 0))
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: rule.ID, labelKey: idLabel, columnWidth: idColumnWidth, align: leftAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getPrefixVal(rule), labelKey: prefixLabel, columnWidth: prefixColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getStatusTick(rule), labelKey: statusLabelKey, columnWidth: statusColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getExpiryTick(rule), labelKey: expiryLabel, columnWidth: expiryColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getExpiryDateVal(rule), labelKey: expiryDatesLabelKey, columnWidth: expiryDatesColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getTransitionTick(rule), labelKey: transitionLabel, columnWidth: transitionColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getTransitionDate(rule), labelKey: transitionDatesLabelKey, columnWidth: transitionDateColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getStorageClassName(rule), labelKey: storageClassLabelKey, columnWidth: storageClassColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: blankCell, labelKey: tagLabel, columnWidth: tagsColumnWidth, align: centerAlign})
		count++
	}
}

// GetILMShowDataWithTags Just the data with extra rows for extra tags
func getILMShowDataWithTags(cellInfo *[][]string, newRows map[string][]string, rowCheck map[string]int, info LifecycleConfiguration, showOpts showDetails) {
	*cellInfo = make([][]string, 0)
	count := 0
	for index := 0; index < len(info.Rules); index++ {
		rule := info.Rules[index]
		showExpiry := (rule.Expiration != nil) &&
			((rule.Expiration.ExpirationDate != nil && !rule.Expiration.ExpirationDate.IsZero()) ||
				rule.Expiration.ExpirationInDays > 0)
		transitionSet := (rule.Transition != nil) &&
			((rule.Transition.TransitionDate != nil && !rule.Transition.TransitionDate.IsZero()) ||
				rule.Transition.TransitionInDays > 0)
		skipExpTran := (showOpts.expiry && !showExpiry) || (showOpts.transition && !transitionSet)
		if skipExpTran {
			continue
		}
		// Only rules that actually carry tags belong in this view.
		if len(getTagArr(rule)) == 0 {
			continue
		}
		*cellInfo = append(*cellInfo, make([]string, 0))
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: rule.ID, labelKey: idLabel, columnWidth: idColumnWidth, align: leftAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getPrefixVal(rule), labelKey: prefixLabel, columnWidth: prefixColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getStatusTick(rule), labelKey: statusLabelKey, columnWidth: statusColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getExpiryTick(rule), labelKey: expiryLabel, columnWidth: expiryColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getExpiryDateVal(rule), labelKey: expiryDatesLabelKey, columnWidth: expiryDatesColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getTransitionTick(rule), labelKey: transitionLabel, columnWidth: transitionColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getTransitionDate(rule), labelKey: transitionDatesLabelKey, columnWidth: transitionDateColumnWidth, align: centerAlign})
		checkAddTableCell(&((*cellInfo)[count]), rowCheck,
			tableCellInfo{label: getStorageClassName(rule), labelKey: storageClassLabelKey, columnWidth: storageClassColumnWidth, align: centerAlign})
		// One extra row per additional tag, collected into newRows.
		checkAddTableCellRows(&((*cellInfo)[count]), rowCheck, showOpts,
			tableCellInfo{multLabels: getTagArr(rule), label: "", labelKey: tagLabel, columnWidth: tagsColumnWidth, align: leftAlign},
			rule.ID, newRows)
		count++
	}
}

// getPrefixVal returns the rule prefix, checking the three places it may live.
func getPrefixVal(rule LifecycleRule) string {
	prefixVal := ""
	switch {
	case rule.Prefix != "":
		prefixVal = rule.Prefix
	case rule.RuleFilter != nil && rule.RuleFilter.And != nil && rule.RuleFilter.And.Prefix != "":
		prefixVal = rule.RuleFilter.And.Prefix
	case rule.RuleFilter != nil && rule.RuleFilter.Prefix != "":
		prefixVal = rule.RuleFilter.Prefix
	}
	return prefixVal
}

// showExpiryDetails reports whether the expiry date/days column applies to this rule.
func showExpiryDetails(rule LifecycleRule, showOpts showDetails) bool {
	if showOpts.allAvailable {
		return true
	}
	expirySet := (rule.Expiration != nil) &&
		((rule.Expiration.ExpirationDate != nil && !rule.Expiration.ExpirationDate.IsZero()) ||
			rule.Expiration.ExpirationInDays > 0)
	return (expirySet && (showOpts.allAvailable || showOpts.expiry))
}

// showExpTick reports whether the expiry tick column is displayed at all.
func showExpTick(showOpts showDetails) bool {
	return showOpts.allAvailable || showOpts.minimum
}

// showTransitionTick reports whether the transition tick column is displayed at all.
func showTransitionTick(showOpts showDetails) bool {
	return (showOpts.allAvailable || showOpts.minimum)
}

// showTransitionDetails reports whether the transition date/storage-class columns apply to this rule.
func showTransitionDetails(rule LifecycleRule, showOpts showDetails) bool {
	if showOpts.allAvailable {
		return true
	}
	transitionSet := (rule.Transition != nil) &&
		((rule.Transition.TransitionDate != nil && !rule.Transition.TransitionDate.IsZero()) ||
			rule.Transition.TransitionInDays > 0)
	transitionDetailsShow := (showOpts.allAvailable || showOpts.transition)
	return transitionSet && transitionDetailsShow
}

// showTags reports whether the tags column is displayed for this rule.
func showTags(rule LifecycleRule, showOpts showDetails) bool {
	if showOpts.minimum {
		return false
	}
	tagSet := showOpts.allAvailable ||
		(len(rule.TagFilters) > 0 ||
			(rule.RuleFilter != nil && rule.RuleFilter.And != nil && (len(rule.RuleFilter.And.Tags) > 0)))
	return tagSet
}

// getColumns decides, from all rules combined, which columns are visible,
// assigns each a column index in rowCheck, and appends the aligned header text.
// The Tags column, once added, is kept as the last column by incColIdx.
func getColumns(info LifecycleConfiguration, rowCheck map[string]int, alignedHdrLabels *[]string, showOpts showDetails) {
	tagIn := false // Keep tag in the end
	colIdx := 0
	colWidthTbl := getILMColumnWidthTable()
	// incColIdx returns the next free column index; once the Tags column
	// exists, new columns take its slot and Tags is shifted right.
	incColIdx := func() int {
		if tagIn {
			colIdx = rowCheck[tagLabel]
			rowCheck[tagLabel] = colIdx + 1
		} else {
			colIdx++
		}
		return colIdx
	}
	for index := 0; index < len(info.Rules); index++ {
		rule := info.Rules[index]
		_, ok := rowCheck[idLabel]
		if !ok {
			// ID & Prefix are shown always.
			rowCheck[idLabel] = colIdx
			(*alignedHdrLabels) = append((*alignedHdrLabels), getAlignedText(idLabel, centerAlign, colWidthTbl[idLabel]))
		}
		_, ok = rowCheck[prefixLabel]
		if !ok {
			// ID & Prefix are shown always.
			rowCheck[prefixLabel] = incColIdx()
			(*alignedHdrLabels) = append((*alignedHdrLabels), getAlignedText(prefixLabel, centerAlign, colWidthTbl[prefixLabel]))
		}
		_, ok = rowCheck[statusLabelKey]
		if !ok {
			rowCheck[statusLabelKey] = incColIdx()
			(*alignedHdrLabels) = append((*alignedHdrLabels), getAlignedText(statusLabel, centerAlign, colWidthTbl[statusLabelKey]))
		}
		_, ok = rowCheck[expiryLabel]
		if !ok && showExpTick(showOpts) {
			rowCheck[expiryLabel] = incColIdx()
			(*alignedHdrLabels) = append((*alignedHdrLabels), getAlignedText(expiryLabel, centerAlign, colWidthTbl[expiryLabel]))
		}
		_, ok = rowCheck[expiryDatesLabelKey]
		if !ok && showExpiryDetails(rule, showOpts) {
			rowCheck[expiryDatesLabelKey] = incColIdx()
			(*alignedHdrLabels) = append((*alignedHdrLabels), getAlignedText(expiryDatesLabel, centerAlign, colWidthTbl[expiryDatesLabelKey]))
		}
		_, ok = rowCheck[transitionLabel]
		if !ok && showTransitionTick(showOpts) {
			rowCheck[transitionLabel] = incColIdx()
			(*alignedHdrLabels) = append((*alignedHdrLabels), getAlignedText(transitionLabel, centerAlign, colWidthTbl[transitionLabel]))
		}
		_, ok = rowCheck[transitionDatesLabelKey]
		if !ok && showTransitionDetails(rule, showOpts) {
			rowCheck[transitionDatesLabelKey] = incColIdx()
			(*alignedHdrLabels) = append((*alignedHdrLabels), getAlignedText(transitionDateLabel, centerAlign, colWidthTbl[transitionDatesLabelKey]))
		}
		_, ok = rowCheck[storageClassLabelKey]
		if !ok && showTransitionDetails(rule, showOpts) {
			rowCheck[storageClassLabelKey] = incColIdx()
			(*alignedHdrLabels) = append((*alignedHdrLabels), getAlignedText(storageClassLabel, centerAlign, colWidthTbl[storageClassLabelKey]))
		}
		_, ok = rowCheck[tagLabel]
		if !ok && showTags(rule, showOpts) {
			rowCheck[tagLabel] = incColIdx()
			tagIn = true
			(*alignedHdrLabels) = append((*alignedHdrLabels), getAlignedText(tagLabel, centerAlign, colWidthTbl[tagLabel]))
		}
	}
}
cmd/ilm/tabular_info.go
0.599251
0.412234
tabular_info.go
starcoder
package pathreflect import ( "fmt" "reflect" "strconv" "strings" spew "github.com/davecgh/go-spew/spew" ) const ( PathSep = "/" ) var ( zeroValue = reflect.Value{} ) type Path []string func Parse(pathString string) Path { parts := strings.Split(pathString, PathSep) finalParts := []string{} // Remove empty parts (i.e. extra slashes) for _, part := range parts { if part != "" { finalParts = append(finalParts, part) } } return Path(finalParts) } // Get gets the value in the given on at this Path. func (p Path) Get(on interface{}) (interface{}, error) { parent, current, nameOrIndex, err := p.descend(on) if err != nil { return nil, err } if parent.Kind() == reflect.Map { // For maps, get the value from the parent result := parent.MapIndex(reflect.ValueOf(nameOrIndex)) if result == zeroValue { return nil, fmt.Errorf("Unable to get value") } return result.Interface(), nil } else { // For structs and slices, get the value itself return current.Interface(), nil } } // Set sets the given value in the given on at this Path. func (p Path) Set(on interface{}, val interface{}) error { parent, current, nameOrIndex, err := p.descend(on) if err != nil { return err } if parent.Kind() == reflect.Map { // For maps, set the value on the parent parent.SetMapIndex(reflect.ValueOf(nameOrIndex), reflect.ValueOf(val)) } else { // For structs and slices, set the value using Set on the terminal field current.Set(reflect.ValueOf(val)) } return nil } // Clear clears the given value in the given on at this Path. 
func (p Path) Clear(on interface{}) error {
	parent, current, nameOrIndex, err := p.descend(on)
	if err != nil {
		return err
	}
	if parent.Kind() == reflect.Map {
		// For maps, remove the value from the parent.
		// reflect.ValueOf(nil) is the zero reflect.Value, which SetMapIndex
		// interprets as "delete this key".
		zeroValueOfValue := reflect.ValueOf(nil)
		parent.SetMapIndex(reflect.ValueOf(nameOrIndex), zeroValueOfValue)
	} else {
		// For structs and slices, set the value using Set on the terminal field
		zeroValueOfType := reflect.Zero(current.Type())
		current.Set(zeroValueOfType)
	}
	return nil
}

// ZeroValue returns the ZeroValue corresponding to the type of element at this
// path. For a pointer element type, a freshly allocated pointee is returned.
func (p Path) ZeroValue(on interface{}) (val interface{}, err error) {
	parent, current, _, err := p.descend(on)
	if err != nil {
		return nil, err
	}
	var t reflect.Type
	// For container parents the element type comes from the container;
	// otherwise use the addressed value's own type.
	if parent.Kind() == reflect.Map || parent.Kind() == reflect.Slice || parent.Kind() == reflect.Array {
		t = parent.Type().Elem()
	} else {
		t = current.Type()
	}
	if t.Kind() == reflect.Ptr {
		return reflect.New(t.Elem()).Interface(), nil
	} else {
		return reflect.Zero(t).Interface(), nil
	}
}

// String joins the path elements with PathSep.
func (p Path) String() string {
	return strings.Join(p, PathSep)
}

// descend walks the whole path inside on and returns the terminal value, its
// parent container and the final path element.
func (p Path) descend(on interface{}) (parent reflect.Value, current reflect.Value, nameOrIndex string, err error) {
	if len(p) == 0 {
		err = fmt.Errorf("Path must contain at least one element")
		return
	}
	current = reflect.ValueOf(on)
	nameOrIndex = ""
	for i := 0; i < len(p); i++ {
		if i > 0 {
			parent = current
		}
		nameOrIndex = p[i]
		current, err = getChild(current, nameOrIndex)
		if err != nil {
			err = fmt.Errorf("On %s, error traversing beyond path %s: %s", spew.Sdump(on), p.through(i), err)
			return
		}
	}
	return
}

// through returns the string form of the first i path elements.
func (p Path) through(i int) string {
	return strings.Join(p[:i], PathSep)
}

// getChild resolves one path element against parent: a map key, a struct
// field name, or a numeric slice/array index. Pointers and interfaces are
// dereferenced first.
func getChild(parent reflect.Value, nameOrIndex string) (val reflect.Value, err error) {
	if parent.Kind() == reflect.Ptr || parent.Kind() == reflect.Interface {
		if parent.IsNil() {
			err = fmt.Errorf("Empty parent value")
			return
		}
		parent = parent.Elem()
	}
	switch parent.Kind() {
	case reflect.Map:
		val = parent.MapIndex(reflect.ValueOf(nameOrIndex))
		return
	case reflect.Struct:
		val = parent.FieldByName(nameOrIndex)
		return
	case reflect.Array, reflect.Slice:
		i, err2 := strconv.Atoi(nameOrIndex)
		if err2 != nil {
			err = fmt.Errorf("%s is not a valid index for an array or slice", nameOrIndex)
			return
		}
		val = parent.Index(i)
		return
	default:
		err = fmt.Errorf("Unable to extract value %s from value of kind %s", nameOrIndex, parent.Kind())
		return
	}
}
src/github.com/getlantern/pathreflect/pathreflect.go
0.629091
0.407658
pathreflect.go
starcoder
// Test fixture for a self-hosted Go compiler: exercises the tokenizer on
// integer/float literal forms (rune, octal, hex, binary, '_' separators,
// hex floats) and the compiler's literal sizing via Sizeof. assert and
// println are external declarations supplied by the test harness.
// Do not change any literal — expected values are paired with them.
package test_literal

func assert(want int, act int, code string)
func println(format ...string)

func main() {
	assert(97, 'a', "'a'")
	assert(10, '\n', "'\\n'")
	assert(511, 0o777, "0o777")
	assert(0, 0x0, "0x0")
	assert(10, 0xa, "0xa")
	assert(10, 0xA, "0xA")
	assert(48879, 0xbeef, "0xbeef")
	assert(48879, 0xBEEF, "0xBEEF")
	assert(0, 0b0, "0b0")
	assert(1, 0b1, "0b1")
	assert(47, 0b101111, "0b101111")

	// '_' test
	assert(384, 0_600, "0_600")
	assert(42, 4_2, "4_2")
	assert(195951310, 0x_BadFace, "0x_BadFace")
	assert(801915078, 0x_67_7a_2f_cc_40_c6, "0x_67_7a_2f_cc_40_c6")
	assert(170141183460469, 170_141183_460469, "170_141183_460469")
	assert(1000, 1_0_0_0, "1_0_0_0")

	assert(4, Sizeof(0), "Sizeof(0)")
	assert(4, Sizeof(2147483647), "Sizeof(2147483647)")
	assert(8, Sizeof(2147483648), "Sizeof(2147483648)")

	// cannot tokenize for strconv.ParseInt function in tokenize.go
	assert(-1, 0xffffffffffffffff, "0xffffffffffffffff")
	assert(4, Sizeof(0xffffffffffffffff), "Sizeof(0xffffffffffffffff)")
	assert(-1, 0xffffffffffffffff>>63, "0xffffffffffffffff>>63")
	assert(-1, 18446744073709551615, "18446744073709551615")
	assert(4, Sizeof(18446744073709551615), "Sizeof(18446744073709551615)")
	assert(-1, 18446744073709551615>>63, "18446744073709551615>>63")
	assert(-1, 0xffffffffffffffff, "0xffffffffffffffff")
	assert(4, Sizeof(0xffffffffffffffff), "Sizeof(0xffffffffffffffff)")
	assert(-1, 0xffffffffffffffff>>63, "0xffffffffffffffff>>63")
	assert(-1, 01777777777777777777777, "01777777777777777777777")
	assert(4, Sizeof(01777777777777777777777), "Sizeof(01777777777777777777777)")
	assert(-1, 01777777777777777777777>>63, "01777777777777777777777>>63")
	assert(-1, 0b1111111111111111111111111111111111111111111111111111111111111111, "0b1111111111111111111111111111111111111111111111111111111111111111")
	assert(4, Sizeof(0b1111111111111111111111111111111111111111111111111111111111111111), "Sizeof(0b1111111111111111111111111111111111111111111111111111111111111111)")
	assert(-1, 0b1111111111111111111111111111111111111111111111111111111111111111>>63, "0b1111111111111111111111111111111111111111111111111111111111111111>>63")

	assert(8, Sizeof(2147483648), "Sizeof(2147483648)")
	assert(4, Sizeof(2147483647), "Sizeof(2147483647)")
	assert(8, Sizeof(0x1ffffffff), "Sizeof(0x1ffffffff)")
	assert(4, Sizeof(0x7ffffffe), "Sizeof(0xffffffff)")
	assert(1, 0xffffffff>>31, "0xffffffff>>31")
	assert(8, Sizeof(040000000000), "Sizeof(040000000000)")
	assert(4, Sizeof(017777777775), "Sizeof(017777777775)")
	assert(1, 037777777777>>31, "037777777777>>31")
	assert(8, Sizeof(0b111111111111111111111111111111111), "Sizeof(0b111111111111111111111111111111111)")
	assert(4, Sizeof(0b1111111111111111111111111111110), "Sizeof(0b11111111111111111111111111111111)")
	assert(1, 0b11111111111111111111111111111111>>31, "0b11111111111111111111111111111111>>31")
	assert(-1, 1<<31>>31, "1<<31>>31")
	assert(-1, 01<<31>>31, "01<<31>>31")
	assert(-1, 0x1<<31>>31, "0x1<<31>>31")
	assert(-1, 0b1<<31>>31, "0b1<<31>>31")

	assert(0, 0.0, "0.0")
	assert(1, 1.0, "1.0")
	assert(300000000, 3e+8, "3e+8")
	assert(16, 0x10.1p0, "0x10.1p0")
	assert(1000, .1e4, ".1e4")
	assert(16, 0x1_0.1p0, "0x1_0.1p0")
	assert(16, 0x_10.1p0, "0x_10.1p0")
	assert(348, 0x15e-2, "0x15e-2")
	assert(15, 0.15e+0_2, "0.15e+0_2")
	assert(4, Sizeof(8), "Sizeof(8)")
	assert(8, Sizeof(0.3), "Sizeof(0.3)")
	assert(8, Sizeof(0.), "Sizeof(0.)")
	assert(8, Sizeof(.0), "Sizeof(.0)")
	assert(8, Sizeof(5.), "Sizeof(5.)")
	assert(8, Sizeof(2.0), "Sizeof(2.0)")
	// assert(8, Sizeof("あいうえお"), "Sizeof(\"あいうえお\")")

	println("OK")
}
testdata/esc/literal.go
0.558688
0.621943
literal.go
starcoder
package dckks

import (
	"math/big"

	"encoding/binary"

	"github.com/tuneinsight/lattigo/v3/ckks"
	"github.com/tuneinsight/lattigo/v3/drlwe"
	"github.com/tuneinsight/lattigo/v3/ring"
	"github.com/tuneinsight/lattigo/v3/rlwe"
	"github.com/tuneinsight/lattigo/v3/utils"
)

// MaskedTransformProtocol is a struct storing the parameters for the MaskedTransformProtocol protocol.
type MaskedTransformProtocol struct {
	e2s E2SProtocol // encryption-to-shares sub-protocol
	s2e S2EProtocol // shares-to-encryption sub-protocol

	defaultScale *big.Int // params.DefaultScale() as a big.Int, at 'precision' bits
	precision    int      // bit precision of the internal big-complex encoder

	tmpMask []*big.Int // scratch mask of params.N() coefficients (not concurrency-safe)
	encoder ckks.EncoderBigComplex
}

// ShallowCopy creates a shallow copy of MaskedTransformProtocol in which all the read-only data-structures are
// shared with the receiver and the temporary buffers are reallocated. The receiver and the returned
// MaskedTransformProtocol can be used concurrently.
func (rfp *MaskedTransformProtocol) ShallowCopy() *MaskedTransformProtocol {
	params := rfp.e2s.params
	precision := rfp.precision

	tmpMask := make([]*big.Int, params.N())
	for i := range rfp.tmpMask {
		tmpMask[i] = new(big.Int)
	}

	return &MaskedTransformProtocol{
		e2s:          *rfp.e2s.ShallowCopy(),
		s2e:          *rfp.s2e.ShallowCopy(),
		precision:    precision,
		defaultScale: rfp.defaultScale,
		tmpMask:      tmpMask,
		encoder:      rfp.encoder.ShallowCopy(),
	}
}

// MaskedTransformFunc represents a user-defined in-place function that can be evaluated on masked CKKS plaintexts, as a part of the
// Masked Transform Protocol.
// The function is called with a vector of *ring.Complex modulo ckks.Parameters.Slots() as input, and must write
// its output on the same buffer.
type MaskedTransformFunc func(coeffs []*ring.Complex)

// MaskedTransformShare is a struct storing the decryption and recryption shares.
type MaskedTransformShare struct {
	e2sShare drlwe.CKSShare
	s2eShare drlwe.CKSShare
}

// MarshalBinary encodes a RefreshShare on a slice of bytes.
func (share *MaskedTransformShare) MarshalBinary() (data []byte, err error) {
	var e2sData, s2eData []byte
	if e2sData, err = share.e2sShare.MarshalBinary(); err != nil {
		return nil, err
	}
	if s2eData, err = share.s2eShare.MarshalBinary(); err != nil {
		return nil, err
	}
	// Layout: [8-byte little-endian len(e2sData)][e2sData][s2eData].
	data = make([]byte, 8)
	binary.LittleEndian.PutUint64(data, uint64(len(e2sData)))
	data = append(data, e2sData...)
	data = append(data, s2eData...)
	return data, nil
}

// UnmarshalBinary decodes a marshaled RefreshShare on the target RefreshShare.
// NOTE(review): no bounds validation — data shorter than 8 bytes, or a
// corrupted length prefix, panics on the slice expressions below; confirm
// callers only pass trusted, well-formed input.
func (share *MaskedTransformShare) UnmarshalBinary(data []byte) error {
	e2sDataLen := binary.LittleEndian.Uint64(data[:8])

	if err := share.e2sShare.UnmarshalBinary(data[8 : e2sDataLen+8]); err != nil {
		return err
	}
	if err := share.s2eShare.UnmarshalBinary(data[8+e2sDataLen:]); err != nil {
		return err
	}
	return nil
}

// NewMaskedTransformProtocol creates a new instance of the PermuteProtocol.
// precision : the log2 of decimal precision of the internal encoder.
func NewMaskedTransformProtocol(params ckks.Parameters, precision int, sigmaSmudging float64) (rfp *MaskedTransformProtocol) {

	rfp = new(MaskedTransformProtocol)

	rfp.e2s = *NewE2SProtocol(params, sigmaSmudging)
	rfp.s2e = *NewS2EProtocol(params, sigmaSmudging)

	rfp.precision = precision

	// Cache DefaultScale as an integer for exact rescaling arithmetic.
	rfp.defaultScale = new(big.Int)
	ring.NewFloat(params.DefaultScale(), precision).Int(rfp.defaultScale)

	rfp.tmpMask = make([]*big.Int, params.N())
	for i := range rfp.tmpMask {
		rfp.tmpMask[i] = new(big.Int)
	}
	rfp.encoder = ckks.NewEncoderBigComplex(params, precision)
	return
}

// AllocateShare allocates the shares of the PermuteProtocol
func (rfp *MaskedTransformProtocol) AllocateShare(levelDecrypt, levelRecrypt int) *MaskedTransformShare {
	return &MaskedTransformShare{*rfp.e2s.AllocateShare(levelDecrypt), *rfp.s2e.AllocateShare(levelRecrypt)}
}

// SampleCRP samples a common random polynomial to be used in the Masked-Transform protocol from the provided
// common reference string. The CRP is considered to be in the NTT domain.
func (rfp *MaskedTransformProtocol) SampleCRP(level int, crs utils.PRNG) drlwe.CKSCRP {
	crp := rfp.s2e.SampleCRP(level, crs)
	crp.IsNTT = true
	return crp
}

// GenShare generates the shares of the PermuteProtocol
// This protocol requires additional inputs which are :
// logBound : the bit length of the masks.
// logSlots : the bit length of the number of slots.
// ct1      : the degree 1 element the ciphertext to refresh, i.e. ct1 = ckk.Ciphetext.Value[1].
// scale    : the scale of the ciphertext when entering the refresh.
// The method "GetMinimumLevelForBootstrapping" should be used to get the minimum level at which the masked transform can be called while still ensure 128-bits of security, as well as the
// value for logBound.
func (rfp *MaskedTransformProtocol) GenShare(sk *rlwe.SecretKey, logBound, logSlots int, ct1 *ring.Poly, scale float64, crs drlwe.CKSCRP, transform MaskedTransformFunc, shareOut *MaskedTransformShare) {

	ringQ := rfp.s2e.params.RingQ()

	if ct1.Level() < shareOut.e2sShare.Value.Level() {
		panic("ct[1] level must be at least equal to e2sShare level")
	}

	if (*ring.Poly)(&crs).Level() != shareOut.s2eShare.Value.Level() {
		panic("crs level must be equal to s2eShare")
	}

	slots := 1 << logSlots

	// For the Standard ring there are twice as many mask coefficients
	// (real + imaginary halves); ConjugateInvariant only stores the reals.
	dslots := slots
	if ringQ.Type() == ring.Standard {
		dslots *= 2
	}

	// Generates the decryption share
	// Returns [M_i] on rfp.tmpMask and [a*s_i -M_i + e] on e2sShare
	rfp.e2s.GenShare(sk, logBound, logSlots, ct1, &rlwe.AdditiveShareBigint{Value: rfp.tmpMask}, &shareOut.e2sShare)

	// Applies LT(M_i)
	if transform != nil {
		bigComplex := make([]*ring.Complex, slots)

		for i := range bigComplex {
			bigComplex[i] = ring.NewComplex(ring.NewFloat(0, rfp.precision), ring.NewFloat(0, rfp.precision))
		}

		// Extracts sparse coefficients
		for i := 0; i < slots; i++ {
			bigComplex[i][0].SetInt(rfp.tmpMask[i])
		}

		switch rfp.e2s.params.RingType() {
		case ring.Standard:
			for i, j := 0, slots; i < slots; i, j = i+1, j+1 {
				bigComplex[i][1].SetInt(rfp.tmpMask[j])
			}
		case ring.ConjugateInvariant:
			// Imaginary parts are implied by conjugate symmetry.
			for i := 1; i < slots; i++ {
				bigComplex[i][1].Neg(bigComplex[slots-i][0])
			}
		default:
			panic("invalid ring type")
		}

		// Decodes
		rfp.encoder.FFT(bigComplex, 1<<logSlots)

		// Applies the linear transform
		transform(bigComplex)

		// Recodes
		rfp.encoder.InvFFT(bigComplex, 1<<logSlots)

		// Puts the coefficient back
		for i := 0; i < slots; i++ {
			bigComplex[i].Real().Int(rfp.tmpMask[i])
		}

		if rfp.e2s.params.RingType() == ring.Standard {
			for i, j := 0, slots; i < slots; i, j = i+1, j+1 {
				bigComplex[i].Imag().Int(rfp.tmpMask[j])
			}
		}
	}

	// Applies LT(M_i) * diffscale
	inputScaleInt := new(big.Int)
	ring.NewFloat(scale, 256).Int(inputScaleInt)

	// Scales the mask by the ratio between the two scales
	for i := 0; i < dslots; i++ {
		rfp.tmpMask[i].Mul(rfp.tmpMask[i], rfp.defaultScale)
		rfp.tmpMask[i].Quo(rfp.tmpMask[i], inputScaleInt)
	}

	// Returns [-a*s_i + LT(M_i) * diffscale + e] on s2eShare
	rfp.s2e.GenShare(sk, crs, logSlots, &rlwe.AdditiveShareBigint{Value: rfp.tmpMask}, &shareOut.s2eShare)
}

// AggregateShare sums share1 and share2 on shareOut.
func (rfp *MaskedTransformProtocol) AggregateShare(share1, share2, shareOut *MaskedTransformShare) {

	if share1.e2sShare.Value.Level() != share2.e2sShare.Value.Level() || share1.e2sShare.Value.Level() != shareOut.e2sShare.Value.Level() {
		panic("all e2s shares must be at the same level")
	}

	if share1.s2eShare.Value.Level() != share2.s2eShare.Value.Level() || share1.s2eShare.Value.Level() != shareOut.s2eShare.Value.Level() {
		panic("all s2e shares must be at the same level")
	}

	ringQ := rfp.s2e.params.RingQ()

	ringQ.AddLvl(share1.e2sShare.Value.Level(), share1.e2sShare.Value, share2.e2sShare.Value, shareOut.e2sShare.Value)
	ringQ.AddLvl(share1.s2eShare.Value.Level(), share1.s2eShare.Value, share2.s2eShare.Value, shareOut.s2eShare.Value)
}

// Transform applies Decrypt, Recode and Recrypt on the input ciphertext.
// The ciphertext scale is reset to the default scale.
func (rfp *MaskedTransformProtocol) Transform(ct *ckks.Ciphertext, logSlots int, transform MaskedTransformFunc, crs drlwe.CKSCRP, share *MaskedTransformShare, ciphertextOut *ckks.Ciphertext) { if ct.Level() < share.e2sShare.Value.Level() { panic("input ciphertext level must be at least equal to e2s level") } maxLevel := (*ring.Poly)(&crs).Level() if maxLevel != share.s2eShare.Value.Level() { panic("crs level and s2e level must be the same") } ringQ := rfp.s2e.params.RingQ() slots := 1 << logSlots dslots := slots if ringQ.Type() == ring.Standard { dslots *= 2 } // Returns -sum(M_i) + x (outside of the NTT domain) rfp.e2s.GetShare(nil, &share.e2sShare, logSlots, ct, &rlwe.AdditiveShareBigint{Value: rfp.tmpMask[:dslots]}) // Returns LT(-sum(M_i) + x) if transform != nil { bigComplex := make([]*ring.Complex, slots) for i := range bigComplex { bigComplex[i] = ring.NewComplex(ring.NewFloat(0, rfp.precision), ring.NewFloat(0, rfp.precision)) } // Extracts sparse coefficients for i := 0; i < slots; i++ { bigComplex[i][0].SetInt(rfp.tmpMask[i]) } switch rfp.e2s.params.RingType() { case ring.Standard: for i, j := 0, slots; i < slots; i, j = i+1, j+1 { bigComplex[i][1].SetInt(rfp.tmpMask[j]) } case ring.ConjugateInvariant: for i := 1; i < slots; i++ { bigComplex[i][1].Neg(bigComplex[slots-i][0]) } default: panic("invalid ring type") } // Decodes rfp.encoder.FFT(bigComplex, 1<<logSlots) // Applies the linear transform transform(bigComplex) // Recodes rfp.encoder.InvFFT(bigComplex, 1<<logSlots) // Puts the coefficient back for i := 0; i < slots; i++ { bigComplex[i].Real().Int(rfp.tmpMask[i]) } if rfp.e2s.params.RingType() == ring.Standard { for i := 0; i < slots; i++ { bigComplex[i].Imag().Int(rfp.tmpMask[i+slots]) } } } // Returns LT(-sum(M_i) + x) * diffscale inputScaleInt := new(big.Int) ring.NewFloat(ct.Scale, 256).Int(inputScaleInt) // Scales the mask by the ratio between the two scales for i := 0; i < slots; i++ { rfp.tmpMask[i].Mul(rfp.tmpMask[i], rfp.defaultScale) 
rfp.tmpMask[i].Quo(rfp.tmpMask[i], inputScaleInt) } // Extend the levels of the ciphertext for future allocation for ciphertextOut.Level() != maxLevel { level := ciphertextOut.Level() + 1 ciphertextOut.Value[0].Coeffs = append(ciphertextOut.Value[0].Coeffs, make([][]uint64, 1)...) ciphertextOut.Value[0].Coeffs[level] = make([]uint64, ringQ.N) ciphertextOut.Value[1].Coeffs = append(ciphertextOut.Value[1].Coeffs, make([][]uint64, 1)...) ciphertextOut.Value[1].Coeffs[level] = make([]uint64, ringQ.N) } ciphertextOut.Value[0].Zero() // Sets LT(-sum(M_i) + x) * diffscale in the RNS domain ringQ.SetCoefficientsBigintLvl(maxLevel, rfp.tmpMask[:dslots], ciphertextOut.Value[0]) ckks.NttAndMontgomeryLvl(maxLevel, logSlots, ringQ, false, ciphertextOut.Value[0]) // LT(-sum(M_i) + x) * diffscale + [-a*s + LT(M_i) * diffscale + e] = [-a*s + LT(x) * diffscale + e] ringQ.AddLvl(maxLevel, ciphertextOut.Value[0], share.s2eShare.Value, ciphertextOut.Value[0]) // Copies the result on the out ciphertext rfp.s2e.GetEncryption(&drlwe.CKSShare{Value: ciphertextOut.Value[0]}, crs, ciphertextOut) ciphertextOut.Scale = rfp.e2s.params.DefaultScale() }
dckks/transform.go
0.664758
0.416381
transform.go
starcoder
package dns import ( "encoding/json" ) // DataMatrixResult Time series containing a range of data points over time for each time series type DataMatrixResult struct { // The data points' labels Metric *map[string]string `json:"metric,omitempty"` // Time series data point values Values *[]DataValue `json:"values,omitempty"` } // NewDataMatrixResult instantiates a new DataMatrixResult object // This constructor will assign default values to properties that have it defined, // and makes sure properties required by API are set, but the set of arguments // will change when the set of required properties is changed func NewDataMatrixResult() *DataMatrixResult { this := DataMatrixResult{} return &this } // NewDataMatrixResultWithDefaults instantiates a new DataMatrixResult object // This constructor will only assign default values to properties that have it defined, // but it doesn't guarantee that properties required by API are set func NewDataMatrixResultWithDefaults() *DataMatrixResult { this := DataMatrixResult{} return &this } // GetMetric returns the Metric field value if set, zero value otherwise. func (o *DataMatrixResult) GetMetric() map[string]string { if o == nil || o.Metric == nil { var ret map[string]string return ret } return *o.Metric } // GetMetricOk returns a tuple with the Metric field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *DataMatrixResult) GetMetricOk() (*map[string]string, bool) { if o == nil || o.Metric == nil { return nil, false } return o.Metric, true } // HasMetric returns a boolean if a field has been set. func (o *DataMatrixResult) HasMetric() bool { if o != nil && o.Metric != nil { return true } return false } // SetMetric gets a reference to the given map[string]string and assigns it to the Metric field. func (o *DataMatrixResult) SetMetric(v map[string]string) { o.Metric = &v } // GetValues returns the Values field value if set, zero value otherwise. 
func (o *DataMatrixResult) GetValues() []DataValue { if o == nil || o.Values == nil { var ret []DataValue return ret } return *o.Values } // GetValuesOk returns a tuple with the Values field value if set, nil otherwise // and a boolean to check if the value has been set. func (o *DataMatrixResult) GetValuesOk() (*[]DataValue, bool) { if o == nil || o.Values == nil { return nil, false } return o.Values, true } // HasValues returns a boolean if a field has been set. func (o *DataMatrixResult) HasValues() bool { if o != nil && o.Values != nil { return true } return false } // SetValues gets a reference to the given []DataValue and assigns it to the Values field. func (o *DataMatrixResult) SetValues(v []DataValue) { o.Values = &v } func (o DataMatrixResult) MarshalJSON() ([]byte, error) { toSerialize := map[string]interface{}{} if o.Metric != nil { toSerialize["metric"] = o.Metric } if o.Values != nil { toSerialize["values"] = o.Values } return json.Marshal(toSerialize) } type NullableDataMatrixResult struct { value *DataMatrixResult isSet bool } func (v NullableDataMatrixResult) Get() *DataMatrixResult { return v.value } func (v *NullableDataMatrixResult) Set(val *DataMatrixResult) { v.value = val v.isSet = true } func (v NullableDataMatrixResult) IsSet() bool { return v.isSet } func (v *NullableDataMatrixResult) Unset() { v.value = nil v.isSet = false } func NewNullableDataMatrixResult(val *DataMatrixResult) *NullableDataMatrixResult { return &NullableDataMatrixResult{value: val, isSet: true} } func (v NullableDataMatrixResult) MarshalJSON() ([]byte, error) { return json.Marshal(v.value) } func (v *NullableDataMatrixResult) UnmarshalJSON(src []byte) error { v.isSet = true return json.Unmarshal(src, &v.value) }
pkg/dns/model_data_matrix_result.go
0.804828
0.540499
model_data_matrix_result.go
starcoder
package pure import ( "context" "strconv" "time" "github.com/benthosdev/benthos/v4/internal/bundle" "github.com/benthosdev/benthos/v4/internal/component/processor" "github.com/benthosdev/benthos/v4/internal/docs" "github.com/benthosdev/benthos/v4/internal/log" "github.com/benthosdev/benthos/v4/internal/message" oprocessor "github.com/benthosdev/benthos/v4/internal/old/processor" "github.com/benthosdev/benthos/v4/internal/tracing" ) func init() { err := bundle.AllProcessors.Add(func(conf oprocessor.Config, mgr bundle.NewManagement) (processor.V1, error) { p, err := newTryProc(conf.Try, mgr) if err != nil { return nil, err } return processor.NewV2BatchedToV1Processor("try", p, mgr.Metrics()), nil }, docs.ComponentSpec{ Name: "try", Categories: []string{ "Composition", }, Summary: `Executes a list of child processors on messages only if no prior processors have failed (or the errors have been cleared).`, Description: ` This processor behaves similarly to the ` + "[`for_each`](/docs/components/processors/for_each)" + ` processor, where a list of child processors are applied to individual messages of a batch. However, if a message has failed any prior processor (before or during the try block) then that message will skip all following processors. For example, with the following config: ` + "```yaml" + ` pipeline: processors: - resource: foo - try: - resource: bar - resource: baz - resource: buz ` + "```" + ` If the processor ` + "`bar`" + ` fails for a particular message, that message will skip the processors ` + "`baz` and `buz`" + `. Similarly, if ` + "`bar`" + ` succeeds but ` + "`baz`" + ` does not then ` + "`buz`" + ` will be skipped. If the processor ` + "`foo`" + ` fails for a message then none of ` + "`bar`, `baz` or `buz`" + ` are executed on that message. This processor is useful for when child processors depend on the successful output of previous processors. 
This processor can be followed with a ` + "[catch](/docs/components/processors/catch)" + ` processor for defining child processors to be applied only to failed messages. More information about error handing can be found [here](/docs/configuration/error_handling). ### Nesting within a catch block In some cases it might be useful to nest a try block within a catch block, since the ` + "[`catch` processor](/docs/components/processors/catch)" + ` only clears errors _after_ executing its child processors this means a nested try processor will not execute unless the errors are explicitly cleared beforehand. This can be done by inserting an empty catch block before the try block like as follows: ` + "```yaml" + ` pipeline: processors: - resource: foo - catch: - log: level: ERROR message: "Foo failed due to: ${! error() }" - catch: [] # Clear prior error - try: - resource: bar - resource: baz ` + "```" + ` `, Config: docs.FieldProcessor("", "").Array().HasDefault([]interface{}{}), }) if err != nil { panic(err) } } type tryProc struct { children []processor.V1 log log.Modular } func newTryProc(conf []oprocessor.Config, mgr bundle.NewManagement) (*tryProc, error) { var children []processor.V1 for i, pconf := range conf { pMgr := mgr.IntoPath("try", strconv.Itoa(i)).(bundle.NewManagement) proc, err := pMgr.NewProcessor(pconf) if err != nil { return nil, err } children = append(children, proc) } return &tryProc{ children: children, log: mgr.Logger(), }, nil } func (p *tryProc) ProcessBatch(ctx context.Context, _ []*tracing.Span, msg *message.Batch) ([]*message.Batch, error) { resultMsgs := make([]*message.Batch, msg.Len()) _ = msg.Iter(func(i int, p *message.Part) error { tmpMsg := message.QuickBatch(nil) tmpMsg.SetAll([]*message.Part{p}) resultMsgs[i] = tmpMsg return nil }) var res error if resultMsgs, res = oprocessor.ExecuteTryAll(p.children, resultMsgs...); res != nil || len(resultMsgs) == 0 { return nil, res } resMsg := message.QuickBatch(nil) for _, m := range resultMsgs 
{ _ = m.Iter(func(i int, p *message.Part) error { resMsg.Append(p) return nil }) } if resMsg.Len() == 0 { return nil, res } resMsgs := [1]*message.Batch{resMsg} return resMsgs[:], nil } func (p *tryProc) Close(ctx context.Context) error { for _, c := range p.children { c.CloseAsync() } deadline, exists := ctx.Deadline() if !exists { deadline = time.Now().Add(time.Second * 5) } for _, c := range p.children { if err := c.WaitForClose(time.Until(deadline)); err != nil { return err } } return nil }
internal/impl/pure/processor_try.go
0.633977
0.634175
processor_try.go
starcoder
package writer import "github.com/benpate/activitystream/vocabulary" // Accept func Accept(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeAccept). Actor(actor). Object(object) } // Add func Add(actor Object, object Object, target Object) Object { return NewObject(). Type(vocabulary.ActivityTypeAdd). Actor(actor). Object(object). Target(target) } // Announce func Announce(actor Object, object Object, target Object) Object { return NewObject(). Type(vocabulary.ActivityTypeAnnounce). Actor(actor). Object(object). Target(target) } // Arrive func Arrive(actor Object, location Object, origin Object) Object { return NewObject(). Type(vocabulary.ActivityTypeArrive). Actor(actor). Location(location). Origin(origin) } // Block func Block(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeBlock). Actor(actor). Object(object) } // Create func Create(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeCreate). Actor(actor). Object(object) } // Delete func Delete(actor Object, object Object, origin Object) Object { return NewObject(). Type(vocabulary.ActivityTypeDelete). Actor(actor). Object(object). Origin(origin) } // Dislike func Dislike(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeDislike). Actor(actor). Object(object) } // Flag func Flag(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeFlag). Actor(actor). Object(object) } // Follow func Follow(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeFollow). Actor(actor). Object(object) } // Ignore func Ignore(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeIgnore). Actor(actor). Object(object) } // Invite func Invite(actor Object, object Object, target Object) Object { return NewObject(). Type(vocabulary.ActivityTypeInvite). Actor(actor). Object(object). 
Target(target) } // Join func Join(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeJoin). Actor(actor). Object(object) } // Leave func Leave(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeLeave). Actor(actor). Object(object) } // Like func Like(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeLike). Actor(actor). Object(object) } // Listen func Listen(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeListen). Actor(actor). Object(object) } // Move func Move(actor Object, object Object, origin Object, target Object) Object { return NewObject(). Type(vocabulary.ActivityTypeMove). Actor(actor). Object(object). Origin(origin). Target(target) } // Offer func Offer(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeLike). Actor(actor). Object(object) } // Question func Question() Object { // TODO: this is not complete return NewObject(). Type(vocabulary.ActivityTypeQuestion) } // Reject func Reject(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeReject). Actor(actor). Object(object) } // Read func Read(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeRead). Actor(actor). Object(object) } // Remove func Remove(actor Object, object Object, origin Object) Object { return NewObject(). Type(vocabulary.ActivityTypeRemove). Actor(actor). Object(object). Origin(origin) } // TentativeAccept func TentativeAccept(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeTentativeAccept). Actor(actor). Object(object) } // TentativeReject func TentativeReject(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeTentativeReject). Actor(actor). Object(object) } // Travel func Travel(actor Object, origin Object, target Object) Object { return NewObject(). 
Type(vocabulary.ActivityTypeTravel). Actor(actor). Origin(origin). Target(target) } // Undo func Undo(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeUndo). Actor(actor). Object(object) } // Update func Update(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeUpdate). Actor(actor). Object(object) } // View func View(actor Object, object Object) Object { return NewObject(). Type(vocabulary.ActivityTypeView). Actor(actor). Object(object) }
writer/activities.go
0.517327
0.450359
activities.go
starcoder
package p514 import ( "math" ) /** In the video game Fallout 4, the quest "Road to Freedom" requires players to reach a metal dial called the "Freedom Trail Ring", and use the dial to spell a specific keyword in order to open the door. Given a string ring, which represents the code engraved on the outer ring and another string key, which represents the keyword needs to be spelled. You need to find the minimum number of steps in order to spell all the characters in the keyword. Initially, the first character of the ring is aligned at 12:00 direction. You need to spell all the characters in the string key one by one by rotating the ring clockwise or anticlockwise to make each character of the string key aligned at 12:00 direction and then by pressing the center button. At the stage of rotating the ring to spell the key character key[i]: You can rotate the ring clockwise or anticlockwise one place, which counts as 1 step. The final purpose of the rotation is to align one of the string ring's characters at the 12:00 direction, where this character must equal to the character key[i]. If the character key[i] has been aligned at the 12:00 direction, you need to press the center button to spell, which also counts as 1 step. After the pressing, you could begin to spell the next character in the key (next stage), otherwise, you've finished all the spelling. 
*/ func min(a, b int) int { if a < b { return a } return b } func findRotateSteps(ring string, key string) int { positions := make([][]int, 26) dps := make([][]int, 26) for i := 0; i < 26; i++ { positions[i] = make([]int, 0) } ringLength := len(ring) cirleStep := func(i, j int) int { if i > j { i, j = j, i } if j-i < (i + ringLength - j) { return j - i } return i + ringLength - j } for i := 0; i < ringLength; i++ { ix := ring[i] - 'a' positions[ix] = append(positions[ix], i) } for i := 0; i < 26; i++ { dps[i] = make([]int, len(positions[i])) } cur := byte(0) for i := 0; i < len(key); i++ { k := key[i] - 'a' if i == 0 { for ki, v := range positions[k] { dps[k][ki] = cirleStep(0, v) } } else { for ki, pos := range positions[k] { tmp := math.MaxInt32 for ci, prevPos := range positions[cur] { tmp = min(tmp, dps[cur][ci]+cirleStep(pos, prevPos)) } dps[k][ki] = tmp } } cur = k } minStep := math.MaxInt32 for _, s := range dps[cur] { minStep = min(minStep, s) } return minStep + len(key) }
algorithms/p514/514.go
0.606382
0.690435
514.go
starcoder
package deploy const ( NotificationTmpl = `{{ define "rancher.title" }} {{ if eq (index .Alerts 0).Labels.alert_type "event"}} {{ (index .Alerts 0).Labels.event_type}} event of {{(index .Alerts 0).Labels.resource_kind}} occurred {{ else if eq (index .Alerts 0).Labels.alert_type "nodeHealthy"}} The kubelet on the node {{ (index .Alerts 0).Labels.node_name}} is not healthy {{ else if eq (index .Alerts 0).Labels.alert_type "nodeCPU"}} The CPU usage on the node {{ (index .Alerts 0).Labels.node_name}} is over {{ (index .Alerts 0).Labels.cpu_threshold}}% {{ else if eq (index .Alerts 0).Labels.alert_type "nodeMemory"}} The memory usage on the node {{ (index .Alerts 0).Labels.node_name}} is over {{ (index .Alerts 0).Labels.mem_threshold}}% {{ else if eq (index .Alerts 0).Labels.alert_type "podNotScheduled"}} The Pod {{ if (index .Alerts 0).Labels.namespace}}{{(index .Alerts 0).Labels.namespace}}:{{end}}{{(index .Alerts 0).Labels.pod_name}} is not scheduled {{ else if eq (index .Alerts 0).Labels.alert_type "podNotRunning"}} The Pod {{ if (index .Alerts 0).Labels.namespace}}{{(index .Alerts 0).Labels.namespace}}:{{end}}{{(index .Alerts 0).Labels.pod_name}} is not running {{ else if eq (index .Alerts 0).Labels.alert_type "podRestarts"}} The Pod {{ if (index .Alerts 0).Labels.namespace}}{{(index .Alerts 0).Labels.namespace}}:{{end}}{{(index .Alerts 0).Labels.pod_name}} restarts {{ (index .Alerts 0).Labels.restart_times}} times in {{ (index .Alerts 0).Labels.restart_interval}} sec {{ else if eq (index .Alerts 0).Labels.alert_type "systemService"}} The system component {{ (index .Alerts 0).Labels.component_name}} is not running {{ else if eq (index .Alerts 0).Labels.alert_type "workload"}} The workload {{ if (index .Alerts 0).Labels.workload_namespace}}{{(index .Alerts 0).Labels.workload_namespace}}:{{end}}{{(index .Alerts 0).Labels.workload_name}} has available replicas less than {{ (index .Alerts 0).Labels.available_percentage}}% {{ end}} {{ end}} {{ define "slack.text" }} {{ 
if eq (index .Alerts 0).Labels.alert_type "event"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}} Severity: {{ (index .Alerts 0).Labels.severity}} Cluster Name: {{(index .Alerts 0).Labels.cluster_name}} Target: {{ if (index .Alerts 0).Labels.target_namespace}}{{(index .Alerts 0).Labels.target_namespace}}:{{end}}{{(index .Alerts 0).Labels.target_name}} Count: {{ (index .Alerts 0).Labels.event_count}} Event Message: {{ (index .Alerts 0).Labels.event_message}} First Seen: {{ (index .Alerts 0).Labels.event_firstseen}} Last Seen: {{ (index .Alerts 0).Labels.event_lastseen}} {{ else if eq (index .Alerts 0).Labels.alert_type "nodeHealthy"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}} Severity: {{ (index .Alerts 0).Labels.severity}} Cluster Name: {{(index .Alerts 0).Labels.cluster_name}} {{ else if eq (index .Alerts 0).Labels.alert_type "nodeCPU"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}} Severity: {{ (index .Alerts 0).Labels.severity}} Cluster Name: {{(index .Alerts 0).Labels.cluster_name}} Used CPU: {{ (index .Alerts 0).Labels.used_cpu}} m Total CPU: {{ (index .Alerts 0).Labels.total_cpu}} m {{ else if eq (index .Alerts 0).Labels.alert_type "nodeMemory"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}} Severity: {{ (index .Alerts 0).Labels.severity}} Cluster Name: {{(index .Alerts 0).Labels.cluster_name}} Used Memory: {{ (index .Alerts 0).Labels.used_mem}} Total Memory: {{ (index .Alerts 0).Labels.total_mem}} {{ else if eq (index .Alerts 0).Labels.alert_type "podRestarts"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}} Severity: {{ (index .Alerts 0).Labels.severity}} Cluster Name: {{(index .Alerts 0).Labels.cluster_name}} Namespace: {{ (index .Alerts 0).Labels.namespace}} Container Name: {{(index .Alerts 0).Labels.container_name}} {{ else if eq (index .Alerts 0).Labels.alert_type "podNotRunning"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}} Severity: {{ (index .Alerts 0).Labels.severity}} Cluster Name: {{(index .Alerts 
0).Labels.cluster_name}} Namespace: {{ (index .Alerts 0).Labels.namespace}} Container Name: {{ (index .Alerts 0).Labels.container_name}} {{ else if eq (index .Alerts 0).Labels.alert_type "podNotScheduled"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}} Severity: {{ (index .Alerts 0).Labels.severity}} Cluster Name: {{(index .Alerts 0).Labels.cluster_name}} Namespace: {{ (index .Alerts 0).Labels.namespace}} Pod Name: {{ (index .Alerts 0).Labels.pod_name}} {{ else if eq (index .Alerts 0).Labels.alert_type "systemService"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}} Severity: {{ (index .Alerts 0).Labels.severity}} Cluster Name: {{(index .Alerts 0).Labels.cluster_name}} {{ else if eq (index .Alerts 0).Labels.alert_type "workload"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}} Severity: {{ (index .Alerts 0).Labels.severity}} Cluster Name: {{(index .Alerts 0).Labels.cluster_name}} Available Replicas: {{ (index .Alerts 0).Labels.available_replicas}} Desired Replicas: {{ (index .Alerts 0).Labels.desired_replicas}} {{ end}} {{ if (index .Alerts 0).Labels.logs}} Logs: {{ (index .Alerts 0).Labels.logs}} {{ end}} {{ end}} {{ define "email.text" }} {{ if eq (index .Alerts 0).Labels.alert_type "event"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}}<br> Severity: {{ (index .Alerts 0).Labels.severity}}<br> Cluster Name: {{(index .Alerts 0).Labels.cluster_name}}<br> Target: {{ if (index .Alerts 0).Labels.target_namespace}}{{(index .Alerts 0).Labels.target_namespace}}:{{end}}{{ (index .Alerts 0).Labels.target_name}}<br> Count: {{ (index .Alerts 0).Labels.event_count}}<br> Event Message: {{ (index .Alerts 0).Labels.event_message}}<br> First Seen: {{ (index .Alerts 0).Labels.event_firstseen}}<br> Last Seen: {{ (index .Alerts 0).Labels.event_lastseen}}<br> {{ else if eq (index .Alerts 0).Labels.alert_type "nodeHealthy"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}}<br> Severity: {{ (index .Alerts 0).Labels.severity}}<br> Cluster Name: {{(index 
.Alerts 0).Labels.cluster_name}}<br> {{ else if eq (index .Alerts 0).Labels.alert_type "nodeCPU"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}}<br> Severity: {{ (index .Alerts 0).Labels.severity}}<br> Cluster Name: {{(index .Alerts 0).Labels.cluster_name}}<br> Used CPU: {{ (index .Alerts 0).Labels.used_cpu}} m<br> Total CPU: {{ (index .Alerts 0).Labels.total_cpu}} m<br> {{ else if eq (index .Alerts 0).Labels.alert_type "nodeMemory"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}}<br> Severity: {{ (index .Alerts 0).Labels.severity}}<br> Cluster Name: {{(index .Alerts 0).Labels.cluster_name}}<br> Used Memory: {{ (index .Alerts 0).Labels.used_mem}}<br> Total Memory: {{ (index .Alerts 0).Labels.total_mem}}<br> {{ else if eq (index .Alerts 0).Labels.alert_type "podRestarts"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}}<br> Severity: {{ (index .Alerts 0).Labels.severity}}<br> Cluster Name: {{(index .Alerts 0).Labels.cluster_name}}<br> Namespace: {{ (index .Alerts 0).Labels.namespace}}<br> Container Name: {{(index .Alerts 0).Labels.container_name}}<br> {{ else if eq (index .Alerts 0).Labels.alert_type "podNotRunning"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}}<br> Severity: {{ (index .Alerts 0).Labels.severity}}<br> Cluster Name: {{(index .Alerts 0).Labels.cluster_name}}<br> Namespace: {{ (index .Alerts 0).Labels.namespace}}<br> Container Name: {{ (index .Alerts 0).Labels.container_name}}<br> {{ else if eq (index .Alerts 0).Labels.alert_type "podNotScheduled"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}}<br> Severity: {{ (index .Alerts 0).Labels.severity}}<br> Cluster Name: {{(index .Alerts 0).Labels.cluster_name}}<br> Namespace: {{ (index .Alerts 0).Labels.namespace}}<br> Pod Name: {{ (index .Alerts 0).Labels.pod_name}}<br> {{ else if eq (index .Alerts 0).Labels.alert_type "systemService"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}}<br> Severity: {{ (index .Alerts 0).Labels.severity}}<br> Cluster Name: {{(index .Alerts 
0).Labels.cluster_name}}<br> {{ else if eq (index .Alerts 0).Labels.alert_type "workload"}} Alert Name: {{ (index .Alerts 0).Labels.alert_name}}<br> Severity: {{ (index .Alerts 0).Labels.severity}}<br> Cluster Name: {{(index .Alerts 0).Labels.cluster_name}}<br> Available Replicas: {{ (index .Alerts 0).Labels.available_replicas}}<br> Desired Replicas: {{ (index .Alerts 0).Labels.desired_replicas}}<br> {{ end}} {{ if (index .Alerts 0).Labels.logs}} Logs: {{ (index .Alerts 0).Labels.logs}} {{ end}} {{ end}} ` )
pkg/controllers/user/alert/deploy/notification_template.go
0.512205
0.524577
notification_template.go
starcoder
package naq import "github.com/ContextLogic/cldr" var calendar = cldr.Calendar{ Formats: cldr.CalendarFormats{ Date: cldr.CalendarDateFormat{Full: "EEEE, d MMMM y", Long: "d MMMM y", Medium: "d MMM y", Short: "dd/MM/y"}, Time: cldr.CalendarDateFormat{Full: "h:mm:ss a zzzz", Long: "h:mm:ss a z", Medium: "h:mm:ss a", Short: "h:mm a"}, DateTime: cldr.CalendarDateFormat{}, }, FormatNames: cldr.CalendarFormatNames{ Months: cldr.CalendarMonthFormatNames{ Abbreviated: cldr.CalendarMonthFormatNameValue{Jan: "Jan", Feb: "Feb", Mar: "Mar", Apr: "Apr", May: "May", Jun: "Jun", Jul: "Jul", Aug: "Aug", Sep: "Sep", Oct: "Oct", Nov: "Nov", Dec: "Dec"}, Narrow: cldr.CalendarMonthFormatNameValue{Jan: "J", Feb: "F", Mar: "M", Apr: "A", May: "M", Jun: "J", Jul: "J", Aug: "A", Sep: "S", Oct: "O", Nov: "N", Dec: "D"}, Short: cldr.CalendarMonthFormatNameValue{}, Wide: cldr.CalendarMonthFormatNameValue{Jan: "ǃKhanni", Feb: "ǃKhanǀgôab", Mar: "ǀKhuuǁkhâb", Apr: "ǃHôaǂkhaib", May: "ǃKhaitsâb", Jun: "Gamaǀaeb", Jul: "ǂKhoesaob", Aug: "Aoǁkhuumûǁkhâb", Sep: "Taraǀkhuumûǁkhâb", Oct: "ǂNûǁnâiseb", Nov: "ǀHooǂgaeb", Dec: "Hôasoreǁkhâb"}, }, Days: cldr.CalendarDayFormatNames{ Abbreviated: cldr.CalendarDayFormatNameValue{Sun: "Son", Mon: "Ma", Tue: "De", Wed: "Wu", Thu: "Do", Fri: "Fr", Sat: "Sat"}, Narrow: cldr.CalendarDayFormatNameValue{Sun: "S", Mon: "M", Tue: "E", Wed: "W", Thu: "D", Fri: "F", Sat: "A"}, Short: cldr.CalendarDayFormatNameValue{}, Wide: cldr.CalendarDayFormatNameValue{Sun: "Sontaxtsees", Mon: "Mantaxtsees", Tue: "Denstaxtsees", Wed: "Wunstaxtsees", Thu: "Dondertaxtsees", Fri: "Fraitaxtsees", Sat: "Satertaxtsees"}, }, Periods: cldr.CalendarPeriodFormatNames{ Abbreviated: cldr.CalendarPeriodFormatNameValue{}, Narrow: cldr.CalendarPeriodFormatNameValue{}, Short: cldr.CalendarPeriodFormatNameValue{}, Wide: cldr.CalendarPeriodFormatNameValue{AM: "ǁgoagas", PM: "ǃuias"}, }, }, }
resources/locales/naq/calendar.go
0.50708
0.451931
calendar.go
starcoder
package goutil import ( "reflect" "unsafe" ) // AddrInt returns a pointer int representing the address of i. func AddrInt(i int) *int { return &i } // InitAndGetString if strPtr is empty string, initialize it with def, // and return the final value. func InitAndGetString(strPtr *string, def string) string { if strPtr == nil { return def } if *strPtr == "" { *strPtr = def } return *strPtr } // DereferenceType dereference, get the underlying non-pointer type. func DereferenceType(t reflect.Type) reflect.Type { for t.Kind() == reflect.Ptr { t = t.Elem() } return t } // DereferenceValue dereference and unpack interface, // get the underlying non-pointer and non-interface value. func DereferenceValue(v reflect.Value) reflect.Value { for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { v = v.Elem() } return v } // DereferencePtrValue returns the underlying non-pointer type value. func DereferencePtrValue(v reflect.Value) reflect.Value { for v.Kind() == reflect.Ptr { v = v.Elem() } return v } // DereferenceIfaceValue returns the value of the underlying type that implements the interface v. func DereferenceIfaceValue(v reflect.Value) reflect.Value { for v.Kind() == reflect.Interface { v = v.Elem() } return v } // DereferenceImplementType returns the underlying type of the value that implements the interface v. func DereferenceImplementType(v reflect.Value) reflect.Type { return DereferenceType(DereferenceIfaceValue(v).Type()) } // DereferenceSlice convert []*T to []T. func DereferenceSlice(v reflect.Value) reflect.Value { m := v.Len() - 1 if m < 0 { return reflect.New(reflect.SliceOf(DereferenceType(v.Type().Elem()))).Elem() } s := make([]reflect.Value, m+1) for ; m >= 0; m-- { s[m] = DereferenceValue(v.Index(m)) } v = reflect.New(reflect.SliceOf(s[0].Type())).Elem() v = reflect.Append(v, s...) return v } // ReferenceSlice convert []T to []*T, the ptrDepth is the count of '*'. 
func ReferenceSlice(v reflect.Value, ptrDepth int) reflect.Value { if ptrDepth <= 0 { return v } m := v.Len() - 1 if m < 0 { return reflect.New(reflect.SliceOf(ReferenceType(v.Type().Elem(), ptrDepth))).Elem() } s := make([]reflect.Value, m+1) for ; m >= 0; m-- { s[m] = ReferenceValue(v.Index(m), ptrDepth) } v = reflect.New(reflect.SliceOf(s[0].Type())).Elem() v = reflect.Append(v, s...) return v } // ReferenceType convert T to *T, the ptrDepth is the count of '*'. func ReferenceType(t reflect.Type, ptrDepth int) reflect.Type { for ; ptrDepth > 0; ptrDepth-- { t = reflect.PtrTo(t) } return t } // ReferenceValue convert T to *T, the ptrDepth is the count of '*'. func ReferenceValue(v reflect.Value, ptrDepth int) reflect.Value { for ; ptrDepth > 0; ptrDepth-- { vv := reflect.New(v.Type()) vv.Elem().Set(v) v = vv } return v } // IsLittleEndian determine whether the current system is little endian. func IsLittleEndian() bool { var i int32 = 0x01020304 u := unsafe.Pointer(&i) pb := (*byte)(u) b := *pb return (b == 0x04) }
vendor/github.com/henrylee2cn/goutil/other.go
0.648578
0.452234
other.go
starcoder
package datastream //The ExtendedDataRange interface - this is the object that is returned from different caches/stores - it represents //a range of data values stored in a certain way, and Next() gets the next datapoint in the range. type ExtendedDataRange interface { Index() int64 //Returns the index of the ExtendedDataRange's next datapoint NextArray() (*DatapointArray, error) //Returns the next chunk of datapoints from the ExtendedDataRange Next() (*Datapoint, error) //Returns the next datapoint in the sequence Close() } //DataRange is ExtendedDataRange's little brother - while ExtendedDataRange contains the NextArray and Index methods for more powerful //manipulation, a DataRange is just a basic iterator. Note that all ExtendedDataRange fit the DataRange interface type DataRange interface { Next() (*Datapoint, error) Close() } //The EmptyRange is a range that always returns nil - as if there were no datapoints left. //It is the ExtendedDataRange equivalent of nil type EmptyRange struct{} //Index just returns 0 func (r EmptyRange) Index() int64 { return 0 } //Close does absolutely nothing func (r EmptyRange) Close() {} //NextArray always just returns nil,nil func (r EmptyRange) NextArray() (*DatapointArray, error) { return nil, nil } //Next always just returns nil,nil func (r EmptyRange) Next() (*Datapoint, error) { return nil, nil } //A TimeRange is a Datarange which is time-bounded from both sides. That is, the datapoints allowed are only //within the given time range. So if given a ExtendedDataRange with range [a,b], and the timerange is (c,d], the //TimeRange will return all datapoints within the Datarange which are within (c,d]. type TimeRange struct { dr ExtendedDataRange //The ExtendedDataRange to wrap endtime float64 //The time at which to stop returning datapoints dpap *DatapointArray //The current array that is being read } //Index returns the underlying ExtendedDataRange's index. 
func (r *TimeRange) Index() int64 { if r.dpap != nil { return r.dr.Index() - int64(r.dpap.Length()) } return r.dr.Index() } //Close closes the internal ExtendedDataRange func (r *TimeRange) Close() { r.dr.Close() } //NextArray returns the next datapoint array in sequence from the underlying ExtendedDataRange, so long as it is within the //correct timestamp bounds func (r *TimeRange) NextArray() (dpap *DatapointArray, err error) { if r.dpap == nil { r.dpap, err = r.dr.NextArray() } if err != nil || r.dpap == nil { return r.dpap, err } dpa := r.dpap.TEnd(r.endtime) r.dpap = nil if dpa == nil { return nil, err } if dpa.Length() > 0 { return &dpa, err } return nil, nil } //Next returns the next datapoint in sequence from the underlying ExtendedDataRange, so long as it is within the //correct timestamp bounds func (r *TimeRange) Next() (dp *Datapoint, err error) { if r.dpap != nil && r.dpap.Length() > 0 { res := (*r.dpap)[0] dpa := (*r.dpap)[1:] r.dpap = &dpa if r.dpap.Length() == 0 { r.dpap = nil } dp = &res } else { dp, err = r.dr.Next() } //Return nil if the timestamp is beyond our range if dp != nil && r.endtime > 0.0 && dp.Timestamp > r.endtime { //The datapoint is beyond our range. return nil, nil } return dp, err } //NewTimeRange creates a time range given the time range of valid datapoints func NewTimeRange(dr ExtendedDataRange, starttime float64, endtime float64) (ExtendedDataRange, error) { //We have a ExtendedDataRange - but we don't know what time it starts at. 
We want to skip the // datapoints before starttime dpap, err := dr.NextArray() for dpap != nil && err == nil { dpa := dpap.TStart(starttime) if dpa.Length() > 0 { return &TimeRange{dr, endtime, &dpa}, nil } dpap, err = dr.NextArray() } return EmptyRange{}, err } //NumRange returns only the first given number of datapoints (with an optional skip param) from a ExtendedDataRange type NumRange struct { dr ExtendedDataRange numleft int64 //The number of datapoints left to return } //Close closes the internal ExtendedDataRange func (r *NumRange) Close() { r.dr.Close() } //Index returns the underlying ExtendedDataRange's index value func (r *NumRange) Index() int64 { return r.dr.Index() } //NextArray returns the next datapoint from the underlying ExtendedDataRange so long as the datapoint array is within the //amount of datapoints to return. func (r *NumRange) NextArray() (*DatapointArray, error) { if r.numleft == 0 { return nil, nil } dpa, err := r.dr.NextArray() if err != nil { return nil, err } if int64(dpa.Length()) <= r.numleft { r.numleft -= int64(dpa.Length()) return dpa, nil } dpa = dpa.IRange(0, int(r.numleft)) r.numleft = 0 return dpa, nil } //Next returns the next datapoint from the underlying ExtendedDataRange so long as the datapoint is within the //amonut of datapoints to return. func (r *NumRange) Next() (*Datapoint, error) { if r.numleft == 0 { return nil, nil } r.numleft-- return r.dr.Next() } //Skip the given number of datapoints without changing the number of datapoints left to return func (r *NumRange) Skip(num int) error { for i := 0; i < num; i++ { _, err := r.dr.Next() if err != nil { return err } } return nil } //NewNumRange initializes a new NumRange which will return up to the given amount of datapoints. 
func NewNumRange(dr ExtendedDataRange, datapoints int64) *NumRange { return &NumRange{dr, datapoints} } //DatapointArrayRange allows DatapointArray to conform to the range interface type DatapointArrayRange struct { rangeindex int da DatapointArray startindex int64 } //Close resets the range func (d *DatapointArrayRange) Close() { d.rangeindex = 0 } //Index returns the index of the DatapointArray func (d *DatapointArrayRange) Index() int64 { return d.startindex + int64(d.rangeindex) } //Next returns the next datapoint func (d *DatapointArrayRange) Next() (*Datapoint, error) { if d.rangeindex >= d.da.Length() { return nil, nil } d.rangeindex++ return &d.da[d.rangeindex-1], nil } //NextArray returns what is left of the array func (d *DatapointArrayRange) NextArray() (*DatapointArray, error) { if d.rangeindex >= d.da.Length() { return nil, nil } dpa := d.da[d.rangeindex:] d.rangeindex = d.da.Length() return &dpa, nil } //NewDatapointArrayRange does exactly what the function says func NewDatapointArrayRange(da DatapointArray, startindex int64) *DatapointArrayRange { return &DatapointArrayRange{0, da, startindex} }
src/connectordb/datastream/datarange.go
0.812012
0.743936
datarange.go
starcoder
package main import ( "errors" "fmt" ) type GraphType string const ( DIRECTED GraphType = "DIRECTED" UNDIRECTED GraphType = "UNDIRECTED" ) type Graph interface { Init() AddEdge(vertexOne, vertexTwo int) error AddEdgeWithWeight(vertexOne, vertexTwo, weight int) error RemoveEdge(vertexOne, vertexTwo int) error HasEdge(vertexOne, vertexTwo int) bool GetGraphType() GraphType GetAdjacencyNodesForVertex(vertex int) map[int]bool GetWeightOfEdge(vertexOne, vertexTwo int) (int, error) GetNumberOfVertices() int GetNumberOfEdges() int GetInDegreeOfVertex(vertex int) int } type AdjacencyMatrix struct { Vertices int Edges int GraphType GraphType AdjMatrx [][]int } func (G *AdjacencyMatrix) Init() { G.AdjMatrx = make([][]int, G.Vertices) G.Edges = 0 for i := 0; i < G.Vertices; i++ { G.AdjMatrx[i] = make([]int, G.Vertices) } } func (G *AdjacencyMatrix) AddEdge(vertexOne, vertexTwo int) error { if vertexOne >= G.Vertices || vertexTwo >= G.Vertices || vertexOne < 0 || vertexTwo < 0 { return errors.New("Index out of bounds") } G.AdjMatrx[vertexOne][vertexTwo] = 1 G.Edges++ if G.GraphType == UNDIRECTED { G.AdjMatrx[vertexTwo][vertexOne] = 1 G.Edges++ } return nil } func (G *AdjacencyMatrix) AddEdgeWithWeight(vertexOne, vertexTwo, weight int) error { if vertexOne >= G.Vertices || vertexTwo >= G.Vertices || vertexOne < 0 || vertexTwo < 0 { return errors.New("Index out of bounds") } G.AdjMatrx[vertexOne][vertexTwo] = weight G.Edges++ if G.GraphType == UNDIRECTED { G.AdjMatrx[vertexTwo][vertexOne] = weight G.Edges++ } return nil } func (G *AdjacencyMatrix) RemoveEdge(vertexOne, vertexTwo int) error { if vertexOne >= G.Vertices || vertexTwo >= G.Vertices || vertexOne < 0 || vertexTwo < 0 { return errors.New("Imdex out of bounds") } G.AdjMatrx[vertexOne][vertexTwo] = 0 G.Edges-- if G.GraphType == UNDIRECTED { G.AdjMatrx[vertexTwo][vertexOne] = 0 G.Edges-- } return nil } func (G *AdjacencyMatrix) HasEdge(vertexOne, vertexTwo int) bool { if vertexOne >= G.Vertices || vertexTwo >= G.Vertices 
|| vertexOne < 0 || vertexTwo < 0 { return false } return G.AdjMatrx[vertexOne][vertexTwo] != 0 } func (G *AdjacencyMatrix) GetGraphType() GraphType { return G.GraphType } func (G *AdjacencyMatrix) GetAdjacencyNodesForVertex(vertex int) map[int]bool { adjancencyMatrucVertices := map[int]bool{} if vertex >= G.Vertices || vertex < 0 { return adjancencyMatrucVertices } for i := 0; i < G.Vertices; i++ { if G.AdjMatrx[vertex][i] != 0 { adjancencyMatrucVertices[i] = (G.AdjMatrx[vertex][i] != 0) } } return adjancencyMatrucVertices } func (G *AdjacencyMatrix) GetWeightOfEdge(vertexOne, vertexTwo int) (int, error) { if vertexOne >= G.Vertices || vertexTwo >= G.Vertices || vertexOne < 0 || vertexTwo < 0 { return 0, errors.New("Error getting weight for the vertex") } return G.AdjMatrx[vertexOne][vertexTwo], nil } func (G *AdjacencyMatrix) GetNumberOfVertices() int { return G.Vertices } func (G *AdjacencyMatrix) GetNumberOfEdges() int { return G.Edges } func (G *AdjacencyMatrix) GetInDegreeOgVertex(vertex int) int { inDegree := 0 adjacencyNodes := G.GetAdjacencyNodesForVertex(vertex) for key := range adjacencyNodes { if adjacencyNodes[key] { inDegree++ } } return inDegree } func main() { var testAdjMatrixDirected = &AdjacencyMatrix{4, 0, DIRECTED, nil} testAdjMatrixDirected.Init() err := testAdjMatrixDirected.AddEdge(2, 1) if err != nil { fmt.Printf("Error adding edge") } if testAdjMatrixDirected.AdjMatrx[2][1] != 1 { fmt.Println("Data not found at index") } if testAdjMatrixDirected.AdjMatrx[1][2] != 0 { fmt.Printf("Data not found at index") } }
Graphs/graphs_adjacency_matrix.go
0.642096
0.551695
graphs_adjacency_matrix.go
starcoder
package texture import ( "github.com/g3n/engine/geometry" "github.com/g3n/engine/graphic" "github.com/g3n/engine/light" "github.com/g3n/engine/material" "github.com/g3n/engine/math32" "github.com/g3n/engine/texture" "github.com/g3n/engine/util/helper" "github.com/g3n/g3nd/app" "time" ) func init() { app.DemoMap["texture.plane"] = &Texplane{} } type Texplane struct { plane1 *graphic.Mesh plane2 *graphic.Mesh } // Start is called once at the start of the demo. func (t *Texplane) Start(a *app.App) { // Create axes helper axes := helper.NewAxes(1) a.Scene().Add(axes) // Adds red directional right light dir1 := light.NewDirectional(&math32.Color{1, 0, 0}, 1.0) dir1.SetPosition(1, 0, 0) a.Scene().Add(dir1) // Adds green directional top light dir2 := light.NewDirectional(&math32.Color{1, 0, 0}, 1.0) dir2.SetPosition(0, 1, 0) a.Scene().Add(dir2) // Adds white directional front light dir3 := light.NewDirectional(&math32.Color{1, 1, 1}, 1.0) dir3.SetPosition(0, 0, 1) a.Scene().Add(dir3) // Loads texture from image texfile := a.DirData() + "/images/uvgrid.jpg" tex1, err := texture.NewTexture2DFromImage(texfile) if err != nil { a.Log().Fatal("Error:%s loading texture:%s", err, texfile) } // Creates plane 1 plane1_geom := geometry.NewPlane(2, 2) plane1_mat := material.NewStandard(&math32.Color{1, 1, 1}) plane1_mat.SetSide(material.SideDouble) plane1_mat.AddTexture(tex1) t.plane1 = graphic.NewMesh(plane1_geom, plane1_mat) t.plane1.SetPosition(0, 1.1, 0) a.Scene().Add(t.plane1) // Loads texture from image texfile = a.DirData() + "/images/tiger1.jpg" tex2, err := texture.NewTexture2DFromImage(texfile) if err != nil { a.Log().Fatal("Error:%s loading texture:%s", err, texfile) } // Creates plane2 plane2_geom := geometry.NewPlane(2, 2) plane2_mat := material.NewStandard(&math32.Color{1, 1, 1}) plane2_mat.SetSide(material.SideDouble) plane2_mat.AddTexture(tex2) t.plane2 = graphic.NewMesh(plane2_geom, plane2_mat) t.plane2.SetPosition(0, -1.1, 0) a.Scene().Add(t.plane2) } // Update is 
called every frame. func (t *Texplane) Update(a *app.App, deltaTime time.Duration) { // TODO use deltaTime t.plane1.RotateY(0.01) t.plane2.RotateY(-0.01) } // Cleanup is called once at the end of the demo. func (t *Texplane) Cleanup(a *app.App) {}
demos/texture/plane.go
0.617743
0.402157
plane.go
starcoder
package packet import ( "github.com/go-gl/mathgl/mgl32" "github.com/google/uuid" "github.com/LiteLDev/BEProtocolGolang/minecraft/protocol" ) // AddPlayer is sent by the server to the client to make a player entity show up client-side. It is one of the // few entities that cannot be sent using the AddActor packet. type AddPlayer struct { // UUID is the UUID of the player. It is the same UUID that the client sent in the Login packet at the // start of the session. A player with this UUID must exist in the player list (built up using the // PlayerList packet), for it to show up in-game. UUID uuid.UUID // Username is the name of the player. This username is the username that will be set as the initial // name tag of the player. Username string // EntityUniqueID is the unique ID of the player. The unique ID is a value that remains consistent across // different sessions of the same world, but most servers simply fill the runtime ID of the player out for // this field. EntityUniqueID int64 // EntityRuntimeID is the runtime ID of the player. The runtime ID is unique for each world session, and // entities are generally identified in packets using this runtime ID. EntityRuntimeID uint64 // PlatformChatID is an identifier only set for particular platforms when chatting (presumably only for // Nintendo Switch). It is otherwise an empty string, and is used to decide which players are able to // chat with each other. PlatformChatID string // Position is the position to spawn the player on. If the player is on a distance that the viewer cannot // see it, the player will still show up if the viewer moves closer. Position mgl32.Vec3 // Velocity is the initial velocity the player spawns with. This velocity will initiate client side // movement of the player. Velocity mgl32.Vec3 // Pitch is the vertical rotation of the player. Facing straight forward yields a pitch of 0. Pitch is // measured in degrees. Pitch float32 // Yaw is the horizontal rotation of the player. 
Yaw is also measured in degrees. Yaw float32 // HeadYaw is the same as Yaw, except that it applies specifically to the head of the player. A different // value for HeadYaw than Yaw means that the player will have its head turned. HeadYaw float32 // HeldItem is the item that the player is holding. The item is shown to the viewer as soon as the player // itself shows up. Needless to say that this field is rather pointless, as additional packets still must // be sent for armour to show up. HeldItem protocol.ItemInstance // EntityMetadata is a map of entity metadata, which includes flags and data properties that alter in // particular the way the player looks. Flags include ones such as 'on fire' and 'sprinting'. // The metadata values are indexed by their property key. EntityMetadata map[uint32]interface{} // Flags is a set of flags that specify certain properties of the player, such as whether or not it can // fly and/or move through blocks. Flags uint32 // CommandPermissionLevel is a set of permissions that specify what commands a player is allowed to execute. CommandPermissionLevel uint32 // ActionPermissions is, much like Flags, a set of flags that specify actions that the player is allowed // to undertake, such as whether it is allowed to edit blocks, open doors etc. ActionPermissions uint32 // PermissionLevel is the permission level of the player as it shows up in the player list built up using // the PlayerList packet. PermissionLevel uint32 // CustomStoredPermissions ... CustomStoredPermissions uint32 // PlayerUniqueID is a unique identifier of the player. It appears it is not required to fill this field // out with a correct value. Simply writing 0 seems to work. PlayerUniqueID int64 // EntityLinks is a list of entity links that are currently active on the player. These links alter the // way the player shows up when first spawned in terms of it shown as riding an entity. 
Setting these // links is important for new viewers to see the player is riding another entity. EntityLinks []protocol.EntityLink // DeviceID is the device ID set in one of the files found in the storage of the device of the player. It // may be changed freely, so it should not be relied on for anything. DeviceID string // BuildPlatform is the build platform/device OS of the player that is about to be added, as it sent in // the Login packet when joining. BuildPlatform int32 } // ID ... func (*AddPlayer) ID() uint32 { return IDAddPlayer } // Marshal ... func (pk *AddPlayer) Marshal(w *protocol.Writer) { w.UUID(&pk.UUID) w.String(&pk.Username) w.Varint64(&pk.EntityUniqueID) w.Varuint64(&pk.EntityRuntimeID) w.String(&pk.PlatformChatID) w.Vec3(&pk.Position) w.Vec3(&pk.Velocity) w.Float32(&pk.Pitch) w.Float32(&pk.Yaw) w.Float32(&pk.HeadYaw) w.ItemInstance(&pk.HeldItem) w.EntityMetadata(&pk.EntityMetadata) w.Varuint32(&pk.Flags) w.Varuint32(&pk.CommandPermissionLevel) w.Varuint32(&pk.ActionPermissions) w.Varuint32(&pk.PermissionLevel) w.Varuint32(&pk.CustomStoredPermissions) w.Int64(&pk.PlayerUniqueID) protocol.WriteEntityLinks(w, &pk.EntityLinks) w.String(&pk.DeviceID) w.Int32(&pk.BuildPlatform) } // Unmarshal ... func (pk *AddPlayer) Unmarshal(r *protocol.Reader) { r.UUID(&pk.UUID) r.String(&pk.Username) r.Varint64(&pk.EntityUniqueID) r.Varuint64(&pk.EntityRuntimeID) r.String(&pk.PlatformChatID) r.Vec3(&pk.Position) r.Vec3(&pk.Velocity) r.Float32(&pk.Pitch) r.Float32(&pk.Yaw) r.Float32(&pk.HeadYaw) r.ItemInstance(&pk.HeldItem) r.EntityMetadata(&pk.EntityMetadata) r.Varuint32(&pk.Flags) r.Varuint32(&pk.CommandPermissionLevel) r.Varuint32(&pk.ActionPermissions) r.Varuint32(&pk.PermissionLevel) r.Varuint32(&pk.CustomStoredPermissions) r.Int64(&pk.PlayerUniqueID) protocol.EntityLinks(r, &pk.EntityLinks) r.String(&pk.DeviceID) r.Int32(&pk.BuildPlatform) }
minecraft/protocol/packet/add_player.go
0.532911
0.448547
add_player.go
starcoder
package main import ( "github.com/MattSwanson/raylib-go/physics" "github.com/MattSwanson/raylib-go/raylib" ) func main() { screenWidth := int32(800) screenHeight := int32(450) rl.SetConfigFlags(rl.FlagMsaa4xHint) rl.InitWindow(screenWidth, screenHeight, "Physac [raylib] - physics demo") // Physac logo drawing position logoX := screenWidth - rl.MeasureText("Physac", 30) - 10 logoY := int32(15) // Initialize physics and default physics bodies physics.Init() // Create floor rectangle physics body floor := physics.NewBodyRectangle(rl.NewVector2(float32(screenWidth)/2, float32(screenHeight)), 500, 100, 10) floor.Enabled = false // Disable body state to convert it to static (no dynamics, but collisions) // Create obstacle circle physics body circle := physics.NewBodyCircle(rl.NewVector2(float32(screenWidth)/2, float32(screenHeight)/2), 45, 10) circle.Enabled = false // Disable body state to convert it to static (no dynamics, but collisions) rl.SetTargetFPS(60) for !rl.WindowShouldClose() { // Update created physics objects physics.Update() if rl.IsKeyPressed(rl.KeyR) { // Reset physics input physics.Reset() floor = physics.NewBodyRectangle(rl.NewVector2(float32(screenWidth)/2, float32(screenHeight)), 500, 100, 10) floor.Enabled = false circle = physics.NewBodyCircle(rl.NewVector2(float32(screenWidth)/2, float32(screenHeight)/2), 45, 10) circle.Enabled = false } // Physics body creation inputs if rl.IsMouseButtonPressed(rl.MouseLeftButton) { physics.NewBodyPolygon(rl.GetMousePosition(), float32(rl.GetRandomValue(20, 80)), int(rl.GetRandomValue(3, 8)), 10) } else if rl.IsMouseButtonPressed(rl.MouseRightButton) { physics.NewBodyCircle(rl.GetMousePosition(), float32(rl.GetRandomValue(10, 45)), 10) } // Destroy falling physics bodies for _, body := range physics.GetBodies() { if body.Position.Y > float32(screenHeight)*2 { physics.DestroyBody(body) } } rl.BeginDrawing() rl.ClearBackground(rl.Black) rl.DrawFPS(screenWidth-90, screenHeight-30) // Draw created physics bodies for 
i, body := range physics.GetBodies() { vertexCount := physics.GetShapeVerticesCount(i) for j := 0; j < vertexCount; j++ { // Get physics bodies shape vertices to draw lines // NOTE: GetShapeVertex() already calculates rotation transformations vertexA := body.GetShapeVertex(j) jj := 0 if j+1 < vertexCount { // Get next vertex or first to close the shape jj = j + 1 } vertexB := body.GetShapeVertex(jj) rl.DrawLineV(vertexA, vertexB, rl.Green) // Draw a line between two vertex positions } } rl.DrawText("Left mouse button to create a polygon", 10, 10, 10, rl.White) rl.DrawText("Right mouse button to create a circle", 10, 25, 10, rl.White) rl.DrawText("Press 'R' to reset example", 10, 40, 10, rl.White) rl.DrawText("Physac", logoX, logoY, 30, rl.White) rl.DrawText("Powered by", logoX+50, logoY-7, 10, rl.White) rl.EndDrawing() } physics.Close() // Unitialize physics rl.CloseWindow() }
examples/physics/physac/demo/main.go
0.593609
0.40754
main.go
starcoder
The original Space-Saving algorithm: https://icmi.cs.ucsb.edu/research/tech_reports/reports/2005-23.pdf The Filtered Space-Saving enhancement: http://www.l2f.inesc-id.pt/~fmmb/wiki/uploads/Work/misnis.ref0a.pdf This implementation follows the algorithm of the FSS paper, but not the suggested implementation. Specifically, we use a heap instead of a sorted list of monitored items, and since we are also using a map to provide O(1) access on update also don't need the c_i counters in the hash table. Licensed under the MIT license. */ package topk import ( "bytes" "container/heap" "encoding/gob" "hash/fnv" "sort" ) // Element is a TopK item type Element struct { Key string Count int Error int } type elementsByCountDescending []Element func (elts elementsByCountDescending) Len() int { return len(elts) } func (elts elementsByCountDescending) Less(i, j int) bool { return (elts[i].Count >= elts[j].Count) || (elts[i].Count == elts[j].Count && elts[i].Key < elts[i].Key) } func (elts elementsByCountDescending) Swap(i, j int) { elts[i], elts[j] = elts[j], elts[i] } type keys struct { m map[string]int elts []Element } // Implement the container/heap interface func (tk *keys) Len() int { return len(tk.elts) } func (tk *keys) Less(i, j int) bool { return (tk.elts[i].Count < tk.elts[j].Count) || (tk.elts[i].Count == tk.elts[j].Count && tk.elts[i].Error > tk.elts[j].Error) } func (tk *keys) Swap(i, j int) { tk.elts[i], tk.elts[j] = tk.elts[j], tk.elts[i] tk.m[tk.elts[i].Key] = i tk.m[tk.elts[j].Key] = j } func (tk *keys) Push(x interface{}) { e := x.(Element) tk.m[e.Key] = len(tk.elts) tk.elts = append(tk.elts, e) } func (tk *keys) Pop() interface{} { var e Element e, tk.elts = tk.elts[len(tk.elts)-1], tk.elts[:len(tk.elts)-1] delete(tk.m, e.Key) return e } // Stream calculates the TopK elements for a stream type Stream struct { n int k keys alphas []int } // New returns a Stream estimating the top n most frequent elements func New(n int) *Stream { return &Stream{ n: n, k: keys{m: 
make(map[string]int), elts: make([]Element, 0, n)}, alphas: make([]int, n*6), // 6 is the multiplicative constant from the paper } } // Insert adds an element to the stream to be tracked func (s *Stream) Insert(x string, count int) { h := fnv.New32a() h.Write([]byte(x)) xhash := int(h.Sum32()) % len(s.alphas) // are we tracking this element? if idx, ok := s.k.m[x]; ok { s.k.elts[idx].Count += count heap.Fix(&s.k, idx) return } // can we track more elements? if len(s.k.elts) < s.n { // there is free space heap.Push(&s.k, Element{Key: x, Count: count}) return } if s.alphas[xhash]+count < s.k.elts[0].Count { s.alphas[xhash] += count return } // replace the current minimum element minKey := s.k.elts[0].Key h.Reset() h.Write([]byte(minKey)) mkhash := int(h.Sum32()) % len(s.alphas) s.alphas[mkhash] = s.k.elts[0].Count s.k.elts[0].Key = x s.k.elts[0].Error = s.alphas[xhash] s.k.elts[0].Count = s.alphas[xhash] + count // we're not longer monitoring minKey delete(s.k.m, minKey) // but 'x' is as array position 0 s.k.m[x] = 0 heap.Fix(&s.k, 0) } // Keys returns the current estimates for the most frequent elements func (s *Stream) Keys() []Element { elts := append([]Element(nil), s.k.elts...) sort.Sort(elementsByCountDescending(elts)) return elts } func (s *Stream) GobEncode() ([]byte, error) { buf := bytes.Buffer{} enc := gob.NewEncoder(&buf) if err := enc.Encode(s.n); err != nil { return nil, err } if err := enc.Encode(s.k.m); err != nil { return nil, err } if err := enc.Encode(s.k.elts); err != nil { return nil, err } if err := enc.Encode(s.alphas); err != nil { return nil, err } return buf.Bytes(), nil } func (s *Stream) GobDecode(b []byte) error { dec := gob.NewDecoder(bytes.NewBuffer(b)) if err := dec.Decode(&s.n); err != nil { return err } if err := dec.Decode(&s.k.m); err != nil { return err } if err := dec.Decode(&s.k.elts); err != nil { return err } if err := dec.Decode(&s.alphas); err != nil { return err } return nil }
vendor/github.com/dgryski/go-topk/topk.go
0.802942
0.540863
topk.go
starcoder
package bun import ( "net/url" "strings" ) var defaultCommand = "g" // Commands is a map from the list of available commands to the function that will handle the redirect var Commands = map[string]Command{ "bad": Command{ Name: "Blockchain address", Key: "bad", redirectFunc: simpleAppend("https://blockchain.info/address/"), Help: "Finds the specific bitcoin address on blockchain.info", private: false, }, "bh": Command{ Name: "Behance", Key: "bh", redirectFunc: simpleQuery("https://www.behance.net/search", "search"), Help: "Searches Behance", private: false, }, "btx": Command{ Name: "Blockchain tx", Key: "btx", redirectFunc: simpleAppend("https://blockchain.info/tx/"), Help: "Finds the specific bitcoin transaction on blockchain.info", private: false, }, "d": Command{ Name: "Google drive", Key: "d", redirectFunc: simpleQuery("https://drive.google.com/drive/search", "q"), Help: "Searches Google Drive", private: false, }, "db": Command{ Name: "Dribble", Key: "db", redirectFunc: simpleQuery("https://dribbble.com/search", "q"), Help: "Searches Dribbble", private: false, }, "etx": Command{ Name: "Ethereum transaction", Key: "etx", redirectFunc: simpleAppend("https://etherscan.io/tx/"), Help: "Finds the ethereum transaction on Etherscan", private: false, }, "ead": Command{ Name: "Ethereum address", Key: "ead", redirectFunc: simpleAppend("https://etherscan.io/address/"), Help: "Finds the ethereum address on Etherscan", private: false, }, "fi": Command{ Name: "FlatIcon", Key: "fi", redirectFunc: simpleQuery("https://www.flaticon.com/search", "word"), Help: "FlatIcon search", private: false, }, "fp": Command{ Name: "FreePic", Key: "fp", redirectFunc: simpleAppend("http://www.freepik.com/index.php?goto=2&searchform=1&k="), Help: "FreePic search", private: false, }, "g": Command{ Name: "Google", Key: "g", redirectFunc: simpleQuery("https://www.google.com/search", "q"), Help: "Google search", private: false, }, "gd": Command{ Name: "Google Design", Key: "gd", redirectFunc: 
simpleQuery("https://design.google/search/", "q"), Help: "Google design search", private: false, }, "gm": Command{ Name: "Gmail", Key: "gm", redirectFunc: simpleAppend("https://mail.google.com/mail/#search/"), Help: "Searches your Gmail inbox", private: false, }, "h": Command{ Name: "help", Key: "h", redirectFunc: simpleRedirect("/"), Help: "Shows help (You're looking at it!)", private: false, }, "np": Command{ Name: "Noun Project", Key: "np", redirectFunc: simpleQuery("https://thenounproject.com/search/", "q"), Help: "Noun Project search", private: false, }, "pin": Command{ Name: "Pinterest", Key: "pin", redirectFunc: simpleQuery("https://za.pinterest.com/search/pins/", "q"), Help: "Pinterest search", private: false, }, "so": Command{ Name: "StackOverflow", Key: "so", redirectFunc: simpleQuery("https://stackoverflow.com/search", "q"), Help: "StackOverflow search", private: false, }, "sl": Command{ Name: "Shelflife", Key: "sl", redirectFunc: simpleQuery("https://www.shelflife.co.za/search", "search"), Help: "For your sneaker needs", private: false, }, "tk": Command{ Name: "Takealot", Key: "tk", redirectFunc: simpleQuery("https://www.takealot.com/all", "qsearch"), Help: "Takealot search", private: false, }, "tw": Command{ Name: "Twitter", Key: "tw", redirectFunc: simpleQuery("https://www.twitter.com/search", "q"), Help: "Twitter search", private: false, }, "us": Command{ Name: "Usplash", Key: "us", redirectFunc: usplash, Help: "Usplash search", private: false, }, "wk": Command{ Name: "Wikipedia", Key: "wk", redirectFunc: simpleQuery("https://en.wikipedia.org/w/index.php", "search"), Help: "Search Wikipedia", private: false, }, "yt": Command{ Name: "YouTube", Key: "yt", redirectFunc: simpleQuery("https://www.youtube.com/results", "search_query"), Help: "Search YouTube", private: false, }, } // simpleQuery handles search urls in the format // baseURL?key=query func simpleQuery(baseURL, key string) redirector { return func(query string) string { redirectURL, _ := 
url.Parse(baseURL) q := redirectURL.Query() q.Set(key, query) redirectURL.RawQuery = q.Encode() return redirectURL.String() } } // simpleRedirect handles situations where search is not // possible, just always redirects to specific url func simpleRedirect(baseURL string) redirector { return func(query string) string { return baseURL } } // simpleAppend handles search urls in the format // baseURL/query func simpleAppend(baseURL string) redirector { return func(query string) string { return baseURL + query } } // usplash uses '-'instead of '+' in their query URLS func usplash(query string) string { wrongURL := simpleAppend("https://unsplash.com/search/")(query) return strings.Replace(wrongURL, "+", "-", -1) }
default.go
0.501953
0.418459
default.go
starcoder
package main import ( . "gorgonia.org/gorgonia" "gorgonia.org/tensor" ) var of = tensor.Float32 type FC struct { W *Node Act func(x *Node) (*Node, error) } func (l *FC) fwd(x *Node) (*Node, error) { xw := Must(Mul(x, l.W)) if l.Act == nil { return xw, nil } return l.Act(xw) } type NN struct { g *ExprGraph x *Node y *Node l []FC pred *Node predVal Value } func NewNN(batchsize int) *NN { g := NewGraph() x := NewMatrix(g, of, WithShape(batchsize, 4), WithName("X"), WithInit(Zeroes())) y := NewVector(g, of, WithShape(batchsize), WithName("Y"), WithInit(Zeroes())) l := []FC{ FC{W: NewMatrix(g, of, WithShape(4, 2), WithName("L0W"), WithInit(GlorotU(1.0))), Act: Tanh}, FC{W: NewMatrix(g, of, WithShape(2, 128), WithName("L1W"), WithInit(GlorotU(1.0))), Act: Tanh}, FC{W: NewMatrix(g, of, WithShape(128, 128), WithName("L2W"), WithInit(GlorotU(1.0))), Act: Tanh}, FC{W: NewMatrix(g, of, WithShape(128, 1), WithName("L3W"), WithInit(GlorotU(1.0)))}, } return &NN{ g: g, x: x, y: y, l: l, } } func (nn *NN) learnables() Nodes { retVal := make(Nodes, 0, len(nn.l)) for _, l := range nn.l { retVal = append(retVal, l.W) } return retVal } func (nn *NN) model() []ValueGrad { return NodesToValueGrads(nn.learnables()) } func (nn *NN) cons() (pred *Node, err error) { pred = nn.x for _, l := range nn.l { if pred, err = l.fwd(pred); err != nil { return nil, err } } nn.pred = pred Read(nn.pred, &nn.predVal) cost := Must(Mean(Must(Square(Must(Sub(nn.y, pred)))))) if _, err = Grad(cost, nn.learnables()...); err != nil { return nil, err } return pred, nil } type input struct { State Point Action Vector } func (nn *NN) Let2(xs []input, y []float32) { xval := nn.x.Value().Data().([]float32) yval := nn.y.Value().Data().([]float32) // zero the data which may be contaminated by previous runs for i := range xval { xval[i] = 0 } for i := range yval { yval[i] = 0 } tmp := make([]float32, 0, len(xs)*4) for _, x := range xs { tmp = append(tmp, float32(x.State.X), float32(x.State.Y), float32(x.Action.X), 
float32(x.Action.Y)) } copy(xval, tmp) copy(yval, y) } func (nn *NN) Let1(x input) { xval := nn.x.Value().Data().([]float32) // zero the data which may be contaminated by previous runs for i := range xval { xval[i] = 0 } xval[0] = float32(x.State.X) xval[1] = float32(x.State.Y) xval[2] = float32(x.Action.X) xval[3] = float32(x.Action.Y) }
Chapter07/dqn_maze_solver/nn.go
0.603815
0.419707
nn.go
starcoder
package networkpolicy import ( "fmt" "math" "sort" "k8s.io/klog" "github.com/vmware-tanzu/antrea/pkg/agent/types" ) const ( PriorityBottomCNP = uint16(100) PriorityTopCNP = uint16(65000) InitialPriorityOffset = uint16(640) InitialPriorityZones = 100 ) // InitialOFPriorityGetter is a function that will map types.Priority to a specific initial OpenFlow // priority in a table. It is used to space out the priorities in the OVS table and provide an initial // "guess" on the OpenFlow priority that can be assigned to the input Priority. If that OpenFlow // priority is not available, getInsertionPoint of priorityAssigner will then search for the appropriate // OpenFlow priority to insert the input Priority. type InitialOFPriorityGetter func(p types.Priority) uint16 // InitialOFPrioritySingleTierPerTable is an InitialOFPriorityGetter that can be used by OVS tables that // handles only one Antrea NetworkPolicy Tier. It roughly divides the table into 100 zones and computes // the initial OpenFlow priority based on rule priority. func InitialOFPrioritySingleTierPerTable(p types.Priority) uint16 { priorityIndex := int32(math.Floor(p.PolicyPriority)) if priorityIndex > InitialPriorityZones-1 { priorityIndex = InitialPriorityZones - 1 } // Cannot return a negative OF priority. if PriorityTopCNP-InitialPriorityOffset*uint16(priorityIndex) <= uint16(p.RulePriority) { return PriorityBottomCNP } return PriorityTopCNP - InitialPriorityOffset*uint16(priorityIndex) - uint16(p.RulePriority) } // priorityAssigner is a struct that maintains the current mapping between types.Priority and // OpenFlow priorities in a single OVS table. It also knows how to re-assign priorities if certain section overflows. type priorityAssigner struct { // priorityMap maintains the current mapping of known Priorities to OpenFlow priorities. priorityMap map[types.Priority]uint16 // ofPriorityMap maintains the current mapping of OpenFlow priorities in the table to Priorities. 
ofPriorityMap map[uint16]types.Priority // sortedPriorities maintains a slice of sorted OpenFlow priorities in the table that are occupied. sortedOFPriorities []uint16 // initialOFPriorityFunc determines the initial OpenFlow priority to be checked for input Priorities. initialOFPriorityFunc InitialOFPriorityGetter } func newPriorityAssigner(initialOFPriorityFunc InitialOFPriorityGetter) *priorityAssigner { pa := &priorityAssigner{ priorityMap: map[types.Priority]uint16{}, ofPriorityMap: map[uint16]types.Priority{}, sortedOFPriorities: []uint16{}, initialOFPriorityFunc: initialOFPriorityFunc, } return pa } // updatePriorityAssignment updates all the local maps to correlate input ofPriority and Priority. // TODO: Add performance benchmark for priority allocation and ways to optimize sortedOFPriorities. func (pa *priorityAssigner) updatePriorityAssignment(ofPriority uint16, p types.Priority) { if _, exists := pa.ofPriorityMap[ofPriority]; !exists { // idx is the insertion point for the newly allocated ofPriority. idx := sort.Search(len(pa.sortedOFPriorities), func(i int) bool { return ofPriority <= pa.sortedOFPriorities[i] }) pa.sortedOFPriorities = append(pa.sortedOFPriorities, 0) // Move elements starting from idx back one position to make room for the inserting ofPriority. copy(pa.sortedOFPriorities[idx+1:], pa.sortedOFPriorities[idx:]) pa.sortedOFPriorities[idx] = ofPriority } pa.ofPriorityMap[ofPriority] = p pa.priorityMap[p] = ofPriority } // getNextOccupiedOFPriority returns the first ofPriority higher than the input ofPriority that is // currently occupied in the table, as well as the corresponding Priority. // Note that if the input ofPriority itself is occupied, this function will return that ofPriority // and the Priority that maps to it currently. The search is based on sortedOFPriorities as it assumes // the table is sparse in most cases. 
func (pa *priorityAssigner) getNextOccupiedOFPriority(ofPriority uint16) (*uint16, *types.Priority) { idx := sort.Search(len(pa.sortedOFPriorities), func(i int) bool { return ofPriority <= pa.sortedOFPriorities[i] }) if idx < len(pa.sortedOFPriorities) { nextOccupied := pa.sortedOFPriorities[idx] priority := pa.ofPriorityMap[nextOccupied] return &nextOccupied, &priority } return nil, nil } // getNextVacantOFPriority returns the first higher ofPriority that is currently vacant in the table, // starting from the input ofPriority. // Note that if the input ofPriority itself is vacant, it will simply return that ofPriority. // The search is incrementally against all ofPriorities available as it assumes the table is sparse in most cases. func (pa *priorityAssigner) getNextVacantOFPriority(ofPriority uint16) *uint16 { for i := ofPriority; i <= PriorityTopCNP; i++ { // input ofPriority will be greater than or equal to PriorityBottomCNP if _, exists := pa.ofPriorityMap[i]; !exists { return &i } } return nil } // getLastOccupiedOFPriority returns the first ofPriority lower than the input ofPriority that is // currently occupied in the table, as well as the corresponded Priority. // Note that the function must return a ofPriority that is lower than the input ofPriority. // The search is based on sortedOFPriorities as it assumes the table is sparse in most cases. func (pa *priorityAssigner) getLastOccupiedOFPriority(ofPriority uint16) (*uint16, *types.Priority) { idx := sort.Search(len(pa.sortedOFPriorities), func(i int) bool { return ofPriority <= pa.sortedOFPriorities[i] }) if idx > 0 { lastOccupied := pa.sortedOFPriorities[idx-1] priority := pa.ofPriorityMap[lastOccupied] return &lastOccupied, &priority } return nil, nil } // getLastVacantOFPriority returns the first lower ofPriority that is currently vacant in the table, // starting from the ofPriority one below the input. 
// The search is incrementally against all ofPriorities available as it assumes the table is sparse in most cases. func (pa *priorityAssigner) getLastVacantOFPriority(ofPriority uint16) *uint16 { for i := ofPriority - 1; i >= PriorityBottomCNP; i-- { // ofPriority-1 will be less than or equal to PriorityTopCNP if _, exists := pa.ofPriorityMap[i]; !exists { return &i } } return nil } // upperBoundOk returns if the Priorities *on* and after the input ofPriority are higher than the input Priority. func (pa *priorityAssigner) upperBoundOk(ofPriority uint16, p types.Priority) bool { of, priority := pa.getNextOccupiedOFPriority(ofPriority) return of == nil || p.Less(*priority) } // lowerBoundOk returns if the Priorities before the input ofPriority are lower than the input Priority. func (pa *priorityAssigner) lowerBoundOk(ofPriority uint16, p types.Priority) bool { of, priority := pa.getLastOccupiedOFPriority(ofPriority) return of == nil || priority.Less(p) } // getInsertionPoint searches for the ofPriority to insert the input Priority in the table. // It is guaranteed that the Priorities before the insertionPoint index is lower than the input Priority, // and Priorities *on* and after the insertionPoint index is higher than the input Priority. // ofPriority returned will range from PriorityBottomCNP to PriorityTopCNP+1. 
func (pa *priorityAssigner) getInsertionPoint(p types.Priority) (uint16, bool) { insertionPoint := pa.initialOFPriorityFunc(p) occupied, upwardSearching := false, false Loop: for insertionPoint >= PriorityBottomCNP && insertionPoint <= PriorityTopCNP { switch { case pa.upperBoundOk(insertionPoint, p) && pa.lowerBoundOk(insertionPoint, p): if _, occupied = pa.ofPriorityMap[insertionPoint]; occupied && !upwardSearching { if insertionPoint != PriorityBottomCNP { insertionPoint-- continue Loop } } break Loop case pa.upperBoundOk(insertionPoint, p): insertionPoint-- case pa.lowerBoundOk(insertionPoint, p): insertionPoint++ upwardSearching = true } } return insertionPoint, occupied } // reassignPriorities re-arranges current Priority mappings to make place for the inserting Priority. It sifts // existing priorties up or down based on cost (how many priorities it needs to move). siftPrioritiesDown is used // as a tie-breaker. An error should only be returned if all the available ofPriorities in the table are occupied. 
func (pa *priorityAssigner) reassignPriorities(insertionPoint uint16, p types.Priority) (*uint16, map[uint16]uint16, func(), error) { nextVacant, lastVacant := pa.getNextVacantOFPriority(insertionPoint), pa.getLastVacantOFPriority(insertionPoint) switch { case (insertionPoint == PriorityBottomCNP || lastVacant == nil) && nextVacant != nil: return pa.siftPrioritiesUp(insertionPoint, *nextVacant, p) case (insertionPoint > PriorityTopCNP || nextVacant == nil) && lastVacant != nil: return pa.siftPrioritiesDown(insertionPoint-uint16(1), *lastVacant, p) case nextVacant != nil && lastVacant != nil: costSiftUp := *nextVacant - insertionPoint costSiftDown := insertionPoint - *lastVacant - uint16(1) if costSiftUp < costSiftDown { return pa.siftPrioritiesUp(insertionPoint, *nextVacant, p) } else { return pa.siftPrioritiesDown(insertionPoint-uint16(1), *lastVacant, p) } default: return nil, map[uint16]uint16{}, nil, fmt.Errorf("no available Openflow priority left to insert priority %v", p) } } // siftPrioritiesUp moves all consecutive occupied ofPriorities and corresponding Priorities up by one ofPriority, // starting from the insertionPoint. It also assigns the freed ofPriority to the input Priority. 
func (pa *priorityAssigner) siftPrioritiesUp(insertionPoint, nextVacant uint16, p types.Priority) (*uint16, map[uint16]uint16, func(), error) { priorityReassignments := map[uint16]uint16{} if insertionPoint >= nextVacant { return nil, priorityReassignments, nil, fmt.Errorf("failed to determine the range for sifting priorities up") } for i := nextVacant; i > insertionPoint; i-- { p, _ := pa.ofPriorityMap[i-1] pa.updatePriorityAssignment(i, p) priorityReassignments[i-1] = i klog.V(4).Infof("Original priority %v now needs to be re-assigned %v", i-1, i) } pa.updatePriorityAssignment(insertionPoint, p) revertFunc := func() { delete(pa.priorityMap, p) for original := insertionPoint; original < nextVacant; original++ { updated := original + 1 p := pa.ofPriorityMap[updated] // nextVacant was allocated because of the reassignment and needs to be released when reverting. if updated == nextVacant { pa.Release(updated) } pa.ofPriorityMap[original] = p pa.priorityMap[p] = original } } return &insertionPoint, priorityReassignments, revertFunc, nil } // siftPrioritiesDown moves all consecutive occupied ofPriorities and corresponding Priorities down by one ofPriority, // starting from the insertionPoint. It also assigns the freed ofPriority to the input Priority. 
func (pa *priorityAssigner) siftPrioritiesDown(insertionPoint, lastVacant uint16, p types.Priority) (*uint16, map[uint16]uint16, func(), error) { priorityReassignments := map[uint16]uint16{} if insertionPoint <= lastVacant { return nil, priorityReassignments, nil, fmt.Errorf("failed to determine the range for sifting priorities down") } for i := lastVacant; i < insertionPoint; i++ { p, _ := pa.ofPriorityMap[i+1] pa.updatePriorityAssignment(i, p) priorityReassignments[i+1] = i klog.V(4).Infof("Original priority %v now needs to be re-assigned %v", i+1, i) } pa.updatePriorityAssignment(insertionPoint, p) revertFunc := func() { delete(pa.priorityMap, p) for original := insertionPoint; original > lastVacant; original-- { updated := original - 1 p := pa.ofPriorityMap[updated] // lastVacant was allocated because of the reassignment and needs to be released when reverting. if updated == lastVacant { pa.Release(updated) } pa.ofPriorityMap[original] = p pa.priorityMap[p] = original } } return &insertionPoint, priorityReassignments, revertFunc, nil } // GetOFPriority retrieves the OFPriority for the input Priority to be installed, // and returns installed priorities that need to be re-assigned if necessary. func (pa *priorityAssigner) GetOFPriority(p types.Priority) (*uint16, map[uint16]uint16, func(), error) { if ofPriority, exists := pa.priorityMap[p]; exists { return &ofPriority, nil, nil, nil } insertionPoint, occupied := pa.getInsertionPoint(p) if insertionPoint == PriorityBottomCNP || insertionPoint > PriorityTopCNP || occupied { return pa.reassignPriorities(insertionPoint, p) } pa.updatePriorityAssignment(insertionPoint, p) return &insertionPoint, nil, nil, nil } // RegisterPriorities registers a list of Priorities to be created with the priorityAssigner. // It is used to populate the priorityMap in case of batch rule adds. 
func (pa *priorityAssigner) RegisterPriorities(priorities []types.Priority) error { for _, p := range priorities { if _, _, _, err := pa.GetOFPriority(p); err != nil { return err } } return nil } // Release removes the priority that currently corresponds to the input OFPriority from the known priorities. func (pa *priorityAssigner) Release(ofPriority uint16) { priority, exists := pa.ofPriorityMap[ofPriority] if !exists { klog.V(2).Infof("OpenFlow priority %v not known to this table, skip releasing priority", ofPriority) return } delete(pa.priorityMap, priority) delete(pa.ofPriorityMap, ofPriority) idxToDel := sort.Search(len(pa.sortedOFPriorities), func(i int) bool { return ofPriority <= pa.sortedOFPriorities[i] }) pa.sortedOFPriorities = append(pa.sortedOFPriorities[:idxToDel], pa.sortedOFPriorities[idxToDel+1:]...) }
pkg/agent/controller/networkpolicy/priority.go
0.633297
0.420421
priority.go
starcoder
package greatspacerace import "github.com/TSavo/chipmunk/vect" type Segment struct { Point1, Point2 vect.Vect } type Track struct { Id, Name string Laps, MaxTicks int Segments []Segment Goal Segment StartingAngle vect.Float Checkpoints []Segment } func (S1 *Segment) Intersects(S2 *Segment) bool { u := vect.Sub(S1.Point2, S1.Point1) v := vect.Sub(S2.Point2, S2.Point1) w := vect.Sub(S1.Point1, S2.Point1) D := vect.Cross(u, v) // test if they are parallel (includes either being a point) if vect.FAbs(D) < 0.0000001 { // S1 and S2 are parallel if vect.Cross(u, w) != 0 || vect.Cross(v, w) != 0 { return false // they are NOT collinear } // they are collinear or degenerate // check if they are degenerate points du := vect.Dot(u, u) dv := vect.Dot(v, v) if du == 0 && dv == 0 { // both segments are points if !vect.Equals(S1.Point1, S2.Point1) { // they are distinct points return false } return true } if du == 0 { // S1 is a single point if !S2.Contains(S1.Point1) { // but is not in S2 return false } return true } if dv == 0 { // S2 a single point if !S1.Contains(S2.Point1) { // but is not in S1 return false } return true } // they are collinear segments - get overlap (or not) var t0, t1 vect.Float // endpoints of S1 in eqn for S2 w2 := vect.Sub(S1.Point2, S2.Point1) if v.X != 0 { t0 = w.X / v.X t1 = w2.X / v.X } else { t0 = w.Y / v.Y t1 = w2.Y / v.Y } if t0 > t1 { // must have t0 smaller than t1 t0, t1 = t1, t0 // swap if not } if t0 > 1 || t1 < 0 { return false // NO overlap } return true } // the segments are skew and may intersect in a point // get the intersect parameter for S1 sI := vect.Cross(v, w) / D if sI < 0 || sI > 1 { // no intersect with S1 return false } // get the intersect parameter for S2 tI := vect.Cross(u, w) / D if tI < 0 || tI > 1 { // no intersect with S2 return false } return true } func (S *Segment) Contains(P vect.Vect) bool { if S.Point1.X != S.Point2.X { // S is not vertical if S.Point1.X <= P.X && P.X <= S.Point2.X { return true } if S.Point1.X >= 
P.X && P.X >= S.Point2.X { return true } } else { // S is vertical, so test y coordinate if S.Point1.Y <= P.Y && P.Y <= S.Point2.Y { return true } if S.Point1.Y >= P.Y && P.Y >= S.Point2.Y { return true } } return false } func (this *Segment) GetStartingPositions(pieces int) []vect.Vect { xDif := (this.Point2.X - this.Point1.X) / vect.Float(pieces+1) yDif := (this.Point2.Y - this.Point1.Y) / vect.Float(pieces+1) places := make([]vect.Vect, pieces) for x := 0; x < pieces; x++ { places[x].X = this.Point1.X + (xDif * vect.Float(x+1)) places[x].Y = this.Point1.Y + (yDif * vect.Float(x+1)) } return places }
track.go
0.584508
0.503601
track.go
starcoder
package CloudForest import ( "math" ) /* AdaCostTarget wraps a numerical feature as a target for us in Cost Sensitive Adaptive Boosting (AdaC2.M1) "Boosting for Learning Multiple Classes with Imbalanced Class Distribution" <NAME>, <NAME> and <NAME> See equations in slides here: http://people.ee.duke.edu/~lcarin/Minhua4.18.08.pdf */ type AdaCostTarget struct { CatFeature Weights []float64 Costs []float64 } /* NewAdaCostTarget creates a categorical adaptive boosting target and initializes its weights. */ func NewAdaCostTarget(f CatFeature) (abt *AdaCostTarget) { nCases := f.Length() abt = &AdaCostTarget{f, make([]float64, nCases), make([]float64, f.NCats())} for i := range abt.Weights { abt.Weights[i] = 1 / float64(nCases) } return } /*RegretTarget.SetCosts puts costs in a map[string]float64 by feature name into the proper entries in RegretTarget.Costs.*/ func (target *AdaCostTarget) SetCosts(costmap map[string]float64) { for i := 0; i < target.NCats(); i++ { c := target.NumToCat(i) target.Costs[i] = costmap[c] } } /* SplitImpurity is an AdaCosting version of SplitImpurity. */ func (target *AdaCostTarget) SplitImpurity(l *[]int, r *[]int, m *[]int, allocs *BestSplitAllocs) (impurityDecrease float64) { nl := float64(len(*l)) nr := float64(len(*r)) nm := 0.0 impurityDecrease = nl * target.Impurity(l, allocs.LCounter) impurityDecrease += nr * target.Impurity(r, allocs.RCounter) if m != nil && len(*m) > 0 { nm = float64(len(*m)) impurityDecrease += nm * target.Impurity(m, allocs.Counter) } impurityDecrease /= nl + nr + nm return } //UpdateSImpFromAllocs willl be called when splits are being built by moving cases from r to l as in learning from numerical variables. //Here it just wraps SplitImpurity but it can be implemented to provide further optimization. 
func (target *AdaCostTarget) UpdateSImpFromAllocs(l *[]int, r *[]int, m *[]int, allocs *BestSplitAllocs, movedRtoL *[]int) (impurityDecrease float64) { var cat, i int lcounter := *allocs.LCounter rcounter := *allocs.RCounter for _, i = range *movedRtoL { //most expensive statement: cat = target.Geti(i) lcounter[cat]++ rcounter[cat]-- //counter[target.Geti(i)]++ } nl := float64(len(*l)) nr := float64(len(*r)) nm := 0.0 impurityDecrease = nl * target.ImpFromCounts(l, allocs.LCounter) impurityDecrease += nr * target.ImpFromCounts(r, allocs.RCounter) if m != nil && len(*m) > 0 { nm = float64(len(*m)) impurityDecrease += nm * target.ImpFromCounts(m, allocs.Counter) } impurityDecrease /= nl + nr + nm return } //Impurity is an AdaCosting that uses the weights specified in weights. func (target *AdaCostTarget) Impurity(cases *[]int, counter *[]int) (e float64) { e = 0.0 //m := target.Modei(cases) target.CountPerCat(cases, counter) e = target.ImpFromCounts(cases, counter) return } //ImpFromCounts recalculates gini impurity from class counts for us in intertive updates. func (target *AdaCostTarget) ImpFromCounts(cases *[]int, counter *[]int) (e float64) { var m, mc int for i, c := range *counter { if c > mc { m = i mc = c } } for _, c := range *cases { cat := target.Geti(c) if cat != m { e += target.Weights[c] * target.Costs[cat] } } return } //Boost performs categorical adaptive boosting using the specified partition and //returns the weight that tree that generated the partition should be given. 
func (t *AdaCostTarget) Boost(leaves *[][]int) (weight float64) { weight = 0.0 counter := make([]int, t.NCats()) for _, cases := range *leaves { weight += t.Impurity(&cases, &counter) } if weight >= .5 { return 0.0 } weight = .5 * math.Log((1-weight)/weight) for _, cases := range *leaves { t.CountPerCat(&cases, &counter) var m, mc int for i, c := range counter { if c > mc { m = i mc = c } } for _, c := range cases { if t.IsMissing(c) == false { cat := t.Geti(c) //CHANGE from adaboost: if cat != m { t.Weights[c] = t.Weights[c] * math.Exp(weight) * t.Costs[cat] } else { t.Weights[c] = t.Weights[c] * math.Exp(-weight) * t.Costs[cat] } } } } normfactor := 0.0 for _, v := range t.Weights { normfactor += v } for i, v := range t.Weights { t.Weights[i] = v / normfactor } return }
adacosttarget.go
0.761361
0.440409
adacosttarget.go
starcoder
package value import ( "github.com/JosephNaberhaus/go-delta-sync/agnostic/blocks/types" ) // Refers to a literal null/nil/empty value type Null struct { isValueType isMethodIndependent } func NewNull() Null { return Null{} } // Refers to a literal string value type String struct { isValueType isMethodIndependent value string } func (s String) Value() string { return s.value } func NewString(value string) String { return String{value: value} } // Refers a literal int value type Int struct { isValueType isMethodIndependent value int } func (i Int) Value() int { return i.value } func NewInt(value int) Int { return Int{value: value} } // Refers to a literal floating point value type Float struct { isValueType isMethodIndependent value float64 } func (f Float) Value() float64 { return f.value } func NewFloat(value float64) Float { return Float{value: value} } // Refers to a literal boolean value type Bool struct { isValueType isMethodIndependent value bool } func (b Bool) Value() bool { return b.value } func NewBool(value bool) Bool { return Bool{value: value} } // Refers to an array literal type Array struct { isValueType elementType types.Any elements []Any } func (a Array) ElementType() types.Any { return a.elementType } func (a Array) Elements() []Any { return a.elements } func (a Array) IsMethodDependent() bool { for _, element := range a.elements { if element.IsMethodDependent() { return true } } return false } func NewArray(elementType types.Any, element ...Any) Array { return Array{ elementType: elementType, elements: element, } } type KeyValue struct { key, value Any } func (k KeyValue) Key() Any { return k.key } func (k KeyValue) Value() Any { return k.value } func NewKeyValue(key, value Any) KeyValue { return KeyValue{ key: key, value: value, } } type Map struct { isValueType keyType, valueType types.Any elements []KeyValue } func (m Map) KeyType() types.Any { return m.keyType } func (m Map) ValueType() types.Any { return m.valueType } func (m Map) Elements() 
[]KeyValue { return m.elements } func (m Map) IsMethodDependent() bool { for _, element := range m.elements { if element.Key().IsMethodDependent() || element.Value().IsMethodDependent() { return true } } return false } func NewMap(keyType, valueType types.Any, elements ...KeyValue) Map { return Map{ keyType: keyType, valueType: valueType, elements: elements, } }
agnostic/blocks/value/literal.go
0.817283
0.477371
literal.go
starcoder
package iso20022 // Transfer from one investment fund/fund class to another investment fund or investment fund class by the investor. A switch is composed of one or several subscription legs, and one or several redemption legs. type SwitchOrder3 struct { // Unique and unambiguous identifier for a group of individual orders, as assigned by the instructing party. This identifier links the individual orders together. MasterReference *Max35Text `xml:"MstrRef,omitempty"` // Date and time at which the order was placed by the investor. OrderDateTime *ISODateTime `xml:"OrdrDtTm,omitempty"` // Unique and unambiguous identifier for an order, as assigned by the instructing party. OrderReference *Max35Text `xml:"OrdrRef"` // Unique and unambiguous investor's identification of an order. This reference can typically be used in a hub scenario to give the reference of the order as assigned by the underlying client. ClientReference *Max35Text `xml:"ClntRef,omitempty"` // Unique and unambiguous identifier for an order cancellation, as assigned by the instructing party. CancellationReference *Max35Text `xml:"CxlRef,omitempty"` // Account between an investor(s) and a fund manager or a fund. The account can contain holdings in any investment fund or investment fund class managed (or distributed) by the fund manager, within the same fund family. InvestmentAccountDetails *InvestmentAccount21 `xml:"InvstmtAcctDtls,omitempty"` // Amount of money used to derive the quantity of investment fund units to be redeemed. TotalRedemptionAmount *ActiveOrHistoricCurrencyAndAmount `xml:"TtlRedAmt,omitempty"` // Amount of money used to derive the quantity of investment fund units to be subscribed. TotalSubscriptionAmount *ActiveOrHistoricCurrencyAndAmount `xml:"TtlSbcptAmt,omitempty"` // Future date at which the investor requests the order to be executed. // The specification of a requested future trade date is not allowed in some markets. The date must be a date in the future. 
RequestedFutureTradeDate *ISODate `xml:"ReqdFutrTradDt,omitempty"` // Total amount of money paid /to be paid or received in exchange for the financial instrument in the individual order. SettlementAmount *ActiveCurrencyAndAmount `xml:"SttlmAmt,omitempty"` // Date on which cash is available. CashSettlementDate *ISODate `xml:"CshSttlmDt,omitempty"` // Method by which the transaction is settled. SettlementMethod *DeliveryReceiptType2Code `xml:"SttlmMtd,omitempty"` // Date on which the order expires. ExpiryDateTime *DateAndDateTimeChoice `xml:"XpryDtTm,omitempty"` // Additional amount of money paid by the investor in addition to the switch redemption amount. AdditionalCashIn *ActiveOrHistoricCurrencyAndAmount `xml:"AddtlCshIn,omitempty"` // Amount of money that results from a switch-out, that is not reinvested in another investment fund, and is repaid to the investor. ResultingCashOut *ActiveOrHistoricCurrencyAndAmount `xml:"RsltgCshOut,omitempty"` // Information about parties related to the transaction. RelatedPartyDetails []*Intermediary8 `xml:"RltdPtyDtls,omitempty"` // Cancellation right of an investor with respect to an investment fund order. CancellationRight *CancellationRight1Code `xml:"CxlRght,omitempty"` // Cancellation right of an investor with respect to an investment fund order. ExtendedCancellationRight *Extended350Code `xml:"XtndedCxlRght,omitempty"` // Part of an investment fund switch order that is a redemption. RedemptionLegDetails []*SwitchRedemptionLegOrder3 `xml:"RedLegDtls"` // Part of an investment fund switch order that is a subscription. SubscriptionLegDetails []*SwitchSubscriptionLegOrder3 `xml:"SbcptLegDtls"` // Payment processes required to transfer cash from the debtor to the creditor. CashSettlementDetails *PaymentTransaction25 `xml:"CshSttlmDtls,omitempty"` // Information needed to process a currency exchange or conversion. 
ForeignExchangeDetails *ForeignExchangeTerms6 `xml:"FXDtls,omitempty"` // Specifies if advice has been received from an independent financial advisor. FinancialAdvice *FinancialAdvice1Code `xml:"FinAdvc,omitempty"` // Specifies whether the trade is negotiated. NegotiatedTrade *NegotiatedTrade1Code `xml:"NgtdTrad,omitempty"` } func (s *SwitchOrder3) SetMasterReference(value string) { s.MasterReference = (*Max35Text)(&value) } func (s *SwitchOrder3) SetOrderDateTime(value string) { s.OrderDateTime = (*ISODateTime)(&value) } func (s *SwitchOrder3) SetOrderReference(value string) { s.OrderReference = (*Max35Text)(&value) } func (s *SwitchOrder3) SetClientReference(value string) { s.ClientReference = (*Max35Text)(&value) } func (s *SwitchOrder3) SetCancellationReference(value string) { s.CancellationReference = (*Max35Text)(&value) } func (s *SwitchOrder3) AddInvestmentAccountDetails() *InvestmentAccount21 { s.InvestmentAccountDetails = new(InvestmentAccount21) return s.InvestmentAccountDetails } func (s *SwitchOrder3) SetTotalRedemptionAmount(value, currency string) { s.TotalRedemptionAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency) } func (s *SwitchOrder3) SetTotalSubscriptionAmount(value, currency string) { s.TotalSubscriptionAmount = NewActiveOrHistoricCurrencyAndAmount(value, currency) } func (s *SwitchOrder3) SetRequestedFutureTradeDate(value string) { s.RequestedFutureTradeDate = (*ISODate)(&value) } func (s *SwitchOrder3) SetSettlementAmount(value, currency string) { s.SettlementAmount = NewActiveCurrencyAndAmount(value, currency) } func (s *SwitchOrder3) SetCashSettlementDate(value string) { s.CashSettlementDate = (*ISODate)(&value) } func (s *SwitchOrder3) SetSettlementMethod(value string) { s.SettlementMethod = (*DeliveryReceiptType2Code)(&value) } func (s *SwitchOrder3) AddExpiryDateTime() *DateAndDateTimeChoice { s.ExpiryDateTime = new(DateAndDateTimeChoice) return s.ExpiryDateTime } func (s *SwitchOrder3) SetAdditionalCashIn(value, currency 
string) { s.AdditionalCashIn = NewActiveOrHistoricCurrencyAndAmount(value, currency) } func (s *SwitchOrder3) SetResultingCashOut(value, currency string) { s.ResultingCashOut = NewActiveOrHistoricCurrencyAndAmount(value, currency) } func (s *SwitchOrder3) AddRelatedPartyDetails() *Intermediary8 { newValue := new (Intermediary8) s.RelatedPartyDetails = append(s.RelatedPartyDetails, newValue) return newValue } func (s *SwitchOrder3) SetCancellationRight(value string) { s.CancellationRight = (*CancellationRight1Code)(&value) } func (s *SwitchOrder3) SetExtendedCancellationRight(value string) { s.ExtendedCancellationRight = (*Extended350Code)(&value) } func (s *SwitchOrder3) AddRedemptionLegDetails() *SwitchRedemptionLegOrder3 { newValue := new (SwitchRedemptionLegOrder3) s.RedemptionLegDetails = append(s.RedemptionLegDetails, newValue) return newValue } func (s *SwitchOrder3) AddSubscriptionLegDetails() *SwitchSubscriptionLegOrder3 { newValue := new (SwitchSubscriptionLegOrder3) s.SubscriptionLegDetails = append(s.SubscriptionLegDetails, newValue) return newValue } func (s *SwitchOrder3) AddCashSettlementDetails() *PaymentTransaction25 { s.CashSettlementDetails = new(PaymentTransaction25) return s.CashSettlementDetails } func (s *SwitchOrder3) AddForeignExchangeDetails() *ForeignExchangeTerms6 { s.ForeignExchangeDetails = new(ForeignExchangeTerms6) return s.ForeignExchangeDetails } func (s *SwitchOrder3) SetFinancialAdvice(value string) { s.FinancialAdvice = (*FinancialAdvice1Code)(&value) } func (s *SwitchOrder3) SetNegotiatedTrade(value string) { s.NegotiatedTrade = (*NegotiatedTrade1Code)(&value) }
SwitchOrder3.go
0.826362
0.44071
SwitchOrder3.go
starcoder
package gift import ( "image" "image/color" "image/draw" ) type pixel struct { R, G, B, A float32 } type imageType int const ( itGeneric imageType = iota itNRGBA itNRGBA64 itRGBA itRGBA64 itYCbCr itGray itGray16 itPaletted ) type pixelGetter struct { imgType imageType imgBounds image.Rectangle imgGeneric image.Image imgNRGBA *image.NRGBA imgNRGBA64 *image.NRGBA64 imgRGBA *image.RGBA imgRGBA64 *image.RGBA64 imgYCbCr *image.YCbCr imgGray *image.Gray imgGray16 *image.Gray16 imgPaletted *image.Paletted imgPalette []pixel } func newPixelGetter(img image.Image) (p *pixelGetter) { switch img := img.(type) { case *image.NRGBA: p = &pixelGetter{ imgType: itNRGBA, imgBounds: img.Bounds(), imgNRGBA: img, } case *image.NRGBA64: p = &pixelGetter{ imgType: itNRGBA64, imgBounds: img.Bounds(), imgNRGBA64: img, } case *image.RGBA: p = &pixelGetter{ imgType: itRGBA, imgBounds: img.Bounds(), imgRGBA: img, } case *image.RGBA64: p = &pixelGetter{ imgType: itRGBA64, imgBounds: img.Bounds(), imgRGBA64: img, } case *image.Gray: p = &pixelGetter{ imgType: itGray, imgBounds: img.Bounds(), imgGray: img, } case *image.Gray16: p = &pixelGetter{ imgType: itGray16, imgBounds: img.Bounds(), imgGray16: img, } case *image.YCbCr: p = &pixelGetter{ imgType: itYCbCr, imgBounds: img.Bounds(), imgYCbCr: img, } case *image.Paletted: p = &pixelGetter{ imgType: itPaletted, imgBounds: img.Bounds(), imgPaletted: img, imgPalette: convertPalette(img.Palette), } return default: p = &pixelGetter{ imgType: itGeneric, imgBounds: img.Bounds(), imgGeneric: img, } } return } const ( qf8 = float32(1.0 / 255.0) qf16 = float32(1.0 / 65535.0) epal = qf16 * qf16 / 2.0 ) func convertPalette(p []color.Color) []pixel { plen := len(p) pnew := make([]pixel, plen) for i := 0; i < plen; i++ { r16, g16, b16, a16 := p[i].RGBA() switch a16 { case 0: pnew[i] = pixel{0.0, 0.0, 0.0, 0.0} case 65535: r := float32(r16) * qf16 g := float32(g16) * qf16 b := float32(b16) * qf16 pnew[i] = pixel{r, g, b, 1.0} default: q := float32(1.0) / 
float32(a16) r := float32(r16) * q g := float32(g16) * q b := float32(b16) * q a := float32(a16) * qf16 pnew[i] = pixel{r, g, b, a} } } return pnew } func getPaletteIndex(pal []pixel, px pixel) int { var k int = 0 var dmin float32 = 4 for i, palpx := range pal { d := px.R - palpx.R dcur := d * d d = px.G - palpx.G dcur += d * d d = px.B - palpx.B dcur += d * d d = px.A - palpx.A dcur += d * d if dcur < epal { return i } if dcur < dmin { dmin = dcur k = i } } return k } func pixelclr(c color.Color) (px pixel) { r16, g16, b16, a16 := c.RGBA() switch a16 { case 0: px = pixel{0.0, 0.0, 0.0, 0.0} case 65535: r := float32(r16) * qf16 g := float32(g16) * qf16 b := float32(b16) * qf16 px = pixel{r, g, b, 1.0} default: q := float32(1.0) / float32(a16) r := float32(r16) * q g := float32(g16) * q b := float32(b16) * q a := float32(a16) * qf16 px = pixel{r, g, b, a} } return px } func (p *pixelGetter) getPixel(x, y int) (px pixel) { switch p.imgType { case itNRGBA: i := p.imgNRGBA.PixOffset(x, y) r := float32(p.imgNRGBA.Pix[i+0]) * qf8 g := float32(p.imgNRGBA.Pix[i+1]) * qf8 b := float32(p.imgNRGBA.Pix[i+2]) * qf8 a := float32(p.imgNRGBA.Pix[i+3]) * qf8 px = pixel{r, g, b, a} case itNRGBA64: i := p.imgNRGBA64.PixOffset(x, y) r := float32(uint16(p.imgNRGBA64.Pix[i+0])<<8|uint16(p.imgNRGBA64.Pix[i+1])) * qf16 g := float32(uint16(p.imgNRGBA64.Pix[i+2])<<8|uint16(p.imgNRGBA64.Pix[i+3])) * qf16 b := float32(uint16(p.imgNRGBA64.Pix[i+4])<<8|uint16(p.imgNRGBA64.Pix[i+5])) * qf16 a := float32(uint16(p.imgNRGBA64.Pix[i+6])<<8|uint16(p.imgNRGBA64.Pix[i+7])) * qf16 px = pixel{r, g, b, a} case itRGBA: i := p.imgRGBA.PixOffset(x, y) a8 := p.imgRGBA.Pix[i+3] switch a8 { case 0: px = pixel{0.0, 0.0, 0.0, 0.0} case 255: r := float32(p.imgRGBA.Pix[i+0]) * qf8 g := float32(p.imgRGBA.Pix[i+1]) * qf8 b := float32(p.imgRGBA.Pix[i+2]) * qf8 px = pixel{r, g, b, 1.0} default: q := float32(1.0) / float32(a8) r := float32(p.imgRGBA.Pix[i+0]) * q g := float32(p.imgRGBA.Pix[i+1]) * q b := 
float32(p.imgRGBA.Pix[i+2]) * q a := float32(a8) * qf8 px = pixel{r, g, b, a} } case itRGBA64: i := p.imgRGBA64.PixOffset(x, y) a16 := uint16(p.imgRGBA64.Pix[i+6])<<8 | uint16(p.imgRGBA64.Pix[i+7]) switch a16 { case 0: px = pixel{0.0, 0.0, 0.0, 0.0} case 65535: r := float32(uint16(p.imgRGBA64.Pix[i+0])<<8|uint16(p.imgRGBA64.Pix[i+1])) * qf16 g := float32(uint16(p.imgRGBA64.Pix[i+2])<<8|uint16(p.imgRGBA64.Pix[i+3])) * qf16 b := float32(uint16(p.imgRGBA64.Pix[i+4])<<8|uint16(p.imgRGBA64.Pix[i+5])) * qf16 px = pixel{r, g, b, 1.0} default: q := float32(1.0) / float32(a16) r := float32(uint16(p.imgRGBA64.Pix[i+0])<<8|uint16(p.imgRGBA64.Pix[i+1])) * q g := float32(uint16(p.imgRGBA64.Pix[i+2])<<8|uint16(p.imgRGBA64.Pix[i+3])) * q b := float32(uint16(p.imgRGBA64.Pix[i+4])<<8|uint16(p.imgRGBA64.Pix[i+5])) * q a := float32(a16) * qf16 px = pixel{r, g, b, a} } case itGray: i := p.imgGray.PixOffset(x, y) v := float32(p.imgGray.Pix[i]) * qf8 px = pixel{v, v, v, 1.0} case itGray16: i := p.imgGray16.PixOffset(x, y) v := float32(uint16(p.imgGray16.Pix[i+0])<<8|uint16(p.imgGray16.Pix[i+1])) * qf16 px = pixel{v, v, v, 1.0} case itYCbCr: iy := p.imgYCbCr.YOffset(x, y) ic := p.imgYCbCr.COffset(x, y) r8, g8, b8 := color.YCbCrToRGB(p.imgYCbCr.Y[iy], p.imgYCbCr.Cb[ic], p.imgYCbCr.Cr[ic]) r := float32(r8) * qf8 g := float32(g8) * qf8 b := float32(b8) * qf8 px = pixel{r, g, b, 1.0} case itPaletted: i := p.imgPaletted.PixOffset(x, y) k := p.imgPaletted.Pix[i] px = p.imgPalette[k] case itGeneric: px = pixelclr(p.imgGeneric.At(x, y)) } return } func (p *pixelGetter) getPixelRow(y int, buf *[]pixel) { *buf = (*buf)[0:0] for x := p.imgBounds.Min.X; x != p.imgBounds.Max.X; x++ { *buf = append(*buf, p.getPixel(x, y)) } } func (p *pixelGetter) getPixelColumn(x int, buf *[]pixel) { *buf = (*buf)[0:0] for y := p.imgBounds.Min.Y; y != p.imgBounds.Max.Y; y++ { *buf = append(*buf, p.getPixel(x, y)) } } func f32u8(val float32) uint8 { if val > 255.0 { val = 255.0 } else if val < 0.0 { val = 0.0 } return 
uint8(val + 0.5) } func f32u16(val float32) uint16 { if val > 65535.0 { val = 65535.0 } else if val < 0.0 { val = 0.0 } return uint16(val + 0.5) } type pixelSetter struct { imgType imageType imgBounds image.Rectangle imgGeneric draw.Image imgNRGBA *image.NRGBA imgNRGBA64 *image.NRGBA64 imgRGBA *image.RGBA imgRGBA64 *image.RGBA64 imgGray *image.Gray imgGray16 *image.Gray16 imgPaletted *image.Paletted imgPalette []pixel } func newPixelSetter(img draw.Image) (p *pixelSetter) { switch img := img.(type) { case *image.NRGBA: p = &pixelSetter{ imgType: itNRGBA, imgBounds: img.Bounds(), imgNRGBA: img, } case *image.NRGBA64: p = &pixelSetter{ imgType: itNRGBA64, imgBounds: img.Bounds(), imgNRGBA64: img, } case *image.RGBA: p = &pixelSetter{ imgType: itRGBA, imgBounds: img.Bounds(), imgRGBA: img, } case *image.RGBA64: p = &pixelSetter{ imgType: itRGBA64, imgBounds: img.Bounds(), imgRGBA64: img, } case *image.Gray: p = &pixelSetter{ imgType: itGray, imgBounds: img.Bounds(), imgGray: img, } case *image.Gray16: p = &pixelSetter{ imgType: itGray16, imgBounds: img.Bounds(), imgGray16: img, } case *image.Paletted: p = &pixelSetter{ imgType: itPaletted, imgBounds: img.Bounds(), imgPaletted: img, imgPalette: convertPalette(img.Palette), } default: p = &pixelSetter{ imgType: itGeneric, imgBounds: img.Bounds(), imgGeneric: img, } } return } func (p *pixelSetter) setPixel(x, y int, px pixel) { if !image.Pt(x, y).In(p.imgBounds) { return } switch p.imgType { case itNRGBA: i := p.imgNRGBA.PixOffset(x, y) p.imgNRGBA.Pix[i+0] = f32u8(px.R * 255.0) p.imgNRGBA.Pix[i+1] = f32u8(px.G * 255.0) p.imgNRGBA.Pix[i+2] = f32u8(px.B * 255.0) p.imgNRGBA.Pix[i+3] = f32u8(px.A * 255.0) case itNRGBA64: r16 := f32u16(px.R * 65535.0) g16 := f32u16(px.G * 65535.0) b16 := f32u16(px.B * 65535.0) a16 := f32u16(px.A * 65535.0) i := p.imgNRGBA64.PixOffset(x, y) p.imgNRGBA64.Pix[i+0] = uint8(r16 >> 8) p.imgNRGBA64.Pix[i+1] = uint8(r16 & 0xff) p.imgNRGBA64.Pix[i+2] = uint8(g16 >> 8) p.imgNRGBA64.Pix[i+3] = 
uint8(g16 & 0xff) p.imgNRGBA64.Pix[i+4] = uint8(b16 >> 8) p.imgNRGBA64.Pix[i+5] = uint8(b16 & 0xff) p.imgNRGBA64.Pix[i+6] = uint8(a16 >> 8) p.imgNRGBA64.Pix[i+7] = uint8(a16 & 0xff) case itRGBA: fa := px.A * 255.0 i := p.imgRGBA.PixOffset(x, y) p.imgRGBA.Pix[i+0] = f32u8(px.R * fa) p.imgRGBA.Pix[i+1] = f32u8(px.G * fa) p.imgRGBA.Pix[i+2] = f32u8(px.B * fa) p.imgRGBA.Pix[i+3] = f32u8(fa) case itRGBA64: fa := px.A * 65535.0 r16 := f32u16(px.R * fa) g16 := f32u16(px.G * fa) b16 := f32u16(px.B * fa) a16 := f32u16(fa) i := p.imgRGBA64.PixOffset(x, y) p.imgRGBA64.Pix[i+0] = uint8(r16 >> 8) p.imgRGBA64.Pix[i+1] = uint8(r16 & 0xff) p.imgRGBA64.Pix[i+2] = uint8(g16 >> 8) p.imgRGBA64.Pix[i+3] = uint8(g16 & 0xff) p.imgRGBA64.Pix[i+4] = uint8(b16 >> 8) p.imgRGBA64.Pix[i+5] = uint8(b16 & 0xff) p.imgRGBA64.Pix[i+6] = uint8(a16 >> 8) p.imgRGBA64.Pix[i+7] = uint8(a16 & 0xff) case itGray: i := p.imgGray.PixOffset(x, y) p.imgGray.Pix[i] = f32u8((0.299*px.R + 0.587*px.G + 0.114*px.B) * px.A * 255.0) case itGray16: i := p.imgGray16.PixOffset(x, y) y16 := f32u16((0.299*px.R + 0.587*px.G + 0.114*px.B) * px.A * 65535.0) p.imgGray16.Pix[i+0] = uint8(y16 >> 8) p.imgGray16.Pix[i+1] = uint8(y16 & 0xff) case itPaletted: px1 := pixel{ minf32(maxf32(px.R, 0), 1), minf32(maxf32(px.G, 0), 1), minf32(maxf32(px.B, 0), 1), minf32(maxf32(px.A, 0), 1), } i := p.imgPaletted.PixOffset(x, y) k := getPaletteIndex(p.imgPalette, px1) p.imgPaletted.Pix[i] = uint8(k) case itGeneric: r16 := f32u16(px.R * 65535.0) g16 := f32u16(px.G * 65535.0) b16 := f32u16(px.B * 65535.0) a16 := f32u16(px.A * 65535.0) p.imgGeneric.Set(x, y, color.NRGBA64{r16, g16, b16, a16}) } } func (p *pixelSetter) setPixelRow(y int, buf []pixel) { for i, x := 0, p.imgBounds.Min.X; i < len(buf); i, x = i+1, x+1 { p.setPixel(x, y, buf[i]) } } func (p *pixelSetter) setPixelColumn(x int, buf []pixel) { for i, y := 0, p.imgBounds.Min.Y; i < len(buf); i, y = i+1, y+1 { p.setPixel(x, y, buf[i]) } }
libraries/gift/pixels.go
0.543833
0.508788
pixels.go
starcoder
package xof import ( "io" "strconv" "golang.org/x/crypto/blake2b" "golang.org/x/crypto/blake2s" "golang.org/x/crypto/sha3" ) // XOF defines the interface to hash functions that support arbitrary-length output. type XOF interface { // Write absorbs more data into the hash's state. It panics if called // after Read. io.Writer // Read reads more output from the hash. It returns io.EOF if the limit // has been reached. io.Reader // Clone returns a copy of the XOF in its current state. Clone() XOF // Reset resets the XOF to its initial state. Reset() } type XOFFunc func() XOF type XofID uint func (x XofID) Available() bool { return x < maxXofID && xofRegistry[x] != nil } func (x XofID) XofIDFunc() XofID { return x } func (x XofID) New() XOF { if x < maxXofID { f := xofRegistry[x] if f != nil { return f() } } panic("crypto: requested XOF function #" + strconv.Itoa(int(x)) + " is unavailable") } func RegisterXOF(x XofID, f func() XOF) { if x >= maxXofID { panic("crypto: RegisterXOF of unknown XOF function") } xofRegistry[x] = f } var xofRegistry = make([]func() XOF, maxXofID) func init() { RegisterXOF(SHAKE128, newShake128) RegisterXOF(SHAKE256, newShake256) RegisterXOF(BLAKE2XB, newBlake2xb) RegisterXOF(BLAKE2XS, newBlake2xs) } const maxXofID = 4 const ( SHAKE128 XofID = iota SHAKE256 BLAKE2XB BLAKE2XS ) type shakeBody struct{ sha3.ShakeHash } func (s shakeBody) Clone() XOF { return shakeBody{s.ShakeHash.Clone()} } func newShake128() XOF { return shakeBody{sha3.NewShake128()} } func newShake256() XOF { return shakeBody{sha3.NewShake256()} } type blake2xb struct{ blake2b.XOF } func (s blake2xb) Clone() XOF { return blake2xb{s.XOF.Clone()} } func newBlake2xb() XOF { x, _ := blake2b.NewXOF(blake2b.OutputLengthUnknown, nil); return blake2xb{x} } type blake2xs struct{ blake2s.XOF } func (s blake2xs) Clone() XOF { return blake2xs{s.XOF.Clone()} } func newBlake2xs() XOF { x, _ := blake2s.NewXOF(blake2s.OutputLengthUnknown, nil); return blake2xs{x} }
xof/xof.go
0.678647
0.427337
xof.go
starcoder
package datastructure import ( "errors" "fmt" "github.com/duke-git/lancet/v2/datastructure" ) // SinglyLink is a linked list. Whose node has a Value generics and Next pointer points to a next node of the link. type SinglyLink[T any] struct { Head *datastructure.LinkNode[T] length int } // NewSinglyLink return *SinglyLink instance func NewSinglyLink[T any]() *SinglyLink[T] { return &SinglyLink[T]{Head: nil} } // InsertAtHead insert value into singly linklist at head index func (link *SinglyLink[T]) InsertAtHead(value T) { newNode := datastructure.NewLinkNode(value) newNode.Next = link.Head link.Head = newNode link.length++ } // InsertAtTail insert value into singly linklist at tail index func (link *SinglyLink[T]) InsertAtTail(value T) { current := link.Head if current == nil { link.InsertAtHead(value) return } for current.Next != nil { current = current.Next } newNode := datastructure.NewLinkNode(value) newNode.Next = nil current.Next = newNode link.length++ } // InsertAt insert value into singly linklist at index func (link *SinglyLink[T]) InsertAt(index int, value T) error { size := link.length if index < 0 || index > size { return errors.New("param index should between 0 and the length of singly link.") } if index == 0 { link.InsertAtHead(value) return nil } if index == size { link.InsertAtTail(value) return nil } i := 0 current := link.Head for current != nil { if i == index-1 { newNode := datastructure.NewLinkNode(value) newNode.Next = current.Next current.Next = newNode link.length++ return nil } i++ current = current.Next } return errors.New("singly link list no exist") } // DeleteAtHead delete value in singly linklist at head index func (link *SinglyLink[T]) DeleteAtHead() error { if link.Head == nil { return errors.New("singly link list no exist") } current := link.Head link.Head = current.Next link.length-- return nil } // DeleteAtTail delete value in singly linklist at tail index func (link *SinglyLink[T]) DeleteAtTail() error { if link.Head == nil { 
return errors.New("singly link list no exist") } current := link.Head if current.Next == nil { return link.DeleteAtHead() } for current.Next.Next != nil { current = current.Next } current.Next = nil link.length-- return nil } // DeleteAt delete value in singly linklist at index func (link *SinglyLink[T]) DeleteAt(index int) error { if link.Head == nil { return errors.New("singly link list no exist") } current := link.Head if current.Next == nil || index == 0 { return link.DeleteAtHead() } if index == link.length-1 { return link.DeleteAtTail() } if index < 0 || index > link.length-1 { return errors.New("param index should between 0 and link size -1.") } i := 0 for current != nil { if i == index-1 { current.Next = current.Next.Next link.length-- return nil } i++ current = current.Next } return errors.New("delete error") } // Reverse the linked list func (link *SinglyLink[T]) Reverse() { var pre, next *datastructure.LinkNode[T] current := link.Head for current != nil { next = current.Next current.Next = pre pre = current current = next } link.Head = pre } // GetMiddleNode return node at middle index of linked list func (link *SinglyLink[T]) GetMiddleNode() *datastructure.LinkNode[T] { if link.Head == nil { return nil } if link.Head.Next == nil { return link.Head } fast := link.Head slow := link.Head for fast != nil { fast = fast.Next if fast != nil { fast = fast.Next slow = slow.Next } else { return slow } } return slow } // Size return the count of singly linked list func (link *SinglyLink[T]) Size() int { return link.length } // Values return slice of all singly linklist node value func (link *SinglyLink[T]) Values() []T { res := []T{} current := link.Head for current != nil { res = append(res, current.Value) current = current.Next } return res } // IsEmpty checks if link is empty or not func (link *SinglyLink[T]) IsEmpty() bool { return link.length == 0 } // Clear checks if link is empty or not func (link *SinglyLink[T]) Clear() { link.Head = nil link.length = 0 } 
// Print all nodes info of a linked list func (link *SinglyLink[T]) Print() { current := link.Head info := "[ " for current != nil { info += fmt.Sprintf("%+v, ", current) current = current.Next } info += " ]" fmt.Println(info) }
datastructure/link/singlylink.go
0.588416
0.422981
singlylink.go
starcoder
package redis_rate import "github.com/go-redis/redis/v8" // Copyright (c) 2017 <NAME> // https://github.com/rwz/redis-gcra/blob/master/vendor/perform_gcra_ratelimit.lua var allowN = redis.NewScript(` -- this script has side-effects, so it requires replicate commands mode redis.replicate_commands() local rate_limit_key = KEYS[1] local burst = ARGV[1] local rate = ARGV[2] local period = ARGV[3] local cost = ARGV[4] local emission_interval = period / rate local increment = emission_interval * cost local burst_offset = emission_interval * burst -- redis returns time as an array containing two integers: seconds of the epoch -- time (10 digits) and microseconds (6 digits). for convenience we need to -- convert them to a floating point number. the resulting number is 16 digits, -- bordering on the limits of a 64-bit double-precision floating point number. -- adjust the epoch to be relative to Jan 1, 2017 00:00:00 GMT to avoid floating -- point problems. this approach is good until "now" is 2,483,228,799 (Wed, 09 -- Sep 2048 01:46:39 GMT), when the adjusted value is 16 digits. 
local jan_1_2017 = 1483228800 local now = redis.call("TIME") now = (now[1] - jan_1_2017) + (now[2] / 1000000) local tat = redis.call("GET", rate_limit_key) if not tat then tat = now else tat = tonumber(tat) end tat = math.max(tat, now) local new_tat = tat + increment local allow_at = new_tat - burst_offset local diff = now - allow_at local remaining = diff / emission_interval if remaining < 0 then local reset_after = tat - now local retry_after = diff * -1 return { tostring(0), -- allowed tostring(0), -- remaining tostring(retry_after), tostring(reset_after), } end local reset_after = new_tat - now if reset_after > 0 then redis.call("SET", rate_limit_key, new_tat, "EX", math.ceil(reset_after)) end local retry_after = -1 return {tostring(cost), tostring(remaining), tostring(retry_after), tostring(reset_after)} `) var allowAtMost = redis.NewScript(` -- this script has side-effects, so it requires replicate commands mode redis.replicate_commands() local rate_limit_key = KEYS[1] local burst = ARGV[1] local rate = ARGV[2] local period = ARGV[3] local cost = tonumber(ARGV[4]) local emission_interval = period / rate local burst_offset = emission_interval * burst -- redis returns time as an array containing two integers: seconds of the epoch -- time (10 digits) and microseconds (6 digits). for convenience we need to -- convert them to a floating point number. the resulting number is 16 digits, -- bordering on the limits of a 64-bit double-precision floating point number. -- adjust the epoch to be relative to Jan 1, 2017 00:00:00 GMT to avoid floating -- point problems. this approach is good until "now" is 2,483,228,799 (Wed, 09 -- Sep 2048 01:46:39 GMT), when the adjusted value is 16 digits. 
local jan_1_2017 = 1483228800 local now = redis.call("TIME") now = (now[1] - jan_1_2017) + (now[2] / 1000000) local tat = redis.call("GET", rate_limit_key) if not tat then tat = now else tat = tonumber(tat) end tat = math.max(tat, now) local diff = now - (tat - burst_offset) local remaining = diff / emission_interval if remaining < 1 then local reset_after = tat - now local retry_after = emission_interval - diff return { 0, -- allowed 0, -- remaining tostring(retry_after), tostring(reset_after), } end if remaining < cost then cost = remaining remaining = 0 else remaining = remaining - cost end local increment = emission_interval * cost local new_tat = tat + increment local reset_after = new_tat - now if reset_after > 0 then redis.call("SET", rate_limit_key, new_tat, "EX", math.ceil(reset_after)) end return { cost, remaining, tostring(-1), tostring(reset_after), } `)
lua.go
0.760651
0.455562
lua.go
starcoder