code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package extend
import (
"github.com/anaminus/luasyntax/go/tree"
)
// FileScope contains information about the scopes of a file, including
// variables and their associations with a parse tree.
type FileScope struct {
// Root is the root scope.
Root *Scope
// Globals is a list of global variables that have been assigned to.
Globals []*Variable
// VariableMap maps a NAME token to a Variable.
VariableMap map[*tree.Token]*Variable
// ScopeMap maps a Node to the scope that is opened by or is otherwise
// associated with the node.
ScopeMap map[tree.Node]*Scope
}
// Scope contains a list of the variables declared in the scope.
type Scope struct {
// Parent is the outer, surrounding scope.
Parent *Scope
// Children is a list of inner scopes.
Children []*Scope
// Variables is the list of variables declared in the scope.
Variables []*Variable
// Node is the tree node that opens or is otherwise associated with the
// scope. May be nil.
Node tree.Node
// Items is a list of NAME tokens and Scopes, ordered semantically.
Items []interface{}
// Start indicates the start of the lifetime of the scope. The value has no
// objective meaning, and should be used only for comparing with other
// lifetimes within the same generated FileScope.
Start int
// End indicates the end of the lifetime of the scope. The value has no
// objective meaning, and should be used only for comparing with other
// lifetimes within the same generated FileScope.
End int
}
// NewScope creates an inner scope, optionally associating the scope with the
// node that opens it. When parent is non-nil, the new scope is registered as
// one of its children and appended to its semantically-ordered item list.
func NewScope(parent *Scope, node tree.Node) *Scope {
	s := &Scope{Parent: parent, Node: node}
	if parent == nil {
		return s
	}
	parent.Children = append(parent.Children, s)
	parent.Items = append(parent.Items, s)
	return s
}
// VariableType indicates the type of Variable.
type VariableType uint8
const (
InvalidVar VariableType = iota
LocalVar // LocalVar indicates a variable local to its scope.
GlobalVar // GlobalVar indicates a variable defined in the global table.
)
func (t VariableType) String() string {
switch t {
case LocalVar:
return "Local"
case GlobalVar:
return "Global"
}
return "<invalid>"
}
// Variable describes a single named entity within a parse tree.
type Variable struct {
// Type is the variable type.
Type VariableType
// Name is the name of the variable.
Name string
// References is a list of NAME tokens that refer to the entity. When the
// variable is local, the first value is the declaration of the variable.
References []*tree.Token
// Scopes is a list of scopes corresponding to entries in References.
Scopes []*Scope
// Positions is a list of positions corresponding to entries in References.
Positions []int
// LifeStart indicates the start of the lifetime and visiblity of the
// variable. The value has no objective meaning, and should be used only for
// comparing with other lifetimes within the same generated FileScope.
LifeStart int
// LifeEnd indicates the end of the lifetime of the variable. The value has
// no objective meaning, and should be used only for comparing with other
// lifetimes within the same generated FileScope.
LifeEnd int
// ScopeEnd indicates the end of the visibility of the variable. The value
// has no objective meaning, and should be used only for comparing with
// other lifetimes within the same generated FileScope.
ScopeEnd int
}
// VisiblityOverlapsWith returns whether the visibility of v overlaps with
// the visibility of w, i.e. neither variable's visible range ends before the
// other's begins.
// NOTE(review): "Visiblity" is a typo for "Visibility", but renaming the
// exported method would break callers; kept as-is.
func (v *Variable) VisiblityOverlapsWith(w *Variable) bool {
	return v.ScopeEnd >= w.LifeStart && v.LifeStart <= w.ScopeEnd
}
// scopeParser holds the scope state while walking a parse tree. It must be
// initialized with init before using.
type scopeParser struct {
fileScope *FileScope
currentScope *Scope
position int
}
// init prepares the parser to walk a parse tree. It must be called before the
// parser is used as a tree.Visitor.
func (p *scopeParser) init() {
	p.currentScope = nil
	p.fileScope = &FileScope{
		VariableMap: make(map[*tree.Token]*Variable, 4),
		ScopeMap:    make(map[tree.Node]*Scope, 4),
	}
}

// mark marks the current position of the parser with a unique value. Values
// increase monotonically, so marks can be compared to order events.
func (p *scopeParser) mark() int {
	pos := p.position + 1
	p.position = pos
	return pos
}

// openScope creates a new scope, setting it as an inner scope of the current
// scope, and then sets it as the current scope. The scope can optionally be
// associated with a node, in which case it is also registered in ScopeMap.
func (p *scopeParser) openScope(node tree.Node) {
	p.currentScope = NewScope(p.currentScope, node)
	p.currentScope.Start = p.mark()
	if node != nil {
		p.fileScope.ScopeMap[node] = p.currentScope
	}
}

// closeScope sets the current scope to its parent. The visibility of every
// variable declared in the scope ends at the closing mark.
func (p *scopeParser) closeScope() {
	p.currentScope.End = p.mark()
	for _, v := range p.currentScope.Variables {
		v.ScopeEnd = p.currentScope.End
	}
	p.currentScope = p.currentScope.Parent
}

// addVariableName records name as a reference to variable v: the token is
// appended to v's reference lists, v's lifetime is extended to the current
// mark, and the token is registered in the current scope's item list and the
// file-wide variable map.
func (p *scopeParser) addVariableName(v *Variable, name *tree.Token) {
	v.References = append(v.References, name)
	v.Scopes = append(v.Scopes, p.currentScope)
	v.LifeEnd = p.mark()
	v.Positions = append(v.Positions, v.LifeEnd)
	p.currentScope.Items = append(p.currentScope.Items, name)
	p.fileScope.VariableMap[name] = v
}

// newVariable creates a Variable named by the given NAME token, with its
// lifetime starting at the current mark, and records the token as its first
// reference.
func (p *scopeParser) newVariable(name *tree.Token) *Variable {
	v := &Variable{
		Name:      string(name.Bytes),
		LifeStart: p.mark(),
	}
	p.addVariableName(v, name)
	return v
}
// addLocalVar creates a new Variable, named by the given NAME token, and adds
// it to the current scope as a local.
func (p *scopeParser) addLocalVar(name *tree.Token) {
	v := p.newVariable(name)
	v.Type = LocalVar
	p.currentScope.Variables = append(p.currentScope.Variables, v)
}
// getLocalVar retrieves a variable, named by the given NAME token, from the
// current scope, or each outer scope until it is found. Returns nil if no
// variable of the given name could be found.
func (p *scopeParser) getLocalVar(name *tree.Token) *Variable {
	// Convert the token bytes once, instead of on every comparison inside
	// the nested loops.
	target := string(name.Bytes)
	for scope := p.currentScope; scope != nil; scope = scope.Parent {
		// Iterate in reverse order so that the most recent declaration of a
		// shadowed name wins.
		for i := len(scope.Variables) - 1; i >= 0; i-- {
			if scope.Variables[i].Name == target {
				return scope.Variables[i]
			}
		}
	}
	return nil
}
// referenceVariable adds a reference to the variable named by the given NAME
// token. The variable may be local or global; a name not found in any
// enclosing scope is treated as a global.
func (p *scopeParser) referenceVariable(name *tree.Token) *Variable {
	v := p.getLocalVar(name)
	if v != nil {
		p.addVariableName(v, name)
	} else {
		v = p.addGlobalVar(name)
	}
	return v
}
// addGlobalVar adds a reference to a global variable, named by the given NAME
// token. A new Variable is created if no global of that name exists yet.
func (p *scopeParser) addGlobalVar(name *tree.Token) (v *Variable) {
	// Convert the token bytes once, instead of on every comparison.
	target := string(name.Bytes)
	for _, g := range p.fileScope.Globals {
		if g.Name == target {
			v = g
			break
		}
	}
	if v != nil {
		// Existing global: just record the new reference.
		p.addVariableName(v, name)
		return v
	}
	// First sighting of this global: create it and register it.
	v = p.newVariable(name)
	v.Type = GlobalVar
	p.fileScope.Globals = append(p.fileScope.Globals, v)
	return v
}
// Visit implements the tree.Visitor interface. It dispatches on the node type
// to track scope openings/closings and variable declarations/references.
// Returning nil stops the generic walk for node types handled explicitly
// here (their children are walked manually, in semantic order); returning p
// lets tree.Walk recurse into unhandled node types.
func (p *scopeParser) Visit(node tree.Node) tree.Visitor {
	switch node := node.(type) {
	case *tree.File:
		if p.fileScope.Root != nil {
			panic("only one file can be read!")
		}
		p.openScope(node)
		p.fileScope.Root = p.currentScope
		tree.Walk(p, &node.Body)
		p.closeScope()
		return nil
	case *tree.NameList:
		// Each name in a name list declares a local in the current scope.
		for i := range node.Items {
			p.addLocalVar(&node.Items[i])
		}
		return nil
	case *tree.VariableExpr:
		p.referenceVariable(&node.NameToken)
		return nil
	case *tree.FunctionExpr:
		// Parameters are declared in the same scope as the function body.
		p.openScope(node)
		if node.Params != nil {
			tree.Walk(p, node.Params)
		}
		tree.Walk(p, &node.Body)
		p.closeScope()
		return nil
	case *tree.DoStmt:
		p.openScope(node)
		tree.Walk(p, &node.Body)
		p.closeScope()
		return nil
	case *tree.IfStmt:
		p.openScope(node)
		if node.Cond != nil {
			tree.Walk(p, node.Cond)
		}
		tree.Walk(p, &node.Body)
		for i := range node.ElseIf {
			// Close previous if/elseif scope.
			p.closeScope()
			p.openScope(&node.ElseIf[i])
			tree.Walk(p, &node.ElseIf[i])
		}
		if node.Else != nil {
			// Close previous if/elseif scope.
			p.closeScope()
			p.openScope(node.Else)
			tree.Walk(p, node.Else)
		}
		p.closeScope()
		return nil
	case *tree.NumericForStmt:
		// Open a separate scope for range expressions, which should appear
		// before the scope of the body, but not as a parent.
		// TODO: Figure out a better way to map this scope to a node.
		p.openScope(node.Min)
		if node.Min != nil {
			tree.Walk(p, node.Min)
		}
		if node.Max != nil {
			tree.Walk(p, node.Max)
		}
		if node.Step != nil {
			tree.Walk(p, node.Step)
		}
		p.closeScope()
		// The loop variable is local to the body scope.
		p.openScope(node)
		p.addLocalVar(&node.NameToken)
		tree.Walk(p, &node.Body)
		p.closeScope()
		return nil
	case *tree.GenericForStmt:
		// Open a separate scope for iterator expressions, which must appear
		// before the scope of the body, but not as a parent.
		p.openScope(&node.Iterator)
		tree.Walk(p, &node.Iterator)
		p.closeScope()
		p.openScope(node)
		tree.Walk(p, &node.Names)
		tree.Walk(p, &node.Body)
		p.closeScope()
		return nil
	case *tree.WhileStmt:
		p.openScope(node)
		if node.Cond != nil {
			tree.Walk(p, node.Cond)
		}
		tree.Walk(p, &node.Body)
		p.closeScope()
		return nil
	case *tree.RepeatStmt:
		// In repeat/until, the condition can see locals from the body, so the
		// body is walked first inside the same scope.
		p.openScope(node)
		tree.Walk(p, &node.Body)
		if node.Cond != nil {
			tree.Walk(p, node.Cond)
		}
		p.closeScope()
		return nil
	case *tree.LocalVarStmt:
		// Expressions must be added first: the values cannot see the
		// variables being declared.
		if node.Values != nil {
			tree.Walk(p, node.Values)
		}
		// Add variables.
		tree.Walk(p, &node.Names)
		return nil
	case *tree.LocalFunctionStmt:
		// Unlike LocalVarStmt, the name is declared before the function body
		// is walked, so the function can refer to itself.
		p.addLocalVar(&node.NameToken)
		tree.Walk(p, &node.Func)
		return nil
	case *tree.FunctionStmt:
		tree.Walk(p, &node.Name)
		tree.Walk(p, &node.Func)
		return nil
	case *tree.FuncNameList:
		// Refer to first name in list; the remaining names are field
		// accesses, not variables.
		p.referenceVariable(&node.Items[0])
		return nil
	default:
	}
	return p
}
// BuildFileScope walks the given parse tree, building a tree of scopes and the
// variables they contain.
func BuildFileScope(file *tree.File) *FileScope {
var p scopeParser
p.init()
tree.Walk(&p, file)
if p.currentScope != nil {
panic("unbalanced scopes")
}
for _, g := range p.fileScope.Globals {
g.LifeStart = p.fileScope.Root.Start
g.LifeEnd = p.fileScope.Root.End
g.ScopeEnd = p.fileScope.Root.End
}
return p.fileScope
} | go/extend/scope.go | 0.664976 | 0.502014 | scope.go | starcoder |
package solutions
import (
"fmt"
"reflect"
)
// canCompleteCircuit returns the unique start index from which the circular
// route can be completed, or -1 when the total gas is insufficient. A start
// is ruled out as soon as the running tank goes negative, because every
// station up to that point would also fail.
func canCompleteCircuit(gas []int, cost []int) int {
	n := len(gas)
	var total, tank, candidate int
	for station := 0; station < n; station++ {
		delta := gas[station] - cost[station]
		total += delta
		tank += delta
		if tank < 0 {
			// Running dry here rules out every start up to this station;
			// restart the candidate at the next one.
			tank = 0
			candidate = (station + 1) % n
		}
	}
	// NOTE(review): leftover debug print; kept byte-for-byte to preserve
	// behavior (removing it would also orphan the fmt import).
	fmt.Println("total-> ", total)
	if total < 0 {
		return -1
	}
	return candidate
}
func init() {
desc := `
There are N gas stations along a circular route, where the amount of gas at station i is gas[i].
You have a car with an unlimited gas tank and it costs cost[i] of gas to travel from station i to its next station (i+1). You begin the journey with an empty tank at one of the gas stations.
Return the starting gas station's index if you can travel around the circuit once in the clockwise direction, otherwise return -1.
Note:
If there exists a solution, it is guaranteed to be unique.
Both input arrays are non-empty and have the same length.
Each element in the input arrays is a non-negative integer.
Example 1:
Input:
gas = [1,2,3,4,5]
cost = [3,4,5,1,2]
Output: 3
Explanation:
Start at station 3 (index 3) and fill up with 4 unit of gas. Your tank = 0 + 4 = 4
Travel to station 4. Your tank = 4 - 1 + 5 = 8
Travel to station 0. Your tank = 8 - 2 + 1 = 7
Travel to station 1. Your tank = 7 - 3 + 2 = 6
Travel to station 2. Your tank = 6 - 4 + 3 = 5
Travel to station 3. The cost is 5. Your gas is just enough to travel back to station 3.
Therefore, return 3 as the starting index.
Example 2:
Input:
gas = [2,3,4]
cost = [3,4,3]
Output: -1
Explanation:
You can't start at station 0 or 1, as there is not enough gas to travel to the next station.
Let's start at station 2 and fill up with 4 unit of gas. Your tank = 0 + 4 = 4
Travel to station 0. Your tank = 4 - 3 + 2 = 3
Travel to station 1. Your tank = 3 - 3 + 3 = 3
You cannot travel back to station 2, as it requires 4 unit of gas but you only have 3.
Therefore, you can't travel around the circuit once no matter where you start.
`
sol := Solution{
Title: "Gas Station",
Desc: desc,
Method: reflect.ValueOf(canCompleteCircuit),
Tests: make([]TestCase, 0),
}
a := TestCase{}
a.Input = []interface{}{[]int{1, 2, 3, 4, 5}, []int{3, 4, 5, 1, 2}}
a.Output = []interface{}{3}
sol.Tests = append(sol.Tests, a)
a.Input = []interface{}{[]int{5, 1, 2, 3, 4}, []int{4, 4, 1, 5, 1}}
a.Output = []interface{}{4}
sol.Tests = append(sol.Tests, a)
SolutionMap["0134"] = sol
}
package encoder
import (
"fmt"
"github.com/gojek/merlin/pkg/transformer/spec"
"github.com/gojek/merlin/pkg/transformer/types/converter"
"math"
"time"
)
const (
floatZero = 0.0000000001
q1LastMonth = 3
q2LastMonth = 6
q3LastMonth = 9
h1LastMonth = 6
february = 2
minInSec = 60
hourInSec = 3600
dayInSec = 86400
weekInSec = 604800
daysInSec31 = 2678400
daysInSec30 = 2592000
daysInSec29 = 2505600
daysInSec28 = 2419200
q1InSec = 7776000
q1LeapInSec = 7862400
q2InSec = 7862400
q3InSec = 7948800
q4InSec = 7948800
h1InSec = 15638400
h1LeapInSec = 15724800
h2InSec = 15897600
yearInSec = 31536000
leapYearInSec = 31622400
completeAngle = 2 * math.Pi
// Unit angles for variable periods
unitDaysInSec31 = completeAngle / daysInSec31
unitDaysInSec30 = completeAngle / daysInSec30
unitDaysInSec29 = completeAngle / daysInSec29
unitDaysInSec28 = completeAngle / daysInSec28
unitQ1InSec = completeAngle / q1InSec
unitQ1LeapInSec = completeAngle / q1LeapInSec
unitQ2InSec = completeAngle / q2InSec
unitQ3InSec = completeAngle / q3InSec
unitQ4InSec = completeAngle / q4InSec
unitH1InSec = completeAngle / h1InSec
unitH1LeapInSec = completeAngle / h1LeapInSec
unitH2InSec = completeAngle / h2InSec
unitYearInSec = completeAngle / yearInSec
unitLeapYearInSec = completeAngle / leapYearInSec
)
// Unit angles for variable periods for each month
var monthInSec = map[int]float64{
1: unitDaysInSec31,
2: unitDaysInSec28,
3: unitDaysInSec31,
4: unitDaysInSec30,
5: unitDaysInSec31,
6: unitDaysInSec30,
7: unitDaysInSec31,
8: unitDaysInSec31,
9: unitDaysInSec30,
10: unitDaysInSec31,
11: unitDaysInSec30,
12: unitDaysInSec31,
13: unitDaysInSec29, // Leap year Feb
}
type CyclicalEncoder struct {
PeriodType spec.PeriodType
Min float64
Max float64
}
// NewCyclicalEncoder builds a CyclicalEncoder from the given config. The
// config either specifies an explicit [Min, Max) range, or an epoch-time
// period. Fixed-length periods (hour/day/week, and explicit ranges) are
// normalized to PeriodType_UNDEFINED with the period length stored in Max;
// variable-length periods (month/quarter/half/year) keep their period type
// and are resolved per-value at encode time.
func NewCyclicalEncoder(config *spec.CyclicalEncoderConfig) (*CyclicalEncoder, error) {
	// by range
	byRange := config.GetByRange()
	if byRange != nil {
		// Reject ranges that are empty or inverted (within float tolerance).
		if (byRange.Max - byRange.Min) < floatZero {
			return nil, fmt.Errorf("max of cyclical range must be larger than min")
		}
		return &CyclicalEncoder{
			PeriodType: spec.PeriodType_UNDEFINED,
			Min:        byRange.Min,
			Max:        byRange.Max,
		}, nil
	}
	// by epoch time
	byEpochTime := config.GetByEpochTime()
	var min, max float64 = 0, 0
	var period spec.PeriodType
	if byEpochTime != nil {
		switch byEpochTime.PeriodType {
		case spec.PeriodType_HOUR:
			period = spec.PeriodType_UNDEFINED
			max = hourInSec
		case spec.PeriodType_DAY:
			period = spec.PeriodType_UNDEFINED
			max = dayInSec
		case spec.PeriodType_WEEK:
			period = spec.PeriodType_UNDEFINED
			max = weekInSec
		case spec.PeriodType_MONTH, spec.PeriodType_QUARTER, spec.PeriodType_HALF, spec.PeriodType_YEAR:
			period = byEpochTime.PeriodType
			max = 0
		default:
			// Fixed the misspelling "unspported" in this user-facing error.
			return nil, fmt.Errorf("invalid or unsupported cycle period")
		}
		return &CyclicalEncoder{
			PeriodType: period,
			Min:        min,
			Max:        max,
		}, nil
	}
	return nil, fmt.Errorf("cyclical encoding config invalid or undefined")
}
// Encode maps each input value onto the unit circle, returning two parallel
// columns: column+"_x" holds the cosines and column+"_y" the sines of each
// value's phase within the configured cycle. Returns an error on nil or
// non-numeric values.
func (oe *CyclicalEncoder) Encode(values []interface{}, column string) (map[string]interface{}, error) {
	encodedCos := make([]interface{}, 0, len(values))
	encodedSin := make([]interface{}, 0, len(values))
	// config with fixed range: phase is a simple linear mapping of the value
	// into [0, 2*pi).
	if oe.PeriodType == spec.PeriodType_UNDEFINED {
		period := oe.Max - oe.Min
		unitAngle := completeAngle / period
		for _, val := range values {
			// Check if value is missing
			if val == nil {
				return nil, fmt.Errorf("missing value")
			}
			// Check if value is valid
			valFloat, err := converter.ToFloat64(val)
			if err != nil {
				return nil, err
			}
			// Encode to sin and cos
			phase := (valFloat - oe.Min) * unitAngle
			encodedCos = append(encodedCos, math.Cos(phase))
			encodedSin = append(encodedSin, math.Sin(phase))
		}
	} else {
		// config with variable range, by epoch time (e.g. different days in each month, leap year etc.)
		for _, val := range values {
			// Check if value is missing
			if val == nil {
				return nil, fmt.Errorf("missing value")
			}
			// Check if value is valid
			valInt, err := converter.ToInt64(val)
			if err != nil {
				return nil, err
			}
			// convert epoch time to golang datetime; input is interpreted as
			// Unix seconds in UTC.
			t := time.Unix(valInt, 0).In(time.UTC)
			shareOfPeriod, err := getCycleTime(oe.PeriodType, t)
			if err != nil {
				return nil, err
			}
			unitAngle, err := getUnitAngle(oe.PeriodType, t)
			if err != nil {
				return nil, err
			}
			// Encode to sin and cos
			phase := float64(shareOfPeriod) * unitAngle
			encodedCos = append(encodedCos, math.Cos(phase))
			encodedSin = append(encodedSin, math.Sin(phase))
		}
	}
	return map[string]interface{}{
		column + "_x": encodedCos,
		column + "_y": encodedSin,
	}, nil
}
// getCycleTime computes the number of seconds past the beginning of a
// pre-defined cycle (e.g. seconds since the start of the month, quarter,
// half, or year containing t).
// Only works with PeriodType with variable cycle time such as Month, Year etc.
// For period type with fixed cycle time, it is handled differently by encoder and
// does not need the cycle time to be computed.
func getCycleTime(periodType spec.PeriodType, t time.Time) (int, error) {
	switch periodType {
	case spec.PeriodType_MONTH:
		// Day() is 1-based, so subtract 1 to get whole days elapsed.
		dayElapsed := t.Day() - 1
		hr, min, sec := t.Clock()
		elapsed := getElapsedSec(dayElapsed, hr, min, sec)
		return elapsed, nil
	case spec.PeriodType_QUARTER:
		// Seconds since the start of the year, then subtract the preceding
		// quarters to get seconds into the current quarter.
		dayElapsed := t.YearDay() - 1
		hr, min, sec := t.Clock()
		elapsed := getElapsedSec(dayElapsed, hr, min, sec)
		var cycleTime int
		if t.Month() <= q1LastMonth {
			return elapsed, nil
		} else if t.Month() <= q2LastMonth {
			cycleTime = elapsed - q1InSec
		} else if t.Month() <= q3LastMonth {
			cycleTime = elapsed - h1InSec
		} else {
			cycleTime = elapsed - h1InSec - q3InSec
		}
		// YearDay includes Feb 29 in leap years; the preceding-quarter
		// constants assume a non-leap Q1, so subtract the extra day.
		if isLeapYear(t.Year()) {
			cycleTime -= dayInSec //minus extra day from leap year
		}
		return cycleTime, nil
	case spec.PeriodType_HALF:
		dayElapsed := t.YearDay() - 1
		hr, min, sec := t.Clock()
		elapsed := getElapsedSec(dayElapsed, hr, min, sec)
		if t.Month() <= 6 {
			return elapsed, nil
		}
		// Second half: subtract the (leap-aware) length of the first half.
		if isLeapYear(t.Year()) {
			return elapsed - h1LeapInSec, nil
		}
		return elapsed - h1InSec, nil
	case spec.PeriodType_YEAR:
		dayElapsed := t.YearDay() - 1
		hr, min, sec := t.Clock()
		elapsed := getElapsedSec(dayElapsed, hr, min, sec)
		return elapsed, nil
	}
	return 0, fmt.Errorf("period type is undefined for this use case")
}
// getElapsedSec converts a duration expressed as whole days, hours, minutes,
// and seconds into a total number of seconds.
func getElapsedSec(dayElapsed int, hr int, min int, sec int) int {
	return dayElapsed*dayInSec + hr*hourInSec + min*minInSec + sec
}
// getUnitAngle computes the angle in radians represented by per unit second
// of a pre-defined period.
// This is derived from the formula for calculating phase:
// phase = time passed / period * 2pi
// By rearranging the formula (for optimizing computation) into this:
// phase = time pass * 2pi / period
// we define unit angle as (2pi / period)
// The motivation is that we can pre-compute this value once and use it repeatedly.
func getUnitAngle(periodType spec.PeriodType, t time.Time) (float64, error) {
	switch periodType {
	case spec.PeriodType_MONTH:
		// Index 13 in monthInSec holds the leap-year February length.
		if t.Month() == february && isLeapYear(t.Year()) {
			return monthInSec[13], nil
		}
		return monthInSec[int(t.Month())], nil
	case spec.PeriodType_QUARTER:
		if t.Month() <= q1LastMonth {
			// Only Q1 contains February, so only it varies with leap years.
			if isLeapYear(t.Year()) {
				return unitQ1LeapInSec, nil
			}
			return unitQ1InSec, nil
		} else if t.Month() <= q2LastMonth {
			return unitQ2InSec, nil
		} else if t.Month() <= q3LastMonth {
			return unitQ3InSec, nil
		}
		return unitQ4InSec, nil
	case spec.PeriodType_HALF:
		if t.Month() <= h1LastMonth {
			if isLeapYear(t.Year()) {
				return unitH1LeapInSec, nil
			}
			return unitH1InSec, nil
		}
		return unitH2InSec, nil
	case spec.PeriodType_YEAR:
		if isLeapYear(t.Year()) {
			return unitLeapYearInSec, nil
		}
		return unitYearInSec, nil
	}
	return 0, fmt.Errorf("period type is undefined for this use case")
}
// isLeapYear reports whether the given year is a leap year in the Gregorian
// calendar: divisible by 4, except century years, which must also be
// divisible by 400. (The original expression repeated the year%4 test in
// both disjuncts; this is the equivalent simplified form.)
func isLeapYear(year int) bool {
	return year%4 == 0 && (year%100 != 0 || year%400 == 0)
}
package core
import (
"runtime"
. "github.com/gooid/gocv/opencv3/internal/native"
)
const _channelsMatOfRect2d = 4
var _depthMatOfRect2d = CvTypeCV_64F
type MatOfRect2d struct {
*Mat
}
// NewMatOfRect2d creates an empty MatOfRect2d backed by a fresh Mat.
func NewMatOfRect2d() (rcvr *MatOfRect2d) {
	rcvr = &MatOfRect2d{}
	rcvr.Mat = NewMat2()
	// The finalizer must not close over rcvr itself: per runtime.SetFinalizer,
	// a finalizer closure that references the object keeps it reachable, so
	// it would never be collected. Use the argument the runtime passes in.
	runtime.SetFinalizer(rcvr, func(m *MatOfRect2d) { m.finalize() })
	return
}
// NewMatOfRect2d2 wraps an existing native Mat address, validating that the
// matrix is a vector of 4-channel CV_64F entries (x, y, width, height).
func NewMatOfRect2d2(addr int64) (rcvr *MatOfRect2d) {
	rcvr = &MatOfRect2d{}
	rcvr.Mat = NewMat(addr)
	// Do not capture rcvr in the finalizer closure: per runtime.SetFinalizer,
	// that keeps the object reachable and the finalizer never runs.
	runtime.SetFinalizer(rcvr, func(m *MatOfRect2d) { m.finalize() })
	if !rcvr.Empty() && rcvr.CheckVector2(_channelsMatOfRect2d, _depthMatOfRect2d) < 0 {
		Throw(NewIllegalArgumentException("Incompatible Mat"))
	}
	return
}
// NewMatOfRect2d3 wraps an existing Mat (full range), validating that the
// matrix is a vector of 4-channel CV_64F entries.
func NewMatOfRect2d3(m *Mat) (rcvr *MatOfRect2d) {
	rcvr = &MatOfRect2d{}
	rcvr.Mat = NewMat8(m, RangeAll())
	// Do not capture rcvr in the finalizer closure: per runtime.SetFinalizer,
	// that keeps the object reachable and the finalizer never runs.
	runtime.SetFinalizer(rcvr, func(mr *MatOfRect2d) { mr.finalize() })
	if !rcvr.Empty() && rcvr.CheckVector2(_channelsMatOfRect2d, _depthMatOfRect2d) < 0 {
		Throw(NewIllegalArgumentException("Incompatible Mat"))
	}
	return
}
// Alloc resizes the matrix to hold elemNumber rectangles, one per row, with
// 4 CV_64F channels each. A non-positive count leaves the matrix untouched.
func (rcvr *MatOfRect2d) Alloc(elemNumber int) {
	if elemNumber > 0 {
		rcvr.Create(elemNumber, 1, CvTypeMakeType(_depthMatOfRect2d, _channelsMatOfRect2d))
	}
}
// FromArray fills the matrix from a slice of rectangles, packing each as four
// consecutive float64 values (x, y, width, height). An empty slice is a no-op.
func (rcvr *MatOfRect2d) FromArray(a []*Rect2d) {
	if a == nil || len(a) == 0 {
		return
	}
	num := len(a)
	rcvr.Alloc(num)
	// Flatten all rectangles into one buffer so PutD is called only once.
	buff := make([]float64, num*_channelsMatOfRect2d)
	for i := 0; i < num; i++ {
		r := a[i]
		buff[_channelsMatOfRect2d*i+0] = r.X
		buff[_channelsMatOfRect2d*i+1] = r.Y
		buff[_channelsMatOfRect2d*i+2] = r.Width
		buff[_channelsMatOfRect2d*i+3] = r.Height
	}
	rcvr.PutD(0, 0, buff)
}
// MatOfRect2dFromNativeAddr wraps a native Mat address; equivalent to
// NewMatOfRect2d2.
func MatOfRect2dFromNativeAddr(addr int64) *MatOfRect2d {
	return NewMatOfRect2d2(addr)
}
func (rcvr *MatOfRect2d) ToArray() []*Rect2d {
num := rcvr.Total()
a := make([]*Rect2d, num)
if num == 0 {
return a
}
buff := make([]float64, num*_channelsMatOfRect2d)
rcvr.GetD(0, 0, buff)
for i := int64(0); i < num; i++ {
a[i] = NewRect2d(buff[i*_channelsMatOfRect2d], buff[i*_channelsMatOfRect2d+1], buff[i*_channelsMatOfRect2d+2], buff[i*_channelsMatOfRect2d+3])
}
return a
} | opencv3/core/MatOfRect2d.java.go | 0.608361 | 0.496704 | MatOfRect2d.java.go | starcoder |
// Package day20 solves AoC 2017 day 20.
package day20
import (
"math"
"sort"
"strconv"
"github.com/fis/aoc/glue"
)
const inputRegexp = `^p=<(-?\d+),(-?\d+),(-?\d+)>, v=<(-?\d+),(-?\d+),(-?\d+)>, a=<(-?\d+),(-?\d+),(-?\d+)>$`
func init() {
glue.RegisterSolver(2017, 20, glue.RegexpSolver{
Solver: solve,
Regexp: inputRegexp,
})
}
// solve computes both puzzle answers: part 1 is the particle that stays
// closest to the origin long-term; part 2 is the number of particles that
// survive collisions. closest must run before collideSim, since collideSim
// zeroes collided particles in ps.
func solve(input [][]string) ([]string, error) {
	ps := parseInput(input)
	p1 := closest(ps)
	p2 := collideSim(ps, 40)
	return glue.Ints(p1, p2), nil
}
type p3 struct {
x, y, z int
}
type particle struct {
p, v, a p3
}
// closest returns the index of the particle that remains nearest the origin
// in the long run: smallest acceleration magnitude, ties broken by velocity,
// then by position (all Manhattan norms).
func closest(ps []particle) (minI int) {
	minP, minV, minA := math.MaxInt, math.MaxInt, math.MaxInt
	for i, p := range ps {
		dP, dV, dA := p.p.dist(), p.v.dist(), p.a.dist()
		// Lexicographic comparison on (acceleration, velocity, position).
		if dA < minA || (dA == minA && dV < minV) || (dA == minA && dV == minV && dP < minP) {
			minI, minP, minV, minA = i, dP, dV, dA
		}
	}
	return minI
}
// collideSim simulates the particles for the given number of rounds, removing
// any particles that share a position at the same tick, and returns the
// number of survivors. Removed particles are overwritten with the zero
// particle in ps, so the slice is mutated (and the zero particle is assumed
// not to be a legitimate particle value).
func collideSim(ps []particle, rounds int) int {
	for t := 0; t < rounds; t++ {
		// Bucket live particle indices by their position at time t.
		positions := map[p3][]int{}
		for i, p := range ps {
			if p == (particle{}) {
				continue
			}
			at := p.pos(t)
			positions[at] = append(positions[at], i)
		}
		// Any bucket with two or more occupants is a collision.
		for _, list := range positions {
			if len(list) < 2 {
				continue
			}
			for _, i := range list {
				ps[i] = particle{}
			}
		}
	}
	surviving := 0
	for _, p := range ps {
		if p != (particle{}) {
			surviving++
		}
	}
	return surviving
}
// collideCalc computes collisions analytically: for every particle pair it
// solves for an integer collision time, groups simultaneous collisions by
// (time, position), then resolves them in chronological order and returns
// the survivor count. A collision only "happens" if at least two of its
// participants are still alive when its time arrives.
// NOTE(review): not referenced by solve in this chunk — appears to be an
// exact alternative to collideSim; confirm before removing.
func collideCalc(ps []particle) int {
	type collisionKey struct {
		t int
		p p3
	}
	collisions := map[collisionKey][]int{}
	collisionKeys := []collisionKey(nil)
	for i, N := 0, len(ps); i < N-1; i++ {
		for j := i + 1; j < N; j++ {
			if t := collide3D(ps[i], ps[j]); t >= 0 {
				key := collisionKey{t: t, p: ps[i].pos(t)}
				if _, old := collisions[key]; !old {
					collisionKeys = append(collisionKeys, key)
				}
				collisions[key] = insert(collisions[key], i, j)
			}
		}
	}
	// Process collision events in time order.
	sort.Slice(collisionKeys, func(i, j int) bool {
		return collisionKeys[i].t < collisionKeys[j].t
	})
	collided := make([]bool, len(ps))
	survived := len(ps)
	for _, key := range collisionKeys {
		// Count participants that are still alive at this event.
		size := 0
		for _, p := range collisions[key] {
			if !collided[p] {
				size++
			}
		}
		if size >= 2 {
			for _, p := range collisions[key] {
				if !collided[p] {
					collided[p] = true
					survived--
				}
			}
		}
	}
	return survived
}
// insert appends a and b to list unless they are already present. The
// original version appended a duplicate when a == b and the value was absent
// (it decided membership of b before appending a); callers in this file
// always pass a < b, so this fix is behavior-preserving for them.
func insert(list []int, a, b int) []int {
	foundA, foundB := false, false
	for _, v := range list {
		if v == a {
			foundA = true
		}
		if v == b {
			foundB = true
		}
	}
	if !foundA {
		list = append(list, a)
		if a == b {
			foundB = true
		}
	}
	if !foundB {
		list = append(list, b)
	}
	return list
}
// collide3D returns the earliest nonnegative integer time at which particles
// p1 and p2 occupy the same position, or -1 if they never do. It solves each
// axis independently and intersects the candidate root sets.
func collide3D(p1, p2 particle) (t int) {
	t1x, t2x, allX := collide1D(p1.p.x, p1.v.x, p1.a.x, p2.p.x, p2.v.x, p2.a.x)
	if t1x < 0 && !allX {
		return -1
	}
	t1y, t2y, allY := collide1D(p1.p.y, p1.v.y, p1.a.y, p2.p.y, p2.v.y, p2.a.y)
	if t1y < 0 && !allY {
		return -1
	}
	t1z, t2z, allZ := collide1D(p1.p.z, p1.v.z, p1.a.z, p2.p.z, p2.v.z, p2.a.z)
	if t1z < 0 && !allZ {
		return -1
	}
	t1, t2, all := combine(t1x, t2x, t1y, t2y, allX, allY)
	t1, t2, _ = combine(t1, t2, t1z, t2z, all, allZ)
	// Pick the smallest valid (nonnegative) root, if any.
	if t1 >= 0 && t2 >= 0 {
		if t1 < t2 {
			return t1
		} else {
			return t2
		}
	} else if t1 >= 0 {
		return t1
	} else if t2 >= 0 {
		return t2
	}
	return -1
}

// collide1D solves for integer times t >= 0 where the two 1-D trajectories
// coincide. With pd/vd/ad the coordinate differences, equality at time t
// means pd + t*vd + t(t+1)/2*ad = 0; multiplying by 2 gives the quadratic
// A*t^2 + B*t + C = 0 with A = ad, B = 2*vd+ad, C = 2*pd. Returns up to two
// nonnegative integer roots (-1 for "none"), and all = true when the
// trajectories coincide for every t (identically zero equation).
func collide1D(p1, v1, a1, p2, v2, a2 int) (t1, t2 int, all bool) {
	pd, vd, ad := p1-p2, v1-v2, a1-a2
	A, B, C := ad, 2*vd+ad, 2*pd
	if A == 0 && B == 0 {
		// Constant difference: either always equal or never.
		return -1, -1, C == 0
	} else if A == 0 {
		// Linear case: single candidate root -C/B, if integral and >= 0.
		if C%B == 0 && -C/B >= 0 {
			return -C / B, -1, false
		} else {
			return -1, -1, false
		}
	}
	disc := B*B - 4*A*C
	if disc < 0 {
		return -1, -1, false
	} else if disc == 0 {
		// Double root -B/(2A), if integral and >= 0.
		if B%(2*A) == 0 && -B/(2*A) >= 0 {
			return -B / (2 * A), -1, false
		} else {
			return -1, -1, false
		}
	}
	// Roots must be integers: the discriminant has to be a perfect square.
	sqDisc := sqrt(disc)
	if sqDisc*sqDisc != disc {
		return -1, -1, false
	}
	i1 := (-B-sqDisc)%(2*A) == 0 && (-B-sqDisc)/(2*A) >= 0
	i2 := (-B+sqDisc)%(2*A) == 0 && (-B+sqDisc)/(2*A) >= 0
	if i1 && i2 {
		return (-B - sqDisc) / (2 * A), (-B + sqDisc) / (2 * A), false
	} else if i1 {
		return (-B - sqDisc) / (2 * A), -1, false
	} else if i2 {
		return (-B + sqDisc) / (2 * A), -1, false
	}
	return -1, -1, false
}

// combine intersects two per-axis root sets (each up to two roots, or "all
// times" when allX is set), producing the root set valid for both axes.
func combine(t1a, t2a, t1b, t2b int, allA, allB bool) (t1, t2 int, all bool) {
	if allA && allB {
		return -1, -1, true
	} else if allA {
		return t1b, t2b, false
	} else if allB {
		return t1a, t2a, false
	}
	if (t1a == t1b && t2a == t2b) || (t1a == t2b && t2a == t1b) {
		return t1a, t2a, false
	}
	if t1a == t1b || t1a == t2b {
		return t1a, -1, false
	}
	if t2a == t1b || t2a == t2b {
		return t2a, -1, false
	}
	return -1, -1, false
}
// parseInput converts the regexp-captured fields into particles. Atoi errors
// are deliberately ignored: the lines already matched inputRegexp, which only
// admits integer fields.
func parseInput(input [][]string) []particle {
	ps := make([]particle, len(input))
	for i, row := range input {
		ps[i].p.x, _ = strconv.Atoi(row[0])
		ps[i].p.y, _ = strconv.Atoi(row[1])
		ps[i].p.z, _ = strconv.Atoi(row[2])
		ps[i].v.x, _ = strconv.Atoi(row[3])
		ps[i].v.y, _ = strconv.Atoi(row[4])
		ps[i].v.z, _ = strconv.Atoi(row[5])
		ps[i].a.x, _ = strconv.Atoi(row[6])
		ps[i].a.y, _ = strconv.Atoi(row[7])
		ps[i].a.z, _ = strconv.Atoi(row[8])
	}
	return ps
}

// pos returns the particle's position after t whole time steps, using the
// closed form of the discrete update rule.
func (p particle) pos(t int) p3 {
	// p(t) = p0 + t*v0 + (t+1)*t/2 * a0
	tt := (t + 1) * t / 2
	return p3{
		x: p.p.x + t*p.v.x + tt*p.a.x,
		y: p.p.y + t*p.v.y + tt*p.a.y,
		z: p.p.z + t*p.v.z + tt*p.a.z,
	}
}

// dist returns the Manhattan distance of the point from the origin.
func (p p3) dist() int {
	return abs(p.x) + abs(p.y) + abs(p.z)
}

// abs returns the absolute value of x.
func abs(x int) int {
	if x < 0 {
		return -x
	}
	return x
}
// sqrt returns the integer square root of y (floor of the real square root),
// computed with Newton's method on integers. Panics if y is negative.
func sqrt(y int) int {
	if y < 0 {
		panic("sqrt(neg)")
	} else if y <= 1 {
		return y
	}
	// Newton iteration x_{k+1} = (x_k + y/x_k) / 2, stopping once the
	// estimate no longer decreases; x0 is then floor(sqrt(y)).
	x0 := y / 2
	x1 := (x0 + y/x0) / 2
	for x1 < x0 {
		x0 = x1
		x1 = (x0 + y/x0) / 2
	}
	return x0
}
package storage
import (
"fmt"
"math/bits"
)
// ref Libra Position module
// maxLevel for index in uint64
const maxLevel = 63
// InorderIndex represents the inorder traversal index of a binary tree with limited level
type InorderIndex uint64
// FromIndexOnLevel calculates the inorder index of the n-th node (counting
// from the left) on the given level, where level 0 is the leaf level.
func FromIndexOnLevel(indexOnLevel uint64, level int) InorderIndex {
	return InorderIndex(indexOnLevel<<(level+1) | (1<<level - 1))
}

// FromLeafIndex calculates the inorder index of the n-th leaf.
func FromLeafIndex(leafIndex uint64) InorderIndex {
	return FromIndexOnLevel(leafIndex, 0)
}

// FromPostorder calculates inorder index from postorder index. Starting from
// the largest possible full subtree, each subtraction step records (in
// bitmap) whether the postorder index falls past a full subtree of that
// size; what remains of postorder at the end is the node's level, and
// bitmap>>postorder its index on that level.
func FromPostorder(postorder uint64) InorderIndex {
	bitmap := uint64(0)
	fullBinarySize := ^uint64(0)
	for i := maxLevel; i >= 0; i-- {
		if postorder >= fullBinarySize {
			postorder -= fullBinarySize
			bitmap |= 1 << i
		}
		fullBinarySize >>= 1
	}
	return FromIndexOnLevel(bitmap>>postorder, int(postorder))
}
// children returns the number of descendants of node i (a node at level L
// heads a full subtree of 2^(L+1)-2 descendants).
func (i InorderIndex) children() uint64 {
	return uint64(isolateRightMostZeroBit(i))<<1 - 2
}

// Postorder returns the postorder index converted from inorder index.
func (i InorderIndex) Postorder() uint64 {
	onesUpToLevel := uint64(isolateRightMostZeroBit(i)) - 1
	unsetLevelZeros := uint64(i) ^ onesUpToLevel
	return i.children() + unsetLevelZeros - uint64(bits.OnesCount64(unsetLevelZeros))
}

// Parent returns the parent of i.
func (i InorderIndex) Parent() InorderIndex {
	return (i | isolateRightMostZeroBit(i)) & ^(isolateRightMostZeroBit(i) << 1)
}

// Sibling returns the sibling of i (toggles the bit that distinguishes the
// left and right child under the same parent).
func (i InorderIndex) Sibling() InorderIndex {
	return i ^ (isolateRightMostZeroBit(i) << 1)
}

// Level calculates the level of the inorder index: the number of trailing
// one bits (leaves are level 0).
func (i InorderIndex) Level() int {
	return bits.TrailingZeros64(^uint64(i))
}

// LeafIndexOnLevel returns n such that i is the n-th node on its level.
func (i InorderIndex) LeafIndexOnLevel() uint64 {
	return uint64(i) >> (1 + i.Level())
}

// IsLeaf judges whether the inorder index is a leaf (even indices are
// leaves in an inorder numbering).
func (i InorderIndex) IsLeaf() bool {
	return i&1 == 0
}

// IsLeftChild judges whether the inorder index is or can be a left child.
func (i InorderIndex) IsLeftChild() bool {
	return i&(isolateRightMostZeroBit(i)<<1) == 0
}

// IsRightChild judges whether the inorder index is or can be a right child.
func (i InorderIndex) IsRightChild() bool {
	return !i.IsLeftChild()
}

// LeftChild returns the left child; errors when i is a leaf.
func (i InorderIndex) LeftChild() (InorderIndex, error) {
	if i.IsLeaf() {
		return 0, fmt.Errorf("leaf has no child")
	}
	return i & ^(isolateRightMostZeroBit(i) >> 1), nil
}

// RightChild returns the right child; errors when i is a leaf.
func (i InorderIndex) RightChild() (InorderIndex, error) {
	if i.IsLeaf() {
		return 0, fmt.Errorf("leaf has no child")
	}
	return (i | isolateRightMostZeroBit(i)) & ^(isolateRightMostZeroBit(i) >> 1), nil
}

// LeftMostChild returns the left-most descendant leaf of i (clears the
// trailing one bits that encode the level).
func (i InorderIndex) LeftMostChild() InorderIndex {
	level := i.Level()
	return (i >> level) << level
}

// RightMostChild returns the right-most descendant leaf of i.
func (i InorderIndex) RightMostChild() InorderIndex {
	return i + (InorderIndex(i.children()) >> 1)
}

// RootLevelFromLeafIndex calculates the root level of a binary tree
// containing leaves up to the given leaf index.
func RootLevelFromLeafIndex(leafIndex uint64) int {
	return maxLevel + 1 - bits.LeadingZeros64(leafIndex)
}
func isolateRightMostZeroBit(x InorderIndex) InorderIndex {
return (^x) & (x + 1)
} | storage/inorderindex.go | 0.770465 | 0.496582 | inorderindex.go | starcoder |
package schemer
import (
"encoding/json"
"errors"
"fmt"
"io"
"reflect"
)
// BoolSchema is a Schema for encoding and decoding boolean values
type BoolSchema struct {
SchemaOptions
}
// Encode uses the schema to write the encoded value of i to the output
// stream. It is a convenience wrapper around EncodeValue.
func (s *BoolSchema) Encode(w io.Writer, i interface{}) error {
	return s.EncodeValue(w, reflect.ValueOf(i))
}
// EncodeValue uses the schema to write the encoded value of v to the output
// stream. Only reflect.Bool values are supported; true is written as the
// single byte 1 and false as 0.
//
// Fixes two defects in the original: the Write error was silently dropped
// (only the short-write case was checked, then control fell through to a nil
// return), and a redundant switch re-tested a kind already guarded above.
func (s *BoolSchema) EncodeValue(w io.Writer, v reflect.Value) error {
	// PreEncode handles null bookkeeping for nullable schemas and may report
	// that nothing further needs to be written.
	done, err := PreEncode(w, &v, s.Nullable())
	if err != nil || done {
		return err
	}
	if v.Kind() != reflect.Bool {
		return fmt.Errorf("BoolSchema only supports encoding boolean values")
	}
	var boolToEncode byte
	if v.Bool() {
		boolToEncode = 1
	}
	n, err := w.Write([]byte{boolToEncode})
	if err != nil {
		return err
	}
	if n != 1 {
		return errors.New("unexpected number of bytes written")
	}
	return nil
}
// Decode uses the schema to read the next encoded value from the input
// stream and stores it in i. i must be a non-nil destination (typically a
// pointer so the decoded value is settable).
func (s *BoolSchema) Decode(r io.Reader, i interface{}) error {
    if i == nil {
        return fmt.Errorf("cannot decode to nil destination")
    }
    return s.DecodeValue(r, reflect.ValueOf(i))
}
// DecodeValue uses the schema to read the next encoded value from the input
// stream and stores it in v. Any non-zero byte decodes as true. With weak
// decoding enabled, the value may also be stored into integer destinations
// (as 1/0) or string destinations (as "True"/"False").
func (s *BoolSchema) DecodeValue(r io.Reader, v reflect.Value) error {
    done, err := PreDecode(r, &v, s.Nullable())
    if err != nil || done {
        return err
    }
    // An interface destination is filled with a freshly allocated value of
    // the schema's default Go type.
    if v.Kind() == reflect.Interface {
        v.Set(reflect.New(s.GoType()))
        v = v.Elem().Elem()
    }
    buf := make([]byte, 1)
    if _, err := io.ReadAtLeast(r, buf, 1); err != nil {
        return err
    }
    decodedBool := buf[0] > 0
    // Ensure v is settable before attempting any assignment.
    if !v.CanSet() {
        return fmt.Errorf("decode destination is not settable")
    }
    // Bools can be decoded to integer types, bools, and strings.
    switch k := v.Kind(); k {
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        if !s.WeakDecoding() {
            return fmt.Errorf("weak decoding not enabled; cannot decode to int type")
        }
        var n int64
        if decodedBool {
            n = 1
        }
        v.SetInt(n)
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
        if !s.WeakDecoding() {
            return fmt.Errorf("weak decoding not enabled; cannot decode to uint type")
        }
        var n uint64
        if decodedBool {
            n = 1
        }
        v.SetUint(n)
    case reflect.Bool:
        v.SetBool(decodedBool)
    case reflect.String:
        if !s.WeakDecoding() {
            return fmt.Errorf("weak decoding not enabled; cannot decode to string")
        }
        if decodedBool {
            v.SetString("True")
        } else {
            v.SetString("False")
        }
    default:
        return fmt.Errorf("invalid destination %v", k)
    }
    return nil
}
// GoType returns the default Go type that represents the schema: bool, or
// *bool when the schema is nullable.
func (s *BoolSchema) GoType() reflect.Type {
    t := reflect.TypeOf(false)
    if s.Nullable() {
        return reflect.PtrTo(t)
    }
    return t
}
// MarshalJSON encodes the schema in a JSON format, reporting the type name
// and whether it is nullable.
func (s *BoolSchema) MarshalJSON() ([]byte, error) {
    return json.Marshal(map[string]interface{}{
        "type": "bool",
        "nullable": s.Nullable(),
    })
}
// MarshalSchemer encodes the schema in a portable binary format.
func (s *BoolSchema) MarshalSchemer() ([]byte, error) {
    // bool schemas are 1 byte long
    var schema []byte = []byte{BoolByte}
    // The most significant bit indicates whether or not the type is nullable
    if s.Nullable() {
        schema[0] |= NullMask
    }
    return schema, nil
} | bool.go | 0.741955 | 0.414247 | bool.go | starcoder |
// Snippet check looks through documents for CTE snippets (```cte ... ```)
// and attempts to parse it in order to verify that it is valid.
package main
import (
"bytes"
"flag"
"fmt"
"io/ioutil"
"os"
"regexp"
"github.com/kstenerud/go-concise-encoding/ce"
"github.com/kstenerud/go-describe"
)
// main parses the command-line flags and inspects every regular file named
// on the command line for CTE snippets.
func main() {
    quiet := flag.Bool("q", false, "quiet")
    verbose := flag.Bool("v", false, "verbose")
    flag.Parse()
    args := flag.Args()
    verbosityLevel := getVerbosityLevel(*quiet, *verbose)
    if len(args) < 1 {
        printUsage()
        return
    }
    for _, path := range args {
        fi, err := os.Stat(path)
        if err != nil {
            fmt.Printf("Could not open %v: %v\n", path, err)
            // Move on to the next path: the previous "return" here aborted
            // processing of all remaining files on a single bad argument,
            // inconsistent with the skip behavior for non-regular files.
            continue
        }
        if fi.Mode().IsRegular() {
            inspectFile(path, verbosityLevel)
        } else {
            fmt.Printf("Skipping %v because it is not a file.\n", path)
        }
    }
}
// printUsage writes a short description of the tool, its usage line, and the
// registered flag defaults to standard output.
func printUsage() {
    fmt.Printf("Inspects files for inline CTE snippets (anything between [```cte] and [```]) and tries to parse them.\n")
    fmt.Printf("Usage: %v [opts] <files>\n", os.Args[0])
    flag.PrintDefaults()
}
// verbosity selects how much output the inspection produces.
type verbosity int

const (
    verbosityQuiet verbosity = iota
    verbosityNormal
    verbosityVerbose
)

// getVerbosityLevel maps the command-line flags to a verbosity level.
// The quiet flag takes precedence when both flags are set.
func getVerbosityLevel(quiet bool, verbose bool) verbosity {
    switch {
    case quiet:
        return verbosityQuiet
    case verbose:
        return verbosityVerbose
    default:
        return verbosityNormal
    }
}
// isWhitespace reports whether ch is an ASCII space, carriage return,
// newline, or tab.
func isWhitespace(ch byte) bool {
    return ch == ' ' || ch == '\r' || ch == '\n' || ch == '\t'
}
// addHeaderIfNeeded strips leading whitespace from data and, unless the
// snippet already begins with a CTE version header ('c' followed by a digit),
// prepends a minimal "c0" header so the snippet forms a complete document.
func addHeaderIfNeeded(data []byte) []byte {
    trimmed := bytes.TrimLeft(data, " \r\n\t")
    if len(trimmed) < 2 {
        return trimmed
    }
    if trimmed[0] == 'c' && trimmed[1] >= '0' && trimmed[1] <= '9' {
        return trimmed
    }
    return append([]byte("c0\n"), trimmed...)
}
// reportError prints the snippet that failed to parse together with the
// parse error, framed by separator rules.
func reportError(snippet []byte, err error) {
    fmt.Printf("======================================================================\n")
    fmt.Printf("📜 Snippet:\n%v\n", string(snippet))
    fmt.Printf("----------------------------------------------------------------------\n")
    fmt.Printf("❌ Failed: %v\n", err)
    fmt.Printf("======================================================================\n")
}
// reportSuccess prints the snippet that parsed successfully; when the
// unmarshaled value is available it is pretty-printed as well.
func reportSuccess(snippet []byte, unmarshaled interface{}) {
    fmt.Printf("======================================================================\n")
    fmt.Printf("📜 Snippet:\n%v\n", string(snippet))
    fmt.Printf("----------------------------------------------------------------------\n")
    if unmarshaled != nil {
        fmt.Printf("✅ Unmarshaled to:\n%v\n", describe.Describe(unmarshaled, 4))
    } else {
        fmt.Printf("✅ Success\n")
    }
    fmt.Printf("======================================================================\n")
}
// inspectFile reads path, extracts every embedded CTE snippet, and attempts
// to parse each one. Failures are always reported; successes are reported
// only at verbose level.
func inspectFile(path string, verbosityLevel verbosity) {
    contents, err := ioutil.ReadFile(path)
    if err != nil {
        fmt.Printf("Could not read file [%v]: %v\n", path, err)
        // Previously execution fell through here and inspected nil contents;
        // there is nothing to do if the file could not be read.
        return
    }
    if verbosityLevel >= verbosityNormal {
        fmt.Printf("Inspecting %v\n", path)
    }
    for _, snippet := range getSnippets(contents) {
        snippet = addHeaderIfNeeded(snippet)
        unmarshaled, err := ce.UnmarshalCTE(bytes.NewBuffer(snippet), nil, nil)
        if err != nil {
            // Unmarshal failed; presumably a plain decode distinguishes
            // invalid documents from documents that merely cannot be
            // unmarshaled into a Go value — only report if decoding fails too.
            decoder := ce.NewCTEDecoder(nil)
            if err = decoder.DecodeDocument(snippet, ce.NewRules(nil, nil)); err != nil {
                reportError(snippet, err)
                continue
            }
        }
        if verbosityLevel >= verbosityVerbose {
            reportSuccess(snippet, unmarshaled)
        }
    }
}
// snippetMatcher captures the body of fenced CTE code blocks (```cte ... ```).
// (?s) lets "." match newlines; ".*?" is non-greedy so each fence is matched
// separately rather than spanning from the first to the last fence.
var snippetMatcher = regexp.MustCompile("(?s)```cte\\s*(.*?)```")
// getSnippets returns the contents of every CTE snippet found in data.
func getSnippets(data []byte) (snippets [][]byte) {
    for _, match := range snippetMatcher.FindAllSubmatch(data, -1) {
        snippets = append(snippets, match[1])
    }
    return
} | tests/snippet_check/main.go | 0.516352 | 0.543954 | main.go | starcoder |
Copyright 2016 GitHub Inc.
See https://github.com/github/gh-ost/blob/master/LICENSE
*/
/*
Copyright 2021 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vrepl
import (
"fmt"
"reflect"
"strings"
)
// Column represents a table column.
type Column struct {
    Name string
}

// NewColumns creates a new column array from non-empty names; empty names
// are silently dropped.
func NewColumns(names []string) []Column {
    result := []Column{}
    for _, name := range names {
        if name != "" {
            result = append(result, Column{Name: name})
        }
    }
    return result
}
// ParseColumns creates a new column array by parsing a comma-delimited
// names list.
func ParseColumns(names string) []Column {
    return NewColumns(strings.Split(names, ","))
}

// ColumnsMap maps a column name onto its ordinal position.
type ColumnsMap map[string]int

// NewEmptyColumnsMap creates an empty map.
func NewEmptyColumnsMap() ColumnsMap {
    return ColumnsMap{}
}

// NewColumnsMap creates a column map based on an ordered list of columns.
func NewColumnsMap(orderedColumns []Column) ColumnsMap {
    m := NewEmptyColumnsMap()
    for ordinal, column := range orderedColumns {
        m[column.Name] = ordinal
    }
    return m
}
// ColumnList makes for a named list of columns.
type ColumnList struct {
    columns  []Column
    Ordinals ColumnsMap
}

// NewColumnList creates an object given an ordered list of column names.
func NewColumnList(names []string) *ColumnList {
    columns := NewColumns(names)
    return &ColumnList{
        columns:  columns,
        Ordinals: NewColumnsMap(columns),
    }
}

// ParseColumnList parses a comma-delimited list of column names.
func ParseColumnList(names string) *ColumnList {
    columns := ParseColumns(names)
    return &ColumnList{
        columns:  columns,
        Ordinals: NewColumnsMap(columns),
    }
}

// Columns returns the list of columns.
func (l *ColumnList) Columns() []Column {
    return l.columns
}

// Names returns the list of column names, in order.
func (l *ColumnList) Names() []string {
    names := make([]string, 0, len(l.columns))
    for _, column := range l.columns {
        names = append(names, column.Name)
    }
    return names
}
// GetColumn gets a column by name. The returned pointer aliases the list's
// backing slice; nil is returned when the name is unknown.
func (l *ColumnList) GetColumn(columnName string) *Column {
    if ordinal, ok := l.Ordinals[columnName]; ok {
        return &l.columns[ordinal]
    }
    return nil
}
// String returns a comma separated list of column names.
func (l *ColumnList) String() string {
    return strings.Join(l.Names(), ",")
}
// Equals checks for complete (deep) identities of columns, in order.
func (l *ColumnList) Equals(other *ColumnList) bool {
    // Compare the column slices themselves. The previous implementation
    // passed the Columns *method values* to reflect.DeepEqual; func values
    // are never deeply equal unless both are nil, so Equals always
    // returned false.
    return reflect.DeepEqual(l.columns, other.columns)
}
// EqualsByNames checks if the names in this list equal the names of another
// list, in order. Type is ignored.
func (l *ColumnList) EqualsByNames(other *ColumnList) bool {
    return reflect.DeepEqual(l.Names(), other.Names())
}
// IsSubsetOf returns 'true' when column names of this list are a subset of
// another list, in arbitrary order (order agnostic)
func (l *ColumnList) IsSubsetOf(other *ColumnList) bool {
    for _, column := range l.columns {
        if _, exists := other.Ordinals[column.Name]; !exists {
            return false
        }
    }
    return true
}
// Len returns the length of this list
func (l *ColumnList) Len() int {
    return len(l.columns)
}
// UniqueKey is the combination of a key's name and columns.
type UniqueKey struct {
    Name string
    Columns ColumnList
    HasNullable bool
    IsAutoIncrement bool
}
// IsPrimary checks if this unique key is primary, by MySQL's naming
// convention for the primary key.
func (k *UniqueKey) IsPrimary() bool {
    return k.Name == "PRIMARY"
}
// Len returns the number of columns covered by this key.
func (k *UniqueKey) Len() int {
    return k.Columns.Len()
}
// String returns a visual representation of this key.
func (k *UniqueKey) String() string {
    description := k.Name
    if k.IsAutoIncrement {
        description = fmt.Sprintf("%s (auto_increment)", description)
    }
    return fmt.Sprintf("%s: %s; has nullable: %+v", description, k.Columns.Names(), k.HasNullable)
} | go/vt/vttablet/onlineddl/vrepl/types.go | 0.881538 | 0.408454 | types.go | starcoder |
package disjointset
// DisjointSetNode is a single element of the DisjointSet structure. It holds
// the parent id, the node rank, and the number of elements in the subtree
// (the size is only meaningful for root nodes).
type DisjointSetNode struct {
    parent int
    rank   int
    size   int
}

// DisjointSet is a union-find structure: it stores the element nodes and
// the number of disjoint components they currently form.
type DisjointSet struct {
    elements        []DisjointSetNode
    totalComponents int
}

// New instantiates a DisjointSet with 'size' singleton elements, each
// initially its own component.
func New(size int) *DisjointSet {
    set := &DisjointSet{
        elements:        make([]DisjointSetNode, size),
        totalComponents: size,
    }
    for i := range set.elements {
        set.elements[i] = DisjointSetNode{parent: i, rank: 0, size: 1}
    }
    return set
}

// TotalElements returns the total number of elements the set holds.
func (set *DisjointSet) TotalElements() int {
    return len(set.elements)
}

// Find returns the id of the component containing node i, compressing the
// path to the root as it recurses.
func (set *DisjointSet) Find(i int) int {
    if set.elements[i].parent != i {
        set.elements[i].parent = set.Find(set.elements[i].parent)
    }
    return set.elements[i].parent
}

// Size returns the number of elements in the component containing node p.
func (set *DisjointSet) Size(p int) int {
    return set.elements[set.Find(p)].size
}

// Components returns the total number of disjoint components in the set.
func (set *DisjointSet) Components() int {
    return set.totalComponents
}

// Connected reports whether nodes p and q belong to the same component.
func (set *DisjointSet) Connected(p, q int) bool {
    return set.Find(p) == set.Find(q)
}
/**
 * Union merges the two components to which p and q belong, using union by
 * rank. It does nothing if they already belong to the same component.
 * Returns the root id of the resulting component.
 */
func (set *DisjointSet) Union(p, q int) int {
	i := set.Find(p)
	j := set.Find(q)
	if i == j {
		return i
	}
	set.totalComponents--
	if set.elements[i].rank < set.elements[j].rank {
		// Attach the lower-rank tree under the higher-rank root.
		set.elements[i].parent = j
		set.elements[j].size += set.elements[i].size
		return j
	} else {
		set.elements[j].parent = i
		set.elements[i].size += set.elements[j].size
		// Equal ranks: the surviving root's rank grows by one.
		if set.elements[i].rank == set.elements[j].rank {
			set.elements[i].rank++
		}
		return i
	}
} | disjointset/disjointset.go | 0.806396 | 0.449393 | disjointset.go | starcoder |
package feistel
import (
"bytes"
"encoding/binary"
"io"
)
// ecb is a namespace type carrying the ECB-mode Encrypt/Decrypt functions.
type ecb struct{}
// ECB contains the Encrypt and Decrypt functions using the ECB algorithm.
var ECB ecb
// EncryptReader reads data from a reader, encrypts it 8 bytes (one 64-bit
// block) at a time, and writes the encrypted data to the writer. Returns nil
// on a clean EOF.
func (ecb) EncryptReader(r io.Reader, w io.Writer, rounds int, keys []uint32) error {
    var block [8]byte
    for {
        // readInt splits the next 8-byte block into two 32-bit halves.
        left, right, err := readInt(r, block)
        if err != nil {
            if err == io.EOF {
                return nil
            }
            return err
        }
        left, right = Encrypt(left, right, rounds, keys)
        if err := writeInt(w, block, left, right); err != nil {
            return err
        }
    }
}
// Encrypt encrypts a provided buffer and returns the encrypted bytes.
func (ecb) Encrypt(buf []byte, rounds int, keys []uint32) ([]byte, error) {
    var out bytes.Buffer
    err := ECB.EncryptReader(bytes.NewBuffer(buf), &out, rounds, keys)
    if err != nil {
        return nil, err
    }
    return out.Bytes(), nil
}
// EncryptUInt64 encrypts a provided uint64 and returns it.
func (ecb) EncryptUInt64(n uint64, rounds int, keys []uint32) (uint64, error) {
    buf := make([]byte, 8)
    binary.BigEndian.PutUint64(buf, n)
    buf, err := ECB.Encrypt(buf, rounds, keys)
    if err != nil {
        // Encrypt returns a nil slice on failure; converting it with
        // binary.BigEndian.Uint64 would panic, so bail out first.
        return 0, err
    }
    return binary.BigEndian.Uint64(buf), nil
}
// EncryptInt64 encrypts a provided int64 and returns it.
func (ecb) EncryptInt64(n int64, rounds int, keys []uint32) (int64, error) {
    u, err := ECB.EncryptUInt64(uint64(n), rounds, keys)
    return int64(u), err
}
// DecryptReader reads data from a reader, decrypts it 8 bytes (one 64-bit
// block) at a time, and writes the decrypted data to the writer. Returns nil
// on a clean EOF.
func (ecb) DecryptReader(r io.Reader, w io.Writer, rounds int, keys []uint32) error {
    var block [8]byte
    for {
        // readInt splits the next 8-byte block into two 32-bit halves.
        left, right, err := readInt(r, block)
        if err != nil {
            if err == io.EOF {
                return nil
            }
            return err
        }
        left, right = Decrypt(left, right, rounds, keys)
        if err := writeInt(w, block, left, right); err != nil {
            return err
        }
    }
}
// Decrypt decrypts a provided buffer and returns the decrypted bytes.
func (ecb) Decrypt(buf []byte, rounds int, keys []uint32) ([]byte, error) {
    var out bytes.Buffer
    err := ECB.DecryptReader(bytes.NewBuffer(buf), &out, rounds, keys)
    if err != nil {
        return nil, err
    }
    return out.Bytes(), nil
}
// DecryptUInt64 decrypts a provided uint64 and returns it.
func (ecb) DecryptUInt64(n uint64, rounds int, keys []uint32) (uint64, error) {
    buf := make([]byte, 8)
    binary.BigEndian.PutUint64(buf, n)
    buf, err := ECB.Decrypt(buf, rounds, keys)
    if err != nil {
        // Decrypt returns a nil slice on failure; converting it with
        // binary.BigEndian.Uint64 would panic, so bail out first.
        return 0, err
    }
    return binary.BigEndian.Uint64(buf), nil
}
// DecryptInt64 decrypts a provided int64 and returns it. The value is
// round-tripped through uint64 for the block conversion.
func (ecb) DecryptInt64(n int64, rounds int, keys []uint32) (int64, error) {
    i, err := ECB.DecryptUInt64(uint64(n), rounds, keys)
    return (int64(i)), err
} | ecb.go | 0.7324 | 0.434701 | ecb.go | starcoder |
package mlpack
/*
#cgo CFLAGS: -I./capi -Wall
#cgo LDFLAGS: -L. -lmlpack_go_cf
#include <capi/cf.h>
#include <stdlib.h>
*/
import "C"
import "gonum.org/v1/gonum/mat"
// CfOptionalParam mirrors the optional parameters of the mlpack
// "Collaborative Filtering" program; each field corresponds to a CLI
// parameter of the same (snake_cased) name. See the Cf doc comment below
// for the meaning and defaults of each field.
type CfOptionalParam struct {
    Algorithm string
    AllUserRecommendations bool
    InputModel *cfModel
    Interpolation string
    IterationOnlyTermination bool
    MaxIterations int
    MinResidue float64
    NeighborSearch string
    Neighborhood int
    Normalization string
    Query *mat.Dense
    Rank int
    Recommendations int
    Seed int
    Test *mat.Dense
    Training *mat.Dense
    Verbose bool
}
// CfOptions returns a CfOptionalParam populated with the mlpack defaults;
// Cf only forwards a parameter when its value differs from these defaults.
func CfOptions() *CfOptionalParam {
    return &CfOptionalParam{
        Algorithm: "NMF",
        AllUserRecommendations: false,
        InputModel: nil,
        Interpolation: "average",
        IterationOnlyTermination: false,
        MaxIterations: 1000,
        MinResidue: 1e-05,
        NeighborSearch: "euclidean",
        Neighborhood: 5,
        Normalization: "none",
        Query: nil,
        Rank: 0,
        Recommendations: 5,
        Seed: 0,
        Test: nil,
        Training: nil,
        Verbose: false,
    }
}
/*
This program performs collaborative filtering (CF) on the given dataset. Given
a list of user, item and preferences (the "Training" parameter), the program
will perform a matrix decomposition and then can perform a series of actions
related to collaborative filtering. Alternately, the program can load an
existing saved CF model with the "InputModel" parameter and then use that
model to provide recommendations or predict values.
The input matrix should be a 3-dimensional matrix of ratings, where the first
dimension is the user, the second dimension is the item, and the third
dimension is that user's rating of that item. Both the users and items should
be numeric indices, not names. The indices are assumed to start from 0.
A set of query users for which recommendations can be generated may be
specified with the "Query" parameter; alternately, recommendations may be
generated for every user in the dataset by specifying the
"AllUserRecommendations" parameter. In addition, the number of
recommendations per user to generate can be specified with the
"Recommendations" parameter, and the number of similar users (the size of the
neighborhood) to be considered when generating recommendations can be
specified with the "Neighborhood" parameter.
For performing the matrix decomposition, the following optimization algorithms
can be specified via the "Algorithm" parameter:
- 'RegSVD' -- Regularized SVD using a SGD optimizer
- 'NMF' -- Non-negative matrix factorization with alternating least squares
update rules
- 'BatchSVD' -- SVD batch learning
- 'SVDIncompleteIncremental' -- SVD incomplete incremental learning
- 'SVDCompleteIncremental' -- SVD complete incremental learning
- 'BiasSVD' -- Bias SVD using a SGD optimizer
- 'SVDPP' -- SVD++ using a SGD optimizer
The following neighbor search algorithms can be specified via the
"NeighborSearch" parameter:
- 'cosine' -- Cosine Search Algorithm
- 'euclidean' -- Euclidean Search Algorithm
- 'pearson' -- Pearson Search Algorithm
The following weight interpolation algorithms can be specified via the
"Interpolation" parameter:
- 'average' -- Average Interpolation Algorithm
- 'regression' -- Regression Interpolation Algorithm
- 'similarity' -- Similarity Interpolation Algorithm
The following ranking normalization algorithms can be specified via the
"Normalization" parameter:
- 'none' -- No Normalization
- 'item_mean' -- Item Mean Normalization
- 'overall_mean' -- Overall Mean Normalization
- 'user_mean' -- User Mean Normalization
- 'z_score' -- Z-Score Normalization
A trained model may be saved to with the "OutputModel" output parameter.
To train a CF model on a dataset training_set using NMF for decomposition and
saving the trained model to model, one could call:
// Initialize optional parameters for Cf().
param := mlpack.CfOptions()
param.Training = training_set
param.Algorithm = "NMF"
_, model := mlpack.Cf(param)
Then, to use this model to generate recommendations for the list of users in
the query set users, storing 5 recommendations in recommendations, one could
call
// Initialize optional parameters for Cf().
param := mlpack.CfOptions()
param.InputModel = &model
param.Query = users
param.Recommendations = 5
recommendations, _ := mlpack.Cf(param)
Input parameters:
- Algorithm (string): Algorithm used for matrix factorization. Default
value 'NMF'.
- AllUserRecommendations (bool): Generate recommendations for all
users.
- InputModel (cfModel): Trained CF model to load.
- Interpolation (string): Algorithm used for weight interpolation.
Default value 'average'.
- IterationOnlyTermination (bool): Terminate only when the maximum
number of iterations is reached.
- MaxIterations (int): Maximum number of iterations. If set to zero,
there is no limit on the number of iterations. Default value 1000.
- MinResidue (float64): Residue required to terminate the factorization
(lower values generally mean better fits). Default value 1e-05.
- NeighborSearch (string): Algorithm used for neighbor search. Default
value 'euclidean'.
- Neighborhood (int): Size of the neighborhood of similar users to
consider for each query user. Default value 5.
- Normalization (string): Normalization performed on the ratings.
Default value 'none'.
- Query (mat.Dense): List of query users for which recommendations
should be generated.
- Rank (int): Rank of decomposed matrices (if 0, a heuristic is used to
estimate the rank). Default value 0.
- Recommendations (int): Number of recommendations to generate for each
query user. Default value 5.
- Seed (int): Set the random seed (0 uses std::time(NULL)). Default
value 0.
- Test (mat.Dense): Test set to calculate RMSE on.
- Training (mat.Dense): Input dataset to perform CF on.
- Verbose (bool): Display informational messages and the full list of
parameters and timers at the end of execution.
Output parameters:
- output (mat.Dense): Matrix that will store output recommendations.
- outputModel (cfModel): Output for trained CF model.
*/
func Cf(param *CfOptionalParam) (*mat.Dense, cfModel) {
    // Reset mlpack's global IO state before configuring this invocation.
    resetTimers()
    enableTimers()
    disableBacktrace()
    disableVerbose()
    restoreSettings("Collaborative Filtering")
    // Forward each option to mlpack only when it differs from its default
    // (see CfOptions), marking it as "passed" so the C++ side uses it.
    // Detect if the parameter was passed; set if so.
    if param.Algorithm != "NMF" {
        setParamString("algorithm", param.Algorithm)
        setPassed("algorithm")
    }
    // Detect if the parameter was passed; set if so.
    if param.AllUserRecommendations != false {
        setParamBool("all_user_recommendations", param.AllUserRecommendations)
        setPassed("all_user_recommendations")
    }
    // Detect if the parameter was passed; set if so.
    if param.InputModel != nil {
        setCFModel("input_model", param.InputModel)
        setPassed("input_model")
    }
    // Detect if the parameter was passed; set if so.
    if param.Interpolation != "average" {
        setParamString("interpolation", param.Interpolation)
        setPassed("interpolation")
    }
    // Detect if the parameter was passed; set if so.
    if param.IterationOnlyTermination != false {
        setParamBool("iteration_only_termination", param.IterationOnlyTermination)
        setPassed("iteration_only_termination")
    }
    // Detect if the parameter was passed; set if so.
    if param.MaxIterations != 1000 {
        setParamInt("max_iterations", param.MaxIterations)
        setPassed("max_iterations")
    }
    // Detect if the parameter was passed; set if so.
    if param.MinResidue != 1e-05 {
        setParamDouble("min_residue", param.MinResidue)
        setPassed("min_residue")
    }
    // Detect if the parameter was passed; set if so.
    if param.NeighborSearch != "euclidean" {
        setParamString("neighbor_search", param.NeighborSearch)
        setPassed("neighbor_search")
    }
    // Detect if the parameter was passed; set if so.
    if param.Neighborhood != 5 {
        setParamInt("neighborhood", param.Neighborhood)
        setPassed("neighborhood")
    }
    // Detect if the parameter was passed; set if so.
    if param.Normalization != "none" {
        setParamString("normalization", param.Normalization)
        setPassed("normalization")
    }
    // Detect if the parameter was passed; set if so.
    if param.Query != nil {
        gonumToArmaUmat("query", param.Query)
        setPassed("query")
    }
    // Detect if the parameter was passed; set if so.
    if param.Rank != 0 {
        setParamInt("rank", param.Rank)
        setPassed("rank")
    }
    // Detect if the parameter was passed; set if so.
    if param.Recommendations != 5 {
        setParamInt("recommendations", param.Recommendations)
        setPassed("recommendations")
    }
    // Detect if the parameter was passed; set if so.
    if param.Seed != 0 {
        setParamInt("seed", param.Seed)
        setPassed("seed")
    }
    // Detect if the parameter was passed; set if so.
    if param.Test != nil {
        gonumToArmaMat("test", param.Test)
        setPassed("test")
    }
    // Detect if the parameter was passed; set if so.
    if param.Training != nil {
        gonumToArmaMat("training", param.Training)
        setPassed("training")
    }
    // Detect if the parameter was passed; set if so.
    if param.Verbose != false {
        setParamBool("verbose", param.Verbose)
        setPassed("verbose")
        enableVerbose()
    }
    // Mark all output options as passed.
    setPassed("output")
    setPassed("output_model")
    // Call the mlpack program.
    C.mlpackCf()
    // Initialize result variable and get output.
    var outputPtr mlpackArma
    output := outputPtr.armaToGonumUmat("output")
    var outputModel cfModel
    outputModel.getCFModel("output_model")
    // Clear settings.
    clearSettings()
    // Return output(s).
    return output, outputModel
} | cf.go | 0.663887 | 0.460956 | cf.go | starcoder |
package account
import (
"strconv"
)
// Account describes a Deribit account summary. The trailing "// *string" /
// "// *float64" comments record the pointer types used by an earlier
// (or generated) version of this struct; the fields are now plain values.
type Account struct {
    // The account's available funds
    // Required: true
    AvailableFunds float64
    // The account's available to withdrawal funds
    // Required: true
    AvailableWithdrawalFunds float64
    // The account's balance
    // Required: true
    Balance float64
    // The selected currency
    // Required: true
    Currency string // *string
    // The sum of position deltas (currently bugged : miscomputed by Deribit)
    // Required: true
    DeltaTotal float64
    // The deposit address for the account (if available)
    DepositAddress string
    // User email (available when parameter `extended` = `true`)
    // Required: true
    Email string //*string
    // The account's current equity
    // Required: true
    Equity float64 // *float64 `json:"equity"`
    // Futures profit and Loss
    // Required: true
    FuturesPl float64 // *float64
    // Futures session realized profit and Loss
    // Required: true
    FuturesSessionRpl float64 // *float64
    // Futures session unrealized profit and Loss
    // Required: true
    FuturesSessionUpl float64 // *float64
    // Account id (available when parameter `extended` = `true`)
    // Required: true
    ID int64 // *int64
    // The account's initial margin
    // Required: true
    InitialMargin float64 // *float64
    // The maintenance margin.
    // Required: true
    MaintenanceMargin float64 //*float64
    // The account's margin balance
    MarginBalance float64
    // Options summary delta
    // Required: true
    OptionsDelta float64 // *float64
    // Options summary gamma
    // Required: true
    OptionsGamma float64 // *float64
    // Options profit and Loss
    // Required: true
    OptionsPl float64 // *float64
    // Options session realized profit and Loss
    // Required: true
    OptionsSessionRpl float64 //*float64
    // Options session unrealized profit and Loss
    // Required: true
    OptionsSessionUpl float64 //*float64
    // Options summary theta
    // Required: true
    OptionsTheta float64 // *float64
    // Options summary vega
    // Required: true
    OptionsVega float64 // *float64 `
    // `true` when portfolio margining is enabled for user
    PortfolioMarginingEnabled bool
    // Projected initial margin (for portfolio margining users)
    ProjectedInitialMargin float64
    // Projected maintenance margin (for portfolio margining users)
    ProjectedMaintenanceMargin float64
    // Session funding
    // Required: true
    SessionFunding float64 // *float64
    // Session realized profit and loss
    // Required: true
    SessionRpl float64 // *float64
    // Session unrealized profit and loss
    // Required: true
    SessionUpl float64 //*float64
    // System generated user nickname (available when parameter `extended` = `true`)
    // Required: true
    SystemName string // *string
    // Whether two factor authentication is enabled (available when parameter `extended` = `true`)
    // Required: true
    TfaEnabled bool // *bool
    // Profit and loss
    // Required: true
    TotalPl float64 // *float64
    // Account type (available when parameter `extended` = `true`)
    // Required: true
    // Enum: [main subaccount]
    Type string // *string
    // Account name (given by user) (available when parameter `extended` = `true`)
    // Required: true
    Username string // *string
}
// Sprintf renders a human-readable, multi-line summary of the account's
// main figures, each formatted with two decimal places.
func (pAccount Account) Sprintf() string {
    // Format with bitSize 64: the fields are float64, and the previous
    // bitSize of 32 rounded every value through float32 before printing.
    f := func(v float64) string {
        return strconv.FormatFloat(v, 'f', 2, 64)
    }
    var output string
    output = "Currency: " + pAccount.Currency + "\n"
    output += "Available funds:" + f(pAccount.AvailableFunds) + "\n"
    output += "Balance: " + f(pAccount.Balance) + "\n"
    output += "Equity : " + f(pAccount.Equity) + "\n"
    output += "Delta Total: " + f(pAccount.DeltaTotal) + "\n"
    output += "Options Delta: " + f(pAccount.OptionsDelta) + "\n"
    output += "Options Gamma: " + f(pAccount.OptionsGamma) + "\n"
    output += "Options Vega: " + f(pAccount.OptionsVega) + "\n"
    output += "Options Theta: " + f(pAccount.OptionsTheta) + "\n"
    // fmt.Printf("Session funding: %f\n", (*pAccount).SessionFunding)
    output += "Futures PnL: " + f(pAccount.FuturesPl) + "\n"
    output += "Options PnL: " + f(pAccount.OptionsPl) + "\n"
    output += "Total PnL: " + f(pAccount.TotalPl) + "\n"
    return output
} | v3/structures/account/account.go | 0.575827 | 0.40642 | account.go | starcoder |
package xmath
import (
"math"
"time"
)
// Set is a streaming collection of statistical properties of a set of
// numbers. Mean and variance are maintained incrementally (Welford-style),
// so values never need to be stored.
type Set struct {
    count          int
    first, last    float64
    min, max       float64
    mean, dSquared float64
}

// NewSet creates a new, empty Set.
func NewSet() Set {
    return Set{
        // Sentinels chosen so the first Push always wins the comparisons.
        // max previously stayed at its zero value, so a set of all-negative
        // numbers reported a max of 0.
        min: math.MaxFloat64,
        max: -math.MaxFloat64,
    }
}

// Push adds another element to the set, updating all running statistics.
func (s *Set) Push(v float64) {
    s.count++
    diff := (v - s.mean) / float64(s.count)
    mean := s.mean + diff
    squaredDiff := (v - mean) * (v - s.mean)
    s.dSquared += squaredDiff
    s.mean = mean
    if s.count == 1 {
        // Track the first value by count instead of the old zero sentinel
        // (s.first == 0.0), which mis-recorded the first value whenever it
        // was exactly 0.
        s.first = v
    }
    if s.min > v {
        s.min = v
    }
    if s.max < v {
        s.max = v
    }
    s.last = v
}

// Avg returns the average value of the set.
func (s Set) Avg() float64 {
    return s.mean
}

// Diff returns the difference between the last and the first pushed value.
func (s Set) Diff() float64 {
    return s.last - s.first
}

// Variance is the (population) mathematical variance of the set.
func (s Set) Variance() float64 {
    return s.dSquared / float64(s.count)
}

// StDev is the (population) standard deviation of the set.
func (s Set) StDev() float64 {
    return math.Sqrt(s.Variance())
}

// SampleVariance is the sample variance of the set (n-1 denominator).
func (s Set) SampleVariance() float64 {
    return s.dSquared / float64(s.count-1)
}

// SampleStdev is the sample standard deviation of the set.
func (s Set) SampleStdev() float64 {
    return math.Sqrt(s.SampleVariance())
}
// Stats gathers stats about a set of floats, embedding Set and additionally
// counting the number of Inc calls.
type Stats struct {
    Iteration int
    Set
}
// NewStats creates a new stats struct.
// It allows to pass already gathered elements; note that these seed values
// are pushed into the Set without advancing Iteration.
func NewStats(vv ...float64) *Stats {
    stats := &Stats{
        Iteration: 0,
        Set:       NewSet(),
    }
    for _, v := range vv {
        stats.Push(v)
    }
    return stats
}
// Inc adds another stats element to the set and advances the iteration count.
func (s *Stats) Inc(v float64) {
    s.Iteration++
    s.Set.Push(v)
}
// Bucket groups together values sharing the same index.
// It keeps track of statistical quantities relating to the collection
// by using streaming techniques (see Set).
type Bucket struct {
    stats Set
    index int64
}
// NewBucket creates a new bucket for the given index.
func NewBucket(id int64) Bucket {
    return Bucket{
        stats: NewSet(),
        index: id,
    }
}
// Push adds an element to the bucket for the given index. It reports false
// (and ignores the value) when the index does not match the bucket's own.
func (b *Bucket) Push(v float64, index int64) bool {
    if index != b.index {
        return false
    }
    b.stats.Push(v)
    return true
}
// Size returns the number of elements in the bucket.
func (b Bucket) Size() int {
    return b.stats.count
}
// Stats returns the current stats for the bucket.
func (b Bucket) Stats() Set {
    return b.stats
}
// Index returns the bucket index.
func (b Bucket) Index() int64 {
    return b.index
}
// Window is a helper struct allowing to retrieve buckets of stats from a streaming data set.
type Window struct {
size int64
lastIndex int64
last *Bucket
current Bucket
}
// NewWindow creates a new window of the given window size e.g. the index range for each bucket.
func NewWindow(size int64) *Window {
return &Window{
size: size,
}
}
// Push adds an element to a window at the given index.
// It returns the index of the bucket that was being tracked before this call,
// and whether a bucket was closed (made available via Get) by this push.
//
// Panics if a previously closed bucket has not been consumed with Get yet.
func (w *Window) Push(index int64, value float64) (int64, bool) {
	ready := false
	lastIndex := w.lastIndex
	if index == 0 {
		// new start ...
		w.lastIndex = index
		w.current = NewBucket(index)
	} else if index >= w.lastIndex+w.size {
		// start a new one
		// but first close the last one
		if w.last != nil {
			panic("last bucket has not been consumed. Cant create a new one!")
		}
		if w.current.Size() > 0 {
			// park the finished bucket for retrieval via Get
			tmpBucket := w.current
			w.last = &tmpBucket
			ready = true
		}
		w.current = NewBucket(index)
		w.lastIndex = index
	}
	// Values are pushed with w.lastIndex rather than the caller's index, so
	// every index inside [lastIndex, lastIndex+size) lands in the current bucket.
	w.current.Push(value, w.lastIndex)
	return lastIndex, ready
}
// Current returns the current index the window accumulates data on.
func (w *Window) Current() int64 {
return w.lastIndex
}
// Next is the next index at which a new bucket will be created
func (w *Window) Next() int64 {
return w.lastIndex + w.size
}
// Get returns the last complete Bucket and clears it, allowing Push to close
// the next bucket.
//
// It must only be called after Push reported a closed window; calling it with
// no bucket ready dereferences a nil pointer and panics.
func (w *Window) Get() Bucket {
	tmpBucket := *w.last
	w.last = nil
	return tmpBucket
}
// TimeWindow is a window indexed by the current time.
type TimeWindow struct {
index int64
duration int64
window *Window
}
// NewTimeWindow creates a new TimeWindow with the given duration.
//
// NOTE(review): durations shorter than one second truncate to d == 0, which
// makes Push divide by zero — confirm callers always pass >= 1s.
func NewTimeWindow(duration time.Duration) *TimeWindow {
	d := int64(duration.Seconds())
	return &TimeWindow{
		duration: d,
		// indexes are already scaled by duration, so each bucket spans one index
		window: NewWindow(1),
	}
}
// Push adds an element to the time window, bucketing by t truncated to the
// window duration. It returns the closed bucket, if this push completed one.
func (tw *TimeWindow) Push(v float64, t time.Time) (*Bucket, bool) {
	index := t.Unix() / tw.duration
	// index is shadowed on purpose: after this call it holds the index of the
	// previously tracked bucket, which is the one that just closed.
	index, closed := tw.window.Push(index, v)
	if closed {
		tw.index = index
		bucket := tw.window.Get()
		return &bucket, true
	}
	return nil, false
}
// Next returns the timestamp at which the window that lies `iterations`
// windows ahead of the last closed one will start.
func (tw *TimeWindow) Next(iterations int64) time.Time {
	secs := tw.index + tw.duration*(iterations+1)
	// The original multiplied by int64(time.Second.Seconds()), which is 1.
	return time.Unix(secs, 0)
}
// SizeWindow is a window indexed by a running element count rather than time.
type SizeWindow struct {
	// i is the running index of the next element to be pushed.
	i int64
	// window is the underlying fixed-size Window.
	window *Window
}
// NewSizeWindow creates a new SizeWindow with the given size, i.e. the number
// of elements gathered per bucket.
func NewSizeWindow(size int) *SizeWindow {
	return &SizeWindow{
		window: NewWindow(int64(size)),
	}
}
// Push adds an element to the time window.
func (sw *SizeWindow) Push(v float64) (*Bucket, bool) {
iter := sw.i
sw.window.Push(iter, v)
if int(sw.window.size) > sw.window.Get().Size() {
b := sw.window.Get()
sw.i++
return &b, true
}
return nil, false
} | oremi/vendor/github.com/drakos74/go-ex-machina/xmath/stats.go | 0.879522 | 0.492798 | stats.go | starcoder |
package collection
import (
"constraints"
)
// Graph implements a directed graph.
type Graph[K comparable, V any, W constraints.Unsigned] struct {
edges map[K]map[K]edge[V, W]
}
// SetEdge inserts an edge into the graph. If an edge already exists between
// the nodes, that edge is overwritten.
func (g *Graph[K, V, W]) SetEdge(from, to K, value V, weight W) {
if g.edges == nil {
g.edges = make(map[K]map[K]edge[V, W], 1)
}
outgoing, ok := g.edges[from]
if !ok {
outgoing = make(map[K]edge[V, W], 1)
g.edges[from] = outgoing
}
outgoing[to] = edge[V, W]{value, weight}
}
// ShortestPath returns the shortest path between two nodes. Returns both the
// node path and the edge path. Returns nil if the goal node can't be reached
// from the start node. If the start node and the goal node are the same node,
// that node is returned and the edge path is nil.
//
// This is Dijkstra's algorithm with a lazy priority queue: a node may be
// enqueued several times, and stale entries are skipped on pop. The previous
// implementation marked nodes "visited" on first discovery, which prevented
// relaxing them when a shorter path was found later and could return
// non-shortest paths.
func (g *Graph[K, V, W]) ShortestPath(start, goal K) (nodes []K, edges []V) {
	dist := map[K]W{start: 0} // lowest distance per node seen so far
	origin := map[K]K{}       // previous node for lowest distance seen so far
	done := map[K]struct{}{}  // nodes whose shortest distance is final
	var queue Heap[W, K]
	queue.Push(0, start)
	for {
		total, node, ok := queue.Pop()
		if !ok {
			// Queue exhausted without reaching the goal.
			return nil, nil
		}
		if node == goal {
			break
		}
		if _, finished := done[node]; finished {
			continue // stale queue entry
		}
		done[node] = struct{}{}
		for next, e := range g.edges[node] {
			newTotal := total + e.weight
			if oldTotal, seen := dist[next]; !seen || newTotal < oldTotal {
				dist[next] = newTotal
				origin[next] = node
				queue.Push(newTotal, next)
			}
		}
	}
	// Found the goal. Reconstruct the node path by walking predecessors back.
	nodes = append(nodes, goal)
	for {
		prev, ok := origin[nodes[len(nodes)-1]]
		if !ok {
			break
		}
		nodes = append(nodes, prev)
	}
	// Reverse into start -> goal order.
	for i, j := 0, len(nodes)-1; i < j; i, j = i+1, j-1 {
		nodes[i], nodes[j] = nodes[j], nodes[i]
	}
	// Build the edge path.
	for i := 1; i < len(nodes); i++ {
		outgoing := g.edges[nodes[i-1]]
		edges = append(edges, outgoing[nodes[i]].value)
	}
	return nodes, edges
}
type edge[V any, W constraints.Ordered] struct {
value V
weight W
} | graph.go | 0.73678 | 0.478468 | graph.go | starcoder |
package main
import "fmt"
type matrix [][]float64
func zero(n int) matrix {
r := make([][]float64, n)
a := make([]float64, n*n)
for i := range r {
r[i] = a[n*i : n*(i+1)]
}
return r
}
func eye(n int) matrix {
r := zero(n)
for i := range r {
r[i][i] = 1
}
return r
}
func (m matrix) print(label string) {
if label > "" {
fmt.Printf("%s:\n", label)
}
for _, r := range m {
for _, e := range r {
fmt.Printf(" %9.5f", e)
}
fmt.Println()
}
}
// pivotize computes the partial-pivoting permutation matrix for a: for each
// column j, the row holding the largest element at or below the diagonal is
// swapped into row j of the identity matrix.
//
// NOTE(review): the comparison uses the signed value a[i][j] > max rather
// than the usual absolute-value pivot rule — confirm intended.
func (a matrix) pivotize() matrix {
	p := eye(len(a))
	for j, r := range a {
		max := r[j]
		row := j
		for i := j; i < len(a); i++ {
			if a[i][j] > max {
				max = a[i][j]
				row = i
			}
		}
		if j != row {
			// swap rows
			p[j], p[row] = p[row], p[j]
		}
	}
	return p
}
func (m1 matrix) mul(m2 matrix) matrix {
r := zero(len(m1))
for i, r1 := range m1 {
for j := range m2 {
for k := range m1 {
r[i][j] += r1[k] * m2[k][j]
}
}
}
return r
}
// lu performs Doolittle LU decomposition with partial pivoting. It returns a
// lower-triangular l with unit diagonal, an upper-triangular u, and the
// permutation matrix p such that p*a == l*u.
func (a matrix) lu() (l, u, p matrix) {
	l = zero(len(a))
	u = zero(len(a))
	p = a.pivotize()
	// Decompose the row-permuted matrix.
	a = p.mul(a)
	for j := range a {
		l[j][j] = 1
		// Fill column j of u (rows 0..j).
		for i := 0; i <= j; i++ {
			sum := 0.
			for k := 0; k < i; k++ {
				sum += u[k][j] * l[i][k]
			}
			u[i][j] = a[i][j] - sum
		}
		// Fill column j of l (rows j..n-1), dividing by the pivot u[j][j].
		for i := j; i < len(a); i++ {
			sum := 0.
			for k := 0; k < j; k++ {
				sum += u[k][j] * l[i][k]
			}
			l[i][j] = (a[i][j] - sum) / u[j][j]
		}
	}
	return
}
func main() {
showLU(matrix{
{1, 3, 5},
{2, 4, 7},
{1, 1, 0}})
showLU(matrix{
{11, 9, 24, 2},
{1, 5, 2, 6},
{3, 17, 18, 1},
{2, 5, 7, 1}})
}
func showLU(a matrix) {
a.print("\na")
l, u, p := a.lu()
l.print("l")
u.print("u")
p.print("p")
} | lang/Go/lu-decomposition-1.go | 0.648466 | 0.469216 | lu-decomposition-1.go | starcoder |
package functiongrapher
import (
"math"
"github.com/benoitkugler/maths-online/maths/expression"
"github.com/benoitkugler/maths-online/maths/repere"
)
type BezierCurve struct {
P0, P1, P2 repere.Coord `dart-extern:"repere.gen.dart"`
}
func (seg segment) toCurve() BezierCurve {
p1 := controlFromDerivatives(seg.from, seg.to, seg.dFrom, seg.dTo)
return BezierCurve{
P0: seg.from,
P1: p1,
P2: seg.to,
}
}
// compute the control point matching the given derivatives,
// which is the intersection between
// the tangents at from and to
func controlFromDerivatives(from, to repere.Coord, dFrom, dTo float64) repere.Coord {
// special case when df1 = df2
if math.Abs(dFrom-dTo) < 0.1 {
return repere.Coord{X: (from.X + to.X) / 2, Y: (from.Y + to.Y) / 2}
}
xIntersec := (to.Y - from.Y + dFrom*from.X - dTo*to.X) / (dFrom - dTo)
yIntersec := dFrom*(xIntersec-from.X) + from.Y
return repere.Coord{X: xIntersec, Y: yIntersec}
}
// computeDf approximates the derivative f'(x) with a one-sided (forward)
// finite difference of step epsilon.
func computeDf(f func(float64) float64, x, epsilon float64) float64 {
	y0, y1 := f(x), f(x+epsilon)
	return (y1 - y0) / epsilon
}
type segment struct {
from, to repere.Coord
dFrom, dTo float64
}
// expr must be an expression containing only the variable `variable`
func newSegment(fn expression.FunctionExpr, from, to float64) segment {
f := fn.Closure()
yFrom := f(from)
yTo := f(to)
// compute derivative with finite differences
epsilon := (to - from) / 100_000
dFrom := computeDf(f, from, epsilon)
dTo := computeDf(f, to, epsilon)
return segment{
from: repere.Coord{X: from, Y: yFrom},
to: repere.Coord{X: to, Y: yTo},
dFrom: dFrom,
dTo: dTo,
}
}
type FunctionGraph struct {
Decoration FunctionDecoration
Segments []BezierCurve
}
func newFunctionGraph(fn expression.FunctionDefinition) []BezierCurve {
step := (fn.To - fn.From) / nbStep
curves := make([]BezierCurve, nbStep)
for i := range curves {
seg := newSegment(fn.FunctionExpr, fn.From+float64(i)*step, fn.From+float64(i+1)*step)
curves[i] = seg.toCurve()
}
return curves
}
type FunctionDecoration struct {
Label string
Color string
}
type FunctionsGraph struct {
Functions []FunctionGraph
Bounds repere.RepereBounds `dart-extern:"repere.gen.dart"`
}
// nbStep is the number of segments used when converting
// a function curve to Bezier curves.
const nbStep = 100
// Graph splits the curve of each function on the definition domain in small chunks on which it is approximated
// by Bezier curves.
// It will panic if the expression are not valid or evaluable given their input variable.
func NewFunctionGraph(functions []expression.FunctionDefinition, decorations []FunctionDecoration) FunctionsGraph {
var allCurves []BezierCurve
out := FunctionsGraph{
Functions: make([]FunctionGraph, len(functions)),
}
for i, fn := range functions {
out.Functions[i].Segments = newFunctionGraph(fn)
out.Functions[i].Decoration = decorations[i]
allCurves = append(allCurves, out.Functions[i].Segments...)
}
out.Bounds = boundingBox(allCurves)
return out
}
func controlFromPoints(from, to repere.Coord, firstHalf bool) repere.Coord {
var dFrom, dTo float64
if from.Y > to.Y { // decreasing
if firstHalf {
dFrom, dTo = 0, -4
} else {
dFrom, dTo = -4, 0
}
} else { // increasing
if firstHalf {
dFrom, dTo = 0, +4
} else {
dFrom, dTo = +4, 0
}
}
return controlFromDerivatives(from, to, dFrom, dTo)
}
// GraphFromVariations builds one possible representation for the given variations
func GraphFromVariations(dec FunctionDecoration, xs []float64, ys []float64) FunctionsGraph {
// each segment is drawn with two quadratic curves
var curves []BezierCurve
for i := range xs {
if i >= len(xs)-1 {
break
}
x1, x2 := xs[i], xs[i+1]
y1, y2 := ys[i], ys[i+1]
xMiddle, yMiddle := (x1+x2)/2, (y1+y2)/2
// first half
p0 := repere.Coord{X: x1, Y: y1}
p2 := repere.Coord{X: xMiddle, Y: yMiddle}
p1 := controlFromPoints(p0, p2, true)
curves = append(curves, BezierCurve{p0, p1, p2})
// second half
p0 = repere.Coord{X: xMiddle, Y: yMiddle}
p2 = repere.Coord{X: x2, Y: y2}
p1 = controlFromPoints(p0, p2, false)
curves = append(curves, BezierCurve{p0, p1, p2})
}
return FunctionsGraph{
Functions: []FunctionGraph{{Decoration: dec, Segments: curves}},
Bounds: boundingBox(curves),
}
}
// BoundsFromExpression returns the f(x) and f'(x) values for x in the grid
func BoundsFromExpression(fn expression.FunctionExpr, grid []int) (bounds repere.RepereBounds, fxs []int, dfxs []float64) {
f := fn.Closure()
fxs = make([]int, len(grid))
dfxs = make([]float64, len(grid))
// always add the origin
minX, maxX, minY, maxY := -1., 1., -1., 1.
for i, xValue := range grid {
x := float64(xValue)
y := f(x)
fxs[i] = int(y)
dfxs[i] = computeDf(f, x, 1e-5)
if x < minX {
minX = x
}
if x > maxX {
maxX = x
}
if y < minY {
minY = y
}
if y > maxY {
maxY = y
}
}
bounds = boundsFromBoudingBox(minX, minY, maxX, maxY)
return
} | server/src/maths/functiongrapher/grapher.go | 0.807688 | 0.525551 | grapher.go | starcoder |
package eighttree
import (
"errors"
"fmt"
)
var (
ErrInvalidTree = errors.New("invalid tree")
)
// InternalCountFromLeaves returns the number of internal nodes needed to store
// the given number of leaf nodes in a complete 8-tree representation.
//
// A single leaf (or fewer) needs no parent. Past that, the first internal node
// holds up to 8 leaves, and every promotion of a leaf to an internal node adds
// capacity for 7 more — so one extra internal node per 7 leaves beyond two.
func InternalCountFromLeaves(leafNodes int) int {
	if leafNodes < 2 {
		return 0
	}
	promotions := (leafNodes - 2) / 7
	return promotions + 1
}
// internalCountFromTotal returns the number of internal nodes in a tree with the given number of
// total nodes.
func InternalCountFromTotal(totalNodes int) (int, error) {
if totalNodes == 1 {
// One leaf needs no parent.
return 0, nil
}
// There are a number of invalid total amounts of leaves: 2, 10, 18, ..., 8n + 2.
// This is because promoting a leaf to an internal node gives it two children.
if (totalNodes < 1) || ((totalNodes-2)%8 == 0) {
return 0, fmt.Errorf("%w: %d nodes", ErrInvalidTree, totalNodes)
}
// 3-9 total nodes have 1 internal node (A with 1-8 leaves).
// 11-17 total have 2 internal (A with B and 7 leaves; B with 2-8 leaves).
// 19-25 total have 3 internal (A with B, C, and 6 leaves; B: 8 leaves; C: 2-8 leaves).
// 73 total have 9 internal nodes (A with B-I which each have 8 leaves.
// 75 total have 10.
return ((totalNodes - 3) / 8) + 1, nil
}
// ParentIndex returns the index into the complete array representation of the given node's parent.
func ParentIndex(child int) int {
return (child - 1) / 8
}
// ChildIndex returns the index into the complete array representation of the nth child node.
func ChildIndex(parent, n int) int {
return (8 * parent) + n + 1
} | pkg/eighttree/eighttree.go | 0.746878 | 0.424472 | eighttree.go | starcoder |
package main
import (
"errors"
"fmt"
)
const (
opening = iota
endgame
pawnPhase = 1 << iota
bishopPhase
rookPhase
queenPhase
knightPhase = bishopPhase
totalPhase = 2 * (queenPhase + 2*rookPhase + 2*bishopPhase + 2*knightPhase + 8*pawnPhase)
)
// errInsufficient is returned by Eval when neither player has sufficient material
// to deliver checkmate by any sequence of legal moves.
var errInsufficient = errors.New("insufficient material")
// Score represents the engine's evaluation of a Position.
type Score struct {
	// n is the engine's evaluation of a non-terminal Position in centipawns.
	n int
	// err describes a terminal condition if one exists.
	err error
}
// String returns a string representation of s.
func (s Score) String() string {
if s.err != nil {
return s.err.Error()
}
return fmt.Sprintf("%.2f", float64(s.n)/100)
}
// Less reports whether a is lower than b.
// Checkmates in an even number of plies are lower than any non-mate score,
// checkmates in an odd number of plies are higher than any non-mate score,
// and checkmates in fewer plies are more extremal than checkmates in more plies.
func Less(a, b Score) bool {
	aply, ach := a.err.(checkmateError)
	bply, bch := b.err.(checkmateError)
	switch {
	case ach && bch:
		// Both scores are mates; an odd ply count is a win for the side to move.
		switch awin, bwin := aply&1 != 0, bply&1 != 0; {
		case awin && bwin:
			// Both winning: the faster mate (fewer plies) ranks higher.
			return aply > bply
		case !awin && !bwin:
			// Both losing: the slower mate (more plies) ranks higher.
			return aply < bply
		default:
			// A winning mate always outranks a losing one.
			return bwin
		}
	case ach:
		// Only a is a mate: it is lower exactly when it is a losing (even-ply) mate.
		return aply&1 == 0
	case bch:
		// Only b is a mate: a is lower exactly when b is a winning (odd-ply) mate.
		return bply&1 != 0
	}
	return a.n < b.n
}
// Abs represents the engine's evaluation relative to White.
type Abs Score
// Rel returns the Rel of s with respect to c.
// This is equal to s for White and -s for Black.
func (s Abs) Rel(c Color) Rel {
if c == White {
return Rel(s)
}
return Rel{-s.n, s.err}
}
// String returns a string representation of s.
func (s Abs) String() string { return Score(s).String() }
// Rel represents the engine's evaluation relative to the side to move.
type Rel Score
// Abs returns the Abs of s relative to White, where s is with respect to c.
// This is equal to s for White and -s for Black.
func (s Rel) Abs(c Color) Abs {
if c == White {
return Abs(s)
}
return Abs{-s.n, s.err}
}
// String returns a string representation of s.
func (s Rel) String() string { return Score(s).String() }
// relcent represents the numerical component of an evaluation in relative centipawns.
type relcent int
// pieceEval represents the static evaluation of each type of Piece.
var pieceEval = [6][2]relcent{
// opening, endgame
{},
{100, 100},
{320, 300},
{315, 335},
{500, 525},
{900, 900},
}
// pieceSquare is a slice of relcents modifying the evaluation of a Piece depending on its Square.
type pieceSquare [64]relcent
// ps provides piece-square tables for each Piece of each Color.
// Values are based on <NAME>'s "Unified Evaluation" test tournament tables.
var ps = [2][6]pieceSquare{
// Generate White tables in init
{},
{
{},
// Black pawn
{
0, 0, 0, 0, 0, 0, 0, 0,
50, 50, 50, 50, 50, 50, 50, 50,
10, 10, 20, 30, 30, 20, 10, 10,
5, 5, 10, 25, 25, 10, 5, 5,
0, 0, 0, 20, 20, 0, 0, 0,
5, -5, -10, 0, 0, -10, -5, 5,
5, 10, 10, -20, -20, 10, 10, 5,
0, 0, 0, 0, 0, 0, 0, 0,
},
// Black knight
{
-50, -40, -30, -30, -30, -30, -40, -50,
-40, -20, 0, 5, 5, 0, -20, -40,
-30, 0, 15, 20, 20, 15, 0, -30,
-30, 5, 20, 25, 25, 20, 5, -30,
-30, 0, 15, 20, 20, 15, 0, -30,
-30, 0, 10, 15, 15, 10, 0, -30,
-40, -20, 0, 5, 5, 0, -20, -40,
-50, -40, -30, -30, -30, -30, -40, -50,
},
// Black bishop
{
-20, -10, -10, -10, -10, -10, -10, -20,
-10, 0, 0, 0, 0, 0, 0, -10,
-10, 0, 5, 5, 5, 5, 0, -10,
-10, 0, 5, 10, 10, 5, 0, -10,
-10, 0, 5, 10, 10, 5, 0, -10,
-10, 0, 5, 5, 5, 5, 0, -10,
-10, 5, 0, 0, 0, 0, 5, -10,
-20, -10, -10, -10, -10, -10, -10, -20,
},
// Black rook
{
0, 0, 0, 0, 0, 0, 0, 0,
5, 10, 10, 10, 10, 10, 10, 5,
-5, 0, 0, 0, 0, 0, 0, -5,
-5, 0, 0, 0, 0, 0, 0, -5,
-5, 0, 0, 0, 0, 0, 0, -5,
-5, 0, 0, 0, 0, 0, 0, -5,
-5, 0, 0, 0, 0, 0, 0, -5,
0, 0, 0, 5, 5, 0, 0, 0,
},
// Black queen
{
-20, -10, -10, -5, -5, -10, -10, -20,
-10, 0, 0, 0, 0, 0, 0, -10,
-10, 0, 5, 5, 5, 5, 0, -10,
-5, 0, 5, 5, 5, 5, 0, -5,
0, 0, 5, 5, 5, 5, 0, -5,
-10, 5, 5, 5, 5, 5, 0, -10,
-10, 0, 5, 0, 0, 0, 0, -10,
-20, -10, -10, -5, -5, -10, -10, -20,
},
},
}
var kingps = [2][2]pieceSquare{
{},
{
// Black opening
{
-30, -40, -40, -50, -50, -40, -40, -30,
-30, -40, -40, -50, -50, -40, -40, -30,
-30, -40, -40, -50, -50, -40, -40, -30,
-30, -40, -40, -50, -50, -40, -40, -30,
-20, -30, -30, -40, -40, -30, -30, -20,
-10, -20, -20, -20, -20, -20, -20, -10,
20, 20, 0, 0, 0, 0, 20, 20,
20, 30, 10, 0, 0, 10, 30, 20,
},
// Black endgame
{
-50, -40, -30, -20, -20, -30, -40, -50,
-30, -20, -10, 0, 0, -10, -20, -30,
-30, -10, 20, 30, 30, 20, -10, -30,
-30, -10, 30, 40, 40, 30, -10, -30,
-30, -10, 30, 40, 40, 30, -10, -30,
-30, -10, 20, 30, 30, 20, -10, -30,
-30, -30, 0, 0, 0, 0, -30, -30,
-50, -30, -30, -30, -30, -30, -30, -50,
},
},
}
// flip returns the vertical transposition of ps.
func (ps pieceSquare) flip() pieceSquare {
var f pieceSquare
for rank := 0; rank < 8; rank++ {
for file := 0; file < 8; file++ {
f[8*rank+file] = ps[8*(7-rank)+file]
}
}
return f
}
func init() {
for piece := range ps[Black] {
ps[White][piece] = ps[Black][piece].flip()
}
for phase := range kingps[Black] {
kingps[White][phase] = kingps[Black][phase].flip()
}
}
// Eval returns a Position's Abs evaluation score.
// It returns errInsufficient in the case of insufficient material.
func Eval(pos Position) Abs {
if IsInsufficient(pos) {
return Abs{err: errInsufficient}
}
var (
npawns = PopCount(pos.b[White][Pawn] | pos.b[Black][Pawn])
nknights = PopCount(pos.b[White][Knight] | pos.b[Black][Knight])
nbishops = PopCount(pos.b[White][Bishop] | pos.b[Black][Bishop])
nrooks = PopCount(pos.b[White][Rook] | pos.b[Black][Rook])
nqueens = PopCount(pos.b[White][Queen] | pos.b[Black][Queen])
phase = queenPhase*nqueens + rookPhase*nrooks + bishopPhase*nbishops + knightPhase*nknights + pawnPhase*npawns
eval int
)
for s := a1; s <= h8; s++ {
c, p := pos.PieceOn(s)
if p == None {
continue
}
var r relcent
switch p {
case King:
r = taper(kingps[c][opening][s], kingps[c][endgame][s], phase)
default:
r = taper(pieceEval[p][opening], pieceEval[p][endgame], phase) + ps[c][p][s]
}
if c == White {
eval += int(r)
} else {
eval -= int(r)
}
}
return Abs{n: eval}
}
// IsInsufficient reports whether pos contains insufficient material to deliver checkmate.
// This is a subset of the condition of impossibility of checkmate, which results in an automatic draw.
func IsInsufficient(pos Position) bool {
if pos.b[White][Pawn] != 0 || pos.b[Black][Pawn] != 0 ||
pos.b[White][Rook] != 0 || pos.b[Black][Rook] != 0 ||
pos.b[White][Queen] != 0 || pos.b[Black][Queen] != 0 {
return false
}
var (
nknights = PopCount(pos.b[White][Knight] | pos.b[Black][Knight])
nbishops = PopCount(pos.b[White][Bishop] | pos.b[Black][Bishop])
)
if nknights+nbishops <= 1 {
// KvK, KNvK, and KBvK are drawn.
return true
}
if nknights > 0 {
// Knight and at least one other minor piece: in KNNvK, KBvKN, and KNvKN,
// mate can't be forced, although it can be given by a series of legal moves.
return false
}
if bishops := (pos.b[White][Bishop] | pos.b[Black][Bishop]); bishops&DarkSquares == 0 || bishops&LightSquares == 0 {
// Kings and any number of same color bishops only is drawn.
return true
}
// Opposite color bishops: KBvKB can mate, although not by force.
return false
}
// taper returns the weighted sum of open and end according to the fraction phase/totalPhase.
// This mitigates evaluation discontinuity in the event of rapid loss of material.
func taper(open, end relcent, phase int) relcent {
return (open*relcent(phase) + end*(totalPhase-relcent(phase))) / totalPhase
} | eval.go | 0.638272 | 0.447641 | eval.go | starcoder |
package stats
import (
"fmt"
"github.com/360EntSecGroup-Skylar/excelize/v2"
excel "github.com/zhs007/adacore/excel"
)
// memberDataSetStats - DataSetStats member
var memberDataSetStats = []string{
"Name",
"Nums",
"MeanSDev1",
"MeanSDev2",
"MeanSDev3",
"Min",
"Max",
"COV",
"Median",
"MedianAbsoluteDeviation",
"MedianAbsoluteDeviationPopulation",
"Midhinge",
"Mean",
"GeometricMean",
"HarmonicMean",
"InterQuartileRange",
"StandardDeviation",
"StandardDeviationPopulation",
"StandardDeviationSample",
"Trimean",
"Variance",
"PopulationVariance",
"SampleVariance",
}
// ExportExcel - export a excel file
func ExportExcel(f *excelize.File, sheet string, lst []*DataSetStats, floatFormat string) {
// write head
excel.SetSheet(f, sheet, 1, 1, memberDataSetStats, len(lst),
func(i int, member string) string {
return ""
},
func(i int, member string) (interface{}, error) {
v := lst[i]
if member == "Name" {
return v.Name, nil
} else if member == "Nums" {
return v.Nums, nil
} else if member == "MeanSDev1" {
return fmt.Sprintf("%.2f", float32(v.MeanSDev1)/float32(v.Nums)), nil
} else if member == "MeanSDev2" {
return fmt.Sprintf("%.2f", float32(v.MeanSDev2)/float32(v.Nums)), nil
} else if member == "MeanSDev3" {
return fmt.Sprintf("%.2f", float32(v.MeanSDev3)/float32(v.Nums)), nil
} else if member == "Min" {
return fmt.Sprintf(floatFormat, v.Min), nil
} else if member == "Max" {
return fmt.Sprintf(floatFormat, v.Max), nil
} else if member == "Median" {
return fmt.Sprintf(floatFormat, v.Median), nil
} else if member == "MedianAbsoluteDeviation" {
return fmt.Sprintf(floatFormat, v.MedianAbsoluteDeviation), nil
} else if member == "MedianAbsoluteDeviationPopulation" {
return fmt.Sprintf(floatFormat, v.MedianAbsoluteDeviationPopulation), nil
} else if member == "Midhinge" {
return fmt.Sprintf(floatFormat, v.Midhinge), nil
} else if member == "Mean" {
return fmt.Sprintf(floatFormat, v.Mean), nil
} else if member == "GeometricMean" {
return fmt.Sprintf(floatFormat, v.GeometricMean), nil
} else if member == "HarmonicMean" {
return fmt.Sprintf(floatFormat, v.HarmonicMean), nil
} else if member == "InterQuartileRange" {
return fmt.Sprintf(floatFormat, v.InterQuartileRange), nil
} else if member == "StandardDeviation" {
return fmt.Sprintf(floatFormat, v.StandardDeviation), nil
} else if member == "StandardDeviationPopulation" {
return fmt.Sprintf(floatFormat, v.StandardDeviationPopulation), nil
} else if member == "StandardDeviationSample" {
return fmt.Sprintf(floatFormat, v.StandardDeviationSample), nil
} else if member == "Trimean" {
return fmt.Sprintf(floatFormat, v.Trimean), nil
} else if member == "Variance" {
return fmt.Sprintf(floatFormat, v.Variance), nil
} else if member == "PopulationVariance" {
return fmt.Sprintf(floatFormat, v.PopulationVariance), nil
} else if member == "SampleVariance" {
return fmt.Sprintf(floatFormat, v.SampleVariance), nil
} else if member == "COV" {
return fmt.Sprintf(floatFormat, v.COV), nil
}
return nil, nil
})
} | stats/excel.go | 0.582372 | 0.405655 | excel.go | starcoder |
package main
import (
"encoding/json"
"math/rand"
"sort"
)
var FamousPlaces []Coordinate = []Coordinate{
{Latitude: 34.81667, Longitude: 137.4},
{Latitude: 34.4833, Longitude: 136.84186},
{Latitude: 36.65, Longitude: 138.31667},
{Latitude: 34.9, Longitude: 137.5},
{Latitude: 35.06667, Longitude: 135.21667},
{Latitude: 36, Longitude: 139.55722},
{Latitude: 36.53333, Longitude: 136.61667},
{Latitude: 36.75965, Longitude: 137.36215},
{Latitude: 35, Longitude: 136.51667},
{Latitude: 33.4425, Longitude: 129.96972},
{Latitude: 35.30889, Longitude: 139.55028},
{Latitude: 34.25, Longitude: 135.31667},
{Latitude: 35.82756, Longitude: 137.95378},
{Latitude: 33.3213, Longitude: 130.94098},
{Latitude: 36.24624, Longitude: 139.07204},
{Latitude: 36.33011, Longitude: 138.89585},
{Latitude: 35.815, Longitude: 139.6853},
{Latitude: 39.46667, Longitude: 141.95},
{Latitude: 37.56667, Longitude: 140.11667},
{Latitude: 43.82634, Longitude: 144.09638},
{Latitude: 44.35056, Longitude: 142.45778},
{Latitude: 41.77583, Longitude: 140.73667},
{Latitude: 35.48199, Longitude: 137.02166},
}
// convexHull returns the convex hull of p using Andrew's monotone chain
// algorithm. The input slice is sorted (mutated) in place; the hull is
// returned without repeating the first point.
func convexHull(p []Coordinate) []Coordinate {
	sort.Slice(p, func(i, j int) bool {
		if p[i].Latitude == p[j].Latitude {
			// Fix: the original compared p[i].Longitude with itself, making
			// the tie-break a no-op and the ordering of equal-latitude points
			// unspecified.
			return p[i].Longitude < p[j].Longitude
		}
		return p[i].Latitude < p[j].Latitude
	})
	var h []Coordinate
	// Lower hull
	for _, pt := range p {
		for len(h) >= 2 && !ccw(h[len(h)-2], h[len(h)-1], pt) {
			h = h[:len(h)-1]
		}
		h = append(h, pt)
	}
	// Upper hull
	for i, t := len(p)-2, len(h)+1; i >= 0; i-- {
		pt := p[i]
		for len(h) >= t && !ccw(h[len(h)-2], h[len(h)-1], pt) {
			h = h[:len(h)-1]
		}
		h = append(h, pt)
	}
	return h[:len(h)-1]
}
// ccw returns true if the three Coordinates make a strictly counter-clockwise
// turn: the cross product of (b-a) and (c-a) is positive, treating latitude
// as x and longitude as y. Collinear points return false.
func ccw(a, b, c Coordinate) bool {
	return ((b.Latitude - a.Latitude) * (c.Longitude - a.Longitude)) > ((b.Longitude - a.Longitude) * (c.Latitude - a.Latitude))
}
const (
rangeDiffLatitude = 3
rangeDiffLongitude = 3
rangeMaxWidth = 1.0
rangeMinWidth = 0.1
rangeMaxHeight = 1.0
rangeMinHeight = 0.1
numOfMaxPoints = 20
numOfMinPoints = 10
)
func createRandomConvexhull() string {
famousPlace := FamousPlaces[rand.Intn(len(FamousPlaces))]
width := rand.Float64()*(rangeMaxWidth-rangeMinWidth) + rangeMinWidth
height := rand.Float64()*(rangeMaxHeight-rangeMinHeight) + rangeMinHeight
center := Coordinate{
Latitude: famousPlace.Latitude + (rand.Float64()-0.5)*rangeDiffLatitude,
Longitude: famousPlace.Longitude + (rand.Float64()-0.5)*rangeDiffLongitude,
}
pointCounts := rand.Intn(numOfMaxPoints-numOfMinPoints) + numOfMinPoints
coordinates := []Coordinate{}
for i := 0; i < pointCounts; i++ {
coordinates = append(coordinates, Coordinate{
Latitude: center.Latitude + (rand.Float64()-0.5)*width,
Longitude: center.Longitude + (rand.Float64()-0.5)*height,
})
}
convexhulled := convexHull(coordinates)
convexhulled = append(convexhulled, convexhulled[0])
body, err := json.Marshal(NazotteRequestBody{
Coordinates: convexhulled,
})
if err != nil {
panic(err)
}
return string(body)
} | initial-data/make_verification_data/nazotte.go | 0.577734 | 0.54153 | nazotte.go | starcoder |
package function
import (
"encoding/hex"
"fmt"
"strconv"
"strings"
"time"
"unsafe"
"github.com/shopspring/decimal"
"github.com/liquidata-inc/go-mysql-server/sql"
)
// AsciiFunc implements the sql function "ascii" which returns the numeric
// value of the leftmost byte of its argument after string conversion, or 0
// for the empty string (matching MySQL's ASCII('')).
func AsciiFunc(_ *sql.Context, val interface{}) (interface{}, error) {
	switch x := val.(type) {
	case bool:
		// Booleans are coerced to 1/0 before string conversion.
		if x {
			val = 1
		} else {
			val = 0
		}
	case time.Time:
		// NOTE(review): dates are coerced via their year only; MySQL
		// stringifies the full date — confirm this is intended.
		val = x.Year()
	}
	x, err := sql.Text.Convert(val)
	if err != nil {
		return nil, err
	}
	s := x.(string)
	if len(s) == 0 {
		// Fix: indexing s[0] on an empty string panicked.
		return uint8(0), nil
	}
	return s[0], nil
}
// hexChar maps a nibble value (0-15) to its uppercase hexadecimal digit.
func hexChar(b byte) byte {
	if b < 10 {
		return '0' + b
	}
	return 'A' + b - 10
}
// hexForNegativeInt64 returns the uppercase hex encoding of the 64 bit two's
// complement representation of n, which is what MySQL expects for negative
// integer values (typical conversions such as strconv.FormatInt print a minus
// sign instead).
//
// Fix: the previous implementation reinterpreted the int64's memory through
// unsafe.Pointer and hand-reversed the bytes, which was only correct on
// little-endian machines. Converting the bit pattern to uint64 is portable
// and allocation-free; %X always prints all 16 digits here because the sign
// bit of a negative n is set.
func hexForNegativeInt64(n int64) string {
	return fmt.Sprintf("%X", uint64(n))
}
// hexForFloat rounds f half away from zero to an integer and returns its
// uppercase hex representation (64 bit two's complement for negative values).
// The error return is always nil; it exists to match the calling convention
// of HexFunc's other branches.
func hexForFloat(f float64) (string, error) {
	if f < 0 {
		// Subtracting 0.5 before truncation rounds away from zero.
		f -= 0.5
		n := int64(f)
		return hexForNegativeInt64(n), nil
	}
	// Adding 0.5 before truncation rounds half up.
	f += 0.5
	n := uint64(f)
	return fmt.Sprintf("%X", n), nil
}
// HexFunc implements the sql function "hex" which returns the hexidecimal representation of the string or numeric value
func HexFunc(_ *sql.Context, arg interface{}) (interface{}, error) {
switch val := arg.(type) {
case string:
return hexForString(val), nil
case uint8, uint16, uint32, uint, int, int8, int16, int32, int64:
n, err := sql.Int64.Convert(arg)
if err != nil {
return nil, err
}
a := n.(int64)
if a < 0 {
return hexForNegativeInt64(a), nil
} else {
return fmt.Sprintf("%X", a), nil
}
case uint64:
return fmt.Sprintf("%X", val), nil
case float32:
return hexForFloat(float64(val))
case float64:
return hexForFloat(val)
case decimal.Decimal:
f, _ := val.Float64()
return hexForFloat(f)
case bool:
if val {
return "1", nil
}
return "0", nil
case time.Time:
s, err := formatDate("%Y-%m-%d %H:%i:%s", val)
if err != nil {
return nil, err
}
s += fractionOfSecString(val)
return hexForString(s), nil
default:
return nil, ErrInvalidArgument.New("crc32", fmt.Sprint(arg))
}
}
// hexForString returns the uppercase hex encoding of the raw bytes of val,
// matching MySQL's HEX() on string arguments.
//
// Fix: the previous implementation ranged over runes and split the Unicode
// code point (not the byte) into nibbles, producing wrong output for any
// non-ASCII input (e.g. "é" became "E9" instead of its UTF-8 bytes "C3A9").
func hexForString(val string) string {
	return strings.ToUpper(hex.EncodeToString([]byte(val)))
}
func UnhexFunc(_ *sql.Context, arg interface{}) (interface{}, error) {
val, err := sql.Text.Convert(arg)
if err != nil {
return nil, err
}
s := val.(string)
if len(s)%2 != 0 {
return nil, nil
}
s = strings.ToUpper(s)
for _, c := range s {
if c < '0' || c > '9' && c < 'A' || c > 'F' {
return nil, nil
}
}
res, err := hex.DecodeString(s)
if err != nil {
return nil, err
}
return string(res), nil
}
// binForNegativeInt64 returns the binary (base 2) encoding of the 64 bit two's
// complement representation of n, which is what MySQL expects for negative
// integer values.
//
// Fix: the previous implementation formatted each byte separately via unsafe
// pointer access, which (a) dropped the leading zero bits of every interior
// byte (0x01 printed as "1" instead of "00000001") and (b) emitted the bytes
// least-significant first. Formatting the uint64 bit pattern directly is
// portable and correct; all 64 bits are always printed because the sign bit
// of a negative n is set.
func binForNegativeInt64(n int64) string {
	return strconv.FormatUint(uint64(n), 2)
}
// BinFunc implements the sql function "bin" which returns the binary
// representation of a number.
func BinFunc(_ *sql.Context, arg interface{}) (interface{}, error) {
	switch val := arg.(type) {
	case time.Time:
		// NOTE(review): dates are coerced via their year only — confirm this
		// matches the intended MySQL coercion.
		return strconv.FormatUint(uint64(val.Year()), 2), nil
	case uint64:
		return strconv.FormatUint(val, 2), nil
	default:
		n, err := sql.Int64.Convert(arg)
		if err != nil {
			// Non-numeric input yields "0" rather than an error.
			return "0", nil
		}
		if n.(int64) < 0 {
			// Negative values use the 64 bit two's complement encoding.
			return binForNegativeInt64(n.(int64)), nil
		} else {
			return strconv.FormatInt(n.(int64), 2), nil
		}
	}
}
// BitLengthFunc implements the sql function "bit_length" which returns the length of the argument in bits
func BitLengthFunc(_ *sql.Context, arg interface{}) (interface{}, error) {
switch val := arg.(type) {
case uint8, int8, bool:
return 8, nil
case uint16, int16:
return 16, nil
case int, uint, uint32, int32, float32:
return 32, nil
case uint64, int64, float64:
return 64, nil
case string:
return 8 * len([]byte(val)), nil
case time.Time:
return 128, nil
}
return nil, ErrInvalidArgument.New("bit_length", fmt.Sprint(arg))
} | sql/expression/function/string.go | 0.714329 | 0.40539 | string.go | starcoder |
package merkletree2
import (
"fmt"
"math"
)
// The SkipPointers type (a slice of hashes) is constructed for a specific
// Seqno s (version) of the tree and contains the hashes of RootMetadata
// structs at specific previous Seqnos (versions) of the tree. Such versions
// are fixed given s according to the algorithm in SkipPointersForSeqno so
// that a "chain" of SkipPointers can "connect" any two roots in a logarithmic
// number of steps.
type SkipPointers []Hash
// SkipPointersForSeqno returns the Seqnos of the roots that the root at Seqno
// s points back to. For s == 1 (the first root) there are none. Otherwise it
// greedily accumulates decreasing powers of two while staying strictly below
// s, so the returned Seqnos are increasing and the last one is always s-1.
func SkipPointersForSeqno(s Seqno) (pointers []Seqno) {
	if s == 1 {
		return []Seqno{}
	}
	// n is the largest exponent with 2^n <= s-1.
	// NOTE(review): this relies on math.Log2 being exact at power-of-two
	// boundaries and on s fitting a float64 without precision loss — confirm
	// for very large Seqnos.
	n := int(math.Log2(float64(s - 1)))
	x := Seqno(0)
	for i := n; i >= 0; i-- {
		if x+(1<<uint(i)) < s {
			x += 1 << uint(i)
			pointers = append(pointers, x)
		}
	}
	return pointers
}
// SkipPointersPath takes two seqno 0 < start <= end. It returns a slice of
// Seqno `pointers` such that:
// - start \in SkipPointersForSeqno(pointers[0]),
// - pointers[len(pointers)] == end,
// - pointers[i-1] \in SkipPointersForSeqno(pointers[i])
// for i = 1...len(pointers)-1.
// If start == end, returns [end]. The sequence has length
// at most logarithmic in end - start.
func SkipPointersPath(start, end Seqno) (pointers []Seqno, err error) {
	if start > end {
		return nil, fmt.Errorf("GenerateSkipPointersSequence: start > end: %v > %v", start, end)
	}
	// Walk backwards from end towards start: at each step jump to the
	// smallest skip pointer of current that is still >= start, prepending
	// each intermediate hop (but not start itself) so the result stays in
	// increasing order.
	current := end
	pointers = append(pointers, current)
	for current > start {
		// SkipPointersForSeqno returns increasing Seqnos whose last element
		// is current-1, so a pointer >= start always exists and the loop
		// makes progress (terminates).
		for _, i := range SkipPointersForSeqno(current) {
			if start <= i {
				current = i
				if start != i {
					pointers = append([]Seqno{i}, pointers...)
				}
				break
			}
		}
	}
	return pointers, nil
}
// ComputeRootMetadataSeqnosNeededInExtensionProof lists which RootMetadata
// versions an extension proof from start to end must include.
func ComputeRootMetadataSeqnosNeededInExtensionProof(start, end Seqno, isPartOfIncExtProof bool) ([]Seqno, error) {
	path, err := SkipPointersPath(start, end)
	if err != nil {
		return nil, err
	}
	if !isPartOfIncExtProof {
		return path, nil
	}
	// Small optimization: in an InclusionExtension proof the root of the end
	// seqno already ships with the inclusion proof, so drop the final element.
	return path[:len(path)-1], nil
}
func ComputeRootHashSeqnosNeededInExtensionProof(start, end Seqno) (ret []Seqno, err error) {
// this map prevents duplicates, as well as inserting the hashes of
// SkipPointersPath elements (as those can be recomputed from the rest).
unnecessarySeqnoMap := make(map[Seqno]bool)
unnecessarySeqnoMap[start] = true
ret = []Seqno{}
path, err := SkipPointersPath(start, end)
if err != nil {
return nil, err
}
for _, s := range path {
unnecessarySeqnoMap[s] = true
for _, s2 := range SkipPointersForSeqno(s) {
if unnecessarySeqnoMap[s2] {
continue
}
ret = append(ret, s2)
unnecessarySeqnoMap[s2] = true
}
}
return ret, nil
} | go/merkletree2/skippointers.go | 0.661486 | 0.432063 | skippointers.go | starcoder |
package gabi
import (
"crypto/rand"
"math/big"
)
// Some utility code (mostly math stuff) useful in various places in this
// package.

// Often we need to refer to the same small constant big numbers, no point in
// creating them again and again.
// These shared values must only ever be used as operands, never as receivers
// of mutating big.Int methods.
var (
	bigZERO  = big.NewInt(0)
	bigONE   = big.NewInt(1)
	bigTWO   = big.NewInt(2)
	bigTHREE = big.NewInt(3)
	bigFOUR  = big.NewInt(4)
	bigFIVE  = big.NewInt(5)
	bigEIGHT = big.NewInt(8)
)
// modInverse returns ia, the inverse of a in the multiplicative group of prime
// order n. It requires that a be a member of the group (i.e. less than n).
// This function was taken from Go's RSA implementation
func modInverse(a, n *big.Int) (ia *big.Int, ok bool) {
g := new(big.Int)
x := new(big.Int)
y := new(big.Int)
g.GCD(x, y, a, n)
if g.Cmp(bigONE) != 0 {
// In this case, a and n aren't coprime and we cannot calculate
// the inverse. This happens because the values of n are nearly
// prime (being the product of two primes) rather than truly
// prime.
return
}
if x.Cmp(bigONE) < 0 {
// 0 is not the multiplicative inverse of any element so, if x
// < 1, then x is negative.
x.Add(x, n)
}
return x, true
}
// modPow computes x^y mod m. The exponent (y) can be negative, in which case it
// uses the modular inverse to compute the result (in contrast to Go's Exp
// function).
func modPow(x, y, m *big.Int) *big.Int {
if y.Sign() == -1 {
t := new(big.Int).ModInverse(x, m)
return t.Exp(t, new(big.Int).Neg(y), m)
}
return new(big.Int).Exp(x, y, m)
}
// representToBases returns a representation of the given exponents in terms of
// the given bases. For bases bases[1],...,bases[k], exponents exps[1],...,exps[k]
// and modulus, it returns bases[1]^{exps[1]}*...*bases[k]^{exps[k]} (mod modulus).
func representToBases(bases, exps []*big.Int, modulus *big.Int, maxMessageLength uint) *big.Int {
	acc := big.NewInt(1)
	term := new(big.Int)
	for i, exp := range exps {
		// Exponents wider than maxMessageLength bits are first compressed by
		// hashing them.
		if exp.BitLen() > int(maxMessageLength) {
			exp = intHashSha256(exp.Bytes())
		}
		term.Exp(bases[i], exp, modulus)
		acc.Mul(acc, term).Mod(acc, modulus)
	}
	return acc
}
// RandomBigInt returns a random big integer value in the range
// [0,(2^numBits)-1], inclusive.
func RandomBigInt(numBits uint) (*big.Int, error) {
	// rand.Int draws uniformly from [0, max), so max is 2^numBits.
	max := new(big.Int).Lsh(big.NewInt(1), numBits)
	return rand.Int(rand.Reader, max)
}
// legendreSymbol calculates the Legendre symbol (a/p): 1 when a is a
// quadratic residue mod p, -1 when it is a non-residue, and 0 when the
// computation bottoms out without gcd 1 (e.g. p divides a).
func legendreSymbol(a, p *big.Int) int {
	// Adapted from: https://programmingpraxis.com/2012/05/01/legendres-symbol/
	// Probably needs more extensive checking? Also, no optimization has been applied.
	j := 1
	// Make a copy of the arguments
	// rule 5
	n := new(big.Int).Mod(a, p)
	m := new(big.Int)
	// NOTE(review): this copies the big.Int header, so m initially shares p's
	// backing storage. It is safe only because m is never mutated in place
	// before Mod reassigns it below — confirm before refactoring.
	*m = *p // copy value
	tmp := new(big.Int)
	for n.Cmp(bigZERO) != 0 {
		// rules 3 and 4: strip all factors of two from n; the sign flips
		// when an odd number were removed and m ≡ 3 or 5 (mod 8).
		t := 0
		for n.Bit(0) == 0 {
			n.Rsh(n, 1)
			t++
		}
		tmp.Mod(m, bigEIGHT)
		if t&1 == 1 && (tmp.Cmp(bigTHREE) == 0 || tmp.Cmp(bigFIVE) == 0) {
			j = -j
		}
		// rule 6 (quadratic reciprocity): flip when both m and n are ≡ 3 (mod 4).
		// tmp is deliberately reused for both reductions; this relies on &&
		// fully evaluating the left comparison before tmp is overwritten on
		// the right.
		if tmp.Mod(m, bigFOUR).Cmp(bigTHREE) == 0 && tmp.Mod(n, bigFOUR).Cmp(bigTHREE) == 0 {
			j = -j
		}
		// rules 5 and 6: reduce m modulo n, then swap the roles of m and n.
		m.Mod(m, n)
		n, m = m, n
	}
	if m.Cmp(bigONE) == 0 {
		return j
	}
	return 0
}
package nasConvert
import (
"encoding/hex"
"fmt"
"github.com/omec-project/nas/nasMessage"
"github.com/omec-project/nas/nasType"
"github.com/omec-project/openapi/models"
)
// TS 24.501 9.11.3.37
//
// RequestedNssaiToModels decodes the NAS Requested NSSAI IE into a list of
// S-NSSAI mappings. Each entry in the buffer is a one byte content length
// followed by that many content bytes.
func RequestedNssaiToModels(nasNssai *nasType.RequestedNSSAI) ([]models.MappingOfSnssai, error) {
	var requestNssai []models.MappingOfSnssai

	buf := nasNssai.GetSNSSAIValue()
	lengthOfBuf := int(nasNssai.GetLen())
	offset := 0
	for offset < lengthOfBuf {
		lengthOfSnssaiContents := buf[offset]
		snssai, err := snssaiToModels(lengthOfSnssaiContents, buf[offset:])
		if err != nil {
			return nil, err
		}
		requestNssai = append(requestNssai, snssai)
		// Advance past the length octet plus the contents. The addition is
		// done in int: the previous uint8 arithmetic overflowed to 0 for a
		// (malformed) content length of 255, causing an infinite loop.
		offset += int(lengthOfSnssaiContents) + 1
	}
	return requestNssai, nil
}
// TS 24.501 9.11.2.8, Length & value part of S-NSSAI IE
//
// snssaiToModels decodes one S-NSSAI entry. buf[0] is the length octet
// itself; the contents start at buf[1]. The content length selects which
// optional fields are present: SST is 1 octet, SD is 3 octets (hex encoded),
// and the mapped HPLMN SST/SD follow the serving values when present.
func snssaiToModels(lengthOfSnssaiContents uint8, buf []byte) (models.MappingOfSnssai, error) {
	snssai := models.MappingOfSnssai{}
	switch lengthOfSnssaiContents {
	case 0x01: // SST
		snssai.ServingSnssai = &models.Snssai{
			Sst: int32(buf[1]),
		}
		return snssai, nil
	case 0x02: // SST and mapped HPLMN SST
		snssai.ServingSnssai = &models.Snssai{
			Sst: int32(buf[1]),
		}
		snssai.HomeSnssai = &models.Snssai{
			Sst: int32(buf[2]),
		}
		return snssai, nil
	case 0x04: // SST and SD
		snssai.ServingSnssai = &models.Snssai{
			Sst: int32(buf[1]),
			Sd:  hex.EncodeToString(buf[2:5]),
		}
		return snssai, nil
	case 0x05: // SST, SD and mapped HPLMN SST
		snssai.ServingSnssai = &models.Snssai{
			Sst: int32(buf[1]),
			Sd:  hex.EncodeToString(buf[2:5]),
		}
		snssai.HomeSnssai = &models.Snssai{
			Sst: int32(buf[5]),
		}
		return snssai, nil
	case 0x08: // SST, SD, mapped HPLMN SST and mapped HPLMN SD
		snssai.ServingSnssai = &models.Snssai{
			Sst: int32(buf[1]),
			Sd:  hex.EncodeToString(buf[2:5]),
		}
		snssai.HomeSnssai = &models.Snssai{
			Sst: int32(buf[5]),
			Sd:  hex.EncodeToString(buf[6:9]),
		}
		return snssai, nil
	default:
		return snssai, fmt.Errorf("Invalid length of S-NSSAI contents: %d", lengthOfSnssaiContents)
	}
}
func RejectedNssaiToNas(rejectedNssaiInPlmn []models.Snssai, rejectedNssaiInTa []models.Snssai) nasType.RejectedNSSAI {
var rejectedNssaiNas nasType.RejectedNSSAI
var byteArray []uint8
for _, rejectedSnssai := range rejectedNssaiInPlmn {
byteArray = append(byteArray, RejectedSnssaiToNas(rejectedSnssai,
nasMessage.RejectedSnssaiCauseNotAvailableInCurrentPlmn)...)
}
for _, rejectedSnssai := range rejectedNssaiInTa {
byteArray = append(byteArray, RejectedSnssaiToNas(rejectedSnssai,
nasMessage.RejectedSnssaiCauseNotAvailableInCurrentRegistrationArea)...)
}
rejectedNssaiNas.SetLen(uint8(len(byteArray)))
rejectedNssaiNas.SetRejectedNSSAIContents(byteArray)
return rejectedNssaiNas
} | nasConvert/Nssai.go | 0.564339 | 0.412175 | Nssai.go | starcoder |
package iso20022
// RedemptionExecution3 describes the execution of a redemption order.
// Each field's xml tag is the corresponding ISO 20022 element name.
type RedemptionExecution3 struct {
	// Unique and unambiguous identifier for an order, as assigned by the instructing party.
	OrderReference *Max35Text `xml:"OrdrRef"`
	// Unique and unambiguous identifier for an order execution, as assigned by a confirming party.
	DealReference *Max35Text `xml:"DealRef"`
	// Specifies the category of the investment fund order.
	OrderType []*FundOrderType1 `xml:"OrdrTp,omitempty"`
	// Account between an investor(s) and a fund manager or a fund. The account can contain holdings in any investment fund or investment fund class managed (or distributed) by the fund manager, within the same fund family.
	InvestmentAccountDetails *InvestmentAccount13 `xml:"InvstmtAcctDtls"`
	// Additional information about the investor.
	BeneficiaryDetails *IndividualPerson2 `xml:"BnfcryDtls,omitempty"`
	// Number of investment funds units redeemed.
	UnitsNumber *FinancialInstrumentQuantity1 `xml:"UnitsNb"`
	// Indicates the rounding direction applied to nearest unit.
	Rounding *RoundingDirection2Code `xml:"Rndg,omitempty"`
	// Net amount of money paid to the investor as a result of the redemption.
	NetAmount *ActiveCurrencyAndAmount `xml:"NetAmt"`
	// Portion of the investor's holdings, in a specific investment fund/ fund class, that is redeemed.
	HoldingsRedemptionRate *PercentageRate `xml:"HldgsRedRate,omitempty"`
	// Amount of money paid to the investor as a result of the redemption, including all charges, commissions, and tax.
	GrossAmount *ActiveCurrencyAndAmount `xml:"GrssAmt,omitempty"`
	// Date and time at which a price is applied, according to the terms stated in the prospectus.
	TradeDateTime *DateAndDateTimeChoice `xml:"TradDtTm"`
	// Price at which the order was executed.
	PriceDetails *UnitPrice5 `xml:"PricDtls"`
	// Indicates whether the order has been partially executed, ie, the confirmed quantity does not match the ordered quantity for a given financial instrument.
	PartiallyExecutedIndicator *YesNoIndicator `xml:"PrtlyExctdInd"`
	// Indicates whether the dividend is included, ie, cum-dividend, in the executed price. When the dividend is not included, the price will be ex-dividend.
	CumDividendIndicator *YesNoIndicator `xml:"CumDvddInd"`
	// Part of the price deemed as accrued income or profit rather than capital. The interim profit amount is used for tax purposes.
	InterimProfitAmount *ProfitAndLoss1Choice `xml:"IntrmPrftAmt,omitempty"`
	// Information needed to process a currency exchange or conversion.
	ForeignExchangeDetails []*ForeignExchangeTerms4 `xml:"FXDtls,omitempty"`
	// Dividend option chosen by the account owner based on the options offered in the prospectus.
	IncomePreference *IncomePreference1Code `xml:"IncmPref,omitempty"`
	// Tax group to which the purchased investment fund units belong. The investor indicates to the intermediary operating pooled nominees, which type of unit is to be sold.
	Group1Or2Units *UKTaxGroupUnitCode `xml:"Grp1Or2Units,omitempty"`
	// Amount of money associated with a service.
	ChargeGeneralDetails *TotalCharges2 `xml:"ChrgGnlDtls,omitempty"`
	// Amount of money due to a party as compensation for a service.
	CommissionGeneralDetails *TotalCommissions2 `xml:"ComssnGnlDtls,omitempty"`
	// Tax related to an investment fund order.
	TaxGeneralDetails *TotalTaxes2 `xml:"TaxGnlDtls,omitempty"`
	// Parameters used to execute the settlement of an investment fund order.
	SettlementAndCustodyDetails *FundSettlementParameters3 `xml:"SttlmAndCtdyDtls,omitempty"`
	// Indicates whether the financial instrument is to be physically delivered.
	PhysicalDeliveryIndicator *YesNoIndicator `xml:"PhysDlvryInd"`
	// Parameters of a physical delivery.
	PhysicalDeliveryDetails *DeliveryParameters3 `xml:"PhysDlvryDtls,omitempty"`
	// Payment transaction resulting from the investment fund order execution.
	CashSettlementDetails *PaymentTransaction18 `xml:"CshSttlmDtls,omitempty"`
}
// SetOrderReference sets the order reference from a string.
func (r *RedemptionExecution3) SetOrderReference(value string) {
	r.OrderReference = (*Max35Text)(&value)
}

// SetDealReference sets the deal reference from a string.
func (r *RedemptionExecution3) SetDealReference(value string) {
	r.DealReference = (*Max35Text)(&value)
}

// AddOrderType appends a new, empty order type entry and returns it for population.
func (r *RedemptionExecution3) AddOrderType() *FundOrderType1 {
	newValue := new (FundOrderType1)
	r.OrderType = append(r.OrderType, newValue)
	return newValue
}

// AddInvestmentAccountDetails allocates and returns the investment account details.
func (r *RedemptionExecution3) AddInvestmentAccountDetails() *InvestmentAccount13 {
	r.InvestmentAccountDetails = new(InvestmentAccount13)
	return r.InvestmentAccountDetails
}

// AddBeneficiaryDetails allocates and returns the beneficiary details.
func (r *RedemptionExecution3) AddBeneficiaryDetails() *IndividualPerson2 {
	r.BeneficiaryDetails = new(IndividualPerson2)
	return r.BeneficiaryDetails
}

// AddUnitsNumber allocates and returns the redeemed units quantity.
func (r *RedemptionExecution3) AddUnitsNumber() *FinancialInstrumentQuantity1 {
	r.UnitsNumber = new(FinancialInstrumentQuantity1)
	return r.UnitsNumber
}

// SetRounding sets the rounding direction code from a string.
func (r *RedemptionExecution3) SetRounding(value string) {
	r.Rounding = (*RoundingDirection2Code)(&value)
}

// SetNetAmount sets the net amount with its currency.
func (r *RedemptionExecution3) SetNetAmount(value, currency string) {
	r.NetAmount = NewActiveCurrencyAndAmount(value, currency)
}

// SetHoldingsRedemptionRate sets the redeemed holdings percentage from a string.
func (r *RedemptionExecution3) SetHoldingsRedemptionRate(value string) {
	r.HoldingsRedemptionRate = (*PercentageRate)(&value)
}

// SetGrossAmount sets the gross amount with its currency.
func (r *RedemptionExecution3) SetGrossAmount(value, currency string) {
	r.GrossAmount = NewActiveCurrencyAndAmount(value, currency)
}

// AddTradeDateTime allocates and returns the trade date/time choice.
func (r *RedemptionExecution3) AddTradeDateTime() *DateAndDateTimeChoice {
	r.TradeDateTime = new(DateAndDateTimeChoice)
	return r.TradeDateTime
}

// AddPriceDetails allocates and returns the execution price details.
func (r *RedemptionExecution3) AddPriceDetails() *UnitPrice5 {
	r.PriceDetails = new(UnitPrice5)
	return r.PriceDetails
}

// SetPartiallyExecutedIndicator sets the partial-execution indicator from a string.
func (r *RedemptionExecution3) SetPartiallyExecutedIndicator(value string) {
	r.PartiallyExecutedIndicator = (*YesNoIndicator)(&value)
}

// SetCumDividendIndicator sets the cum-dividend indicator from a string.
func (r *RedemptionExecution3) SetCumDividendIndicator(value string) {
	r.CumDividendIndicator = (*YesNoIndicator)(&value)
}

// AddInterimProfitAmount allocates and returns the interim profit amount choice.
func (r *RedemptionExecution3) AddInterimProfitAmount() *ProfitAndLoss1Choice {
	r.InterimProfitAmount = new(ProfitAndLoss1Choice)
	return r.InterimProfitAmount
}

// AddForeignExchangeDetails appends a new, empty FX terms entry and returns it.
func (r *RedemptionExecution3) AddForeignExchangeDetails() *ForeignExchangeTerms4 {
	newValue := new (ForeignExchangeTerms4)
	r.ForeignExchangeDetails = append(r.ForeignExchangeDetails, newValue)
	return newValue
}

// SetIncomePreference sets the income preference code from a string.
func (r *RedemptionExecution3) SetIncomePreference(value string) {
	r.IncomePreference = (*IncomePreference1Code)(&value)
}

// SetGroup1Or2Units sets the UK tax group unit code from a string.
func (r *RedemptionExecution3) SetGroup1Or2Units(value string) {
	r.Group1Or2Units = (*UKTaxGroupUnitCode)(&value)
}

// AddChargeGeneralDetails allocates and returns the charge details.
func (r *RedemptionExecution3) AddChargeGeneralDetails() *TotalCharges2 {
	r.ChargeGeneralDetails = new(TotalCharges2)
	return r.ChargeGeneralDetails
}

// AddCommissionGeneralDetails allocates and returns the commission details.
func (r *RedemptionExecution3) AddCommissionGeneralDetails() *TotalCommissions2 {
	r.CommissionGeneralDetails = new(TotalCommissions2)
	return r.CommissionGeneralDetails
}

// AddTaxGeneralDetails allocates and returns the tax details.
func (r *RedemptionExecution3) AddTaxGeneralDetails() *TotalTaxes2 {
	r.TaxGeneralDetails = new(TotalTaxes2)
	return r.TaxGeneralDetails
}

// AddSettlementAndCustodyDetails allocates and returns the settlement/custody parameters.
func (r *RedemptionExecution3) AddSettlementAndCustodyDetails() *FundSettlementParameters3 {
	r.SettlementAndCustodyDetails = new(FundSettlementParameters3)
	return r.SettlementAndCustodyDetails
}

// SetPhysicalDeliveryIndicator sets the physical-delivery indicator from a string.
func (r *RedemptionExecution3) SetPhysicalDeliveryIndicator(value string) {
	r.PhysicalDeliveryIndicator = (*YesNoIndicator)(&value)
}

// AddPhysicalDeliveryDetails allocates and returns the physical delivery parameters.
func (r *RedemptionExecution3) AddPhysicalDeliveryDetails() *DeliveryParameters3 {
	r.PhysicalDeliveryDetails = new(DeliveryParameters3)
	return r.PhysicalDeliveryDetails
}

// AddCashSettlementDetails allocates and returns the cash settlement transaction.
func (r *RedemptionExecution3) AddCashSettlementDetails() *PaymentTransaction18 {
	r.CashSettlementDetails = new(PaymentTransaction18)
	return r.CashSettlementDetails
}
package decim
import (
"errors"
"io"
"math"
)
// Same interface as gonum/plot XYer.
// XYer provides indexed access to a sequence of (x, y) points.
type XYer interface {
	// XY returns the coordinates of point i.
	XY(i int) (x, y float64)
	// Len returns the number of points.
	Len() int
}
// Sampler streams over the points of an XYer and emits a point only when the
// polyline would leave a tolerance corridor around the skipped data.
type Sampler struct {
	// idx is the index of the next input point to consume.
	idx int
	// tol is the vertical tolerance bounding the permissible angle range.
	tol float64
	// xPivot/yPivot anchor the current corridor; xPrev/yPrev is the most
	// recently consumed input point.
	xPivot, yPivot, xPrev, yPrev float64
	// angleMin/angleMax delimit the angles a line from the pivot may take
	// while still passing within tol of every skipped point.
	angleMin, angleMax float64
	// xyer is the input data source.
	xyer XYer
	// Interp attempts to lessen the error
	// by choosing next y value such that
	// the line is contained in the middle of
	// the angle limit range. Setting interp
	// means y values will not coincide with input data.
	Interp bool
}
// NewSampler creates a Sampler over xyer with vertical tolerance tol.
// It panics when xyer is nil or holds fewer than 3 points.
func NewSampler(xyer XYer, tol float64) *Sampler {
	switch {
	case xyer == nil:
		panic("got nil xyer")
	case xyer.Len() < 3:
		panic("need at least 3 points to downsample")
	}
	s := &Sampler{xyer: xyer, tol: tol}
	// Position the sampler at the first point, then compute the initial
	// permissible angle range for the line it should be contained in.
	s.Reset()
	s.setStartAngleLims()
	return s
}
// Reset sets the sampler to initial value.
func (s *Sampler) Reset() {
	x0, y0 := s.xyer.XY(0)
	s.idx = 1
	// The first input point becomes both the pivot and the previous point.
	s.xPivot, s.yPivot = x0, y0
	s.xPrev, s.yPrev = x0, y0
}
// Next consumes input points until one falls outside the current tolerance
// corridor and returns that point as the next downsampled sample. The final
// input point is always returned verbatim. Once the input is exhausted it
// returns (0, 0, io.EOF).
func (s *Sampler) Next() (x, y float64, err error) {
	n := s.xyer.Len()
	for s.idx < n {
		x, y = s.xyer.XY(s.idx)
		s.idx++
		if s.idx == n {
			// Return last data without modification.
			return x, y, nil
		}
		// Angle of the line from the pivot to the current point.
		dx, dy := x-s.xPivot, y-s.yPivot
		actualAngle := math.Atan2(dy, dx)
		if math.IsNaN(actualAngle) || math.IsInf(actualAngle, 0) {
			return 0, 0, errors.New("got infinity or NaN")
		}
		if !(actualAngle > s.angleMin) || !(actualAngle < s.angleMax) {
			// The angle of the line exceeded permissible angle range, so the
			// previous point becomes the new pivot and this point is emitted.
			if s.Interp {
				// Place the pivot's y on the bisector of the angle range
				// rather than exactly on the input data.
				s.yPivot = s.yPivot + (s.xPrev-s.xPivot)*(math.Tan(s.angleMax)+math.Tan(s.angleMin))/2 // interpolator
			} else {
				s.yPivot = s.yPrev
			}
			s.xPivot, s.xPrev, s.yPrev = s.xPrev, x, y
			s.setStartAngleLims()
			return x, y, nil
		}
		// We update the angle limits based on new point: the corridor can
		// only narrow as more points are skipped.
		loangle, hiangle := math.Atan2(dy-s.tol, dx), math.Atan2(dy+s.tol, dx)
		s.angleMin = math.Max(loangle, s.angleMin)
		s.angleMax = math.Min(hiangle, s.angleMax)
		s.xPrev = x
		s.yPrev = y
	}
	return x, y, io.EOF
}
// setStartAngleLims initializes the permissible angle range from the pivot to
// the next unread input point: the downsampled line may pass anywhere within
// a vertical band of ±tol around that point.
func (s *Sampler) setStartAngleLims() {
	if s.idx >= s.xyer.Len() {
		// Out of range. Stream is exhausted.
		return
	}
	x, y := s.xyer.XY(s.idx)
	dx, dy := x-s.xPivot, y-s.yPivot
	s.angleMax = math.Atan2(dy+s.tol, dx)
	s.angleMin = math.Atan2(dy-s.tol, dx)
}
// XYer processes xyer argument data and returns the downsampled data
func (s *Sampler) XYer() XYer {
	s.Reset()
	out := &sliceXYer{}
	for {
		x, y, err := s.Next()
		if err != nil {
			// io.EOF marks normal exhaustion; anything else is a data error.
			if !errors.Is(err, io.EOF) {
				panic(err)
			}
			return out
		}
		out.x = append(out.x, x)
		out.y = append(out.y, y)
	}
}
// sliceXYer is a concrete XYer backed by parallel coordinate slices.
type sliceXYer struct {
	x, y []float64
}
// XY returns the i-th stored point.
func (s *sliceXYer) XY(i int) (x, y float64) {
	return s.x[i], s.y[i]
}
// Len returns the number of stored points.
func (s *sliceXYer) Len() int {
	return len(s.x)
}
package main
import (
"math/rand"
"time"
)
// tileType enumerates the kinds of map tiles.
type tileType int
const (
	floor tileType = iota
	wall
	boundary
)
// tile is a single map cell: its display glyph, kind, and display color.
type tile struct {
	glyph rune
	kind tileType
	color int
}
// isWalkable reports whether entities can move onto this tile.
func (t tile) isWalkable() bool {
	return t.kind == floor
}
// isDiggable reports whether this tile can be dug out into floor.
func (t tile) isDiggable() bool {
	return t.kind == wall
}
// world holds the full game state: the tile grid, the player, all other
// entities, a per-world RNG, and the notification center.
type world struct {
	// dimensions is the map size (x = width, y = height).
	dimensions *Point
	// cells is the tile grid, indexed [row][column] i.e. [y][x].
	cells [][]*tile
	player *player
	entities *entities
	// seed is this world's private random source.
	seed *rand.Rand
	notifications *notificationCenter
}
// NewTile builds the canonical tile for the given kind; any unrecognized kind
// yields the boundary marker tile.
func NewTile(kind tileType) *tile {
	// Switch directly on the value instead of the previous tautological
	// `switch { case kind == ... }` form.
	switch kind {
	case floor:
		return &tile{'.', floor, 0x7}
	case wall:
		return &tile{'#', wall, 0x55}
	default:
		return &tile{'X', boundary, 0x91}
	}
}
// NewWorld builds a width x height world: every cell starts as floor, the map
// is then carved by RandomWorld, and the player plus an initial fungus entity
// are spawned.
func NewWorld(width, height int) *world {
	world := &world{dimensions: &Point{x: width, y: height}}
	world.cells = make([][]*tile, world.dimensions.y)
	// Per-world random source, seeded from the wall clock.
	world.seed = rand.New(rand.NewSource(time.Now().UnixNano()))
	for row := range world.cells {
		world.cells[row] = make([]*tile, world.dimensions.x)
		for col := range world.cells[row] {
			world.cells[row][col] = NewTile(floor)
		}
	}
	RandomWorld(world)
	world.notifications = NewNotificationCenter()
	world.entities = NewEntities()
	world.player = newPlayer(world)
	world.entities.add(newFungus(world))
	return world
}
// isWithinBoundaries reports whether point lies inside the map rectangle
// (0 <= x < width, 0 <= y < height).
func (w world) isWithinBoundaries(point *Point) bool {
	return point.x >= 0 && point.x < w.dimensions.x &&
		point.y >= 0 && point.y < w.dimensions.y
}
// GetTile returns the tile at point; out-of-range lookups yield a fresh
// boundary tile rather than panicking.
func (w world) GetTile(point *Point) *tile {
	if !w.isWithinBoundaries(point) {
		return NewTile(boundary)
	}
	return w.cells[point.y][point.x]
}
// SetTile stores tile at point; writes outside the map are silently ignored.
func (w *world) SetTile(point *Point, tile *tile) {
	if !w.isWithinBoundaries(point) {
		return
	}
	w.cells[point.y][point.x] = tile
}
// dig carves the tile at point into floor.
func (w *world) dig(point *Point) {
	w.SetTile(point, NewTile(floor))
}
// entitiesInside invokes callback for every entity whose position lies in the
// rectangle [point.x, point.x+width] x [point.y, point.y+height] (both bounds
// inclusive).
func (w *world) entitiesInside(point *Point, width, height int, callback func(entity autonomous)) {
	for _, candidate := range w.entities.pool {
		pos := candidate.Position()
		inX := pos.x >= point.x && pos.x <= point.x+width
		inY := pos.y >= point.y && pos.y <= point.y+height
		if inX && inY {
			callback(candidate)
		}
	}
}
// atWalkableTile finds a random walkable tile in the world and returns its
// position.
//
// Fixed to draw from the world's own seeded source (w.seed, created in
// NewWorld) instead of the package-global math/rand functions, so random
// placement is tied to this world's RNG; also replaces `== false` with the
// idiomatic negation.
func (w *world) atWalkableTile() *Point {
	for {
		point := &Point{
			x: w.seed.Intn(w.dimensions.x),
			y: w.seed.Intn(w.dimensions.y),
		}
		if w.GetTile(point).isWalkable() {
			return point
		}
	}
}
/* Find an entity by coordinates
* FIXME: Can be improved by previously indexing entities
*/
func (w *world) entityAt(x, y int) autonomous {
for i := range w.entities.pool {
entity := w.entities.pool[i]
position := entity.Position()
if position.x == x && position.y == y {
return entity
}
}
return nil
} | world.go | 0.580114 | 0.47658 | world.go | starcoder |
package database
import (
"cloud.google.com/go/datastore"
"golang.org/x/net/context"
)
const (
	// datastoreKind is the Datastore entity kind under which all tweet
	// records are stored.
	datastoreKind = "Tweet"
)
// Dao defines the interface for the data access object that abstracts database interactions.
type Dao interface {
	// WriteCelebrityTweets persists the given tweets.
	WriteCelebrityTweets(tweets []Tweet) (err error)
	// GetCelebrityTweets returns the stored tweets for the named celebrity.
	GetCelebrityTweets(celebrityName string) (tweets []Tweet, err error)
	// DeleteAllTweetsForCelebrity removes every tweet for the named celebrity.
	DeleteAllTweetsForCelebrity(celebrityName string) (err error)
}
// DatastoreDao is a DAO for interacting with App Engine's Datastore.
// It implements the Dao interface.
type DatastoreDao struct {
	Ctx             context.Context // App Engine Context which can be obtained from an HTTP request.
	DatastoreClient *datastore.Client
}
// Tweet is an entity for storing data in the datastore.
type Tweet struct {
	CelebrityName string `json:",omitempty"`
	// Id is marshalled to and from JSON as a string (the `,string` option),
	// avoiding precision loss for 64 bit IDs in JavaScript clients.
	Id    int64 `json:",string"`
	Score int32
}
// NewDatastoreDao builds a DatastoreDao bound to ctx.
// NOTE(review): the GCP project ID "meantweets-1381" is hard-coded here;
// consider making it configurable.
func NewDatastoreDao(ctx context.Context) (datastoreDao DatastoreDao, err error) {
	client, err := datastore.NewClient(ctx, "meantweets-1381")
	if err != nil {
		return
	}
	datastoreDao = DatastoreDao{ctx, client}
	return
}
// WriteCelebrityTweets saves the slice of tweets to the database. Note that duplicates aren't
// caught here because of writing asynchronicity.
func (datastoreDao DatastoreDao) WriteCelebrityTweets(tweets []Tweet) (err error) {
	keys := make([]*datastore.Key, len(tweets))
	// `for i := range` replaces the unidiomatic `for i, _ := range`.
	for i := range keys {
		// Incomplete keys let Datastore allocate unique IDs on write.
		keys[i] = datastore.NewIncompleteKey(datastoreDao.Ctx, datastoreKind, nil)
	}
	_, err = datastoreDao.DatastoreClient.PutMulti(datastoreDao.Ctx, keys, tweets)
	return
}
// GetCelebrityTweets retrieves all the celebrity tweets related to a celebrity sorted with highest
// scores first. Duplicate tweets will be filtered out.
func (datastoreDao DatastoreDao) GetCelebrityTweets(celebrityName string) (tweets []Tweet,
	err error) {
	// "-Score" sorts descending so the highest scores come first, matching the
	// documented contract; the bare "Score" used previously sorts ascending.
	q := datastore.NewQuery(datastoreKind).
		Filter("CelebrityName = ", celebrityName).
		Order("-Score")
	var results []Tweet
	if _, err = datastoreDao.DatastoreClient.GetAll(datastoreDao.Ctx, q, &results); err != nil {
		return
	}
	// Keep only the first occurrence of each tweet ID.
	seen := make(map[int64]bool)
	for _, result := range results {
		if !seen[result.Id] {
			tweets = append(tweets, result)
			seen[result.Id] = true
		}
	}
	return
}
// DeleteAllTweetsForCelebrity deletes all tweets for a provided celebrity name.
func (datastoreDao DatastoreDao) DeleteAllTweetsForCelebrity(celebrityName string) (err error) {
	// A keys-only query avoids fetching entity payloads we are about to delete.
	q := datastore.NewQuery(datastoreKind).
		Filter("CelebrityName = ", celebrityName).
		KeysOnly()
	var keys []*datastore.Key
	if keys, err = datastoreDao.DatastoreClient.GetAll(datastoreDao.Ctx, q, nil); err != nil {
		return
	}
	err = datastoreDao.DatastoreClient.DeleteMulti(datastoreDao.Ctx, keys)
	return
}
// DaoMock provides a mock Dao for unit tests of files that depend on a Dao.
type DaoMock struct {
Tweets *[]Tweet // Use a pointer so all copies of DaoMock modify the same "database".
}
// WriteCelebrityTweet implementation for DaoMock.
func (dao DaoMock) WriteCelebrityTweets(tweets []Tweet) (err error) {
*dao.Tweets = append(*dao.Tweets, tweets...)
return
}
// GetCelebrityTweets implementation for DaoMock.
func (dao DaoMock) GetCelebrityTweets(celebrityName string) (tweets []Tweet, err error) {
for _, tweet := range *dao.Tweets {
if tweet.CelebrityName == celebrityName {
tweets = append(tweets, tweet)
}
}
return
}
// DeleteAllTweetsForCelebrity implementation for DaoMock.
func (dao DaoMock) DeleteAllTweetsForCelebrity(celebrityName string) (err error) {
newTweets := []Tweet{}
for _, tweet := range *dao.Tweets {
if tweet.CelebrityName != celebrityName {
newTweets = append(newTweets, tweet)
}
}
*dao.Tweets = newTweets
return
} | database/dao.go | 0.575827 | 0.406626 | dao.go | starcoder |
package navigation
import (
"math"
"github.com/furgbol/ai/model"
)
// CasteljauPathPlanner - This type implements the PathPlanner interface. It plans the path based on Casteljau's algorithm that uses Bézier curves.
type CasteljauPathPlanner struct {
	// NumberOfPathPoints determines the parameter step (1/NumberOfPathPoints)
	// along the Bézier curve.
	NumberOfPathPoints int
	// NumberOfUsedPoints is how many curve points are actually emitted.
	NumberOfUsedPoints int
	// DistanceFactor scales how far the control points sit from the endpoints
	// (distance / DistanceFactor).
	DistanceFactor float64
}
// NewCasteljauPathPlanner creates an instance of a Casteljau Path Planner.
func NewCasteljauPathPlanner(numberOfPathPoints, numberOfUsedPoints int, distanceFactor float64) *CasteljauPathPlanner {
	planner := new(CasteljauPathPlanner)
	planner.NumberOfPathPoints = numberOfPathPoints
	planner.NumberOfUsedPoints = numberOfUsedPoints
	planner.DistanceFactor = distanceFactor
	return planner
}
// PlanPath computes the Bézier path from initialPose to targetPose.
func (pathPlanner CasteljauPathPlanner) PlanPath(initialPose, targetPose model.Pose) Path {
	start := model.Position2D{
		X: initialPose.X,
		Y: initialPose.Y,
	}
	goal := model.Position2D{
		X: targetPose.X,
		Y: targetPose.Y,
	}
	distance := calculateDistanceBetweenTwoPoints(start, goal)
	// The two interior control points are derived from the poses' headings
	// and the straight-line distance between the endpoints.
	cp1, cp2 := pathPlanner.generateControlPoints(initialPose, targetPose, distance)
	return pathPlanner.getPath(initialPose, targetPose, cp1, cp2)
}
// calculateDistanceBetweenTwoPoints returns the Euclidean distance between
// the two points. math.Hypot computes sqrt(dx²+dy²) without the intermediate
// overflow/underflow of the naive Pow/Sqrt formulation.
func calculateDistanceBetweenTwoPoints(firstPoint, secondPoint model.Position2D) float64 {
	return math.Hypot(secondPoint.X-firstPoint.X, secondPoint.Y-firstPoint.Y)
}
// generateControlPoints derives the two interior Bézier control points: the
// first sits a fraction of the distance ahead of the initial pose along its
// heading, the second the same fraction behind the target pose (its heading
// reversed).
//
// Fixed: the reversed heading is obtained by adding math.Pi, not the literal
// 180 — the orientations are consumed by math.Cos/math.Sin and are therefore
// radians, so adding 180 (radians) pointed the control point in an
// essentially arbitrary direction.
func (pathPlanner CasteljauPathPlanner) generateControlPoints(initialPose, targetPose model.Pose, distance float64) (model.Position2D, model.Position2D) {
	reach := distance / pathPlanner.DistanceFactor
	firstPoint := model.Position2D{
		math.Cos(initialPose.Orientation)*reach + initialPose.X,
		math.Sin(initialPose.Orientation)*reach + initialPose.Y,
	}
	secondPoint := model.Position2D{
		math.Cos(targetPose.Orientation+math.Pi)*reach + targetPose.X,
		math.Sin(targetPose.Orientation+math.Pi)*reach + targetPose.Y,
	}
	return firstPoint, secondPoint
}
// getPath samples the cubic Bézier curve defined by the two endpoints and the
// two control points, emitting NumberOfUsedPoints samples at parameter steps
// of 1/NumberOfPathPoints. Note that t starts at the first step, not at 0, so
// the initial pose itself is not included.
// NOTE(review): if NumberOfUsedPoints > NumberOfPathPoints, t exceeds 1 and
// the curve is extrapolated — confirm the two fields are configured
// consistently.
func (pathPlanner CasteljauPathPlanner) getPath(initialPose, targetPose model.Pose, firstControlPoint, secondControlPoint model.Position2D) Path {
	path := NewPath(pathPlanner.NumberOfUsedPoints)
	factor := 1.0 / float64(pathPlanner.NumberOfPathPoints)
	t := 0.0
	for i := 0; i < pathPlanner.NumberOfUsedPoints; i++ {
		t += factor
		// Standard cubic Bézier basis: (1-t)³P0 + 3t(1-t)²C1 + 3t²(1-t)C2 + t³P1.
		path[i] = model.Position2D{
			X: float64(math.Pow((1-t), 3)*initialPose.X + 3*t*math.Pow((1-t), 2)*firstControlPoint.X + 3*math.Pow(t, 2)*(1-t)*secondControlPoint.X + math.Pow(t, 3)*targetPose.X),
			Y: float64(math.Pow((1-t), 3)*initialPose.Y + 3*t*math.Pow((1-t), 2)*firstControlPoint.Y + 3*math.Pow(t, 2)*(1-t)*secondControlPoint.Y + math.Pow(t, 3)*targetPose.Y),
		}
	}
	return path
}
package prioq
import (
"golang.org/x/exp/constraints"
"errors"
)
// CompareFunc is a generic function that compares two values and that should return true
// whenever those values should be swapped (i.e. a should sit below b in the heap).
type CompareFunc[T any] func(a T, b T) bool
// PrioQ represents a generic priority queue data structure
type PrioQ[T any] struct {
	// size is the number of elements currently in the heap; the elements
	// slice may be longer (Extract leaves removed elements past the end).
	size int
	// compare reports whether its first argument should sit below the second.
	compare func(a T, b T) bool
	// elements stores the heap in the implicit binary tree layout
	// (children of i at 2i+1 and 2i+2).
	elements []T
}
// New creates a new priority queue for elements in the Ordered constraint.
// The comparator a > b pushes larger values down, so the smallest element is
// extracted first (a min-priority queue).
func New[T constraints.Ordered](elements []T) *PrioQ[T] {
	return NewWithCompareFunc(elements, func(a T, b T) bool {
		return a > b
	})
}
// NewWithCompareFunc creates a new priority queue from a copy of elements,
// ordered by cf (cf(a, b) == true means a should sit below b). The input
// slice is copied, so the caller may keep using it after this call.
func NewWithCompareFunc[T any](elements []T, cf CompareFunc[T]) *PrioQ[T] {
	elems := make([]T, len(elements))
	copy(elems, elements)
	h := &PrioQ[T]{
		size:     len(elems),
		compare:  cf,
		elements: elems,
	}
	// Establish the heap invariant over the copied slice.
	h.heapify()
	return h
}
// heapify makes a heap of the slice in-place.
// It walks the internal nodes from the last parent (size/2 - 1) down to the
// root, swapping a node with its "ahead" child whenever compare says so.
// NOTE(review): this is not the classic Floyd build-heap — after a swap,
// `i = compareIndex + 1` combined with the trailing i-- revisits the swapped
// child rather than sifting fully down, and the single-child branch never
// re-descends at all. Verify the heap invariant holds for adversarial inputs.
func (h *PrioQ[T]) heapify() {
	i := h.size/2 - 1
	for i >= 0 {
		left := 2*i + 1
		right := left + 1
		if right > h.size-1 {
			// Look at only the left child (the right would be out of range).
			if h.compare(h.elements[i], h.elements[left]) {
				h.elements[i], h.elements[left] = h.elements[left], h.elements[i]
			}
		} else {
			// Look at both the left and right child and pick whichever
			// compares "ahead" of the other.
			rightIsLarger := h.compare(h.elements[left], h.elements[right])
			var compareIndex int
			if rightIsLarger {
				compareIndex = right
			} else {
				compareIndex = left
			}
			shouldSwap := h.compare(h.elements[i], h.elements[compareIndex])
			if shouldSwap {
				h.elements[i], h.elements[compareIndex] = h.elements[compareIndex], h.elements[i]
				if compareIndex < h.size/2 {
					// Revisit the swapped child next iteration: the i--
					// below brings i back to compareIndex.
					i = compareIndex + 1
				}
			}
		}
		i--
	}
}
// Insert adds a new element to the priority queue
// The time complexity is O(log(n)), n = # of elements in the priority queue
func (h *PrioQ[T]) Insert(x T) {
	h.elements = append(h.elements, x)
	h.size++
	// Sift the new element up while it compares ahead of its parent.
	for i := h.size - 1; i >= 0; {
		p := parent(i)
		if !h.compare(h.elements[p], h.elements[i]) {
			break
		}
		h.elements[p], h.elements[i] = h.elements[i], h.elements[p]
		i = p
	}
}
// Extract returns the element at the front of the queue
// It returns an errors whenever the queue is empty
// The time complexity is O(log(n)), n = # of elements in the queue
func (h *PrioQ[T]) Extract() (T, error) {
	if h.size == 0 {
		// Trick for getting a generic zero value
		var t T
		return t, errors.New("heap: empty, no element to extract")
	}
	// Move the root to the logical end and shrink the heap; the removed slot
	// stays in h.elements but is no longer part of the heap.
	h.elements[h.size-1], h.elements[0] = h.elements[0], h.elements[h.size-1]
	removedElem := h.elements[h.size-1]
	h.size--
	// Only one node left, no need to fix the heap
	if h.size == 1 {
		return removedElem, nil
	}
	// Fix the heap: sift the new root down while it compares behind its
	// "ahead" child.
	// NOTE(review): the bounds (i < h.size-1 and child < h.size/2-1) differ
	// from the textbook sift-down; verify they do not stop the descent one
	// level early near the bottom of the heap.
	i := 0
	for i < h.size-1 {
		child := h.largerChild(i)
		if h.compare(h.elements[i], h.elements[child]) {
			h.elements[i], h.elements[child] = h.elements[child], h.elements[i]
			if child < h.size/2-1 {
				i = child
			}
		} else {
			break
		}
	}
	return removedElem, nil
}
// IsEmpty indicates whether the queue is empty
func (h *PrioQ[T]) IsEmpty() bool {
	return h.size == 0
}
// Len returns the current size of the priority queue
// (the element count, not the capacity of the backing slice).
func (h *PrioQ[T]) Len() int {
	return h.size
}
// largerChild returns the index of the child of i that compares "ahead"
// (for the default a > b comparator this is the larger child). If only the
// left child is within the heap it is returned unconditionally.
// The caller must ensure i has at least one child inside the heap.
func (h *PrioQ[T]) largerChild(i int) int {
	left := 2*i + 1
	right := left + 1
	if right > h.size-1 {
		return left
	}
	if h.compare(h.elements[left], h.elements[right]) {
		return right
	}
	return left
}
// parent returns the index of the parent of node i in the implicit binary
// heap layout (children of i live at 2i+1 and 2i+2); parent(0) is 0.
func parent(i int) int {
	return (i - 1) / 2
}
package manifest
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"encoding/binary"
"fmt"
"math/big"
"github.com/tjfoc/gmsm/sm2"
)
// Key is a public key of an asymmetric crypto keypair.
type Key struct {
	// KeyAlg selects the algorithm (RSA / ECC / SM2) and fixes the layout of Data.
	KeyAlg Algorithm `json:"key_alg"`
	// Version is always 0x10 (enforced by the `require` tag).
	Version uint8 `require:"0x10" json:"key_version"`
	// KeySize is the key length; together with KeyAlg it determines len(Data)
	// (see keyDataSize).
	KeySize BitSize `json:"key_bitsize"`
	// Data holds the raw key material.
	Data []byte `countValue:"keyDataSize()" json:"key_data"`
}
// BitSize is a size in bits.
type BitSize uint16

// InBits returns the size in bits.
func (ks BitSize) InBits() uint16 {
	return uint16(ks)
}

// InBytes returns the size in bytes (bits / 8).
func (ks BitSize) InBytes() uint16 {
	return uint16(ks) >> 3
}

// SetInBits sets the size in bits.
func (ks *BitSize) SetInBits(amountOfBits uint16) {
	*ks = BitSize(amountOfBits)
}

// SetInBytes sets the size in bytes (stored as bits, i.e. bytes * 8).
func (ks *BitSize) SetInBytes(amountOfBytes uint16) {
	*ks = BitSize(amountOfBytes) << 3
}
// keyDataSize returns the expected length of Data for specified
// KeyAlg and KeySize; -1 means the algorithm has no defined layout.
func (k Key) keyDataSize() int64 {
	n := int64(k.KeySize.InBytes())
	switch k.KeyAlg {
	case AlgRSA:
		// modulus plus the 4-byte exponent prefix (see PubKey/SetPubKey)
		return n + 4
	case AlgECC, AlgSM2:
		// the X and Y coordinates, each KeySize bytes long
		return n * 2
	default:
		return -1
	}
}
// PubKey parses Data into crypto.PublicKey.
//
// Integer components in Data are stored byte-reversed, so each is
// passed through reverseBytes before being read as a big-endian big.Int.
func (k Key) PubKey() (crypto.PublicKey, error) {
	expectedSize := int(k.keyDataSize())
	if expectedSize < 0 {
		return nil, fmt.Errorf("unexpected algorithm: %s", k.KeyAlg)
	}
	if len(k.Data) != expectedSize {
		return nil, fmt.Errorf("unexpected size: expected:%d, received %d", expectedSize, len(k.Data))
	}
	switch k.KeyAlg {
	case AlgRSA:
		// layout: 4-byte exponent, then the reversed modulus
		result := &rsa.PublicKey{
			N: new(big.Int).SetBytes(reverseBytes(k.Data[4:])),
			E: int(binaryOrder.Uint32(k.Data)),
		}
		return result, nil
	case AlgECC:
		// layout: reversed X coordinate followed by reversed Y coordinate
		keySize := k.KeySize.InBytes()
		x := new(big.Int).SetBytes(reverseBytes(k.Data[:keySize]))
		y := new(big.Int).SetBytes(reverseBytes(k.Data[keySize:]))
		return ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}, nil
	case AlgSM2:
		// Same layout as ECC, but SM2 keys live on the SM2 curve, not
		// NIST P-256; constructing the key with elliptic.P256() (as the
		// previous code did) yields a key that fails SM2 operations.
		keySize := k.KeySize.InBytes()
		x := new(big.Int).SetBytes(reverseBytes(k.Data[:keySize]))
		y := new(big.Int).SetBytes(reverseBytes(k.Data[keySize:]))
		return sm2.PublicKey{Curve: sm2.P256Sm2(), X: x, Y: y}, nil
	}
	return nil, fmt.Errorf("unexpected TPM algorithm: %s", k.KeyAlg)
}
func reverseBytes(b []byte) []byte {
r := make([]byte, len(b))
for idx := range b {
r[idx] = b[len(b)-idx-1]
}
return r
}
// SetPubKey sets Data the value corresponding to passed `key`.
//
// Supported key types are *rsa.PublicKey, *ecdsa.PublicKey and
// *sm2.PublicKey; anything else yields an error. Version is always
// (re)set to 0x10.
func (k *Key) SetPubKey(key crypto.PublicKey) error {
	k.Version = 0x10
	switch key := key.(type) {
	case *rsa.PublicKey:
		k.KeyAlg = AlgRSA
		n := key.N.Bytes()
		k.KeySize.SetInBytes(uint16(len(n)))
		// layout: 4-byte exponent first, then the reversed modulus
		k.Data = make([]byte, 4+len(n))
		binaryOrder.PutUint32(k.Data, uint32(key.E))
		copy(k.Data[4:], reverseBytes(n))
		return nil
	case *ecdsa.PublicKey:
		k.KeyAlg = AlgECC
		return k.setECPointData(key, key.X, key.Y)
	case *sm2.PublicKey:
		k.KeyAlg = AlgSM2
		return k.setECPointData(key, key.X, key.Y)
	}
	return fmt.Errorf("unexpected key type: %T", key)
}

// setECPointData validates the (x, y) point of a 256-bit EC/SM2 public
// key and serializes it into Data as reversed X followed by reversed Y.
// The ECC and SM2 cases of SetPubKey previously duplicated this logic
// verbatim; key is only used in error messages.
func (k *Key) setECPointData(key crypto.PublicKey, x, y *big.Int) error {
	if x == nil || y == nil {
		return fmt.Errorf("the pubkey '%#+v' is invalid: x == nil || y == nil", key)
	}
	k.KeySize.SetInBits(256)
	xB, yB := x.Bytes(), y.Bytes()
	// NOTE(review): big.Int.Bytes() strips leading zero bytes, so a
	// valid key whose coordinate is < 2^248 fails this length check.
	// Preserved as-is to keep behavior identical -- confirm intent.
	if len(xB) != int(k.KeySize.InBytes()) || len(yB) != int(k.KeySize.InBytes()) {
		return fmt.Errorf("the pubkey '%#+v' is invalid: len(x)<%d> != %d || len(y)<%d> == %d",
			key, len(xB), int(k.KeySize.InBytes()), len(yB), int(k.KeySize.InBytes()))
	}
	k.Data = make([]byte, 2*k.KeySize.InBytes())
	copy(k.Data[:], reverseBytes(xB))
	copy(k.Data[len(xB):], reverseBytes(yB))
	return nil
}
// PrintBPMPubKey prints the BPM public signing key hash to fuse into the Intel ME.
func (k *Key) PrintBPMPubKey(bpmAlg Algorithm) error {
	if len(k.Data) <= 1 {
		fmt.Printf(" Boot Policy Pubkey Hash: No km public key set in KM\n")
		return nil
	}
	hash, err := bpmAlg.Hash()
	if err != nil {
		return err
	}
	switch k.KeyAlg {
	case AlgRSA:
		// skip the 4-byte exponent prefix; only the modulus is hashed
		hash.Write(k.Data[4:])
		fmt.Printf(" Boot Policy Manifest Pubkey Hash: 0x%x\n", hash.Sum(nil))
	case AlgSM2, AlgECC:
		hash.Write(k.Data)
		fmt.Printf(" Boot Policy Manifest Pubkey Hash: 0x%x\n", hash.Sum(nil))
	default:
		fmt.Printf(" Boot Policy Manifest Pubkey Hash: Unknown Algorithm\n")
	}
	return nil
}
//PrintKMPubKey prints the KM public signing key hash to fuse into the Intel ME
func (k *Key) PrintKMPubKey(kmAlg Algorithm) error {
buf := new(bytes.Buffer)
if len(k.Data) > 1 {
if k.KeyAlg == AlgRSA {
if err := binary.Write(buf, binary.LittleEndian, k.Data[4:]); err != nil {
return err
}
if err := binary.Write(buf, binary.LittleEndian, k.Data[:4]); err != nil {
return err
}
if kmAlg != AlgSHA256 {
return fmt.Errorf("KM public key hash algorithm must be SHA256")
}
hash, err := kmAlg.Hash()
if err != nil {
return err
}
hash.Write(buf.Bytes())
fmt.Printf(" Key Manifest Pubkey Hash: 0x%x\n", hash.Sum(nil))
} else {
fmt.Printf(" Key Manifest Pubkey Hash: Unsupported Algorithm\n")
}
} else {
fmt.Printf(" Key Manifest Pubkey Hash: No km public key set in KM\n")
}
return nil
} | pkg/intel/metadata/manifest/key.go | 0.729038 | 0.437944 | key.go | starcoder |
package ratelimit
import (
"errors"
"time"
)
// LimitChange implements the piecewise increase/decrease policy for a
// rate limiter's wait duration; it carries no state of its own.
type LimitChange struct {}
// Increase the rate limit.
//
// Increase expects states[0] to be a *RateLimiter; its Unit field is
// the step the piecewise function works around:
//   - no limit yet             -> one Unit
//   - limit below one Unit     -> doubled, capped at one Unit
//   - limit at/above one Unit  -> multiplied by 1.5 and truncated to
//     three decimal places of a Unit
func (l *LimitChange) Increase(limit time.Duration, states ...interface{}) (time.Duration, error) {
	if len(states) == 0 {
		return 0, errors.New("need a *RateLimiter to be passed in")
	}
	// The previous code asserted unconditionally (panicking on a wrong
	// type) and its error message claimed a time.Duration was expected.
	r, ok := states[0].(*RateLimiter)
	if !ok {
		return 0, errors.New("need a *RateLimiter to be passed in")
	}
	switch {
	case limit == 0:
		// We have no limit, so just give it one of the unit
		limit = 1 * r.Unit
	case limit < r.Unit:
		// Fractional unit: double it, but never exceed one full unit
		limit *= 2
		if limit > r.Unit {
			limit = r.Unit
		}
	default:
		// Multiply by 1.5x
		limit *= 3
		limit /= 2
		// Chop off everything after 3 decimal places of a unit; skip for
		// sub-microsecond units, where Unit/1000 == 0 would make the
		// modulo below panic.
		if step := r.Unit / 1000; step > 0 {
			limit -= limit % step
		}
	}
	return limit, nil
}
// Decrease the rate limit.
//
// Decrease expects states[0] to be a *RateLimiter. The limit shrinks by
// one Unit while above a Unit (floored at one Unit), and by the largest
// power-of-ten fraction of a Unit below it once at or under a Unit.
// A zero limit is returned unchanged.
func (l *LimitChange) Decrease(limit time.Duration, states ...interface{}) (time.Duration, error) {
	if len(states) == 0 {
		return 0, errors.New("need a *RateLimiter to be passed in")
	}
	// The previous code asserted unconditionally (panicking on a wrong
	// type) and its error message claimed a time.Duration was expected.
	r, ok := states[0].(*RateLimiter)
	if !ok {
		return 0, errors.New("need a *RateLimiter to be passed in")
	}
	switch {
	case limit == 0:
		// We have no limit so just leave it alone
		return limit, nil
	case limit > r.Unit:
		// Subtract a Unit from the wait limit, flooring at one Unit
		limit -= r.Unit
		if limit < r.Unit {
			limit = r.Unit
		}
	default:
		// Find the largest power-of-ten fraction of a Unit below limit.
		// The extra newUnit > 0 guard fixes an infinite loop: with a
		// negative limit the old loop spun forever once newUnit hit 0.
		newUnit := r.Unit / 10
		for newUnit > 0 && limit <= newUnit {
			newUnit /= 10
		}
		// Subtract by the new unit, flooring at that unit
		limit -= newUnit
		if limit < newUnit {
			limit = newUnit
		}
	}
	return limit, nil
}
// max returns the larger of a and b.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
func maxDuration(a, b time.Duration) time.Duration {
if a > b {
return a
}
return b
}
func minDuration(a, b time.Duration) time.Duration {
if a < b {
return a
}
return b
} | lib.go | 0.689828 | 0.447762 | lib.go | starcoder |
package timeext
import (
"math"
"time"
errors "github.com/weathersource/go-errors"
)
// timestampFormat is the layout used for all parsing and formatting in
// this package (RFC 3339).
const timestampFormat = time.RFC3339

// Timestamp validates t is formatted RFC 3339 and returns time object.
// The result is normalized to UTC; on parse failure an InvalidArgument
// error wrapping the underlying parse error is returned.
func Timestamp(t string) (timestamp time.Time, err error) {
	timestamp, err = time.Parse(timestampFormat, t)
	if err != nil {
		err = errors.NewInvalidArgumentError("Timestamp must be formatted RFC 3339: "+t, err)
		return
	}
	timestamp = timestamp.UTC()
	return
}
// TimestampRounded validates t is formatted RFC 3339 and returns a time
// object rounded down to the top of the UTC hour.
func TimestampRounded(t string) (time.Time, error) {
	parsed, err := Timestamp(t)
	if err != nil {
		return time.Time{}, err
	}
	hour := time.Date(parsed.Year(), parsed.Month(), parsed.Day(), parsed.Hour(), 0, 0, 0, time.UTC)
	return hour, nil
}
// TimestampRoundedString validates t is formatted RFC 3339 and returns a
// string formatted RFC 3339, rounded down to the top of the UTC hour.
func TimestampRoundedString(t string) (string, error) {
	rounded, err := TimestampRounded(t)
	if err != nil {
		return "", err
	}
	return rounded.Format(timestampFormat), nil
}
// TimestampRoundedQuarterHour validates t is formatted RFC 3339 and
// returns a time object rounded down to the nearest UTC quarter hour.
func TimestampRoundedQuarterHour(t string) (time.Time, error) {
	parsed, err := Timestamp(t)
	if err != nil {
		return time.Time{}, err
	}
	// integer-divide the minutes into 15-minute buckets
	quarter := parsed.Minute() / 15 * 15
	rounded := time.Date(parsed.Year(), parsed.Month(), parsed.Day(), parsed.Hour(), quarter, 0, 0, time.UTC)
	return rounded, nil
}
// TimestampRoundedQuarterHourString validates t is formatted RFC 3339 and
// returns a string formatted RFC 3339, rounded down to the nearest UTC
// quarter hour.
func TimestampRoundedQuarterHourString(t string) (string, error) {
	rounded, err := TimestampRoundedQuarterHour(t)
	if err != nil {
		return "", err
	}
	return rounded.Format(timestampFormat), nil
}
// HourCount returns the count of hours inclusively bounded by
// timestampStart and timestampEnd.
// Both arguments must be formatted RFC 3339, and the end must not
// precede the start.
func HourCount(timestampStart string, timestampEnd string) (int, error) {
	tStart, err := Timestamp(timestampStart)
	if err != nil {
		return 0, err
	}
	tEnd, err := Timestamp(timestampEnd)
	if err != nil {
		return 0, err
	}
	if tEnd.Before(tStart) {
		return 0, errors.NewInvalidArgumentError("Start Timestamp (" +
			timestampStart + ") must not be after End Timestamp (" + timestampEnd + ")")
	}
	// floor(elapsed hours) + 1 counts both endpoints
	return int(math.Floor(tEnd.Sub(tStart).Hours() + 1)), nil
}
// QuarterHourCount returns the count of quarter hours inclusively
// bounded by timestampStart and timestampEnd.
// Both arguments must be formatted RFC 3339; each is first rounded down
// to its quarter hour, and the end must not precede the start.
func QuarterHourCount(timestampStart string, timestampEnd string) (int, error) {
	tStart, err := TimestampRoundedQuarterHour(timestampStart)
	if err != nil {
		return 0, err
	}
	tEnd, err := TimestampRoundedQuarterHour(timestampEnd)
	if err != nil {
		return 0, err
	}
	if tEnd.Before(tStart) {
		return 0, errors.NewInvalidArgumentError("Start Timestamp (" +
			timestampStart + ") must not be after End Timestamp (" + timestampEnd + ")")
	}
	// elapsed seconds divided into 15-minute buckets, plus 1 for inclusivity
	return int((tEnd.Unix()-tStart.Unix())/(15*60) + 1), nil
}
// ToTimestamp converts a time object to a RFC 9993 timestamp string
func ToTimestamp(timestamp time.Time) string {
return timestamp.UTC().Format(timestampFormat)
} | timestamp.go | 0.845337 | 0.401688 | timestamp.go | starcoder |
package mud
import (
"bytes"
"encoding/binary"
"io"
"math"
"math/rand"
"strconv"
"strings"
"time"
"github.com/vmihailenco/msgpack"
)
// MessageType is a log message line type.
type MessageType int

// Message types for log items. The zero value is MESSAGESYSTEM.
const (
	MESSAGESYSTEM MessageType = iota // system-generated lines
	MESSAGECHAT                      // chat lines
	MESSAGEACTION                    // action lines
	MESSAGEACTIVITY                  // activity lines
)
// LogItem is individual chat log line.
type LogItem struct {
	// Message is the text of the line.
	Message string `json:""`
	// Author is who produced the line.
	Author string `json:""`
	// Timestamp is when the line was created.
	Timestamp time.Time `json:""`
	// MessageType classifies the line (see the MessageType constants).
	MessageType MessageType `json:""`
	// Location is where the event happened; may be nil.
	// NOTE(review): the tag option ",omit" is not recognized by
	// encoding/json ("omitempty" is the valid spelling), so this field
	// is always serialized -- confirm intent before changing the tag.
	Location *Point `json:",omit"`
}
// Point represents an (X,Y) pair in the world.
type Point struct {
	// X is the horizontal coordinate.
	X uint32
	// Y is the vertical coordinate.
	Y uint32
}
// Neighbor returns the point one step away in direction d.
func (p *Point) Neighbor(d Direction) Point {
	step := VectorForDirection[d]
	return p.Add(step)
}
// Add applies a vector to a point, converting through int so negative
// vector components move toward the origin.
func (p *Point) Add(v Vector) Point {
	x := int(p.X) + v.X
	y := int(p.Y) + v.Y
	return Point{X: uint32(x), Y: uint32(y)}
}
// Vector Gets the vector between two points such that v = p.Vector(q); p.Add(v) == q
func (p *Point) Vector(v Point) Vector {
	dx := int(v.X) - int(p.X)
	dy := int(v.Y) - int(p.Y)
	return Vector{X: dx, Y: dy}
}
// Bresenham uses Bresenham's algorithm to visit every involved frame.
// It walks the line from p to v, calling visitor on each point; a
// non-nil error from visitor stops the walk early.
func (p *Point) Bresenham(v Point, visitor func(Point) error) {
	x0, y0 := p.X, p.Y
	x1, y1 := v.X, v.Y
	// Normalize so the general case always iterates with increasing x.
	if x1 < x0 {
		x0, y0, x1, y1 = x1, y1, x0, y0
	}
	if x0 == x1 { // Vertical line
		if y0 > y1 {
			y0, y1 = y1, y0
		}
		for y := y0; y <= y1; y++ {
			if visitor(Point{X: x0, Y: y}) != nil {
				return
			}
		}
		return
	} else if y0 == y1 { // Horizontal line
		if x0 > x1 {
			x0, x1 = x1, x0
		}
		for x := x0; x <= x1; x++ {
			if visitor(Point{X: x, Y: y0}) != nil {
				return
			}
		}
		return
	}
	// Compute the deltas in float64 from the individual coordinates.
	// The previous code subtracted the uint32 values directly, so any
	// line with y1 < y0 underflowed to a huge positive delta: the slope
	// blew up and math.Signbit(float64(deltay)) could never report a
	// downward step, producing garbage for descending lines.
	deltax := float64(x1) - float64(x0)
	deltay := float64(y1) - float64(y0)
	deltaerr := math.Abs(deltay / deltax)
	err := float64(0.0)
	y := y0
	signDeltaY := int(1)
	if deltay < 0 {
		signDeltaY = -1
	}
	for x := x0; x <= x1; x++ {
		if visitor(Point{X: uint32(x), Y: uint32(y)}) != nil {
			return
		}
		err += deltaerr
		for err >= 0.5 {
			y = uint32(int(y) + signDeltaY)
			err -= 1.0
		}
	}
}
// Vector is for doing point-to-point comparisons.
type Vector struct {
	// X is the signed horizontal offset.
	X int
	// Y is the signed vertical offset.
	Y int
}
// Add combines two vectors component-wise.
func (v *Vector) Add(p Vector) Vector {
	sum := Vector{X: v.X + p.X, Y: v.Y + p.Y}
	return sum
}
// Magnitude returns the euclidean length of the vector, truncated to an
// unsigned integer.
func (v *Vector) Magnitude() uint {
	dx := float64(v.X)
	dy := float64(v.Y)
	return uint(math.Sqrt(math.Pow(dx, 2.0) + math.Pow(dy, 2.0)))
}
// ToBytes flushes point to buffer as little-endian binary.
// NOTE(review): the binary.Write error is discarded -- harmless for an
// in-memory bytes.Buffer, but lossy for fallible writers.
func (p *Point) ToBytes(buf io.Writer) {
	binary.Write(buf, binary.LittleEndian, p)
}
// Bytes dumps a point into a byte array.
func (p *Point) Bytes() []byte {
	var out bytes.Buffer
	p.ToBytes(&out)
	return out.Bytes()
}
// PointFromBytes rehydrates a point struct from its binary encoding.
func PointFromBytes(ptBytes []byte) Point {
	reader := bytes.NewBuffer(ptBytes)
	return PointFromBuffer(reader)
}
// PointFromBuffer pulls a point from a byte stream.
// NOTE(review): the binary.Read error is discarded, so a short or
// malformed stream yields a zero-valued Point -- confirm callers accept that.
func PointFromBuffer(buf io.Reader) Point {
	var pt Point
	binary.Read(buf, binary.LittleEndian, &pt)
	return pt
}
// Box represents a Box, ya dingus
type Box struct {
	// TopLeft is the corner with the smallest X and Y.
	TopLeft Point
	// BottomRight is the corner with the largest X and Y; edges are
	// inclusive (see WidthAndHeight's +1).
	BottomRight Point
}
// BoxFromCoords returns a box from two corner coordinates, normalizing
// them so TopLeft <= BottomRight on both axes.
func BoxFromCoords(x1, y1, x2, y2 uint32) Box {
	if x2 < x1 {
		x1, x2 = x2, x1
	}
	if y2 < y1 {
		y1, y2 = y2, y1
	}
	topLeft := Point{x1, y1}
	bottomRight := Point{x2, y2}
	return Box{topLeft, bottomRight}
}
// BoxFromCenteraAndWidthAndHeight takes a centroid and dimensions.
// NOTE(review): centers closer than width/2 or height/2 to the origin
// underflow the unsigned subtraction -- confirm callers avoid that.
func BoxFromCenteraAndWidthAndHeight(center *Point, width, height uint32) Box {
	tl := Point{center.X - width/2, center.Y - height/2}
	br := Point{tl.X + width, tl.Y + height}
	return Box{tl, br}
}
// WidthAndHeight returns a width, height tuple (both edges inclusive).
func (b *Box) WidthAndHeight() (uint32, uint32) {
	w := b.BottomRight.X - b.TopLeft.X + 1
	h := b.BottomRight.Y - b.TopLeft.Y + 1
	return w, h
}
// ContainsPoint checks point membership, with edges inclusive.
func (b *Box) ContainsPoint(p *Point) bool {
	inX := p.X >= b.TopLeft.X && p.X <= b.BottomRight.X
	inY := p.Y >= b.TopLeft.Y && p.Y <= b.BottomRight.Y
	return inX && inY
}
// Corners return the corners of a box, clockwise from top-left.
func (b *Box) Corners() (Point, Point, Point, Point) {
	topRight := Point{b.BottomRight.X, b.TopLeft.Y}
	bottomLeft := Point{b.TopLeft.X, b.BottomRight.Y}
	return b.TopLeft, topRight, b.BottomRight, bottomLeft
}
// Neighbor returns a box in that direction.
// The result is the same size as b, shifted by exactly one box width or
// height; an unknown direction returns b unchanged.
func (b *Box) Neighbor(d Direction) Box {
	width, height := b.WidthAndHeight()
	switch d {
	case DIRECTIONNORTH:
		return Box{Point{b.TopLeft.X, b.TopLeft.Y - height}, Point{b.BottomRight.X, b.BottomRight.Y - height}}
	case DIRECTIONEAST:
		return Box{Point{b.TopLeft.X + width, b.TopLeft.Y}, Point{b.BottomRight.X + width, b.BottomRight.Y}}
	case DIRECTIONSOUTH:
		return Box{Point{b.TopLeft.X, b.TopLeft.Y + height}, Point{b.BottomRight.X, b.BottomRight.Y + height}}
	case DIRECTIONWEST:
		return Box{Point{b.TopLeft.X - width, b.TopLeft.Y}, Point{b.BottomRight.X - width, b.BottomRight.Y}}
	}
	return *b
}
// Center returns the point in the middle of the box.
func (b *Box) Center() Point {
	w, h := b.WidthAndHeight()
	return Point{b.TopLeft.X + w/2, b.TopLeft.Y + h/2}
}
// Door returns a point on the middle of the edge, useful for doors.
// An unknown direction falls back to the box center.
func (b *Box) Door(d Direction) Point {
	width, height := b.WidthAndHeight()
	switch d {
	case DIRECTIONNORTH:
		return Point{b.TopLeft.X + width/2, b.TopLeft.Y}
	case DIRECTIONEAST:
		return Point{b.BottomRight.X, b.TopLeft.Y + height/2}
	case DIRECTIONSOUTH:
		return Point{b.TopLeft.X + width/2, b.BottomRight.Y}
	case DIRECTIONWEST:
		return Point{b.TopLeft.X, b.TopLeft.Y + height/2}
	}
	return b.Center()
}
// Coordinates returns x1 y1 x2 y2.
func (b *Box) Coordinates() (uint32, uint32, uint32, uint32) {
	tl, br := b.TopLeft, b.BottomRight
	return tl.X, tl.Y, br.X, br.Y
}
// Direction is a cardinal direction.
type Direction byte

// Cardinal directions, in clockwise order starting at north.
const (
	DIRECTIONNORTH Direction = iota
	DIRECTIONEAST
	DIRECTIONSOUTH
	DIRECTIONWEST
)

// ToTheRight gives the direction to the right of the current one
// (a 90-degree clockwise turn). Unknown values map to north.
func ToTheRight(d Direction) Direction {
	switch d {
	case DIRECTIONNORTH:
		return DIRECTIONEAST
	case DIRECTIONEAST:
		return DIRECTIONSOUTH
	case DIRECTIONSOUTH:
		// was DIRECTIONEAST, which broke the clockwise rotation and made
		// ToTheLeft(ToTheRight(south)) != south
		return DIRECTIONWEST
	case DIRECTIONWEST:
		return DIRECTIONNORTH
	}
	return DIRECTIONNORTH
}

// ToTheLeft gives the direction to the left of the current one
// (a 90-degree counter-clockwise turn). Unknown values map to north.
func ToTheLeft(d Direction) Direction {
	switch d {
	case DIRECTIONNORTH:
		return DIRECTIONWEST
	case DIRECTIONWEST:
		return DIRECTIONSOUTH
	case DIRECTIONSOUTH:
		return DIRECTIONEAST
	case DIRECTIONEAST:
		return DIRECTIONNORTH
	}
	return DIRECTIONNORTH
}
// VectorForDirection maps directions to a distance vector (populated in init).
var VectorForDirection map[Direction]Vector

// DirectionForVector maps vectors to directions; it is the inverse of
// VectorForDirection (populated in init).
var DirectionForVector map[Vector]Direction
// LoadResources loads data for the game.
// Paths are relative to the process working directory.
func LoadResources() {
	loadCreatureTypes("./bestiary.json")
	loadItemTypes("./items.json")
	loadTerrainTypes("./terrain.json")
}
// transitionName pairs a transition's name with its selection weight.
type transitionName struct {
	name   string
	weight int
}
// makeTransitionGradient parses entries of the form "name[:weight]"
// (weight defaults to 1) and returns the weighted entries, the summed
// total weight, and the bare names in input order.
// A non-numeric weight panics via the strconv.Atoi error.
func makeTransitionGradient(transitionList []string) ([]transitionName, int, []string) {
	total := 0
	transitionInternalList := make([]transitionName, 0)
	returnTransitionList := make([]string, 0)
	for _, transition := range transitionList {
		// split off an optional ":weight" suffix; SplitN keeps any
		// further colons inside the weight string
		splitString := strings.SplitN(transition, ":", 2)
		weightString := "1"
		returnTransitionList = append(returnTransitionList, splitString[0])
		if (len(splitString)) > 1 {
			weightString = splitString[1]
		}
		weight, err := strconv.Atoi(weightString)
		if err != nil {
			panic(err)
		}
		transitionInternalList = append(transitionInternalList, transitionName{name: splitString[0], weight: weight})
		total += weight
	}
	return transitionInternalList, total, returnTransitionList
}
// MakeGradientTransitionFunction helps build Markov chains.
// The returned closure maps a fraction (expected in [0,1)) onto the
// cumulative weight distribution of transitionList, returning the
// matching name; inputs at or beyond the total fall back to the last entry.
// NOTE(review): an empty transitionList makes that fallback index out of
// range -- confirm callers never pass one.
func MakeGradientTransitionFunction(transitionList []string) func(float64) string {
	transitionInternalList, total, _ := makeTransitionGradient(transitionList)
	return func(inNumber float64) string {
		endWeight := float64(total) * inNumber
		weight := float64(0)
		for _, item := range transitionInternalList {
			weight += float64(item.weight)
			if weight > endWeight {
				return item.name
			}
		}
		// inNumber >= 1 (or rounding) walked past every bucket
		return transitionInternalList[len(transitionInternalList)-1].name
	}
}
// MakeTransitionFunction helps build Markov chains.
// The returned closure picks a random entry proportionally to its
// weight (empty string when the list is empty); the second return value
// is the list of bare names. The name parameter is currently unused.
func MakeTransitionFunction(name string, transitionList []string) (func() string, []string) {
	entries, total, names := makeTransitionGradient(transitionList)
	pick := func() string {
		if len(entries) == 0 {
			return ""
		}
		target := rand.Int() % total
		running := 0
		for _, entry := range entries {
			running += entry.weight
			if running > target {
				return entry.name
			}
		}
		return ""
	}
	return pick, names
}
// MSGPack packs to msgpack using JSON rules
// (UseJSONTag makes the encoder honor `json` struct tags for field names).
func MSGPack(target interface{}) ([]byte, error) {
	var outBuffer bytes.Buffer
	writer := msgpack.NewEncoder(&outBuffer)
	writer.UseJSONTag(true)
	err := writer.Encode(target)
	return outBuffer.Bytes(), err
}
// MSGUnpack unpacks from msgpack using JSON rules
// (UseJSONTag makes the decoder honor `json` struct tags for field names).
// outItem must be a pointer.
func MSGUnpack(inBytes []byte, outItem interface{}) error {
	var inBuffer = bytes.NewBuffer(inBytes)
	reader := msgpack.NewDecoder(inBuffer)
	reader.UseJSONTag(true)
	err := reader.Decode(outItem)
	return err
}
// init builds the direction<->vector lookup tables. DirectionForVector
// is derived by inverting VectorForDirection, so the two always agree.
func init() {
	VectorForDirection = map[Direction]Vector{
		DIRECTIONNORTH: Vector{X: 0, Y: -1},
		DIRECTIONEAST:  Vector{X: 1, Y: 0},
		DIRECTIONSOUTH: Vector{X: 0, Y: 1},
		DIRECTIONWEST:  Vector{X: -1, Y: 0}}
	DirectionForVector = make(map[Vector]Direction)
	for k, v := range VectorForDirection {
		DirectionForVector[v] = k
	}
}
package ent
import (
"fmt"
"strings"
"entgo.io/ent/dialect/sql"
"github.com/joelschutz/gomecoma/src/ent/rating"
)
// Rating is the model entity for the Rating schema.
// NOTE(review): this file follows the shape of ent's generated code;
// if it is in fact generated, change the schema and regenerate rather
// than editing it by hand.
type Rating struct {
	config `json:"-"`
	// ID of the ent.
	ID int `json:"id,omitempty"`
	// Origin holds the value of the "origin" field.
	Origin string `json:"origin,omitempty"`
	// OriginalRating holds the value of the "original_rating" field.
	OriginalRating string `json:"original_rating,omitempty"`
	// NormalizedRating holds the value of the "normalized_rating" field.
	NormalizedRating int `json:"normalized_rating,omitempty"`
	// movie_ratings holds the foreign key to the owning movie row; nil
	// when the edge was not loaded or is unset.
	movie_ratings *int
}
// scanValues returns the types for scanning values from sql.Rows.
// Each known column name maps to a fresh sql.Null* holder matching its
// SQL type; an unknown column is an error.
func (*Rating) scanValues(columns []string) ([]interface{}, error) {
	values := make([]interface{}, len(columns))
	for i := range columns {
		switch columns[i] {
		case rating.FieldID, rating.FieldNormalizedRating:
			values[i] = new(sql.NullInt64)
		case rating.FieldOrigin, rating.FieldOriginalRating:
			values[i] = new(sql.NullString)
		case rating.ForeignKeys[0]: // movie_ratings
			values[i] = new(sql.NullInt64)
		default:
			return nil, fmt.Errorf("unexpected column %q for type Rating", columns[i])
		}
	}
	return values, nil
}
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Rating fields. values must be the holders produced by
// scanValues for the same columns slice; NULL columns leave the
// corresponding field at its zero value.
func (r *Rating) assignValues(columns []string, values []interface{}) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case rating.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			r.ID = int(value.Int64)
		case rating.FieldOrigin:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field origin", values[i])
			} else if value.Valid {
				r.Origin = value.String
			}
		case rating.FieldOriginalRating:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field original_rating", values[i])
			} else if value.Valid {
				r.OriginalRating = value.String
			}
		case rating.FieldNormalizedRating:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field normalized_rating", values[i])
			} else if value.Valid {
				r.NormalizedRating = int(value.Int64)
			}
		case rating.ForeignKeys[0]:
			// foreign key to the owning movie; stored as a pointer so an
			// unset edge stays nil
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for edge-field movie_ratings", value)
			} else if value.Valid {
				r.movie_ratings = new(int)
				*r.movie_ratings = int(value.Int64)
			}
		}
	}
	return nil
}
// Update returns a builder for updating this Rating.
// The builder reuses this entity's config (and thus its driver).
// Note that you need to call Rating.Unwrap() before calling this method if this Rating
// was returned from a transaction, and the transaction was committed or rolled back.
func (r *Rating) Update() *RatingUpdateOne {
	return (&RatingClient{config: r.config}).UpdateOne(r)
}
// Unwrap unwraps the Rating entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// It panics when the entity was not loaded through a transaction.
func (r *Rating) Unwrap() *Rating {
	tx, ok := r.config.driver.(*txDriver)
	if !ok {
		panic("ent: Rating is not a transactional entity")
	}
	r.config.driver = tx.drv
	return r
}
// String implements the fmt.Stringer.
func (r *Rating) String() string {
	var b strings.Builder
	fmt.Fprintf(&b, "Rating(id=%v", r.ID)
	b.WriteString(", origin=" + r.Origin)
	b.WriteString(", original_rating=" + r.OriginalRating)
	fmt.Fprintf(&b, ", normalized_rating=%v)", r.NormalizedRating)
	return b.String()
}
// Ratings is a parsable slice of Rating.
type Ratings []*Rating

// config propagates cfg to every entity in the slice so later queries
// on them use the same driver.
func (r Ratings) config(cfg config) {
	for _i := range r {
		r[_i].config = cfg
	}
}
package helpers
import (
"errors"
"reflect"
"sort"
)
// GetTotalArea calculates the total area covered by the rectangles,
// counting overlapping regions once.
//
// Zero-area rectangles are filtered out up front; rectangles with
// negative area still surface as an error from splitRectsByX.
func GetTotalArea(rects []Rectangle) (int, error) {
	nonZeroRects := Filter(rects, func(rect Rectangle) bool {
		return rect.Area() != 0
	})
	dividers := getUniqueSortedXSlice(nonZeroRects)
	// BUGFIX: split the filtered list. The previous code passed the raw
	// `rects`, so any zero-area rectangle tripped splitRectsByX's
	// non-positive-area error, defeating the filter above.
	splitted, err := splitRectsByX(nonZeroRects, dividers)
	if err != nil {
		return 0, err
	}
	combined, err := combineRectsOnY(splitted, dividers)
	if err != nil {
		return 0, err
	}
	totalArea := 0
	for i := range combined {
		totalArea += combined[i].Area()
	}
	return totalArea, nil
}
// splitAtX splits the rectangle into two at the vertical line x. If x
// lies outside the rectangle's horizontal span, it is returned whole.
func (r *Rectangle) splitAtX(x int) []Rectangle {
	if x <= r.X1 || x >= r.X2 {
		return []Rectangle{*r}
	}
	left := Rectangle{X1: r.X1, Y1: r.Y1, X2: x, Y2: r.Y2}
	right := Rectangle{X1: x, Y1: r.Y1, X2: r.X2, Y2: r.Y2}
	return []Rectangle{left, right}
}
// getUniqueSortedXSlice collects every distinct X1/X2 edge coordinate
// of the rectangles and returns them sorted ascending.
func getUniqueSortedXSlice(rects []Rectangle) []int {
	seen := make(map[int]bool)
	for _, r := range rects {
		seen[r.X1] = true
		seen[r.X2] = true
	}
	xs := make([]int, 0, len(seen))
	for x := range seen {
		xs = append(xs, x)
	}
	sort.Ints(xs)
	return xs
}
// splitRectsByX slices every rectangle along each vertical divider
// line, returning the resulting fragments. Rectangles with
// non-positive area are rejected with an error.
func splitRectsByX(rects []Rectangle, dividers []int) ([]Rectangle, error) {
	for i := range rects {
		if rects[i].Area() <= 0 {
			return nil, errors.New("one of the Rectangles has non-positive area")
		}
	}
	current := rects
	for _, x := range dividers {
		var next []Rectangle
		for _, r := range current {
			next = append(next, r.splitAtX(x)...)
		}
		current = next
	}
	return current, nil
}
// combineRectsOnY merges, column by column (one column per divider),
// fragments that share the same left edge and vertically intersect, so
// each covered region is represented exactly once in the result.
func combineRectsOnY(rects []Rectangle, dividers []int) ([]Rectangle, error) {
	var totalCombined []Rectangle
	for _, x := range dividers {
		// all fragments whose left edge sits on this divider
		filtered := Filter(rects, func(rect Rectangle) bool {
			return rect.X1 == x
		})
		// sort bottom-to-top so one sweep can merge adjacent fragments
		sort.Slice(filtered, func(i int, j int) bool {
			return filtered[i].Y1 < filtered[j].Y1
		})
		if len(filtered) == 0 {
			continue
		}
		first := filtered[0]
		last := filtered[len(filtered)-1]
		// single fragment (first == last): nothing to merge
		if reflect.DeepEqual(first, last) {
			totalCombined = append(totalCombined, first)
			continue
		}
		prev := first
		for _, r := range filtered {
			// all fragments in one column must span the same X range
			if prev.Width() != r.Width() {
				return nil, errors.New("two rectangles with same X has different width")
			}
			intersects, _ := prev.Intersect(r)
			if intersects {
				// overlapping/adjacent in Y: grow the running rectangle
				prev = *prev.Union(r)
			} else {
				// gap in Y: flush the running rectangle and start a new one
				totalCombined = append(totalCombined, prev)
				prev = r
			}
		}
		totalCombined = append(totalCombined, prev)
	}
	return totalCombined, nil
}
package drawing
import (
"github.com/rs/zerolog"
"image"
"image/color"
)
// Canvas to draw on.
type Canvas struct {
	// img is the backing image being drawn over.
	img image.Image
	// log is the canvas' logger.
	log zerolog.Logger
}

// Pixel is an x,y point on the image.
type Pixel struct {
	// X and Y are image coordinates.
	X, Y int
}
// Line is an array of pixels that we'll create from the approximate changes.
type Line []*Pixel

// Lines is a nicer way of declaring an array of line objects.
type Lines []Line

// CanSet is our quick interface that allows us to use the Set() function from the various types used to make an image.Image.
type CanSet interface {
	Set(x, y int, c color.Color)
}

// ColorCount is the counts of each range.
type ColorCount struct {
	// Count is the number of pixels attributed to Range.
	Count uint32
	// Range is the matched color range.
	Range *ColorRange
}

// ChangeMap is the map of changes we made by named range.
type ChangeMap map[string][]*Pixel

// ColorMap is a map of colors to their color counts.
type ColorMap map[color.RGBA]*ColorCount
// ColorItem is for counting each color item.
type ColorItem struct {
	// Count is how many times Color was counted.
	Count uint32
	// Color is the counted color.
	Color color.RGBA
}

// ColorList is a nice data type for an array of color items.
// It implements sort.Interface.
type ColorList []ColorItem

// Len returns the length of the ColorList.
func (c ColorList) Len() int { return len(c) }

// Swap swaps two values in the color list array.
func (c ColorList) Swap(i, j int) { c[i], c[j] = c[j], c[i] }

// Less reports whether element i sorts before element j. Note the
// comparison is by descending Count (it uses >), so sorting a
// ColorList puts the most frequent colors first.
func (c ColorList) Less(i, j int) bool { return c[i].Count > c[j].Count }
// RangeItem is an item in a range.
type RangeItem struct{}

// RangeList is a nicer way of declaring an array of RangeItems.
type RangeList []RangeItem

// ColorRange holds min/max values for colors.
// NOTE(review): whether the bounds are inclusive depends on the
// matching code, which is not in this file -- confirm before relying on it.
type ColorRange struct {
	// The R, G, B minimum and maximum range to
	// match this color.
	RMin uint8
	RMax uint8
	GMin uint8
	GMax uint8
	BMin uint8
	BMax uint8
	// The name given to this range
	Name string
	// If we replace this color with another
	Replace bool
	// The RGBA to replace the input color with
	Make color.RGBA
}

// ColorRanges is a nicer way of declaring an array of ColorRange.
type ColorRanges []ColorRange
package mathut
import (
"fmt"
"github.com/colt3k/utils/stats"
"math"
"strconv"
)
// Round rounds x to the nearest multiple of unit
// (halves round away from zero, per math.Round).
func Round(x, unit float64) float64 {
	scaled := math.Round(x / unit)
	return scaled * unit
}
/*
The format fmt is one of
'b' (-ddddp±ddd, a binary exponent),
'e' (-d.dddde±dd, a decimal exponent),
'E' (-d.ddddE±dd, a decimal exponent),
'f' (-ddd.dddd, no exponent),
'g' ('e' for large exponents, 'f' otherwise), or
'G' ('E' for large exponents, 'f' otherwise).
*/

// FmtFloat formats a float to a string using the shortest exact
// decimal representation ('f' verb, precision -1).
func FmtFloat(val float64) string {
	return FmtFloatWithPrecision(val, -1)
}

// FmtFloatWithPrecision formats a float with the given number of
// digits after the decimal point; -1 means "as few digits as needed".
func FmtFloatWithPrecision(val float64, precision int) string {
	formatted := strconv.FormatFloat(val, 'f', precision, 64)
	return formatted
}

// FmtFloatExponentiation formats a float in upper-case scientific
// notation (the 'E' verb).
func FmtFloatExponentiation(val float64) string {
	formatted := strconv.FormatFloat(val, 'E', -1, 64)
	return formatted
}
// FmtInt formats int to string.
//
// The previous implementation switched on strconv.IntSize, but both
// branches were equivalent: strconv.Itoa is exact for any int width,
// so the dispatch was dead weight.
func FmtInt(val int) string {
	return strconv.Itoa(val)
}
// ParseFloat parses val as a floating point number of the given bit
// size (32 or 64).
func ParseFloat(val string, sz int) (float64, error) {
	parsed, err := strconv.ParseFloat(val, sz)
	return parsed, err
}
// IntSize reports the width in bits of the int type on this platform
// (32 or 64).
func IntSize() int {
	return strconv.IntSize
}
// ParseInt converts val to a base-10 int64.
// The parse error is discarded: malformed input yields 0, while
// out-of-range input yields the clamped MaxInt64/MinInt64 that
// strconv.ParseInt returns alongside ErrRange.
func ParseInt(val string) int64 {
	parsed, _ := strconv.ParseInt(val, 10, 64)
	return parsed
}
/*
Percentile returns percentile rounded to 4 positions after decimal
i.e.
Median percentile = .5
90th Percentile = .90
99th Percentile = .99
*/
func Percentile(vals []float64, percentile float64) (float64, error) {
	sample := stats.Sample{Xs: vals}
	raw := sample.Percentile(percentile)
	// round to 4 decimal places by formatting and re-parsing
	rounded, err := strconv.ParseFloat(fmt.Sprintf("%.4f", raw), 64)
	if err != nil {
		return 0, err
	}
	return rounded, nil
}
// Median returns the 50th percentile rounded to 4 positions after decimal.
func Median(vals []float64) (float64, error) {
	const half = 0.5
	return Percentile(vals, half)
}
// PercentDiff returns the percentage difference between two values,
// rounded to the nearest whole percent: |a-b| / mean(a,b) * 100.
// Equal inputs return 0 (which also covers (0, 0), where the previous
// version divided by zero and returned NaN), and a zero mean with
// unequal inputs returns 0 instead of the previous +Inf. Negative
// results, possible with negative inputs, are clamped to 0 as before.
func PercentDiff(val1, val2 float64) float64 {
	if val1 == val2 {
		return 0
	}
	mean := (val1 + val2) / 2
	if mean == 0 {
		// symmetric values like (x, -x); the formula would yield +Inf
		return 0
	}
	pctDiff := (math.Abs(val1-val2) / mean) * 100
	if pctDiff <= 0 {
		return 0
	}
	return math.Round(pctDiff)
}
/* Bitwise operations
1101 =
1*2³ + 1*2² + 0*2¹ + 1*2⁰ =
8 + 4 + 0 + 1 = 13
The ^ operator does a bitwise complement, flips bits from 1 to 0 and 0 to 1
for example with 3 unsigned bits, ^(101) = 010
The >> is the right shift operator, a right shift moves all of the bits to the right,
dropping bits off the right and inserting zeros on the left.
Example: 3 unsigned bits, 101 >> 2 = 001
The << is the left shift operator just like the right except that bits shift
the opposite direction. Example: 101 << 2 = 100
Here is how these operators are used in the strconv.IntSize expression
Expression 32 bit representation 64 bit representation
uint(0) 00...00 (32 zeros) 0000...0000 (64 zeros)
^uint(0) 11...11 (32 ones) 1111...1111 (64 ones)
(^uint(0) >> 63) 00...00 = 0 0000...0001 = 1
32 00...100000 0000...100000
32 << (^uint(0) >> 63) 32 << (0) 32 << (1)
= 100000 << 0 = 100000 << 1
= 32 = 1000000
= 64
In other words
1. Start with 0
2. ^ to flip all bits to 1
3. Right shift (>>) by 63 to only keep a single 1 from 64-bit numbers and zero out 32-bit numbers.
4. Left shift (<<) 32 by whatever the result is.
5. This leaves 32 on architectures that use 32-bit integer representations and 64 for 64-bit architectures.
*/ | mathut/math.go | 0.608245 | 0.529324 | math.go | starcoder |
package geom
/*
#include "geos.h"
*/
import "C"
import "errors"
// CoordinateSeq wraps a GEOS C coordinate sequence (an ordered list of
// X/Y/Z ordinate tuples used to build geometries). The underlying memory
// is C-allocated and not garbage collected; call Destroy when done.
type CoordinateSeq struct {
	CSeq *C.GEOSCoordSequence
}
// initCoordSeq allocates a new C coordinate sequence with the given number
// of elements (size) and ordinates per element (dims, typically 2 or 3).
// The caller owns the result and must release it with Destroy.
func initCoordSeq(size int, dims int) (*CoordinateSeq, error) {
	seq := C.GEOSCoordSeq_create(C.uint(size), C.uint(dims))
	if seq == nil {
		return nil, errors.New("Could not create coordinate sequence")
	}
	return &CoordinateSeq{
		CSeq: seq,
	}, nil
}
// SetX sets the X (longitude) ordinate of the element at idx.
// The C status code is ignored, so an out-of-range idx fails silently.
func (cs *CoordinateSeq) SetX(idx uint, x float64) {
	C.GEOSCoordSeq_setX(cs.CSeq, C.uint(idx), C.double(x))
}
// SetY sets the Y (latitude) ordinate of the element at idx.
// The C status code is ignored, so an out-of-range idx fails silently.
func (cs *CoordinateSeq) SetY(idx uint, y float64) {
	C.GEOSCoordSeq_setY(cs.CSeq, C.uint(idx), C.double(y))
}
// SetZ sets the Z (altitude) ordinate of the element at idx.
// The C status code is ignored, so an out-of-range idx fails silently.
func (cs *CoordinateSeq) SetZ(idx uint, z float64) {
	C.GEOSCoordSeq_setZ(cs.CSeq, C.uint(idx), C.double(z))
}
// GetX returns the X ordinate at the given index.
// NOTE(review): a failed C call is reported as 0.0, which is
// indistinguishable from a genuine zero coordinate — confirm callers can
// tolerate this.
func (cs *CoordinateSeq) GetX(idx uint) float64 {
	var val C.double
	res := C.GEOSCoordSeq_getX(cs.CSeq, C.uint(idx), &val)
	if res == 0 {
		return 0.0
	}
	return float64(val)
}
// GetY returns the Y ordinate at the given index, or 0.0 when the C call
// fails (see the note on GetX).
func (cs *CoordinateSeq) GetY(idx uint) float64 {
	var val C.double
	res := C.GEOSCoordSeq_getY(cs.CSeq, C.uint(idx), &val)
	if res == 0 {
		return 0.0
	}
	return float64(val)
}
// GetZ returns the Z ordinate at the given index, or 0.0 when the C call
// fails (see the note on GetX).
func (cs *CoordinateSeq) GetZ(idx uint) float64 {
	var val C.double
	res := C.GEOSCoordSeq_getZ(cs.CSeq, C.uint(idx), &val)
	if res == 0 {
		return 0.0
	}
	return float64(val)
}
// GetSize returns the number of coordinates in the sequence, or 0 when the
// underlying C call fails.
func (cs *CoordinateSeq) GetSize() uint {
	var size C.uint
	res := C.GEOSCoordSeq_getSize(cs.CSeq, &size)
	if res == 0 {
		return 0
	}
	return uint(size)
}
// Destroy clears the coordinate sequence allocations from the memory
func (cs *CoordinateSeq) Destroy() {
C.GEOSCoordSeq_destroy(cs.CSeq)
} | geom/coordinateseq.go | 0.87938 | 0.410579 | coordinateseq.go | starcoder |
package compact_float
import (
"fmt"
"io"
"math/big"
"github.com/cockroachdb/apd/v2"
"github.com/kstenerud/go-uleb128"
)
// ErrorIncomplete is returned when the input ends before a complete
// compact float value could be decoded.
var ErrorIncomplete = fmt.Errorf("Compact float value is incomplete")

// MaxEncodeLength returns the worst-case number of bytes needed to encode
// any DFloat: up to 10 bytes for the ULEB128 64-bit coefficient plus up to
// 5 bytes for the ULEB128 33-bit exponent field.
func MaxEncodeLength() int {
	return 10 + 5
}
// MaxEncodeLengthBig returns an upper bound on the number of bytes needed
// to encode value: the ULEB128 expansion of its coefficient words (7 data
// bits per output byte) plus one byte, plus 5 bytes for the exponent field.
// This is an estimate; it may be smaller, but never bigger.
func MaxEncodeLengthBig(value *apd.Decimal) int {
	if is32Bit() {
		return len(value.Coeff.Bits())*32/7 + 1 + 5
	}
	return len(value.Coeff.Bits())*64/7 + 1 + 5
}
// Encode writes a DFloat to writer, staging through a worst-case-sized
// scratch buffer (see MaxEncodeLength). It returns the number of bytes
// written and any writer error.
func Encode(value DFloat, writer io.Writer) (bytesEncoded int, err error) {
	buffer := make([]byte, MaxEncodeLength())
	bytesEncoded = EncodeToBytes(value, buffer)
	return writer.Write(buffer[:bytesEncoded])
}
// EncodeToBytes encodes a DFloat into buffer, returning the byte count.
// Assumes the buffer is big enough (see MaxEncodeLength()).
//
// Wire layout: a ULEB128 "exponent field" whose bit 0 is the coefficient
// sign, bit 1 the exponent sign, and remaining bits the exponent
// magnitude, followed by the ULEB128 coefficient magnitude. Zeroes and the
// NaN/infinity specials use the dedicated short encodings instead.
func EncodeToBytes(value DFloat, buffer []byte) (bytesEncoded int) {
	if value.IsZero() {
		if value.IsNegativeZero() {
			return EncodeNegativeZero(buffer)
		}
		return EncodeZero(buffer)
	}
	if value.IsSpecial() {
		switch value.Coefficient {
		case CoeffInfinity:
			return EncodeInfinity(buffer)
		case CoeffNegativeInfinity:
			return EncodeNegativeInfinity(buffer)
		case CoeffNan:
			return EncodeQuietNan(buffer)
		case CoeffSignalingNan:
			return EncodeSignalingNan(buffer)
		default:
			panic(fmt.Errorf("%v: Illegal special coefficient", value.Coefficient))
		}
	}
	exponent := value.Exponent
	exponentSign := 0
	if exponent < 0 {
		exponent = -exponent
		// Pre-shifted into bit 1 of the exponent field below.
		exponentSign = 2
	}
	coefficient := value.Coefficient
	coefficientSign := 0
	if coefficient < 0 {
		// NOTE(review): negating math.MinInt64 would overflow; presumably
		// DFloat never carries that coefficient — confirm.
		coefficient = -coefficient
		coefficientSign = 1
	}
	exponentField := uint64(exponent)<<2 | uint64(exponentSign) | uint64(coefficientSign)
	bytesEncoded = uleb128.EncodeUint64ToBytes(exponentField, buffer)
	bytesEncoded += uleb128.EncodeUint64ToBytes(uint64(coefficient), buffer[bytesEncoded:])
	return
}
// EncodeBig writes an apd.Decimal to writer, staging through a
// worst-case-sized scratch buffer (see MaxEncodeLengthBig). It returns the
// number of bytes written and any writer error.
func EncodeBig(value *apd.Decimal, writer io.Writer) (bytesEncoded int, err error) {
	buffer := make([]byte, MaxEncodeLengthBig(value))
	bytesEncoded = EncodeBigToBytes(value, buffer)
	return writer.Write(buffer[:bytesEncoded])
}
// EncodeBigToBytes encodes an apd.Decimal into buffer, returning the byte
// count. Assumes the buffer is big enough (see MaxEncodeLengthBig()).
//
// Layout mirrors EncodeToBytes: a ULEB128 exponent field (bit 0 =
// significand sign, bit 1 = exponent sign, upper bits = exponent
// magnitude) followed by the ULEB128-encoded coefficient magnitude.
func EncodeBigToBytes(value *apd.Decimal, buffer []byte) (bytesEncoded int) {
	if value.IsZero() {
		if value.Negative {
			return EncodeNegativeZero(buffer)
		}
		return EncodeZero(buffer)
	}
	switch value.Form {
	case apd.Infinite:
		if value.Negative {
			return EncodeNegativeInfinity(buffer)
		}
		return EncodeInfinity(buffer)
	case apd.NaN:
		return EncodeQuietNan(buffer)
	case apd.NaNSignaling:
		return EncodeSignalingNan(buffer)
	}
	exponent := value.Exponent
	exponentSign := 0
	if exponent < 0 {
		exponent = -exponent
		exponentSign = 1
	}
	significandSign := 0
	if value.Negative {
		significandSign = 1
	}
	exponentField := uint64(exponent)<<2 | uint64(exponentSign)<<1 | uint64(significandSign)
	bytesEncoded = uleb128.EncodeUint64ToBytes(exponentField, buffer)
	bytesEncoded += uleb128.EncodeToBytes(&value.Coeff, buffer[bytesEncoded:])
	return
}
// EncodeQuietNan emits the 2-byte extended special encoding for quiet NaN
// (extended value 0).
func EncodeQuietNan(buffer []byte) (bytesEncoded int) {
	return encodeExtendedSpecialValue(0, buffer)
}
// EncodeSignalingNan emits the 2-byte extended special encoding for
// signaling NaN (extended value 1).
func EncodeSignalingNan(buffer []byte) (bytesEncoded int) {
	return encodeExtendedSpecialValue(1, buffer)
}
// EncodeInfinity emits the 2-byte extended special encoding for positive
// infinity (extended value 2).
func EncodeInfinity(buffer []byte) (bytesEncoded int) {
	return encodeExtendedSpecialValue(2, buffer)
}
// EncodeNegativeInfinity emits the 2-byte extended special encoding for
// negative infinity (extended value 3).
func EncodeNegativeInfinity(buffer []byte) (bytesEncoded int) {
	return encodeExtendedSpecialValue(3, buffer)
}
// EncodeZero emits the 1-byte special encoding for positive zero (value 2).
func EncodeZero(buffer []byte) (bytesEncoded int) {
	return encodeSpecialValue(2, buffer)
}
// EncodeNegativeZero emits the 1-byte special encoding for negative zero
// (value 3).
func EncodeNegativeZero(buffer []byte) (bytesEncoded int) {
	return encodeSpecialValue(3, buffer)
}
// Decode reads one compact float from reader using a freshly allocated
// single-byte scratch buffer.
// bigValue will be nil unless the decoded value is too big to fit into a DFloat.
func Decode(reader io.Reader) (value DFloat, bigValue *apd.Decimal, bytesDecoded int, err error) {
	buffer := []byte{0}
	return DecodeWithByteBuffer(reader, buffer)
}
// DecodeWithByteBuffer reads one compact float from reader using the
// supplied single-byte scratch buffer.
// bigValue will be nil unless the decoded value is too big to fit into a DFloat.
//
// The stream is two ULEB128 groups: an exponent field (bit 0 = coefficient
// sign, bit 1 = exponent sign, upper bits = exponent magnitude) followed
// by the coefficient magnitude. Small one- and two-byte first groups
// encode the zero and NaN/infinity specials instead.
func DecodeWithByteBuffer(reader io.Reader, buffer []byte) (value DFloat, bigValue *apd.Decimal, bytesDecoded int, err error) {
	asUint, asBig, bytesDecoded, err := uleb128.DecodeWithByteBuffer(reader, buffer)
	if err != nil {
		return
	}
	if asBig != nil {
		err = fmt.Errorf("Exponent %v is too big", asBig)
		return
	}
	switch bytesDecoded {
	case 1:
		// Single-byte groups 2 and 3 encode +0 and -0.
		switch asUint {
		case 2:
			value = dfloatZero
			return
		case 3:
			value = dfloatNegativeZero
			return
		}
	case 2:
		// Two-byte groups 0..3 encode the NaN and infinity specials.
		switch asUint {
		case 0:
			value = dfloatNaN
			return
		case 1:
			value = dfloatSignalingNaN
			return
		case 2:
			value = dfloatInfinity
			return
		case 3:
			value = dfloatNegativeInfinity
			return
		}
	}
	// The exponent field is limited to 33 bits: a 31-bit magnitude plus
	// the two sign bits.
	maxEncodedExponent := uint64(0x1ffffffff)
	if asUint > maxEncodedExponent {
		err = fmt.Errorf("Exponent %v is too big", asUint)
		return
	}
	// Bit 0 selects the coefficient sign, bit 1 the exponent sign.
	negMult := []int{1, -1}
	coeffMult := int64(negMult[asUint&1])
	expMult := int32(negMult[(asUint>>1)&1])
	exponent := int32(asUint>>2) * expMult
	offset := bytesDecoded
	if asUint, asBig, bytesDecoded, err = uleb128.DecodeWithByteBuffer(reader, buffer); err != nil {
		return
	}
	bytesDecoded += offset
	if asBig != nil {
		// Coefficient exceeded 64 bits: promote to an apd.Decimal.
		bigValue = apd.NewWithBigInt(asBig, exponent)
		bigValue.Negative = coeffMult < 0
		return
	}
	if asUint&0x8000000000000000 != 0 {
		// Coefficient needs the 64th bit, so it cannot fit in DFloat's
		// signed int64 coefficient; promote to an apd.Decimal, splitting
		// into machine words on 32-bit platforms.
		if is32Bit() {
			bigValue = &apd.Decimal{
				Negative: coeffMult < 0,
				Exponent: exponent,
			}
			bigValue.Coeff.SetBits([]big.Word{big.Word(asUint), big.Word(asUint >> 32)})
		} else {
			bigValue = &apd.Decimal{
				Negative: coeffMult < 0,
				Exponent: exponent,
			}
			bigValue.Coeff.SetBits([]big.Word{big.Word(asUint)})
		}
		return
	}
	coefficient := int64(asUint) * coeffMult
	value = DFloat{
		Exponent:    exponent,
		Coefficient: coefficient,
	}
	return
}
// encodeSpecialValue writes value as a single-byte ULEB128 group (high bit
// clear); used for the zero and negative-zero encodings.
func encodeSpecialValue(value byte, buffer []byte) (bytesEncoded int) {
	bytesEncoded = 1
	buffer[0] = value
	return
}
// encodeExtendedSpecialValue writes value as a two-byte ULEB128 sequence
// (continuation bit set on the first byte, zero second byte); used for the
// NaN and infinity encodings.
func encodeExtendedSpecialValue(value byte, buffer []byte) (bytesEncoded int) {
	bytesEncoded = 2
	buffer[0] = value | 0x80
	buffer[1] = 0
	return
}
// is32Bit reports whether the native uint is 32 bits wide (an all-ones
// uint equals 0xffffffff only on 32-bit platforms).
func is32Bit() bool {
	return ^uint(0) == 0xffffffff
}
package yarf
import (
"reflect"
)
// Element types used by toSliceOf when building typed result slices.
var uintType = reflect.TypeOf(uint64(0))
var intType = reflect.TypeOf(int64(0))
var floatType = reflect.TypeOf(float64(0))
var stringType = reflect.TypeOf(string(""))
var boolType = reflect.TypeOf(false)
// converter coerces an untyped value to a specific type, reporting success
// in its second result.
type converter func(in interface{}) (interface{}, bool)
// untypedFloat adapts toFloat to the converter signature.
func untypedFloat(in interface{}) (interface{}, bool) {
	return toFloat(in)
}
// untypedUint adapts toUint to the converter signature.
func untypedUint(in interface{}) (interface{}, bool) {
	return toUint(in)
}
// untypedInt adapts toInt to the converter signature.
func untypedInt(in interface{}) (interface{}, bool) {
	return toInt(in)
}
// untypedBool adapts toBool to the converter signature.
func untypedBool(in interface{}) (res interface{}, ok bool) {
	return toBool(in)
}
// untypedString adapts toString to the converter signature.
func untypedString(in interface{}) (res interface{}, ok bool) {
	return toString(in)
}
// NewParam constructs a Param from the given key and value.
func NewParam(key string, value interface{}) Param {
	return Param{key: key, value: value}
}

// Param is a single key/value entry whose helper methods retrieve the
// value as various Go types.
type Param struct {
	key   string
	value interface{}
}

// Key reports the key of the entry.
func (m *Param) Key() string {
	return m.key
}

// Value reports the raw, untyped value of the entry.
func (m *Param) Value() interface{} {
	return m.value
}

// IsNil reports whether the stored value is nil.
func (m *Param) IsNil() bool {
	return m.value == nil
}

// IsSlice reports whether the stored value is a non-nil slice.
func (m *Param) IsSlice() bool {
	if m.value == nil {
		return false
	}
	return reflect.ValueOf(m.value).Kind() == reflect.Slice
}
// String returns the value as a string; it succeeds only for string,
// []byte, or []rune values (see toString).
func (m *Param) String() (string, bool) {
	return toString(m.value)
}
// StringOr returns value as a string, otherwise the provided default
func (m *Param) StringOr(defaultTo string) string {
	str, ok := m.String()
	if ok {
		return str
	}
	return defaultTo
}
// StringSlice returns value as a []string, if possible. A direct []string
// is returned as-is; any other slice is converted element-by-element via
// toSliceOf/untypedString.
func (m *Param) StringSlice() ([]string, bool) {
	if m.value == nil {
		return nil, false
	}
	var res []string
	res, ok := m.value.([]string)
	if ok {
		return res, true
	}
	r, ok := toSliceOf(m.value, stringType, untypedString)
	if !ok {
		return nil, false
	}
	res, ok = r.([]string)
	if ok {
		return res, true
	}
	return nil, false
}
// StringSliceOr returns value as a []string, otherwise the provided default
func (m *Param) StringSliceOr(defaultTo []string) []string {
	arr, ok := m.StringSlice()
	if ok {
		return arr
	}
	return defaultTo
}
// Uint returns the value as a uint64 for any numeric type; negative and
// fractional inputs follow toUint's wrap/truncate semantics.
func (m *Param) Uint() (uint64, bool) {
	return toUint(m.value)
}
// UintOr returns value as a uint64, otherwise the provided default
func (m *Param) UintOr(def uint64) uint64 {
	i, ok := m.Uint()
	if ok {
		return i
	}
	return def
}
// UintSlice returns value as a []uint64, if possible; non-[]uint64 slices
// are converted element-by-element.
func (m *Param) UintSlice() ([]uint64, bool) {
	if m.value == nil {
		return nil, false
	}
	var res []uint64
	res, ok := m.value.([]uint64)
	if ok {
		return res, true
	}
	r, ok := toSliceOf(m.value, uintType, untypedUint)
	if !ok {
		return nil, false
	}
	res, ok = r.([]uint64)
	if ok {
		return res, true
	}
	return nil, false
}
// UintSliceOr returns value as a []uint64, otherwise the provided default
func (m *Param) UintSliceOr(def []uint64) []uint64 {
	arr, ok := m.UintSlice()
	if ok {
		return arr
	}
	return def
}
// Int returns the value as an int64 for any numeric type; floats are
// truncated per toInt.
func (m *Param) Int() (int64, bool) {
	return toInt(m.value)
}
// IntOr returns value as a int64, otherwise the provided default
func (m *Param) IntOr(def int64) int64 {
	i, ok := m.Int()
	if ok {
		return i
	}
	return def
}
// IntSlice returns value as a []int64, if possible; non-[]int64 slices are
// converted element-by-element.
func (m *Param) IntSlice() ([]int64, bool) {
	if m.value == nil {
		return nil, false
	}
	var res []int64
	res, ok := m.value.([]int64)
	if ok {
		return res, ok
	}
	r, ok := toSliceOf(m.value, intType, untypedInt)
	if !ok {
		return nil, false
	}
	res, ok = r.([]int64)
	if ok {
		return res, true
	}
	return nil, false
}
// IntSliceOr returns value as a []int64, otherwise the provided default
func (m *Param) IntSliceOr(def []int64) []int64 {
	arr, ok := m.IntSlice()
	if ok {
		return arr
	}
	return def
}
// Float returns the value as a float64 for any numeric type.
func (m *Param) Float() (float64, bool) {
	if m.value == nil {
		return 0.0, false
	}
	return toFloat(m.value)
}
// FloatOr returns value as a float64, otherwise the provided default
func (m *Param) FloatOr(def float64) float64 {
	i, ok := m.Float()
	if ok {
		return i
	}
	return def
}
// FloatSlice returns value as a []float64, if possible; non-[]float64
// slices are converted element-by-element.
func (m *Param) FloatSlice() ([]float64, bool) {
	if m.value == nil {
		return nil, false
	}
	var res []float64
	res, ok := m.value.([]float64)
	if ok {
		return res, ok
	}
	r, ok := toSliceOf(m.value, floatType, untypedFloat)
	if !ok {
		return nil, false
	}
	res, ok = r.([]float64)
	if ok {
		return res, true
	}
	return nil, false
}
// FloatSliceOr returns value as a []float64, otherwise the provided default
func (m *Param) FloatSliceOr(def []float64) []float64 {
	arr, ok := m.FloatSlice()
	if ok {
		return arr
	}
	return def
}
// Bool returns the value as a bool; only a stored bool succeeds.
func (m *Param) Bool() (bool, bool) {
	return toBool(m.value)
}
// BoolOr returns value as a bool, otherwise the provided default
func (m *Param) BoolOr(def bool) bool {
	i, ok := m.Bool()
	if ok {
		return i
	}
	return def
}
// BoolSlice returns value as a []bool, if possible; non-[]bool slices are
// converted element-by-element.
func (m *Param) BoolSlice() ([]bool, bool) {
	if m.value == nil {
		return nil, false
	}
	var res []bool
	res, ok := m.value.([]bool)
	if ok {
		return res, ok
	}
	r, ok := toSliceOf(m.value, boolType, untypedBool)
	if !ok {
		return nil, false
	}
	res, ok = r.([]bool)
	if ok {
		return res, true
	}
	return nil, false
}
// BoolSliceOr returns value as a []bool, otherwise the provided default
func (m *Param) BoolSliceOr(def []bool) []bool {
	arr, ok := m.BoolSlice()
	if ok {
		return arr
	}
	return def
}
// toString converts in to a string. Supported dynamic types are string,
// []byte, and []rune; anything else — including nil — reports failure.
func toString(in interface{}) (res string, ok bool) {
	switch v := in.(type) {
	case string:
		return v, true
	case []byte:
		return string(v), true
	case []rune:
		return string(v), true
	}
	return "", false
}
// toBool converts in to a bool; only a dynamic type of exactly bool
// succeeds (nil and every other type report false, false).
func toBool(in interface{}) (res bool, ok bool) {
	res, ok = in.(bool)
	return
}
// toUint converts any built-in numeric value to a uint64. Signed values
// are converted with Go's usual wraparound (negative inputs wrap) and
// floats are truncated; non-numeric values (including nil) report failure.
// NOTE(review): converting a negative float to uint64 is
// platform-dependent in Go — confirm callers never pass negative floats.
func toUint(num interface{}) (uint64, bool) {
	switch v := num.(type) {
	case int, int8, int16, int32, int64:
		return uint64(reflect.ValueOf(v).Int()), true
	case uint, uint8, uint16, uint32, uint64:
		return reflect.ValueOf(v).Uint(), true
	case float64:
		return uint64(v), true
	case float32:
		return uint64(v), true
	}
	return 0, false
}
// toInt converts any built-in numeric value to an int64. Unsigned values
// above math.MaxInt64 wrap, and floats are truncated toward zero;
// non-numeric values (including nil) report failure.
func toInt(num interface{}) (int64, bool) {
	switch v := num.(type) {
	case int, int8, int16, int32, int64:
		return reflect.ValueOf(v).Int(), true
	case uint, uint8, uint16, uint32, uint64:
		return int64(reflect.ValueOf(v).Uint()), true
	case float64:
		return int64(v), true
	case float32:
		return int64(v), true
	}
	return 0, false
}
// toFloat converts any built-in numeric value to a float64 (very large
// 64-bit integers lose precision); non-numeric values, including nil,
// report failure. Reflection is only used for the integer families, where
// a type-switch binding cannot produce a single concrete type.
func toFloat(num interface{}) (float64, bool) {
	switch v := num.(type) {
	case int, int8, int16, int32, int64:
		return float64(reflect.ValueOf(v).Int()), true
	case uint, uint8, uint16, uint32, uint64:
		return float64(reflect.ValueOf(v).Uint()), true
	case float64:
		return v, true
	case float32:
		return float64(v), true
	}
	return 0, false
}
func toSliceOf(value interface{}, typ reflect.Type, converter converter) (interface{}, bool) {
if reflect.TypeOf(value).Kind() != reflect.Slice {
return nil, false
}
slice := reflect.ValueOf(value)
resSlice := reflect.MakeSlice(reflect.SliceOf(typ), slice.Len(), slice.Len())
for i := 0; i < slice.Len(); i++ {
val, ok := converter(slice.Index(i).Interface())
if !ok {
return nil, false
}
resSlice.Index(i).Set(reflect.ValueOf(val))
}
return resSlice.Interface(), true
} | param.go | 0.711932 | 0.495789 | param.go | starcoder |
package unityai
import (
"fmt"
"math"
)
// Vector3f is a three-component single-precision vector (x, y, z).
type Vector3f struct {
	x, y, z float32
}
// Vector3_One is the vector (1, 1, 1).
var Vector3_One = Vector3f{1, 1, 1}
// Vector2_One is the vector (1, 1).
var Vector2_One = Vector2f{1, 1}
// NewVector3f returns the vector (x, y, z).
func NewVector3f(x, y, z float32) Vector3f {
	var v Vector3f
	v.Set(x, y, z)
	return v
}
// Set assigns all three components at once.
func (this *Vector3f) Set(x, y, z float32) {
	this.x = x
	this.y = y
	this.z = z
}
// SetData assigns component i (0 = x, 1 = y, 2 = z); any other index
// panics.
func (this *Vector3f) SetData(i int, data float32) {
	switch i {
	case 0:
		this.x = data
	case 1:
		this.y = data
	case 2:
		this.z = data
	default:
		panic(fmt.Errorf("invalid vector index:%d", i))
	}
}
// GetData returns component i (0 = x, 1 = y, 2 = z); any other index
// panics.
func (this *Vector3f) GetData(i int) float32 {
	switch i {
	case 0:
		return this.x
	case 1:
		return this.y
	case 2:
		return this.z
	default:
		panic(fmt.Errorf("invalid vector index:%d", i))
	}
}
// Sub returns this - data, component-wise.
func (this Vector3f) Sub(data Vector3f) Vector3f {
	return Vector3f{this.x - data.x, this.y - data.y, this.z - data.z}
}
// Add returns this + data, component-wise.
func (this Vector3f) Add(data Vector3f) Vector3f {
	return Vector3f{this.x + data.x, this.y + data.y, this.z + data.z}
}
// Mulf returns the vector uniformly scaled by data.
func (this Vector3f) Mulf(data float32) Vector3f {
	return Vector3f{this.x * data, this.y * data, this.z * data}
}
// Mulv returns the component-wise product of this and data.
func (this Vector3f) Mulv(data Vector3f) Vector3f {
	return Vector3f{this.x * data.x, this.y * data.y, this.z * data.z}
}
// Div returns the vector uniformly scaled by 1/v (no zero guard).
func (this Vector3f) Div(v float32) Vector3f {
	return Vector3f{this.x / v, this.y / v, this.z / v}
}
// Neg returns the component-wise negation of the vector.
func (this Vector3f) Neg() Vector3f {
	return Vector3f{-this.x, -this.y, -this.z}
}
// SetZero resets all components to zero in place.
func (this *Vector3f) SetZero() {
	this.x = 0
	this.y = 0
	this.z = 0
}
// ScaleVector3f returns the component-wise product of l and r (free-
// function counterpart of Mulv).
func ScaleVector3f(l, r Vector3f) Vector3f {
	return Vector3f{l.x * r.x, l.y * r.y, l.z * r.z}
}
// MinVector3f returns the component-wise minimum of l and r.
func MinVector3f(l, r Vector3f) Vector3f {
	return Vector3f{FloatMin(l.x, r.x), FloatMin(l.y, r.y), FloatMin(l.z, r.z)}
}
// MaxVector3f returns the component-wise maximum of l and r.
func MaxVector3f(l, r Vector3f) Vector3f {
	return Vector3f{FloatMax(l.x, r.x), FloatMax(l.y, r.y), FloatMax(l.z, r.z)}
}
// DotVector3f returns the 3D dot product of l and r.
func DotVector3f(l, r Vector3f) float32 {
	return l.x*r.x + l.y*r.y + l.z*r.z
}
// Dot2D returns the dot product of a and b projected onto the XZ plane
// (the y components are ignored).
func Dot2D(a, b Vector3f) float32 {
	return a.x*b.x + a.z*b.z
}
// DotQuaternionf returns the 4D dot product of quaternions l and r.
func DotQuaternionf(l, r Quaternionf) float32 {
	return l.x*r.x + l.y*r.y + l.z*r.z + l.w*r.w
}
// LerpVector3f linearly interpolates from "from" (t=0) to "to" (t=1).
func LerpVector3f(from, to Vector3f, t float32) Vector3f {
	return to.Mulf(t).Add(from.Mulf(1.0 - t))
}
// Perp2D returns the 2D cross product (perp-dot) of u and v in the XZ
// plane.
func Perp2D(u, v Vector3f) float32 {
	return u.z*v.x - u.x*v.z
}
// Magnitude returns the Euclidean length of inV.
func Magnitude(inV Vector3f) float32 {
	return float32(math.Sqrt(float64(DotVector3f(inV, inV))))
}
// Magnitude2 returns the Euclidean length of the 2D vector inV.
func Magnitude2(inV Vector2f) float32 {
	return float32(math.Sqrt(float64(DotVector2f(inV, inV))))
}
// SqrMagnitude returns the squared length of inV (no sqrt).
func SqrMagnitude(inV Vector3f) float32 {
	return DotVector3f(inV, inV)
}
// Distance returns the Euclidean distance between points a and b.
func Distance(a, b Vector3f) float32 {
	return Magnitude(b.Sub(a))
}
// Cross returns the cross product of lhs and rhs.
func Cross(lhs, rhs Vector3f) Vector3f {
	return Vector3f{
		lhs.y*rhs.z - lhs.z*rhs.y,
		lhs.z*rhs.x - lhs.x*rhs.z,
		lhs.x*rhs.y - lhs.y*rhs.x,
	}
}
// AbsVector3f returns the component-wise absolute value of v.
func AbsVector3f(v Vector3f) Vector3f {
	return NewVector3f(FloatAbs(v.x), FloatAbs(v.y), FloatAbs(v.z))
}
// NormalizeSafe returns inV normalized, or defaultV when inV's length is
// at or below kEpsilon (guards against division by ~zero).
func NormalizeSafe(inV, defaultV Vector3f) Vector3f {
	mag := Magnitude(inV)
	if mag > kEpsilon {
		return inV.Div(mag)
	} else {
		return defaultV
	}
}
// NormalizeSafe2 is the 2D counterpart of NormalizeSafe.
func NormalizeSafe2(inV, defaultV Vector2f) Vector2f {
	mag := Magnitude2(inV)
	if mag > kEpsilon {
		return inV.Div(mag)
	} else {
		return defaultV
	}
}
// CompareApproximately reports whether inV0 and inV1 are within inMaxDist
// of each other, compared in squared space to avoid a sqrt.
func CompareApproximately(inV0, inV1 Vector3f, inMaxDist float32) bool {
	return SqrMagnitude(inV1.Sub(inV0)) <= inMaxDist*inMaxDist
}
// Normalize returns inV scaled to unit length. Unlike NormalizeSafe it
// does not guard against zero length (components become NaN/Inf).
func Normalize(inV Vector3f) Vector3f {
	mag := Magnitude(inV)
	return inV.Div(mag)
}
type Vector2f struct {
x, y float32
}
func NewVector2f(x, y float32) Vector2f {
return Vector2f{x, y}
}
func MinVector2f(l, r Vector2f) Vector2f {
return Vector2f{FloatMin(l.x, r.x), FloatMin(l.y, r.y)}
}
func DotVector2f(l, r Vector2f) float32 {
return l.x*r.x + l.y*r.y
}
func MaxVector2f(l, r Vector2f) Vector2f {
return Vector2f{FloatMax(l.x, r.x), FloatMax(l.y, r.y)}
}
func (this Vector2f) Sub(data Vector2f) Vector2f {
return Vector2f{this.x - data.x, this.y - data.y}
}
func (this Vector2f) Div(v float32) Vector2f {
return Vector2f{this.x / v, this.y / v}
}
func (this Vector2f) Mulf(data float32) Vector2f {
return Vector2f{this.x * data, this.y * data}
}
func (this Vector2f) Add(data Vector2f) Vector2f {
return Vector2f{this.x + data.x, this.y + data.y}
} | vector.go | 0.824108 | 0.755862 | vector.go | starcoder |
package BrickMosaic
// This package is responsible for translating the Extent ([]Location) of pieces relative to
// different anchor points. E.g. by default the extent is relative to 'upper left' corner
// of brick. But if we're placing it such that lower right corner is the origin, we need
// to translate the upper left locations to match.
// AnchorPoint identifies which corner of a piece's bounding box its
// extent locations are measured from.
type AnchorPoint int

// The four supported corner anchors, in clockwise order from upper left.
const (
	UpperLeft AnchorPoint = iota
	UpperRight
	LowerRight
	LowerLeft
)

// String returns the name of the anchor point; it panics on any value
// outside the four defined constants.
func (a AnchorPoint) String() string {
	if a >= UpperLeft && a <= LowerLeft {
		return [...]string{"UpperLeft", "UpperRight", "LowerRight", "LowerLeft"}[a]
	}
	panic("shouldn't reach here")
}
// Translate re-expresses locs (which are relative to the UpperLeft corner)
// relative to the given anchor point by negating the row and/or column
// offsets as needed. The input slice is returned unmodified for UpperLeft;
// otherwise a new slice is built. Panics on an unknown anchor point.
func Translate(locs []Location, pt AnchorPoint) []Location {
	if pt == UpperLeft {
		return locs
	}
	var points []Location
	switch pt {
	case UpperRight:
		// Columns grow leftward from the right edge.
		for _, p := range locs {
			points = append(points, Location{Row: p.Row, Col: -p.Col})
		}
	case LowerRight:
		// Both axes are mirrored.
		for _, p := range locs {
			points = append(points, Location{Row: -p.Row, Col: -p.Col})
		}
	case LowerLeft:
		// Rows grow upward from the bottom edge.
		for _, p := range locs {
			points = append(points, Location{Row: -p.Row, Col: p.Col})
		}
	default:
		panic("Shouldn't reach here")
	}
	return points
}
func TranslateAbsoluteOrigin(absLoc Location, p MosaicPiece, pt AnchorPoint) Location {
if pt == UpperLeft {
return absLoc
} else if pt == UpperRight {
// Need to translate the point LEFT by the width of the brick
return absLoc.Add(Location{Col: -p.Cols() + 1})
} else if pt == LowerRight {
// Tranlsate LEFT and UP
return absLoc.Add(Location{Row: -p.Rows() + 1, Col: -p.Cols() + 1})
} else if pt == LowerLeft {
// Translate UP
return absLoc.Add(Location{Row: -p.Rows() + 1})
}
panic("Shouldn't reach here")
} | translate.go | 0.705176 | 0.528229 | translate.go | starcoder |
package table
import (
"bytes"
"encoding/csv"
"errors"
"fmt"
"strings"
"github.com/grokify/mogo/math/mathutil"
)
// Pivot takes a "straight table" where the column names
// and values all appear in one single column and lays it out as standard
// tabular data with colCount columns. When haveColumns is true the first
// colCount cells become the header row. The receiver must have no declared
// columns, exactly one data column, and a row count divisible by colCount.
func (tbl *Table) Pivot(colCount uint, haveColumns bool) (Table, error) {
	newTbl := NewTable(tbl.Name)
	if len(tbl.Columns) != 0 {
		return newTbl, fmt.Errorf("has defined columns count [%d]", len(tbl.Columns))
	}
	isWellFormed, colCountActual, _ := tbl.IsWellFormed()
	if !isWellFormed {
		return newTbl, errors.New("table is not well-defined")
	} else if colCountActual != 1 {
		return newTbl, fmt.Errorf("has non-1 column count [%d]", colCountActual)
	}
	rowCount := len(tbl.Rows)
	_, remainder := mathutil.DivideInt64(int64(rowCount), int64(colCount))
	if remainder != 0 {
		return newTbl, fmt.Errorf("row count [%d] is not a multiple of col count [%d]", rowCount, colCount)
	}
	addedColumns := false
	newRow := []string{}
	// Accumulate colCount consecutive single-column cells into each output
	// row; the first completed row becomes the header when requested.
	for i, row := range tbl.Rows {
		_, remainder := mathutil.DivideInt64(int64(i), int64(colCount))
		if remainder == 0 {
			if len(newRow) > 0 {
				if haveColumns && !addedColumns {
					newTbl.Columns = newRow
					addedColumns = true
				} else {
					newTbl.Rows = append(newTbl.Rows, newRow)
				}
				newRow = []string{}
			}
		}
		newRow = append(newRow, row[0])
	}
	// Flush the final accumulated row.
	if len(newRow) > 0 {
		if haveColumns && !addedColumns {
			newTbl.Columns = newRow
		} else {
			newTbl.Rows = append(newTbl.Rows, newRow)
		}
	}
	return newTbl, nil
}
/*
// FormatColumn takes a function to format all cell values.
func (tbl *Table) FormatColumn(colIdx uint, conv func(cellVal string) (string, error)) error {
colInt := int(colIdx)
for i, row := range tbl.Rows {
if colInt >= len(row) {
return fmt.Errorf("row [%d] is len [%d] without col index [%d]", i, len(row), colInt)
}
newVal, err := conv(row[colInt])
if err != nil {
return err
}
tbl.Rows[i][colInt] = newVal
}
return nil
}
*/
// FormatRows applies conv to every cell in the inclusive column window
// [colIdxMinInc, colIdxMaxInc] of every row (a negative max means "through
// the end of each row"). The conversion runs twice: a validation-only dry
// run so that an error leaves the table unmodified, then the mutating pass.
// The `format.ConvertDecommify()` and `format.ConvertRemoveControls()` functions are available to use.
func (tbl *Table) FormatRows(colIdxMinInc, colIdxMaxInc int, conv func(cellVal string) (string, error)) error {
	err := tbl.formatRowsTry(colIdxMinInc, colIdxMaxInc, conv, false)
	if err != nil {
		return err
	}
	return tbl.formatRowsTry(colIdxMinInc, colIdxMaxInc, conv, true)
}
// formatRowsTry applies conv to the cells in [colIdxMinInc, colIdxMaxInc]
// of every row. With exec=false it only validates (nothing is written
// back); with exec=true conversion results are stored.
func (tbl *Table) formatRowsTry(colIdxMinInc, colIdxMaxInc int, conv func(cellVal string) (string, error), exec bool) error {
	if len(tbl.Rows) == 0 {
		return nil
	}
	if colIdxMinInc < 0 {
		colIdxMinInc = 0
	}
	// test and return errors
	for y, row := range tbl.Rows {
		// Rows too short to reach the window are skipped, not errors.
		if colIdxMinInc >= len(row) {
			continue
		}
		rowMaxIdxInc := colIdxMaxInc
		// A negative or out-of-range max means "through the last cell".
		if rowMaxIdxInc < 0 || rowMaxIdxInc >= len(row) {
			rowMaxIdxInc = len(row) - 1
		}
		for x := colIdxMinInc; x <= rowMaxIdxInc; x++ {
			val, err := conv(row[x])
			if err != nil {
				return err
			}
			if exec {
				row[x] = val
			}
		}
		if exec {
			tbl.Rows[y] = row
		}
	}
	return nil
}
// String renders the table as a CSV string using the given field delimiter
// and line ending (CRLF when useCRLF is true). Declared columns, if any,
// are written as the header row. The underlying csv.Writer error, if any,
// is wrapped into the returned error so callers can inspect the cause.
func (tbl *Table) String(comma rune, useCRLF bool) (string, error) {
	var b bytes.Buffer
	w := csv.NewWriter(&b)
	w.Comma = comma
	w.UseCRLF = useCRLF
	if len(tbl.Columns) > 0 {
		if err := w.Write(tbl.Columns); err != nil {
			return "", fmt.Errorf("error writing columns to csv [%s]: %w",
				strings.Join(tbl.Columns, ","), err)
		}
	}
	for i, row := range tbl.Rows {
		if err := w.Write(row); err != nil {
			return "", fmt.Errorf("error writing row to csv: idx [%d] content [%s]: %w",
				i, strings.Join(row, ","), err)
		}
	}
	w.Flush()
	return b.String(), w.Error()
}
// Transpose creates a new table by transposing the matrix data.
// In the new table, it does not set anything other than than `Name`, `Columns`, and `Rows`.
func (tbl *Table) Transpose() (Table, error) {
tbl2 := NewTable(tbl.Name)
isWellFormed, _, _ := tbl.IsWellFormed()
if !isWellFormed {
return tbl2, errors.New("can only transpose well formed table")
}
for x := 0; x < len(tbl.Columns); x++ {
newRow := []string{}
if len(tbl.Columns) > 0 {
newRow = append(newRow, tbl.Columns[x])
}
for y := 0; y < len(tbl.Rows); y++ {
newRow = append(newRow, tbl.Rows[y][x])
}
if x == 0 {
tbl2.Columns = newRow
} else {
tbl2.Rows = append(tbl2.Rows, newRow)
}
}
return tbl2, nil
} | data/table/format.go | 0.582135 | 0.423279 | format.go | starcoder |
package lib
import "golang.org/x/exp/constraints"
// Min returns the smallest of the supplied values; Assertf panics when no
// values are given.
func Min[T constraints.Ordered](vals ...T) T {
	Assertf(len(vals) > 0, "No values given")
	best := vals[0]
	for _, v := range vals {
		if v < best {
			best = v
		}
	}
	return best
}
// Max returns the largest of the supplied values; Assertf panics when no
// values are given.
func Max[T constraints.Ordered](vals ...T) T {
	Assertf(len(vals) > 0, "No values given")
	best := vals[0]
	for _, v := range vals {
		if v > best {
			best = v
		}
	}
	return best
}
// AtLeast reports how many of vals are greater than or equal to n.
func AtLeast[T constraints.Ordered](n T, vals ...T) int {
	count := 0
	for _, v := range vals {
		if v >= n {
			count++
		}
	}
	return count
}
// Clamp clamps val to [min, max]. When min > max, max wins — the result is
// exactly Min(Max(val, min), max).
func Clamp[T constraints.Ordered](val, min, max T) T {
	return Min(Max(val, min), max)
}
// Sum returns the sum of the supplied values (the type's zero value when
// called with none).
func Sum[T constraints.Integer | constraints.Float](vals ...T) T {
	var total T
	for _, v := range vals {
		total += v
	}
	return total
}
// Product returns the product of the supplied values; Assertf panics when
// no values are given.
func Product[T constraints.Integer | constraints.Float](vals ...T) T {
	Assertf(len(vals) > 0, "No values given")
	result := T(1)
	for _, v := range vals {
		result *= v
	}
	return result
}
// Abs returns the absolute value of v.
// NOTE(review): for the most negative value of a signed integer type, -v
// overflows and the result wraps back to the same negative value.
func Abs[T constraints.Signed | constraints.Float](v T) T {
	if v < 0 {
		return -v
	}
	return v
}
// Pow returns x to the power of n (integer exponentiation; panics when
// n < 0).
func Pow[T constraints.Integer](x T, n int) T {
	return powInt(1, x, n)
}
// powInt computes y * x**n by exponentiation by squaring, maintaining the
// invariant result == y * x**n across recursive calls; panics when n < 0.
// See https://en.wikipedia.org/wiki/Exponentiation_by_squaring
func powInt[T constraints.Integer](y, x T, n int) T {
	switch {
	case n < 0:
		panic("Negative exponent")
	case n == 0:
		return y
	case n == 1:
		return x * y
	case n%2 == 0:
		return powInt(y, x*x, n/2)
	default:
		return powInt(x*y, x*x, (n-1)/2)
	}
}
// GCD returns the greatest common divisor of a and b via the iterative
// Euclidean algorithm: gcd(a, b) = gcd(b, a mod b), stopping when the
// second operand reaches zero.
// See https://www.khanacademy.org/computing/computer-science/cryptography/modarithmetic/a/the-euclidean-algorithm.
func GCD[T constraints.Integer](a, b T) T {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}
// LCM reaturns the least common multiple of the supplied integers.
func LCM[T constraints.Integer](vals ...T) T {
Assert(len(vals) > 0)
if len(vals) == 1 {
return vals[0]
}
res := vals[0] * vals[1] / GCD(vals[0], vals[1])
for i := 2; i < len(vals); i++ {
res = LCM(res, vals[i])
}
return res
} | lib/math.go | 0.846038 | 0.555676 | math.go | starcoder |
package lexer
import (
"bytes"
"io"
"strings"
)
// lexer.Lexer helps you tokenize bytes
type Lexer interface {
// PeekRune allows you to look ahead at runes without consuming them
PeekRune(int) rune
// NetRune consumes and returns the next rune in the input
NextRune() rune
// BackupRune un-conumes the last rune from the input
BackupRune()
// BackupRunes un-consumes the last n runes from the input
BackupRunes(int)
// NewLine increments the line number counter, resets the column counter
NewLine()
// Line returns the current line number, 1-based
Line() int
// Column returns the current column number, 1-based
Column() int
// EmitToken emits a token of the specified type, consuming matched runes
// without emitting them
EmitToken(TokenType)
// EmitTokenWithBytes emits a token along with all the consumed runes
EmitTokenWithBytes(TokenType)
// IgnoreToken ignores the consumed bytes without emitting any tokens
IgnoreToken()
// EmitEOF emits a token of type TokenEOF
EmitEOF()
// NextToken retrieves the next emmitted token from the input
NextToken() *Token
// Marker returns a marker that you can use to reset the lexer state later
Marker() *Marker
// CanReset confirms if the marker is still valid
CanReset(*Marker) bool
// Reset resets the lexer state to the specified marker
Reset(*Marker)
// MatchZeroOrOneBytes consumes the next rune if it matches, always returning true
MatchZeroOrOneBytes([]byte) bool
// MatchZeroOrOneRuness consumes the next rune if it matches, always returning true
MatchZeroOrOneRunes([]rune) bool
// MatchZeroOrOneRune consumes the next rune if it matches, always returning true
MatchZeroOrOneRune(rune) bool
// MatchZeroOrOneFunc consumes the next rune if it matches, always returning true
MatchZeroOrOneFunc(MatchFn) bool
// MatchZeroOrMoreBytes consumes a run of matching runes, always returning true
MatchZeroOrMoreBytes([]byte) bool
// MatchZeroOrMoreRunes consumes a run of matching runes, always returning true
MatchZeroOrMoreRunes([]rune) bool
// MatchZeroOrMoreFunc consumes a run of matching runes, always returning true
MatchZeroOrMoreFunc(MatchFn) bool
// MatchOneBytes consumes the next rune if its in the list of bytes
MatchOneBytes([]byte) bool
// MatchOneRune consumes the next rune if its in the list of bytes
MatchOneRunes([]rune) bool
// MatchOneRune consumes the next rune if it matches
MatchOneRune(rune) bool
// MatchOneFunc consumes the next rune if it matches
MatchOneFunc(MatchFn) bool
// MatchOneOrMoreBytes consumes a run of matching runes
MatchOneOrMoreBytes([]byte) bool
// MatchOneOrMoreRunes consumes a run of matching runes
MatchOneOrMoreRunes([]rune) bool
// MatchOneOrMoreFunc consumes a run of matching runes
MatchOneOrMoreFunc(MatchFn) bool
// MatchMinMaxBytes consumes a specified run of matching runes
MatchMinMaxBytes([]byte, int, int) bool
// MatchMinMaxRunes consumes a specified run of matching runes
MatchMinMaxRunes([]rune, int, int) bool
// MatchMinMaxFunc consumes a specified run of matching runes
MatchMinMaxFunc(MatchFn, int, int) bool
// NonMatchZeroOrOneBytes consumes the next rune if it does not match, always returning true
NonMatchZeroOrOneBytes([]byte) bool
// NonMatchZeroOrOneRunes consumes the next rune if it does not match, always returning true
NonMatchZeroOrOneRunes([]rune) bool
// NonMatchZeroOrOneFunc consumes the next rune if it does not match, always returning true
NonMatchZeroOrOneFunc(MatchFn) bool
// NonMatchZeroOrMoreBytes consumes a run of non-matching runes, always returning true
NonMatchZeroOrMoreBytes([]byte) bool
// NonMatchZeroOrMoreRunes consumes a run of non-matching runes, always returning true
NonMatchZeroOrMoreRunes([]rune) bool
// NonMatchZeroOrMoreFunc consumes a run of non-matching runes, always returning true
NonMatchZeroOrMoreFunc(MatchFn) bool
// NonMatchOneBytes consumes the next rune if its NOT in the list of bytes
NonMatchOneBytes([]byte) bool
// NonMatchOneRunes consumes the next rune if its NOT in the list of runes
NonMatchOneRunes([]rune) bool
// NonMatchOneFunc consumes the next rune if it does NOT match
NonMatchOneFunc(MatchFn) bool
// NonMatchOneOrMoreBytes consumes a run of non-matching runes
NonMatchOneOrMoreBytes([]byte) bool
// NonMatchOneOrMoreRunes consumes a run of non-matching runes
NonMatchOneOrMoreRunes([]rune) bool
// NonMatchOneOrMoreFunc consumes a run of non-matching runes
NonMatchOneOrMoreFunc(MatchFn) bool
// MatchEOF tries to match the next rune against RuneEOF
MatchEOF() bool
}
// TokenType identifies the category of a lexed token.
type TokenType int

// Token is a single lexeme produced by the scanner: its type, the raw bytes
// captured for it (optional), and the 1-based position where it began.
type Token struct {
	typ    TokenType
	bytes  []byte
	line   int
	column int
}

// Type reports the token's TokenType.
func (t *Token) Type() TokenType { return t.typ }

// Bytes reports the raw bytes captured for the token, or nil if none were kept.
func (t *Token) Bytes() []byte { return t.bytes }

// EOF reports whether the token marks end-of-input.
func (t *Token) EOF() bool { return t.typ == TokenTypeEOF }

// Line reports the 1-based line on which the token started.
func (t *Token) Line() int { return t.line }

// Column reports the 1-based column at which the token started.
func (t *Token) Column() int { return t.column }

// Sentinel token types.
const (
	// TokenTypeEOF marks end of input.
	TokenTypeEOF TokenType = -1
	// TokenTypeUnknown marks a rune (or run of runes) the lexer could not classify.
	TokenTypeUnknown TokenType = -2
)

// RuneEOF is the pseudo-rune reported when the input is exhausted.
const RuneEOF = -1
// StateFn represents the state of the scanner as a function that returns the next state.
type StateFn func(Lexer) StateFn
// MatchFn represents a callback function for matching runes that are not
// feasible for a range
type MatchFn func(rune) bool
// Marker stores the state of the lexer to allow rewinding via Reset.
// All fields are snapshots of the lexer's internal counters.
type Marker struct {
	sequence int // internal sequence number — presumably validated by CanReset; confirm against the lexer implementation
	pos      int // read position within the buffer
	tokenLen int // length of the partially matched token
	line     int // 1-based line counter at capture time
	column   int // 1-based column counter at capture time
}
// New returns a new Lexer object with an unlimited read-buffer.
// channelCap presumably sizes the emitted-token channel — confirm against newLex.
func New(startState StateFn, reader io.Reader, channelCap int) Lexer {
	return newLex(startState, reader, defaultBufSize, true, channelCap)
}
// NewSize returns a new Lexer object for the specified reader and read-buffer size.
func NewSize(startState StateFn, reader io.Reader, readerBufLen int, channelCap int) Lexer {
	return newLex(startState, reader, readerBufLen, false, channelCap)
}
// NewFromString returns a new Lexer object for the specified string.
// The buffer is sized to the whole input, so the full string is addressable.
func NewFromString(startState StateFn, input string, channelCap int) Lexer {
	return newLex(startState, strings.NewReader(input), len(input), false, channelCap)
}
// NewFromBytes returns a new Lexer object for the specified byte array.
func NewFromBytes(startState StateFn, input []byte, channelCap int) Lexer {
	return newLex(startState, bytes.NewReader(input), len(input), false, channelCap)
} | lexer.go | 0.772015 | 0.425187 | lexer.go | starcoder
package useful
import . "github.com/SimonRichardson/wishful/wishful"
// Either is a disjunction of two cases: a right (success-like) value that
// operations act on, and a left value that short-circuits them. It bundles
// the Point/Applicative/Monad/Semigroup/Functor behaviors from wishful.
type Either interface {
	Of(Any) Point                     // lift a value into the right branch
	Ap(Applicative) Applicative       // apply a wrapped function (no-op on left)
	Chain(func(Any) Monad) Monad      // monadic bind (no-op on left)
	Concat(Semigroup) Semigroup       // semigroup append (no-op on left)
	Map(Morphism) Functor             // map over the right value (no-op on left)
	Bimap(Morphism, Morphism) Monad   // 1st fn maps left, 2nd maps right
	Fold(Morphism, Morphism) Any      // collapse: 1st fn for left, 2nd for right
	Swap() Monad                      // exchange the branches
	Sequence(Point) Any               // Traverse with the identity morphism
	Traverse(Morphism, Point) Functor
}
// left is the short-circuiting branch of Either: Ap, Chain, Map and Concat
// all return the receiver unchanged, while Bimap and Fold apply their first
// argument to the wrapped value. (The original comment here said "right",
// which was a copy-paste slip.)
type left struct {
	x Any
}
// NewLeft wraps x in the left branch.
func NewLeft(x Any) left {
	return left{
		x: x,
	}
}
// Of lifts v into the right branch — the Point for Either — regardless of the receiver.
func (x left) Of(v Any) Point {
	return NewRight(v)
}
// Ap ignores the applicative argument and propagates the left unchanged.
func (x left) Ap(v Applicative) Applicative {
	return x
}
// Chain ignores f and propagates the left unchanged.
func (x left) Chain(f func(Any) Monad) Monad {
	return x
}
// Map ignores f and propagates the left unchanged.
func (x left) Map(f Morphism) Functor {
	return x
}
// Concat ignores y and propagates the left unchanged.
func (x left) Concat(y Semigroup) Semigroup {
	return x
}
// Swap converts the left into a right carrying the same value.
func (x left) Swap() Monad {
	return NewRight(x.x)
}
// Bimap applies the first morphism to the wrapped value and stays in the left branch.
func (x left) Bimap(f Morphism, g Morphism) Monad {
	return NewLeft(f(x.x))
}
// Fold collapses the Either by applying f (the left handler) to the wrapped value.
func (x left) Fold(f Morphism, g Morphism) Any {
	return f(x.x)
}
// Sequence is Traverse with the identity morphism.
func (x left) Sequence(p Point) Any {
	return x.Traverse(Identity, p)
}
// Traverse ignores f and lifts the unchanged left into the supplied Point.
func (x left) Traverse(f Morphism, p Point) Functor {
	return p.Of(NewLeft(x.x)).(Functor)
}
// right is the branch of Either that operations act on: Map, Chain, Ap,
// Concat, Bimap and Fold all apply to the wrapped value.
type right struct {
	x Any
}
// NewRight wraps x in the right branch.
func NewRight(x Any) right {
	return right{
		x: x,
	}
}
// Of lifts v into a fresh right.
func (x right) Of(v Any) Point {
	return NewRight(v)
}
// Ap applies the wrapped function via the shared monad-to-applicative helper.
func (x right) Ap(v Applicative) Applicative {
	return fromMonadToApplicativeAp(x, v)
}
// Chain applies f to the wrapped value (monadic bind).
func (x right) Chain(f func(v Any) Monad) Monad {
	return f(x.x)
}
// Map applies f to the wrapped value, re-wrapping the result in a right.
func (x right) Map(f Morphism) Functor {
	res := x.Chain(func(v Any) Monad {
		return NewRight(f(v))
	})
	return res.(Functor)
}
// Concat appends y via the shared semigroup helper.
func (x right) Concat(y Semigroup) Semigroup {
	return concat(x, y)
}
// Fold collapses the Either by applying g (the right handler) to the wrapped value.
func (x right) Fold(f Morphism, g Morphism) Any {
	return g(x.x)
}
// Swap converts the right into a left carrying the same value.
func (x right) Swap() Monad {
	return NewLeft(x.x)
}
// Bimap applies the second morphism to the wrapped value and stays in the right branch.
func (x right) Bimap(f Morphism, g Morphism) Monad {
	return NewRight(g(x.x))
}
// Sequence is Traverse with the identity morphism.
func (x right) Sequence(p Point) Any {
	return x.Traverse(Identity, p)
}
// Traverse applies f to the wrapped value (expected to yield a Functor) and
// maps each inner value back into a right via Either_.Of.
func (x right) Traverse(f Morphism, p Point) Functor {
	return f(x.x).(Functor).Map(func(a Any) Any {
		return Either_.Of(a)
	})
}
// Either_ is a singleton helper for working with Either values generically.
var (
	Either_ = either_{}
)
type either_ struct{}
// As downcasts x to the Either interface; panics if x does not implement it.
func (e either_) As(x Any) Either {
	return x.(Either)
}
// Ref returns a zero-valued right as a reference instance of Either.
func (e either_) Ref() Either {
	return right{}
}
// Of lifts x into the right branch, the Point for Either.
func (e either_) Of(x Any) Point {
	return NewRight(x)
} | useful/either.go | 0.716417 | 0.42054 | either.go | starcoder
package aeshash
import _ "unsafe"
import "leb.io/hashland/nhash"
// masks and shifts are lookup tables — presumably consumed by the assembly
// routines in asm_{386,amd64}.s; confirm against those files.
var masks [32]uint64
var shifts [32]uint64
// used in asm_{386,amd64}.s
const hashRandomBytes = 32
// this is really 2 x 128 bit round keys
var aeskeysched [hashRandomBytes]byte
var aesdebug [hashRandomBytes]byte
// aeshashbody has no Go body; it is implemented in assembly.
func aeshashbody()
//func Hash(p unsafe.Pointer, s, h uintptr) uintptr
//func HashStr(p string, s, h uintptr) uintptr
// Hash computes a 64-bit AES-based hash of b with the given seed (assembly implementation).
func Hash(b []byte, seed uint64) uint64
// HashStr computes a 64-bit AES-based hash of s with the given seed (assembly implementation).
func HashStr(s string, seed uint64) uint64
// Hash64 hashes a single uint64 value with seed s (assembly implementation).
func Hash64(v uint64, s uint64) uint64
// Hash32 hashes a single uint32 value with seed s (assembly implementation).
func Hash32(v uint32, s uint64) uint64
//func aeshash(p unsafe.Pointer, s, h uintptr) uintptr
//func aeshash32(p unsafe.Pointer, s, h uintptr) uintptr
//func aeshash64(p unsafe.Pointer, s, h uintptr) uintptr
//func aeshashstr(p unsafe.Pointer, s, h uintptr) uintptr
// init fills the key schedule and debug arrays with fixed constant bytes,
// making the hash output deterministic across runs.
func init() {
	p := aeskeysched[:]
	p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7] = 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8
	p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15] = 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF, 0x10
	p[16], p[17], p[18], p[19], p[20], p[21], p[22], p[23] = 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18
	p[24], p[25], p[26], p[27], p[28], p[29], p[30], p[31] = 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0xFF
	p = aesdebug[:]
	p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7] = 0xFF, 0, 0, 0, 0, 0, 0, 0xFE
	p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15] = 0xFD, 0, 0, 0, 0, 0, 0, 0xFC
}
// Make sure interfaces are correctly implemented. Stolen from another implementation.
// I did something similar in another package to verify the interface but didn't know you could elide the variable in a var.
// What a cute wart it is.
var (
	//_ hash.Hash = new(Digest)
	_ nhash.Hash64 = new(StateAES)
	_ nhash.HashStream = new(StateAES)
)
// StateAES is a streaming wrapper around the assembly Hash function.
// Writes are buffered in tail; the hash is (re)computed over the whole
// buffer on each Sum/Sum64 call.
type StateAES struct {
	hash uint64 // most recently computed hash value
	seed uint64 // seed passed to Hash
	clen int    // cumulative number of bytes written
	tail []byte // all bytes written so far (hashing is lazy)
}
// NewAES returns a streaming hasher seeded with seed.
func NewAES(seed uint64) nhash.Hash64 {
	s := new(StateAES)
	s.seed = seed
	s.Reset()
	return s
}
// Size returns the size of the resulting hash in bytes.
func (d *StateAES) Size() int { return 8 }
// BlockSize returns the blocksize of the hash, which in this case is 1 byte.
func (d *StateAES) BlockSize() int { return 1 }
// NumSeedBytes returns the maximum number of seed bytes required — 8, for
// the single uint64 seed. (An earlier comment said "2 x 32", which does not
// match the returned value.)
func (d *StateAES) NumSeedBytes() int {
	return 8
}
// HashSizeInBits returns the number of bits the hash function outputs.
func (d *StateAES) HashSizeInBits() int {
	return 64
}
// Reset clears the hash state and discards any buffered input.
func (d *StateAES) Reset() {
	d.hash = 0
	d.clen = 0
	d.tail = nil
}
// Write accepts a byte stream p used for calculating the hash. The call is
// lazy: bytes are only buffered here, and the actual hash calculation takes
// place in Sum() and Sum64().
func (d *StateAES) Write(p []byte) (nn int, err error) {
	l := len(p)
	d.clen += l
	d.tail = append(d.tail, p...)
	return l, nil
}
// Write64 buffers h as 8 big-endian bytes.
func (d *StateAES) Write64(h uint64) (err error) {
	d.clen += 8
	d.tail = append(d.tail, byte(h>>56), byte(h>>48), byte(h>>40), byte(h>>32), byte(h>>24), byte(h>>16), byte(h>>8), byte(h))
	return nil
}
// Sum computes the hash over everything written so far and appends it to b
// as 8 big-endian bytes.
func (d *StateAES) Sum(b []byte) []byte {
	d.hash = Hash(d.tail, d.seed)
	h := d.hash
	return append(b, byte(h>>56), byte(h>>48), byte(h>>40), byte(h>>32), byte(h>>24), byte(h>>16), byte(h>>8), byte(h))
}
// Sum64 computes and returns the hash over everything written so far.
func (d *StateAES) Sum64() uint64 {
	d.hash = Hash(d.tail, d.seed)
	return d.hash
}
// Hash64 hashes b directly (bypassing the buffer). An optional single seed
// replaces the stored seed; extra seeds beyond the first are silently ignored.
func (d *StateAES) Hash64(b []byte, seeds ...uint64) uint64 {
	switch len(seeds) {
	case 1:
		d.seed = seeds[0]
	}
	d.hash = Hash(b, d.seed)
	//fmt.Printf("pc=0x%08x, pb=0x%08x\n", d.pc, d.pb)
	return d.hash
} | aeshash.go | 0.538498 | 0.467453 | aeshash.go | starcoder
package wordwrap
import (
"io"
"strings"
)
// Options adjust word-wrapping behavior.
type Options struct {
// NoWrap disables word wrapping, so that only the existing line breaks are used.
NoWrap bool
// BreakWords allows to break the line mid-word if absolutely necessary.
BreakWords bool
// BreakMarker is appended to a line that is broken mid-word.
// It counts as a single character for the purposes of width computation.
BreakMarker string
}
var eol = []byte("\n")
/*
WrapString splits the text into lines up to the given width, and returns the
result, including the trailing end-of-line character.
Any pre-existing line breaks are preserved. If opt.BreakWords is true,
the resulting lines are guaranteed to fit into the given width. If the width
is zero or opt.NoWrap is true, no new line breaks will be added.
*/
func WrapString(text string, width int, opt Options) string {
	var buf strings.Builder
	// strings.Builder writes never fail, so the WrapTo error is safely discarded.
	_, _ = WrapTo(&buf, text, width, opt)
	return buf.String()
}
/*
WrapTo splits the text into lines up to the given width, and writes the resulting
lines into the given io.Writer, including the trailing end-of-line character.
Any pre-existing line breaks are preserved. If opt.BreakWords is true,
the resulting lines are guaranteed to fit into the given width. If the width
is zero or opt.NoWrap is true, no new line breaks will be added.
It returns the number of bytes written and the first write error, if any.
*/
func WrapTo(w io.Writer, text string, width int, opt Options) (n int, err error) {
	Wrap(text, width, opt, func(line string) {
		// After the first write error, skip further writes but keep draining
		// the callback; n and err keep the state at the point of failure.
		if err != nil {
			return
		}
		var k int
		k, err = io.WriteString(w, line)
		n += k
		if err != nil {
			return
		}
		k, err = w.Write(eol)
		n += k
	})
	return
}
// Estimate predicts the number of lines Wrap will produce for text at the
// given width. It is intended for pre-sizing allocations and may
// overestimate: it adds the explicit line-break count to a width-based
// upper bound.
func Estimate(text string, width int) int {
	lines := strings.Count(text, "\n") + 1
	if width == 0 {
		return lines
	}
	return lines + (len(text)+width-1)/width
}
/*
WrapSlice splits the text into lines up to the given width, and returns the lines
as a slice of strings.
Any pre-existing line breaks are preserved. If opt.BreakWords is true,
the resulting lines are guaranteed to fit into the given width. If the width
is zero or opt.NoWrap is true, no new line breaks will be added.
*/
func WrapSlice(text string, width int, opt Options) []string {
	// Estimate pre-sizes the slice to limit reallocation while appending.
	lines := make([]string, 0, Estimate(text, width))
	Wrap(text, width, opt, func(line string) {
		lines = append(lines, line)
	})
	return lines
}
/*
Wrap splits the text into lines up to the given width, and calls linef for each
line.
Any pre-existing line breaks are preserved. If opt.BreakWords is true,
the resulting lines are guaranteed to fit into the given width. If the width
is zero or opt.NoWrap is true, no new line breaks will be added.
*/
func Wrap(text string, width int, opt Options, linef func(line string)) {
	// Wrap each newline-terminated segment independently, then the final
	// (unterminated) remainder.
	for {
		i := strings.IndexByte(text, byte('\n'))
		if i < 0 {
			break
		}
		wrapLine(text[:i], width, opt, linef)
		text = text[i+1:]
	}
	wrapLine(text, width, opt, linef)
}
// wrapLine wraps a single newline-free line to width w, invoking linef for
// each produced line. Spaces around each break point are trimmed; a segment
// that trims to nothing is suppressed entirely (so a line of only spaces
// produces no output, while a truly empty line is passed through).
func wrapLine(text string, w int, opt Options, linef func(line string)) {
	// Pass empty lines through; skip wrapping entirely when disabled.
	if len(text) == 0 || w == 0 || opt.NoWrap {
		linef(text)
		return
	}
	for {
		n := len(text)
		if n == 0 {
			break
		}
		var b int       // chosen break position (byte index into text)
		var marker bool // true when breaking mid-word (BreakMarker is appended)
		if n <= w {
			// Remainder fits on one line.
			b = n
		} else if i := strings.LastIndexByte(text[:min(w+1, len(text))], ' '); i >= 0 {
			// Break at the last space within the width budget.
			b = i
		} else if opt.BreakWords {
			// No space available: break mid-word. Reserve one column for the
			// marker, which counts as a single character.
			if len(opt.BreakMarker) > 0 && w > 1 {
				b = w - 1
			} else {
				b = w
			}
			marker = true
		} else {
			// Word longer than w and mid-word breaking disallowed: extend to
			// the next space (or end of text), overflowing the width.
			i := strings.IndexByte(text[min(w, len(text)):], ' ')
			if i < 0 {
				b = n
			} else {
				b = w + i
			}
		}
		// Trim trailing spaces from the emitted segment; suppress segments
		// that trim to nothing.
		s := rtrim(text, b)
		if s > 0 {
			line := text[:s]
			if marker {
				linef(line + opt.BreakMarker)
			} else {
				linef(line)
			}
		}
		// Skip the spaces at the break point before continuing.
		text = text[ltrim(text, b):]
	}
}
// ltrim advances p past any run of spaces in s starting at p and returns
// the new position.
func ltrim(s string, p int) int {
	for p < len(s) && s[p] == ' ' {
		p++
	}
	return p
}
// rtrim backs up over any run of spaces in s that ends just before p and
// returns the new position.
func rtrim(s string, p int) int {
	i := p
	for i > 0 && s[i-1] == ' ' {
		i--
	}
	return i
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	} else {
		return b
	}
} | wrap.go | 0.644561 | 0.425486 | wrap.go | starcoder
package glw
import "errors"
/*
First I found a list of transforms in the field of mathematics on wikipedia:
https://en.wikipedia.org/wiki/List_of_transforms
Then I found Sequential euclidean distance transforms:
https://en.wikipedia.org/wiki/Sequential_euclidean_distance_transforms
This led me to the Euclidean distance or Euclidean metric:
https://en.wikipedia.org/wiki/Euclidean_distance
Currently I have Scale. I've been anticipating this becoming Size or a similar name. Currently it's Scale b/c of the projection matrix where screen height is (-1 .. +1) and aspect ratio is maintained for width. This made the imageviewer easier to implement instead of dealing with pixels.
But this will no longer be relevant with constraint based layout so Scale no longer makes sense except as a function that acts on Size.
Both fields then, Translate and Size, are euclidean distances. Size draws a line perpendicular to its boundaries. Translate draws a line from an origin point, normally (0, 0).
Field Rotate is a quaternion, describing a distance along a circle.
Ok, but "distance or metric"? What's a metric? Which led me to:
https://en.wikipedia.org/wiki/Metric_(mathematics)
A metric is a distance function. A few moments before the start of finding any of this, I was considering the performance impact of generating a stack of closures that are evaluated during an animation sequence or a layout phase that calculates any missing values.
So, a product of the constraint-based layout will be a set of metrics "that defines a distance between each pair of elements of a set". Such a set is called a metric space:
https://en.wikipedia.org/wiki/Metric_space
"A metric space is a set for which distances between all members of the set are defined". I do not know what the inverse of this is called, but such is what the constraint-based layout will operate on using it's first product, the metric space produced from programmer input.
Once all undefined members values are defined, the layout process is complete and has produced a final metric space; that is, a set of metrics.
This set of metrics is what enforces conformance of other objects when one object animates.
And that set of metrics could very well be a slice of closures. It'd certainly be the simplest implementation. I'm not writing any code until I develop a full picture of all interfaces.
---
One thing I just realized is that during iteration, not all incomplete metrics can be evaluated right away; they must be weighted by importance.
If there exists a metric where the right of the left-box and the left of the right-box are undefined, this can evaluate so distance reaches zero and they meet center of parent.
But if there follows another metric for evaluation that assigns a discrete value to the left of the right-box, this would alter the evaluation of the previous metric.
So, discrete value assigns have a higher weight than undefines. Each iteration needs to sort the incomplete metrics based on dynamically calculated weight given that each iteration step generates discrete value assignments for values previously not defined.
Sort order can be maintained by pop, and insert methods operating on a binary search tree of the set.
I'm defining a new term, opaque metric: both values for calculating distance are undefined.
Opaque metrics may come in any order from a programmer. All weighted the same, this means for a given set of metrics, sort is not stable. Each iteration assigns discrete values based on sort order. This means the end evaluation for a given set depends on the sorted order. This means sort should be made stable for predictable results.
Right now I'm just thinking priority goes to boundary location; left, top, right, bottom.
---
I've been reading about closed sets and set closures:
https://en.wikipedia.org/wiki/Closed_set
https://en.wikipedia.org/wiki/Closure_(mathematics)
Under examples of closed sets, I came across this:
"The Cantor set is an unusual closed set in the sense that it consists entirely of boundary points and is nowhere dense."
https://en.wikipedia.org/wiki/Cantor_set
That sounds a lot like what I'm doing.
I also found this example illuminating:
"The unit interval [0,1] is closed in the metric space of real numbers"
What's interesting about that is the "the metric space of real numbers" makes clear to me how to go from programmer defined metrics to data storage. Compared to package material which used struct hierarchies that were ignorant of siblings right up until trying to resolve layout, this is telling me that each such input can go directly into the set when created.
Why is that interesting? As I was reading about closed sets, I found that a closed set can be formally verified as closed. Initially I thought that would be excellent for writing tests, but if the verification is cheap then problems can be identified much early in the process.
What I'm trying to address now is what the data type of a metric would actually look like as code. I'll be reading about cantor sets.
---
"the most common modern construction is the Cantor ternary set, built by removing the middle thirds of a line segment"
I was already thinking a metric would look like defining a ternary relation, I think I'm on the right track.
"Cantor himself mentioned the ternary construction only in passing, as an example of a more general idea, that of a perfect set that is nowhere dense."
This scares me, a perfect set that is nowhere dense. When I was in AR, I said that the perfect solution to a problem is one that produces no side effects. That's so perfectly theoretical it's perfectly useless, as the point of any software project is to produce side effects. The reason for considering the perfect solution is to stay grounded in the number of side effects you're generating, and to reduce them as much as possible toward nil. I think my perfect solution rings of thinking like a mathematician.
*/
/*
think about as a function of sound; the left input and right input collide to the center; SetLeftInput, SetRightInput, LeftOutput, RightOutput; this is 1 dimensional; consider pkg snd; represented as a linked-list:
var frame, lbl, btn T
frame.SetSize(0, 0, 800, 600)
lbl.LeftInput = frame.LeftOutput
btn.LeftInput = lbl.RightOutput
btn.RightInput = frame.RightOutput
lbl.LeftInput == frame.LeftOutput
lbl.RightInput == btn.LeftInput // ??? btn.SetLeftInput(lbl) or (lbl.RightOutput) which still allows linking to lbl ??? (lbl, RightOutput)
frame.SetBounds(0, 0, 800, 600)
lbl.StartAt(frame, Start)
btn.StartAt(lbl, End)
btn.EndAt(frame, End)
*/
// Side identifies one edge of a rectangular element.
type Side int
const (
	Start Side = iota
	End
	Top
	Bottom
)
// T is a placeholder element type; its stub methods sketch the
// constraint-based layout API explored in the commentary above.
type T struct {
}
// SetWidth is a stub for fixing an element's width.
func (a *T) SetWidth(x int) {}
// SetHeight is a stub for fixing an element's height.
func (a *T) SetHeight(x int) {}
// StartAt is a stub for pinning this element's start edge to side d of t.
func (a *T) StartAt(t T, d Side) {}
// EndAt is a stub for pinning this element's end edge to side d of t.
func (a *T) EndAt(t T, d Side) {}
// TopAt is a stub for pinning this element's top edge to side d of t.
func (a *T) TopAt(t T, d Side) {}
// BottomAt is a stub for pinning this element's bottom edge to side d of t.
func (a *T) BottomAt(t T, d Side) {}
// ErrNotZero reports that a metric's distance has not been driven to zero.
var ErrNotZero = errors.New("Metric distance not zero")
// cset represents a cantor set; iterations are applied by bitshifting for
// powers of two with the result being a count of segments.
// type cset uint64
// metric is a placeholder distance function over a width/height pair.
type metric struct {
	w, h int
}
// Do is a stub evaluation; for now it always reports ErrNotZero.
func (a metric) Do() error { return ErrNotZero }
// metricSpace is a set of metrics. Closed is a stub; it always reports false for now.
type metricSpace []metric
func (a metricSpace) Closed() bool { return false } | glw/metric.go | 0.837321 | 0.906446 | metric.go | starcoder
package rui
import (
"fmt"
"math"
"strconv"
"strings"
)
// AngleUnitType enumerates the units an AngleUnit value can be expressed
// in: Radian, PiRadian, Degree, Gradian, or Turn.
type AngleUnitType uint8

const (
	// Radian - angle expressed in radians
	Radian AngleUnitType = 0
	// PiRadian - angle expressed in multiples of π radians
	PiRadian AngleUnitType = 1
	// Degree - angle expressed in degrees
	Degree AngleUnitType = 2
	// Gradian - angle expressed in gradians (1⁄400 of a full circle)
	Gradian AngleUnitType = 3
	// Turn - angle expressed in turns (1 turn = 360 degrees)
	Turn AngleUnitType = 4
)

// AngleUnit is an angle measurement: Value interpreted in the unit named by Type.
type AngleUnit struct {
	Type  AngleUnitType
	Value float64
}

// Deg builds an AngleUnit measured in degrees.
func Deg(value float64) AngleUnit {
	return AngleUnit{Type: Degree, Value: value}
}

// Rad builds an AngleUnit measured in radians.
func Rad(value float64) AngleUnit {
	return AngleUnit{Type: Radian, Value: value}
}

// PiRad builds an AngleUnit measured in multiples of π radians.
func PiRad(value float64) AngleUnit {
	return AngleUnit{Type: PiRadian, Value: value}
}

// Grad builds an AngleUnit measured in gradians.
func Grad(value float64) AngleUnit {
	return AngleUnit{Type: Gradian, Value: value}
}

// Equal reports whether angle and other have the same unit and value.
func (angle AngleUnit) Equal(other AngleUnit) bool {
	return angle.Type == other.Type && angle.Value == other.Value
}
// angleUnitSuffixes maps each AngleUnitType to its textual suffix.
// Note that "rad" is itself a suffix of "grad", so code matching these
// suffixes against input must take care with ordering.
func angleUnitSuffixes() map[AngleUnitType]string {
	return map[AngleUnitType]string{
		Degree: "deg",
		Radian: "rad",
		PiRadian: "pi",
		Gradian: "grad",
		Turn: "turn",
	}
}
// StringToAngleUnit converts the string argument (e.g. "90deg", "1.5rad",
// "0.5π") to an AngleUnit. The boolean result reports success; on failure
// the parse error text is passed to ErrorLog and a zero AngleUnit is returned.
func StringToAngleUnit(value string) (AngleUnit, bool) {
	var angle AngleUnit
	ok, err := angle.setValue(value)
	if !ok {
		ErrorLog(err)
	}
	return angle, ok
}
// setValue parses a string such as "90deg", "90°", "1.5rad", "0.5π",
// "0.5pi", "100grad", "0.25turn" or a bare number (taken as radians) into
// the receiver. It reports success plus an error description on failure.
func (angle *AngleUnit) setValue(value string) (bool, string) {
	value = strings.ToLower(strings.Trim(value, " \t\n\r"))
	// parse strips suffix from value and interprets the remainder as a float.
	parse := func(suffix string, unitType AngleUnitType) (bool, string) {
		val, err := strconv.ParseFloat(value[:len(value)-len(suffix)], 64)
		if err != nil {
			return false, `AngleUnit.SetValue("` + value + `") error: ` + err.Error()
		}
		angle.Value = val
		angle.Type = unitType
		return true, ""
	}
	if value == "π" {
		angle.Value = 1
		angle.Type = PiRadian
		return true, ""
	}
	if strings.HasSuffix(value, "π") {
		return parse("π", PiRadian)
	}
	if strings.HasSuffix(value, "°") {
		return parse("°", Degree)
	}
	// Check textual suffixes in a fixed order with "grad" before "rad":
	// the previous implementation ranged over the suffix map, whose
	// iteration order is randomized, so an input like "100grad" could match
	// "rad" first, try to parse "100g", and fail nondeterministically.
	for _, m := range []struct {
		suffix string
		unit   AngleUnitType
	}{
		{"grad", Gradian},
		{"rad", Radian},
		{"deg", Degree},
		{"pi", PiRadian},
		{"turn", Turn},
	} {
		if strings.HasSuffix(value, m.suffix) {
			return parse(m.suffix, m.unit)
		}
	}
	// No recognized suffix: a bare number is taken to be radians.
	if val, err := strconv.ParseFloat(value, 64); err == nil {
		angle.Value = val
		angle.Type = Radian
		return true, ""
	}
	return false, `AngleUnit.SetValue("` + value + `") error: invalid argument`
}
// String formats the AngleUnit as value plus unit suffix, e.g. "90deg".
// An unrecognized Type falls back to the bare number.
func (angle AngleUnit) String() string {
	if suffix, ok := angleUnitSuffixes()[angle.Type]; ok {
		return fmt.Sprintf("%g%s", angle.Value, suffix)
	}
	return fmt.Sprintf("%g", angle.Value)
}
// cssString formats the AngleUnit for CSS. CSS has no π-radian unit, so
// PiRadian values are converted to plain radians first.
func (angle AngleUnit) cssString() string {
	if angle.Type == PiRadian {
		return fmt.Sprintf("%grad", angle.Value*math.Pi)
	}
	return angle.String()
}
// ToRadian returns the angle converted to radians.
// (The original comment mislabeled this method as ToDegree.)
func (angle AngleUnit) ToRadian() AngleUnit {
	switch angle.Type {
	case PiRadian:
		return AngleUnit{Value: angle.Value * math.Pi, Type: Radian}
	case Degree:
		return AngleUnit{Value: angle.Value * math.Pi / 180, Type: Radian}
	case Gradian:
		return AngleUnit{Value: angle.Value * math.Pi / 200, Type: Radian}
	case Turn:
		return AngleUnit{Value: angle.Value * 2 * math.Pi, Type: Radian}
	}
	// Already radians (or unknown type): return unchanged.
	return angle
}
// ToDegree returns the angle converted to degrees.
func (angle AngleUnit) ToDegree() AngleUnit {
	switch angle.Type {
	case Radian:
		return AngleUnit{Value: angle.Value * 180 / math.Pi, Type: Degree}
	case PiRadian:
		return AngleUnit{Value: angle.Value * 180, Type: Degree}
	case Gradian:
		return AngleUnit{Value: angle.Value * 360 / 400, Type: Degree}
	case Turn:
		return AngleUnit{Value: angle.Value * 360, Type: Degree}
	}
	return angle
}
// ToGradian returns the angle converted to gradians (1⁄400 of a full circle).
func (angle AngleUnit) ToGradian() AngleUnit {
	switch angle.Type {
	case Radian:
		return AngleUnit{Value: angle.Value * 200 / math.Pi, Type: Gradian}
	case PiRadian:
		return AngleUnit{Value: angle.Value * 200, Type: Gradian}
	case Degree:
		return AngleUnit{Value: angle.Value * 400 / 360, Type: Gradian}
	case Turn:
		return AngleUnit{Value: angle.Value * 400, Type: Gradian}
	}
	return angle
}
// ToTurn returns the angle converted to turns (1 turn = 360 degrees).
func (angle AngleUnit) ToTurn() AngleUnit {
	switch angle.Type {
	case Radian:
		return AngleUnit{Value: angle.Value / (2 * math.Pi), Type: Turn}
	case PiRadian:
		return AngleUnit{Value: angle.Value / 2, Type: Turn}
	case Degree:
		return AngleUnit{Value: angle.Value / 360, Type: Turn}
	case Gradian:
		return AngleUnit{Value: angle.Value / 400, Type: Turn}
	}
	return angle
} | angleUnit.go | 0.844794 | 0.515437 | angleUnit.go | starcoder
package pass
import (
"fmt"
"github.com/mmcloughlin/addchain/acc/ir"
)
// Allocator pass assigns a minimal number of temporary variables to execute a program.
type Allocator struct {
	// Input is the name of the input variable. Note this is index 0, or the
	// identity element of the addition chain.
	Input string
	// Output is the name to give to the final output of the addition chain. This
	// variable may itself be used as a temporary during execution.
	Output string
	// Format defines how to format any temporary variables. This format string
	// must accept one integer value (it is passed to fmt.Sprintf with the
	// temporary's sequence number). For example "t%d" would be a reasonable
	// choice.
	Format string
}
// Execute performs temporary variable allocation, assigning an identifier to
// every operand of p using the minimal number of temporaries.
func (a Allocator) Execute(p *ir.Program) error {
	// Canonicalize operands, collect unique indexes, and delete all names.
	if err := Exec(p, Func(CanonicalizeOperands), Func(Indexes), Func(ClearNames)); err != nil {
		return err
	}
	// Initialize an allocation. This maintains a map from operand index to
	// variable, and a pool of free variables.
	allocation := newallocation()
	// Process instructions in reverse, similar to liveness analysis. Allocate
	// when variables are read, mark available for re-allocation on write.
	for i := len(p.Instructions) - 1; i >= 0; i-- {
		inst := p.Instructions[i]
		// The output operand variable now becomes free.
		v := allocation.Variable(inst.Output.Index)
		allocation.Free(v)
		// Inputs may need variables, if they are not already live.
		for _, input := range inst.Op.Inputs() {
			allocation.Allocate(input.Index)
		}
	}
	// Assign names to the operands. Reuse of the output variable is handled
	// specially, since we have to account for the possibility that it could be
	// aliased with the input. Prior to the last use of the input, variable 0
	// will be a temporary, after it will be the output.
	lastinputread := 0
	for _, inst := range p.Instructions {
		for _, input := range inst.Op.Inputs() {
			if input.Index == 0 {
				lastinputread = inst.Output.Index
			}
		}
	}
	// Map from variable index to name.
	name := map[int]string{}
	outv := allocation.Variable(p.Output().Index)
	for _, index := range p.Indexes {
		op := p.Operands[index]
		v := allocation.Variable(op.Index)
		_, ok := name[v]
		switch {
		// Operand index 0 is the input.
		case op.Index == 0:
			op.Identifier = a.Input
		// Use the output variable name after the last use of the input.
		case v == outv && op.Index >= lastinputread:
			op.Identifier = a.Output
		// Unnamed variable: allocate a temporary, then fall through to assign it.
		case !ok:
			name[v] = fmt.Sprintf(a.Format, len(p.Temporaries))
			p.Temporaries = append(p.Temporaries, name[v])
			fallthrough
		default:
			op.Identifier = name[v]
		}
	}
	return nil
}
// allocation tracks the assignment of a pool of variables to operand indexes.
type allocation struct {
	// variable maps an operand index to its assigned variable index.
	variable map[int]int
	// available lists variable indexes currently free for reuse.
	available []int
	// n counts the distinct variables created so far.
	n int
}

// newallocation returns an empty allocation with no variables assigned.
func newallocation() *allocation {
	return &allocation{
		variable:  map[int]int{},
		available: []int{},
		n:         0,
	}
}

// Allocate ensures a variable is assigned to operand index i, minting a
// fresh variable when none are free for reuse. It is idempotent.
func (a *allocation) Allocate(i int) {
	if _, done := a.variable[i]; done {
		// Already has an assignment; nothing to do.
		return
	}
	if len(a.available) == 0 {
		// Pool exhausted: mint a brand-new variable index.
		a.available = append(a.available, a.n)
		a.n++
	}
	// Pop the most recently freed variable and bind it to operand i.
	top := len(a.available) - 1
	a.variable[i] = a.available[top]
	a.available = a.available[:top]
}

// Variable reports the variable assigned to operand index i, allocating one
// first if it doesn't already have an assignment.
func (a *allocation) Variable(i int) int {
	a.Allocate(i)
	return a.variable[i]
}
// Free marks v as available to be allocated to another operand.
// Only the free pool is updated; existing entries in the allocation map are
// left intact.
func (a *allocation) Free(v int) {
	a.available = append(a.available, v)
} | acc/pass/alloc.go | 0.666171 | 0.471041 | alloc.go | starcoder
package main // In computer science, merge sort (also commonly spelled mergesort) is an efficient, general-purpose, comparison-based
import (
"fmt"
"math/rand"
"time"
)
// sorting algorithm. Most implementations produce a stable sort, which means that the implementation preserves the
// input order of equal elements in the sorted output. Mergesort is a divide and conquer algorithm that was invented by
// <NAME> in 1945.
// Conceptually, a merge sort works as follows:
// Divide the unsorted list into n sublists, each containing 1 element (a list of 1 element is considered sorted).
// Repeatedly merge sublists to produce new sorted sublists until there is only 1 sublist remaining. This will be the
// sorted list.
// In sorting n objects, merge sort has an average and worst-case performance of O(n log n). If the running time of
// merge sort for a list of length n is T(n), then the recurrence T(n) = 2T(n/2) + n follows from the definition of the
// algorithm (apply the algorithm to two lists of half the size of the original list, and add the n steps taken to
// merge the resulting two lists). The closed form follows from the master theorem.
// In the worst case, the number of comparisons merge sort makes is equal to or slightly smaller than (n ⌈lg n⌉ - 2⌈lg
// n⌉ + 1), which is between (n lg n - n + 1) and (n lg n + n + O(lg n)).
// In the worst case, merge sort does about 39% fewer comparisons than quicksort does in the average case. In terms of
// moves, merge sort's worst case complexity is O(n log n)—the same complexity as quicksort's best case, and merge
// sort's best case takes about half as many iterations as the worst case.
// Merge sort is more efficient than quicksort for some types of lists if the data to be sorted can only be efficiently
// accessed sequentially, and is thus popular in languages such as Lisp, where sequentially accessed data structures
// are very common. Unlike some (efficient) implementations of quicksort, merge sort is a stable sort.
// Merge sort's most common implementation does not sort in place;[5] therefore, the memory size of the input must be
// allocated for the sorted output to be stored in (see below for versions that need only n/2 extra spaces).
// source : https://en.wikipedia.org/wiki/Merge_sort
// MergeSort sorts arr in place using the merge sort algorithm and returns it.
// The slice is split in half, each half is sorted recursively into a copy,
// and the two sorted halves are merged back into arr.
func MergeSort(arr []int) []int {
	// A slice of zero or one element is already sorted.
	if len(arr) <= 1 {
		return arr
	}
	mid := len(arr) / 2
	// Sort each half recursively into fresh copies so the merge below can
	// overwrite arr without clobbering unread input.
	left := make([]int, mid)
	copy(left, MergeSort(arr[:mid]))
	right := make([]int, len(arr)-mid)
	copy(right, MergeSort(arr[mid:]))
	var i, j, k int
	// Merge: repeatedly take the smaller head element of the two halves.
	for i < len(left) && j < len(right) {
		if left[i] < right[j] {
			arr[k] = left[i]
			i++
		} else {
			arr[k] = right[j]
			j++
		}
		k++
	}
	// Drain any remaining elements of the left half.
	for i < len(left) {
		arr[k] = left[i]
		i++
		k++
	}
	// Drain any remaining elements of the right half. The original used
	// `if` here, copying at most one element; it only worked because the
	// leftover right elements happen to already sit in their final
	// positions in arr. `for` is the correct, intention-revealing form.
	for j < len(right) {
		arr[k] = right[j]
		j++
		k++
	}
	return arr
}
// main benchmarks MergeSort on 100 random integers, printing the slice
// before and after sorting along with timing information.
func main() {
	a := time.Now()
	var arr [100]int
	// slice aliases arr's backing array, so sorting the slice sorts arr too.
	var slice = arr[:]
	// seed a dedicated RNG from the current time
	source := rand.NewSource(time.Now().UnixNano())
	random := rand.New(source)
	// initialize array with random numbers in [0, 10000000)
	for index := range arr {
		arr[index] = random.Intn(10000000)
	}
	// output before sort
	fmt.Printf("Before: %v\n", arr)
	b := time.Now()
	// sorting
	result := MergeSort(slice)
	// output after
	fmt.Printf("After: %v\n", result)
	fmt.Printf("Time for sort to complete: %v\n", time.Since(b))
	fmt.Printf("Time overall: %v\n", time.Since(a))
} | merge-sort/mergeSort.go | 0.765681 | 0.671131 | mergeSort.go | starcoder |
package sorting
import (
"sync"
)
// BubbleSort sorts the given slice of integers in place using the bubble
// sort algorithm.
func BubbleSort(sl []int) {
	n := len(sl)
	for pass := 0; pass < n-1; pass++ {
		// After each pass the largest unsorted element has bubbled to the
		// end, so the inner loop can stop one position earlier each time.
		limit := n - pass - 1
		for idx := 0; idx < limit; idx++ {
			if sl[idx] > sl[idx+1] {
				sl[idx], sl[idx+1] = sl[idx+1], sl[idx]
			}
		}
	}
}
// SelectionSort sorts the given slice of integers in place using the
// selection sort algorithm.
func SelectionSort(sl []int) {
	for i := range sl {
		// Locate the smallest element in the unsorted tail sl[i:].
		smallest := i
		for j := i + 1; j < len(sl); j++ {
			if sl[j] < sl[smallest] {
				smallest = j
			}
		}
		// Move it into its final position.
		sl[i], sl[smallest] = sl[smallest], sl[i]
	}
}
// InsertionSort sorts the given slice of integers in place using the
// insertion sort algorithm.
func InsertionSort(sl []int) {
	for i := 1; i < len(sl); i++ {
		// Swap sl[i] backwards until it reaches its place within the
		// already-sorted prefix sl[:i].
		j := i - 1
		for j >= 0 && sl[j] > sl[j+1] {
			sl[j], sl[j+1] = sl[j+1], sl[j]
			j--
		}
	}
}
// merge combines two sorted int slices into one sorted slice; it is the
// merge step of merge sort.
func merge(firstHalf []int, secondHalf []int) []int {
	result := make([]int, 0, len(firstHalf)+len(secondHalf))
	firstHalfIdx, secondHalfIdx := 0, 0
	// While both inputs have elements remaining, take the smaller head.
	for firstHalfIdx < len(firstHalf) && secondHalfIdx < len(secondHalf) {
		if firstHalf[firstHalfIdx] < secondHalf[secondHalfIdx] {
			result = append(result, firstHalf[firstHalfIdx])
			firstHalfIdx++
		} else {
			result = append(result, secondHalf[secondHalfIdx])
			secondHalfIdx++
		}
	}
	// At most one of these appends anything: whichever half still has
	// elements is already sorted and can be copied wholesale.
	result = append(result, firstHalf[firstHalfIdx:]...)
	result = append(result, secondHalf[secondHalfIdx:]...)
	return result
}

// MergeSort sorts the given slice of integers in place using the merge
// sort algorithm.
func MergeSort(sl []int) {
	// Guard with <= 1 rather than == 1: with an empty slice the original
	// check fell through and sl[0:middle+1] panicked with an out-of-range
	// index (middle is 0 because Go truncates (0-1)/2 toward zero).
	if len(sl) <= 1 {
		return
	}
	middle := (len(sl) - 1) / 2
	firstHalf := sl[:middle+1]
	secondHalf := sl[middle+1:]
	MergeSort(firstHalf)
	MergeSort(secondHalf)
	copy(sl, merge(firstHalf, secondHalf))
}
// MergeSortParallel sorts the given slice of integers in place, sorting the
// two halves concurrently before merging them. Only the top-level split is
// parallelised; each half is sorted with the sequential MergeSort.
func MergeSortParallel(sl []int) {
	// Guard with <= 1 rather than == 1: an empty slice would otherwise
	// panic on sl[0:middle+1] below, since (0-1)/2 truncates to 0.
	if len(sl) <= 1 {
		return
	}
	middle := (len(sl) - 1) / 2
	firstHalf := sl[0 : middle+1]
	secondHalf := sl[middle+1:]
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		MergeSort(firstHalf)
	}()
	go func() {
		defer wg.Done()
		MergeSort(secondHalf)
	}()
	// Both halves must be fully sorted before they can be merged.
	wg.Wait()
	copy(sl, merge(firstHalf, secondHalf))
}
// doPivot partitions sl[start:end) around its last element (the pivot):
// elements smaller than the pivot end up before it, everything else after.
// It returns the pivot's final index.
func doPivot(sl []int, start, end int) int {
	pivotIdx := end - 1
	// swapIdx marks where the next element smaller than the pivot goes;
	// sl[start:swapIdx] always holds elements known to be < pivot.
	swapIdx := start
	for i := start; i < end-1; i++ {
		if sl[pivotIdx] > sl[i] {
			sl[i], sl[swapIdx] = sl[swapIdx], sl[i]
			swapIdx++
		}
	}
	// Place the pivot between the two partitions.
	sl[pivotIdx], sl[swapIdx] = sl[swapIdx], sl[pivotIdx]
	return swapIdx
}

// recursiveQuickSort sorts sl[start:end) in place by partitioning around a
// pivot and recursing on each side.
func recursiveQuickSort(sl []int, start, end int) {
	// Ranges of fewer than two elements are already sorted.
	if start < end-1 {
		pivotIdx := doPivot(sl, start, end)
		recursiveQuickSort(sl, start, pivotIdx)
		// The pivot is already in its final position, so the right-hand
		// recursion can start at pivotIdx+1 (the original recursed on
		// [pivotIdx, end), redundantly re-partitioning the pivot).
		recursiveQuickSort(sl, pivotIdx+1, end)
	}
}
// QuickSort sorts the given slice of integers in place using the quicksort
// algorithm. The last element of each partition is chosen as the pivot.
func QuickSort(sl []int) {
	recursiveQuickSort(sl, 0, len(sl))
} | golang/sorting/sorting.go | 0.752559 | 0.584894 | sorting.go | starcoder |
package agent
import (
"github.com/aperturerobotics/bifrost/peer"
"github.com/aperturerobotics/controllerbus/directive"
)
// AttachAgentToNode is a directive to attach an agent to a node.
type AttachAgentToNode interface {
	// Directive indicates AttachAgentToNode is a directive.
	directive.Directive
	// AttachAgentToNodeID returns a specific node ID we are looking for.
	// Cannot be empty.
	AttachAgentToNodeID() peer.ID
}

// AttachAgentToNodeSingleton implements AttachAgentToNode with a peer ID constraint.
type AttachAgentToNodeSingleton struct {
	// peerIDConstraint is the peer ID of the node the agent should attach to.
	peerIDConstraint peer.ID
}

// NewAttachAgentToNodeSingleton constructs a new AttachAgentToNodeSingleton directive.
func NewAttachAgentToNodeSingleton(peerID peer.ID) *AttachAgentToNodeSingleton {
	return &AttachAgentToNodeSingleton{peerIDConstraint: peerID}
}

// AttachAgentToNodeID returns the peer ID of the node we are looking for.
// NOTE(review): the interface doc says this cannot be empty, but this
// implementation returns whatever was passed to the constructor — confirm
// whether an empty ID is meant to match any node.
func (d *AttachAgentToNodeSingleton) AttachAgentToNodeID() peer.ID {
	return d.peerIDConstraint
}
// Validate validates the directive.
// This is a cursory validation to see if the values "look correct."
func (d *AttachAgentToNodeSingleton) Validate() error {
	// No constraints to check; any peer ID value is accepted here.
	return nil
}

// GetValueOptions returns options relating to value handling.
func (d *AttachAgentToNodeSingleton) GetValueOptions() directive.ValueOptions {
	// Zero value: default handling, no value cap or early termination.
	return directive.ValueOptions{}
}

// IsEquivalent checks if the other directive is equivalent. If two
// directives are equivalent, and the new directive does not superceed the
// old, then the new directive will be merged (de-duplicated) into the old.
func (d *AttachAgentToNodeSingleton) IsEquivalent(other directive.Directive) bool {
	od, ok := other.(AttachAgentToNode)
	if !ok {
		return false
	}
	// Equivalent iff both directives target the same node peer ID.
	return d.peerIDConstraint == od.AttachAgentToNodeID()
}

// Superceeds checks if the directive overrides another.
// The other directive will be canceled if superceded.
// This implementation never supersedes an existing directive.
func (d *AttachAgentToNodeSingleton) Superceeds(other directive.Directive) bool {
	return false
}

// GetName returns the directive's type name.
// This is not necessarily unique, and is primarily intended for display.
func (d *AttachAgentToNodeSingleton) GetName() string {
	return "AttachAgentToNode"
}
// GetDebugVals returns the directive arguments as key/value pairs.
// This should be something like param1="test", param2="test".
// This is not necessarily unique, and is primarily intended for display.
// (The original comment named this GetDebugString, which does not exist.)
func (d *AttachAgentToNodeSingleton) GetDebugVals() directive.DebugValues {
	vals := directive.DebugValues{}
	// Only include the node ID when a constraint was actually set.
	if pid := d.AttachAgentToNodeID(); pid != peer.ID("") {
		vals["node-id"] = []string{pid.Pretty()}
	}
	return vals
}
// _ is a type constraint
var _ AttachAgentToNode = ((*AttachAgentToNodeSingleton)(nil)) | agent/directive.go | 0.728845 | 0.426441 | directive.go | starcoder |
package parser
import (
"errors"
"fmt"
"regexp"
"strings"
)
type Scanner struct {
src source // the source the scanner is drawing from
sliceStart int // the start of the slice visible to the scanner, based on the original src
sliceLength int // the length of the slice visible to the scanner, based on the original src
}
type source interface {
length() int // the length of the entire source string
slice(i, length int) string // the string of the given slice
filename() string // the name of the file from which the source is derived (or empty if none)
stripSource(i, length int) source
}
type stringSource struct {
origin string // the entire source string
f string // the source filename
}
func NewScanner(str string) *Scanner {
return &Scanner{stringSource{origin: str}, 0, len(str)}
}
func NewScannerWithFilename(str, filename string) *Scanner {
return &Scanner{stringSource{str, filename}, 0, len(str)}
}
func NewScannerAt(str string, offset, size int) *Scanner {
return &Scanner{stringSource{origin: str}, offset, size}
}
// - Scanner
func (s Scanner) StripSource() Scanner {
s.src = s.src.stripSource(s.sliceStart, s.sliceLength)
return s
}
// The name of the file from which the source is derived (or empty if none).
func (s Scanner) Filename() string {
return s.src.filename()
}
func (s Scanner) String() string {
if s.src == nil {
return ""
}
return s.slice()
}
func (s Scanner) IsNil() bool {
return s.src == nil
}
func (s Scanner) Format(state fmt.State, c rune) {
if c == 'q' {
_, _ = fmt.Fprintf(state, "%q", s.slice())
} else {
_, _ = state.Write([]byte(s.slice()))
}
}
var (
NoLimit = -1
DefaultLimit = 1
)
// Contains reports whether sn's window lies entirely within s's window.
// Both scanners must be backed by the same source and filename.
func (s Scanner) Contains(sn Scanner) bool {
	if s.Filename() != sn.Filename() || s.src != sn.src {
		return false
	}
	// sn must start at or after s, and end at or before s ends.
	return s.sliceStart <= sn.sliceStart &&
		s.sliceStart+s.sliceLength >= sn.sliceStart+sn.sliceLength
}
func (s Scanner) Context(limitLines int) string {
end := s.sliceStart + s.sliceLength
lineno, colno := s.Position()
aboveCxt := s.src.slice(0, s.sliceStart)
belowCxt := s.src.slice(end, s.src.length()-end)
if limitLines != NoLimit {
a := strings.Split(aboveCxt, "\n")
if len(a) > limitLines {
aboveCxt = strings.Join(a[len(a)-limitLines-1:], "\n")
}
b := strings.Split(belowCxt, "\n")
if len(b) > limitLines {
belowCxt = strings.Join(b[:limitLines], "\n")
}
}
return fmt.Sprintf("\n\033[1;37m%s:%d:%d:\033[0m\n%s\033[1;31m%s\033[0m%s",
s.Filename(),
lineno,
colno,
aboveCxt,
s.slice(),
belowCxt,
)
}
// The position of the start of the scanner within the original source.
func (s Scanner) Offset() int {
return s.sliceStart
}
// The 1-indexed line and column number of the start of the scanner within the original source.
func (s Scanner) Position() (int, int) {
return lineColumn(s.src.slice(0, s.sliceStart), s.sliceStart)
}
// The slice that is visible to the scanner
func (s Scanner) slice() string {
return s.src.slice(s.sliceStart, s.sliceLength)
}
func (s Scanner) Slice(a, b int) *Scanner {
return &Scanner{s.src, s.sliceStart + a, b - a}
}
func (s Scanner) Skip(i int) *Scanner {
return &Scanner{s.src, s.sliceStart + i, s.sliceLength - i}
}
// MergeScanners returns a scanner spanning the smallest range of the shared
// source that covers every input scanner. All scanners must be backed by the
// same source; at least one scanner is required.
func MergeScanners(items ...Scanner) (Scanner, error) {
	if len(items) == 0 {
		return Scanner{}, errors.New("needs at least one scanner")
	}
	if len(items) == 1 {
		return items[0], nil
	}
	// l and r track the leftmost start and rightmost end seen so far.
	l, r := items[0].sliceStart, items[0].sliceStart+items[0].sliceLength
	src := items[0].src
	for _, v := range items[1:] {
		if v.src != src {
			return Scanner{}, fmt.Errorf("scanners' sources are not the same: %s vs %s", src, v.src)
		}
		if v.sliceStart < l {
			l = v.sliceStart
		}
		if v.sliceStart+v.sliceLength > r {
			r = v.sliceStart + v.sliceLength
		}
	}
	return Scanner{
		src: src,
		sliceStart: l,
		sliceLength: r - l,
	}, nil
}
// Eat returns a scanner containing the next i bytes and advances s past them.
func (s *Scanner) Eat(i int, eaten *Scanner) *Scanner {
eaten.src = s.src
eaten.sliceStart = s.sliceStart
eaten.sliceLength = i
*s = *s.Skip(i)
return s
}
func (s *Scanner) EatString(str string, eaten *Scanner) bool {
if strings.HasPrefix(s.slice(), str) {
s.Eat(len(str), eaten)
return true
}
return false
}
// EatRegexp eats the text matching a regexp, populating match (if != nil) with
// the whole match and captures (if != nil) with any captured groups. Returns
// n as the number of captures set and ok iff a match was found.
// The regexp must be anchored to the start of the input (\A); a match that
// does not begin at offset 0 is a programmer error and panics.
func (s *Scanner) EatRegexp(re *regexp.Regexp, match *Scanner, captures []Scanner) (n int, ok bool) {
	if loc := re.FindStringSubmatchIndex(s.slice()); loc != nil {
		if loc[0] != 0 {
			panic(`re not \A-anchored`)
		}
		if match != nil {
			*match = *s.Slice(loc[0], loc[1])
		}
		skip := loc[1]
		// loc[2:] holds a start/end index pair for each capture group.
		loc = loc[2:]
		n = len(loc) / 2
		// Only fill as many captures as the match actually produced.
		if len(captures) > n {
			captures = captures[:n]
		}
		for i := range captures {
			captures[i] = *s.Slice(loc[2*i], loc[2*i+1])
		}
		// Advance the scanner past the whole match.
		*s = *s.Skip(skip)
		return n, true
	}
	return 0, false
}
// - stringSource
func (s stringSource) stripSource(offset, size int) source {
s.origin = s.slice(offset, size)
return s
}
func (s stringSource) length() int {
return len(s.origin)
}
// slice returns the substring [i, i+length) of the origin string.
// Out-of-range requests fall back to returning the entire origin string
// rather than panicking — presumably a defensive choice because callers
// pass offsets computed against the original source; TODO confirm callers
// rely on this fallback rather than it masking bugs.
func (s stringSource) slice(i, length int) string {
	// Since offset and length based on the original origin string, so they might be out of range
	if i < 0 || i+length < 0 || i > len(s.origin) || i+length > len(s.origin) {
		return s.origin
	}
	return (s.origin)[i : i+length]
}
func (s stringSource) filename() string {
return s.f
}
// lineColumn returns the 1-indexed line and column number of the given byte
// position within the given string. Only str[:pos] is examined, so str must
// be at least pos bytes long.
func lineColumn(str string, pos int) (line, col int) {
	prefix := str[:pos]
	// One more line than the number of newlines before pos.
	line = strings.Count(prefix, "\n") + 1
	// Column counts from the last newline before pos; LastIndex returns -1
	// when there is none, which makes col 1-based from the string start.
	col = pos - strings.LastIndex(prefix, "\n")
	return
} | parser/scanner.go | 0.67405 | 0.424531 | scanner.go | starcoder |
package cam
import (
"math"
"github.com/go-gl/mathgl/mgl32"
"github.com/go-gl/mathgl/mgl64"
"github.com/nvisioner/glutils/win"
)
type FpsCamera struct {
// Camera options
moveSpeed float64
cursorSensitivity float64
// Eular Angles
pitch float64
yaw float64
// Camera attributes
pos mgl32.Vec3
front mgl32.Vec3
up mgl32.Vec3
right mgl32.Vec3
worldUp mgl32.Vec3
inputManager *win.InputManager
}
func NewFpsCamera(position, worldUp mgl32.Vec3, yaw, pitch float64, im *win.InputManager) *FpsCamera {
cam := FpsCamera{
moveSpeed: 5.00,
cursorSensitivity: 0.7,
pitch: pitch,
yaw: yaw,
pos: position,
up: mgl32.Vec3{0, 1, 0},
worldUp: worldUp,
inputManager: im,
}
return &cam
}
func (c *FpsCamera) Update(dTime float64) {
c.updatePosition(dTime)
c.updateDirection()
}
// UpdatePosition updates this camera's position by giving directions that
// the camera is to travel in and for how long
func (c *FpsCamera) updatePosition(dTime float64) {
adjustedSpeed := float32(dTime * c.moveSpeed)
fixedFront := mgl32.Vec3{c.front.X(), 0, c.front.Z()}
if c.inputManager.IsActive(win.PLAYER_FORWARD) {
c.pos = c.pos.Add(fixedFront.Mul(adjustedSpeed))
}
if c.inputManager.IsActive(win.PLAYER_BACKWARD) {
c.pos = c.pos.Sub(fixedFront.Mul(adjustedSpeed))
}
if c.inputManager.IsActive(win.PLAYER_LEFT) {
c.pos = c.pos.Sub(c.front.Cross(c.up).Normalize().Mul(adjustedSpeed))
}
if c.inputManager.IsActive(win.PLAYER_RIGHT) {
c.pos = c.pos.Add(c.front.Cross(c.up).Normalize().Mul(adjustedSpeed))
}
}
// updateDirection updates the camera's orientation from the cursor delta
// reported by the input manager, then recomputes the direction vectors.
func (c *FpsCamera) updateDirection() {
	dCursor := c.inputManager.CursorChange()
	// Scale raw cursor movement by the sensitivity; vertical movement is
	// negated so that moving the cursor up pitches the camera up.
	dx := c.cursorSensitivity * dCursor[0]
	dy := -c.cursorSensitivity * dCursor[1]
	c.pitch += dy
	// Clamp pitch so the camera cannot flip over at the poles.
	if c.pitch > 89.0 {
		c.pitch = 89.0
	} else if c.pitch < -89.0 {
		c.pitch = -89.0
	}
	// Yaw wraps around a full circle.
	c.yaw = math.Mod(c.yaw+dx, 360)
	c.updateVectors()
}
// updateVectors recomputes the front, right and up vectors from the current
// yaw/pitch Euler angles.
func (c *FpsCamera) updateVectors() {
	// Spherical-to-Cartesian conversion of (pitch, yaw), in degrees, into a
	// direction vector: x, y, z.
	c.front[0] = float32(math.Cos(mgl64.DegToRad(c.pitch)) * math.Cos(mgl64.DegToRad(c.yaw)))
	c.front[1] = float32(math.Sin(mgl64.DegToRad(c.pitch)))
	c.front[2] = float32(math.Cos(mgl64.DegToRad(c.pitch)) * math.Sin(mgl64.DegToRad(c.yaw)))
	c.front = c.front.Normalize()
	// Gram-Schmidt process to figure out right and up vectors.
	// NOTE(review): right is computed as worldUp × front, which has the
	// opposite sign to the common front × worldUp convention — confirm the
	// intended handedness against how right/up are consumed.
	c.right = c.worldUp.Cross(c.front).Normalize()
	c.up = c.right.Cross(c.front).Normalize()
}
// GetCameraTransform gets the matrix to transform from world coordinates to
// this camera's coordinates.
func (camera *FpsCamera) GetTransform() mgl32.Mat4 {
cameraTarget := camera.pos.Add(camera.front)
return mgl32.LookAt(
camera.pos.X(), camera.pos.Y(), camera.pos.Z(),
cameraTarget.X(), cameraTarget.Y(), cameraTarget.Z(),
camera.up.X(), camera.up.Y(), camera.up.Z(),
)
}
func (camera *FpsCamera) GetPos() mgl32.Vec3 {
return camera.pos
}
func (camera *FpsCamera) GetFront() mgl32.Vec3 {
return camera.front
}
func (camera *FpsCamera) GetAngle() float64 {
return camera.yaw
} | cam/camera.go | 0.788787 | 0.501099 | camera.go | starcoder |
package syntax
import (
"bytes"
"fmt"
"regexp"
"strings"
)
// The parse tree for search input. It is a list of expressions.
type ParseTree []*Expr
// Values returns the raw string values associated with a field.
func (p ParseTree) Values(field string) []string {
	var values []string
	for _, expr := range p {
		if expr.Field != field {
			continue
		}
		values = append(values, expr.Value)
	}
	return values
}
// WithErrorsQuoted converts a search input like `f:foo b(ar` to `f:foo "b(ar"`.
func (p ParseTree) WithErrorsQuoted() ParseTree {
	quoted := make(ParseTree, 0, len(p))
	for _, e := range p {
		q := e.WithErrorsQuoted()
		quoted = append(quoted, &q)
	}
	return quoted
}
// Map builds a new parse tree by running a function f on each expression in an
// existing parse tree and substituting the resulting expression. If f returns
// nil, the expression is removed in the new parse tree.
func Map(p ParseTree, f func(e Expr) *Expr) ParseTree {
	p2 := make(ParseTree, 0, len(p))
	for _, e := range p {
		// f receives the expression by value, so it already operates on a
		// copy; the original's explicit copy-then-repoint dance
		// (cpy := *e; e = &cpy) was dead code and has been removed.
		if result := f(*e); result != nil {
			p2 = append(p2, result)
		}
	}
	return p2
}
// String returns a string that parses to the parse tree, where expressions are
// separated by a single space.
func (p ParseTree) String() string {
	parts := make([]string, len(p))
	for i, e := range p {
		parts[i] = e.String()
	}
	return strings.Join(parts, " ")
}
// An Expr describes an expression in the parse tree.
type Expr struct {
Pos int // the starting character position of the expression
Not bool // the expression is negated (e.g., -term or -field:term)
Field string // the field that this expression applies to
Value string // the raw field value
ValueType TokenType // the type of the value
}
// String renders the expression back to its textual search-input form,
// including negation, field prefix, and pattern delimiters.
func (e Expr) String() string {
	var sb bytes.Buffer
	if e.Not {
		sb.WriteByte('-')
	}
	if e.Field != "" {
		sb.WriteString(e.Field)
		sb.WriteByte(':')
	}
	// Regexp patterns are wrapped in slashes on both sides.
	isPattern := e.ValueType == TokenPattern
	if isPattern {
		sb.WriteByte('/')
	}
	sb.WriteString(e.Value)
	if isPattern {
		sb.WriteByte('/')
	}
	return sb.String()
}
// WithErrorsQuoted returns a new version of the expression,
// quoting in case of TokenError or an invalid regular expression.
func (e Expr) WithErrorsQuoted() Expr {
	e2 := e
	needsQuoting := false
	switch e.ValueType {
	case TokenError:
		// Tokenization failed; the raw text must be treated literally.
		needsQuoting = true
	case TokenPattern, TokenLiteral:
		// Patterns and literals are compiled as regular expressions; quote
		// anything that does not compile.
		_, err := regexp.Compile(e2.Value)
		if err != nil {
			needsQuoting = true
		}
	}
	if needsQuoting {
		// Re-render the whole expression (including negation and field
		// prefix) as a single quoted literal value.
		e2.Not = false
		e2.Field = ""
		e2.Value = fmt.Sprintf("%q", e.String())
		e2.ValueType = TokenQuoted
	}
	return e2
} | enterprise/internal/batches/search/syntax/parse_tree.go | 0.729231 | 0.422207 | parse_tree.go | starcoder |
package check
import (
"math"
)
// IsUniqueFloat64Slice checks if all elements of a float64 slice are unique
// (so the slice does not contain duplicated elements). Two floats are
// considered equal when they differ by at most Epsilon.
// An empty slice is reported as unique.
func IsUniqueFloat64Slice(Slice []float64, Epsilon float64) bool {
	// Compare every unordered pair exactly once.
	for i := 0; i < len(Slice); i++ {
		for j := i + 1; j < len(Slice); j++ {
			if math.Abs(Slice[i]-Slice[j]) <= Epsilon {
				return false
			}
		}
	}
	return true
}
// IsUniqueIntSlice checks if all elements of an int slice are unique (so the
// slice does not contain duplicated elements).
// An empty slice is reported as unique.
func IsUniqueIntSlice(Slice []int) bool {
	// Track every value seen so far; a repeat means a duplicate. This is
	// O(n) instead of the original O(n²) pairwise scan (which also
	// redundantly compared each index with itself guarded by i != j).
	seen := make(map[int]struct{}, len(Slice))
	for _, value := range Slice {
		if _, dup := seen[value]; dup {
			return false
		}
		seen[value] = struct{}{}
	}
	return true
}
// IsUniqueStringSlice checks if all elements in a string slice are unique (so
// the slice does not contain duplicated elements).
// An empty slice is reported as unique.
func IsUniqueStringSlice(Slice []string) bool {
	// Track every value seen so far; a repeat means a duplicate. This is
	// O(n) instead of the original O(n²) pairwise scan.
	seen := make(map[string]struct{}, len(Slice))
	for _, value := range Slice {
		if _, dup := seen[value]; dup {
			return false
		}
		seen[value] = struct{}{}
	}
	return true
}
// IsUnique checks whether a slice ([]int, []string and []float64) is unique
// (so it does not contain duplicated elements).
// Note that if you're using it for a float slice, the comparisons are made
// with Epsilon set to 0. If you need to use a different level of accuracy,
// use IsUniqueFloat64Slice instead.
// Panics for any other element type.
func IsUnique(Slice interface{}) bool {
	switch s := Slice.(type) {
	case []int:
		return IsUniqueIntSlice(s)
	case []string:
		return IsUniqueStringSlice(s)
	case []float64:
		return IsUniqueFloat64Slice(s, 0)
	default:
		panic("implemented only for []int, []string and []float64")
	}
}
// UniqueStringSlice returns a new slice containing the unique elements of a
// string slice, preserving first-occurrence order.
// If the slice has no elements, the function returns an empty slice.
func UniqueStringSlice(Slice []string) []string {
	if len(Slice) == 0 {
		return []string{}
	}
	// A set of already-emitted values makes this O(n) instead of the
	// original O(n²) linear membership scan per element.
	seen := make(map[string]struct{}, len(Slice))
	unique := make([]string, 0, len(Slice))
	for _, str := range Slice {
		if _, dup := seen[str]; !dup {
			seen[str] = struct{}{}
			unique = append(unique, str)
		}
	}
	return unique
}
// UniqueIntSlice returns a new slice containing the unique elements of an
// int slice, preserving first-occurrence order.
// If the slice has no elements, the function returns an empty slice.
func UniqueIntSlice(Slice []int) []int {
	if len(Slice) == 0 {
		return []int{}
	}
	// A set of already-emitted values makes this O(n) instead of the
	// original O(n²) linear membership scan per element.
	seen := make(map[int]struct{}, len(Slice))
	unique := make([]int, 0, len(Slice))
	for _, value := range Slice {
		if _, dup := seen[value]; !dup {
			seen[value] = struct{}{}
			unique = append(unique, value)
		}
	}
	return unique
}
// UniqueFloat64Slice returns a new slice containing the unique elements of a
// float64 slice, preserving first-occurrence order. Two floats are considered
// equal when they differ by at most Epsilon.
// If the slice has no elements, the function returns an empty slice.
func UniqueFloat64Slice(Slice []float64, Epsilon float64) []float64 {
	if len(Slice) == 0 {
		return []float64{}
	}
	unique := make([]float64, 0, len(Slice))
	for _, value := range Slice {
		// Epsilon-based equality rules out a map lookup; scan the values
		// kept so far instead.
		isDup := false
		for _, kept := range unique {
			if math.Abs(value-kept) <= Epsilon {
				isDup = true
				break
			}
		}
		if !isDup {
			unique = append(unique, value)
		}
	}
	return unique
}
// IsUniqueMapIntFloat64 checks if all values of map[int]float64 are unique
// (so the map does not contain duplicated values). Two floats are considered
// equal when they differ by at most Epsilon.
// Note that uniqueness here means the equality of values, since a map will
// never be unique in terms of key-value pairs.
func IsUniqueMapIntFloat64(Map map[int]float64, Epsilon float64) bool {
	// Collect the values so each unordered pair is compared once, rather
	// than twice as in the original double range over the map.
	values := make([]float64, 0, len(Map))
	for _, v := range Map {
		values = append(values, v)
	}
	for i := 0; i < len(values); i++ {
		for j := i + 1; j < len(values); j++ {
			if math.Abs(values[i]-values[j]) <= Epsilon {
				return false
			}
		}
	}
	return true
}
// IsUniqueMapStringFloat64 checks if all values of map[string]float64 are
// unique (so the map does not contain duplicated values). Two floats are
// considered equal when they differ by at most Epsilon.
// Note that uniqueness here means the equality of values, since a map will
// never be unique in terms of key-value pairs.
func IsUniqueMapStringFloat64(Map map[string]float64, Epsilon float64) bool {
	// Collect the values so each unordered pair is compared once, rather
	// than twice as in the original double range over the map.
	values := make([]float64, 0, len(Map))
	for _, v := range Map {
		values = append(values, v)
	}
	for i := 0; i < len(values); i++ {
		for j := i + 1; j < len(values); j++ {
			if math.Abs(values[i]-values[j]) <= Epsilon {
				return false
			}
		}
	}
	return true
}
// IsUniqueMapIntInt checks if all values of map[int]int are unique (so the
// map does not contain duplicated values).
// Note that uniqueness here means the equality of values, since a map will
// never be unique in terms of key-value pairs.
func IsUniqueMapIntInt(Map map[int]int) bool {
	// Exact integer equality permits an O(n) value-set check instead of
	// the original O(n²) pairwise comparison.
	seen := make(map[int]struct{}, len(Map))
	for _, value := range Map {
		if _, dup := seen[value]; dup {
			return false
		}
		seen[value] = struct{}{}
	}
	return true
}
// IsUniqueMapStringInt checks if all values of map[string]int are unique (so
// the map does not contain duplicated values).
// Note that uniqueness here means the equality of values, since a map will
// never be unique in terms of key-value pairs.
func IsUniqueMapStringInt(Map map[string]int) bool {
	// Exact integer equality permits an O(n) value-set check instead of
	// the original O(n²) pairwise comparison.
	seen := make(map[int]struct{}, len(Map))
	for _, value := range Map {
		if _, dup := seen[value]; dup {
			return false
		}
		seen[value] = struct{}{}
	}
	return true
}
// IsUniqueMapIntString checks if all values of map[int]string are unique (so
// the map does not contain duplicated values).
// Note that uniqueness here means the equality of values, since a map will
// never be unique in terms of key-value pairs.
func IsUniqueMapIntString(Map map[int]string) bool {
	// Exact string equality permits an O(n) value-set check instead of
	// the original O(n²) pairwise comparison.
	seen := make(map[string]struct{}, len(Map))
	for _, value := range Map {
		if _, dup := seen[value]; dup {
			return false
		}
		seen[value] = struct{}{}
	}
	return true
}
// IsUniqueMapStringString checks if all values of map[string]string are unique
// (so the map does not contain duplicated values). (The original comment
// mistakenly said map[int]string.)
// Note that uniqueness here means the equality of values, since a map will
// never be unique in terms of key-value pairs.
func IsUniqueMapStringString(Map map[string]string) bool {
	if len(Map) == 0 {
		return true
	}
	// Pairwise comparison over all key pairs; each unordered pair is
	// visited twice, which is redundant but harmless.
	for key1, value := range Map {
		for key2, otherValue := range Map {
			if key1 != key2 {
				if value == otherValue {
					return false
				}
			}
		}
	}
	return true
} | unique.go | 0.86521 | 0.549701 | unique.go | starcoder |
package integration
// EqualsAST does deep equals between the two objects.
func EqualsAST(inA, inB AST) bool {
if inA == nil && inB == nil {
return true
}
if inA == nil || inB == nil {
return false
}
switch a := inA.(type) {
case BasicType:
b, ok := inB.(BasicType)
if !ok {
return false
}
return a == b
case Bytes:
b, ok := inB.(Bytes)
if !ok {
return false
}
return EqualsBytes(a, b)
case InterfaceContainer:
b, ok := inB.(InterfaceContainer)
if !ok {
return false
}
return EqualsInterfaceContainer(a, b)
case InterfaceSlice:
b, ok := inB.(InterfaceSlice)
if !ok {
return false
}
return EqualsInterfaceSlice(a, b)
case *Leaf:
b, ok := inB.(*Leaf)
if !ok {
return false
}
return EqualsRefOfLeaf(a, b)
case LeafSlice:
b, ok := inB.(LeafSlice)
if !ok {
return false
}
return EqualsLeafSlice(a, b)
case *NoCloneType:
b, ok := inB.(*NoCloneType)
if !ok {
return false
}
return EqualsRefOfNoCloneType(a, b)
case *RefContainer:
b, ok := inB.(*RefContainer)
if !ok {
return false
}
return EqualsRefOfRefContainer(a, b)
case *RefSliceContainer:
b, ok := inB.(*RefSliceContainer)
if !ok {
return false
}
return EqualsRefOfRefSliceContainer(a, b)
case *SubImpl:
b, ok := inB.(*SubImpl)
if !ok {
return false
}
return EqualsRefOfSubImpl(a, b)
case ValueContainer:
b, ok := inB.(ValueContainer)
if !ok {
return false
}
return EqualsValueContainer(a, b)
case ValueSliceContainer:
b, ok := inB.(ValueSliceContainer)
if !ok {
return false
}
return EqualsValueSliceContainer(a, b)
default:
// this should never happen
return false
}
}
// EqualsBytes does deep equals between the two objects.
func EqualsBytes(a, b Bytes) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
if a[i] != b[i] {
return false
}
}
return true
}
// EqualsInterfaceContainer does deep equals between the two objects.
func EqualsInterfaceContainer(a, b InterfaceContainer) bool {
return true
}
// EqualsInterfaceSlice does deep equals between the two objects.
func EqualsInterfaceSlice(a, b InterfaceSlice) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
if !EqualsAST(a[i], b[i]) {
return false
}
}
return true
}
// EqualsRefOfLeaf does deep equals between the two objects.
func EqualsRefOfLeaf(a, b *Leaf) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return a.v == b.v
}
// EqualsLeafSlice does deep equals between the two objects.
func EqualsLeafSlice(a, b LeafSlice) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
if !EqualsRefOfLeaf(a[i], b[i]) {
return false
}
}
return true
}
// EqualsRefOfNoCloneType does deep equals between the two objects.
func EqualsRefOfNoCloneType(a, b *NoCloneType) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return a.v == b.v
}
// EqualsRefOfRefContainer does deep equals between the two objects.
func EqualsRefOfRefContainer(a, b *RefContainer) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return a.NotASTType == b.NotASTType &&
EqualsAST(a.ASTType, b.ASTType) &&
EqualsRefOfLeaf(a.ASTImplementationType, b.ASTImplementationType)
}
// EqualsRefOfRefSliceContainer does deep equals between the two objects.
func EqualsRefOfRefSliceContainer(a, b *RefSliceContainer) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return EqualsSliceOfAST(a.ASTElements, b.ASTElements) &&
EqualsSliceOfInt(a.NotASTElements, b.NotASTElements) &&
EqualsSliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements)
}
// EqualsRefOfSubImpl does deep equals between the two objects.
func EqualsRefOfSubImpl(a, b *SubImpl) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return EqualsSubIface(a.inner, b.inner) &&
EqualsRefOfBool(a.field, b.field)
}
// EqualsValueContainer does deep equals between the two objects.
func EqualsValueContainer(a, b ValueContainer) bool {
return a.NotASTType == b.NotASTType &&
EqualsAST(a.ASTType, b.ASTType) &&
EqualsRefOfLeaf(a.ASTImplementationType, b.ASTImplementationType)
}
// EqualsValueSliceContainer does deep equals between the two objects.
func EqualsValueSliceContainer(a, b ValueSliceContainer) bool {
return EqualsSliceOfAST(a.ASTElements, b.ASTElements) &&
EqualsSliceOfInt(a.NotASTElements, b.NotASTElements) &&
EqualsSliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements)
}
// EqualsSubIface does deep equals between the two objects.
func EqualsSubIface(inA, inB SubIface) bool {
if inA == nil && inB == nil {
return true
}
if inA == nil || inB == nil {
return false
}
switch a := inA.(type) {
case *SubImpl:
b, ok := inB.(*SubImpl)
if !ok {
return false
}
return EqualsRefOfSubImpl(a, b)
default:
// this should never happen
return false
}
}
// EqualsRefOfInterfaceContainer does deep equals between the two objects.
func EqualsRefOfInterfaceContainer(a, b *InterfaceContainer) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return true
}
// EqualsSliceOfAST does deep equals between the two objects.
func EqualsSliceOfAST(a, b []AST) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
if !EqualsAST(a[i], b[i]) {
return false
}
}
return true
}
// EqualsSliceOfInt does deep equals between the two objects.
// NOTE(review): this file looks code-generated (asthelpergen); changes here
// may be overwritten on regeneration.
func EqualsSliceOfInt(a, b []int) bool {
	if len(a) != len(b) {
		return false
	}
	for i, av := range a {
		if av != b[i] {
			return false
		}
	}
	return true
}
// EqualsSliceOfRefOfLeaf does deep equals between the two objects.
func EqualsSliceOfRefOfLeaf(a, b []*Leaf) bool {
if len(a) != len(b) {
return false
}
for i := 0; i < len(a); i++ {
if !EqualsRefOfLeaf(a[i], b[i]) {
return false
}
}
return true
}
// EqualsRefOfBool does deep equals between the two objects.
// NOTE(review): this file looks code-generated (asthelpergen); changes here
// may be overwritten on regeneration.
func EqualsRefOfBool(a, b *bool) bool {
	switch {
	case a == b:
		// Same pointer (including both nil).
		return true
	case a == nil || b == nil:
		return false
	default:
		return *a == *b
	}
}
// EqualsRefOfValueContainer does deep equals between the two objects.
func EqualsRefOfValueContainer(a, b *ValueContainer) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return a.NotASTType == b.NotASTType &&
EqualsAST(a.ASTType, b.ASTType) &&
EqualsRefOfLeaf(a.ASTImplementationType, b.ASTImplementationType)
}
// EqualsRefOfValueSliceContainer does deep equals between the two objects.
func EqualsRefOfValueSliceContainer(a, b *ValueSliceContainer) bool {
if a == b {
return true
}
if a == nil || b == nil {
return false
}
return EqualsSliceOfAST(a.ASTElements, b.ASTElements) &&
EqualsSliceOfInt(a.NotASTElements, b.NotASTElements) &&
EqualsSliceOfRefOfLeaf(a.ASTImplementationElements, b.ASTImplementationElements)
} | go/tools/asthelpergen/integration/ast_equals.go | 0.639511 | 0.502441 | ast_equals.go | starcoder |
package dprocedures
import "github.com/dolthub/go-mysql-server/sql"
// DoltProcedures is the catalog of Dolt's external stored procedures as
// exposed to go-mysql-server. Every long-form "dolt_*" procedure is paired
// below with a short "d*" alias that maps to the same implementation
// function and result schema.
var DoltProcedures = []sql.ExternalStoredProcedureDetails{
	// Long-form procedure names.
	{Name: "dolt_add", Schema: int64Schema("status"), Function: doltAdd},
	{Name: "dolt_backup", Schema: int64Schema("success"), Function: doltBackup},
	{Name: "dolt_branch", Schema: int64Schema("status"), Function: doltBranch},
	{Name: "dolt_checkout", Schema: int64Schema("status"), Function: doltCheckout},
	{Name: "dolt_clean", Schema: int64Schema("status"), Function: doltClean},
	{Name: "dolt_commit", Schema: stringSchema("hash"), Function: doltCommit},
	{Name: "dolt_fetch", Schema: int64Schema("success"), Function: doltFetch},
	{Name: "dolt_merge", Schema: int64Schema("fast_forward", "conflicts"), Function: doltMerge},
	{Name: "dolt_pull", Schema: int64Schema("fast_forward", "conflicts"), Function: doltPull},
	{Name: "dolt_push", Schema: int64Schema("success"), Function: doltPush},
	{Name: "dolt_reset", Schema: int64Schema("status"), Function: doltReset},
	{Name: "dolt_revert", Schema: int64Schema("status"), Function: doltRevert},
	{Name: "dolt_verify_constraints", Schema: int64Schema("no_violations"), Function: doltVerifyConstraints},
	{Name: "dolt_verify_all_constraints", Schema: int64Schema("no_violations"), Function: doltVerifyAllConstraints},
	// Short aliases for the procedures above (note: dolt_backup has no alias).
	{Name: "dadd", Schema: int64Schema("status"), Function: doltAdd},
	{Name: "dbranch", Schema: int64Schema("status"), Function: doltBranch},
	{Name: "dcheckout", Schema: int64Schema("status"), Function: doltCheckout},
	{Name: "dclean", Schema: int64Schema("status"), Function: doltClean},
	{Name: "dcommit", Schema: stringSchema("hash"), Function: doltCommit},
	{Name: "dfetch", Schema: int64Schema("success"), Function: doltFetch},
	{Name: "dmerge", Schema: int64Schema("fast_forward", "conflicts"), Function: doltMerge},
	{Name: "dpull", Schema: int64Schema("fast_forward", "conflicts"), Function: doltPull},
	{Name: "dpush", Schema: int64Schema("success"), Function: doltPush},
	{Name: "dreset", Schema: int64Schema("status"), Function: doltReset},
	{Name: "drevert", Schema: int64Schema("status"), Function: doltRevert},
	{Name: "dverify_constraints", Schema: int64Schema("no_violations"), Function: doltVerifyConstraints},
	{Name: "dverify_all_constraints", Schema: int64Schema("no_violations"), Function: doltVerifyAllConstraints},
}
// stringSchema returns a non-nullable schema with all columns as LONGTEXT.
func stringSchema(columnNames ...string) sql.Schema {
	columns := make(sql.Schema, 0, len(columnNames))
	for _, name := range columnNames {
		columns = append(columns, &sql.Column{
			Name:     name,
			Type:     sql.LongText,
			Nullable: false,
		})
	}
	return columns
}
// int64Schema returns a non-nullable schema with all columns as BIGINT.
func int64Schema(columnNames ...string) sql.Schema {
	columns := make(sql.Schema, 0, len(columnNames))
	for _, name := range columnNames {
		columns = append(columns, &sql.Column{
			Name:     name,
			Type:     sql.Int64,
			Nullable: false,
		})
	}
	return columns
}
// rowToIter returns a sql.RowIter with a single row containing the values passed in.
func rowToIter(vals ...interface{}) sql.RowIter {
row := make(sql.Row, len(vals))
for i, val := range vals {
row[i] = val
}
return sql.RowsToRowIter(row)
} | go/libraries/doltcore/sqle/dprocedures/init.go | 0.547706 | 0.549641 | init.go | starcoder |
package exodus
import "math/rand"
// NewPopulation creates a population of populationSize individuals, each
// with individualSize genes produced by newGene.
func NewPopulation(populationSize int, individualSize int, newGene NewGeneFunction) Population {
	p := NewEmptyPopulation(populationSize)
	for i := range p.Individuals {
		p.Individuals[i] = NewIndividual(individualSize, newGene)
	}
	return p
}
// NewEmptyPopulation creates a population whose Individuals slice is
// allocated to populationSize zero-value individuals.
func NewEmptyPopulation(populationSize int) Population {
	return Population{Individuals: make([]Individual, populationSize)}
}
// Evaluate computes the fitness of every individual concurrently (one
// goroutine per individual) and records the highest-fitness individual
// seen in population.Best.
//
// NOTE(review): results are read back from a single shared channel in
// completion order, so individuals are reassigned to slice indices in an
// arbitrary order — the population is effectively shuffled on every call.
// Confirm this reordering is intended.
func (population *Population) Evaluate(fitness FitnessFunction) {
	channel := make(chan Individual)
	for i := 0; i < len(population.Individuals); i++ {
		go asyncEvaluate(population.Individuals[i], fitness, channel)
	}
	for i := 0; i < len(population.Individuals); i++ {
		population.Individuals[i] = <- channel
		if population.Best.Fitness < population.Individuals[i].Fitness {
			population.Best = population.Individuals[i]
		}
	}
}
// asyncEvaluate scores a single individual and sends the evaluated copy
// back on out; it is run as a goroutine by Population.Evaluate.
func asyncEvaluate(ind Individual, fitness FitnessFunction, out chan Individual) {
	ind.Evaluate(fitness)
	out <- ind
}
// Migrate exchanges individuals with other populations with probability
// migrationRate. Servers migrate locally; clients accept incoming
// imigrants and asynchronously send their best and a selected individual
// to the server.
func (population *Population) Migrate(migrationRate float64, imigrants *[]Individual) {
	if rand.Float64() >= migrationRate {
		return
	}
	if InServer() {
		MigrateLocally()
	}
	if InClient() {
		AcceptImigrants(imigrants, population)
		go SendBestToServer(population.Best)
		go MigrateToServer(population.SelectIndividual(), imigrants)
	}
}
func (population *Population) Evolve(crossoverRate float64, mutationRate float64, newGene NewGeneFunction) {
populationSize := len(population.Individuals)
newPopulation := NewEmptyPopulation(populationSize)
for i := 0; i < (populationSize/2); i++ {
parents := population.SelectParents()
offspring := Crossover(parents, crossoverRate)
offspring[0].Mutate(mutationRate, newGene)
offspring[1].Mutate(mutationRate, newGene)
newPopulation.Individuals[i+i] = offspring[0].Copy()
newPopulation.Individuals[i+i+1] = offspring[1].Copy()
}
for i := 0; i < populationSize; i++ {
population.Individuals[i] = newPopulation.Individuals[i]
}
population.Individuals[populationSize-1] = population.Best.Copy()
} | population.go | 0.706393 | 0.541106 | population.go | starcoder |
// Package codec provides support for interpreting byte slices as slices of
// other basic types such as runes, int64's or strings. Go's lack of generics
// make this awkward and this package currently supports a fixed set of
// basic types (slices of byte/uint8, rune/int32, int64 and string).
package codec
import "fmt"
// Decoder represents the ability to decode a byte slice into a slice of
// some other data type.
type Decoder interface {
	// Decode consumes input and returns a slice of the decoder's element
	// type ([]uint8, []int32, []int64 or []string) as an interface{}.
	Decode(input []byte) interface{}
}
// options holds configuration shared by all decoder implementations.
type options struct {
	// resizePrecent is the waste threshold, in percent, above which the
	// decoded slice is reallocated; see ResizePercent and resize.
	// NOTE(review): the field name misspells "Percent"; it is unexported,
	// so renaming would be a safe cleanup.
	resizePrecent int
}

// Option represents an option accepted by NewDecoder.
type Option func(*options)
// ResizePercent requests that the returned slice be reallocated if the
// ratio of unused to used capacity exceeds the specified percentage.
// That is, if (cap(slice) - len(slice)) / len(slice) exceeds the percentage
// new underlying storage is allocated and contents copied. The default
// value for ResizePercent is 100.
func ResizePercent(percent int) Option {
	return func(o *options) {
		o.resizePrecent = percent
	}
}
// NewDecoder returns an instance of Decoder appropriate for the supplied
// function. The currently supported function signatures are:
//   func([]byte) (uint8, int)
//   func([]byte) (int32, int)
//   func([]byte) (int64, int)
//   func([]byte) (string, int)
// An error is returned for any other function type.
func NewDecoder(fn interface{}, opts ...Option) (Decoder, error) {
	// Apply caller options over the default resize threshold of 100%.
	// (The loop variable was previously also named fn, shadowing the
	// decoder-function parameter.)
	o := options{resizePrecent: 100}
	for _, opt := range opts {
		opt(&o)
	}
	switch v := fn.(type) {
	case func([]byte) (uint8, int):
		return &decoder8{o, v}, nil
	case func([]byte) (int32, int):
		return &decoder32{o, v}, nil
	case func([]byte) (int64, int):
		return &decoder64{o, v}, nil
	case func([]byte) (string, int):
		return &decoderString{o, v}, nil
	}
	return nil, fmt.Errorf("unsupported type for decoder function: %T", fn)
}
// decoder8 decodes a byte slice into a []uint8 using fn for each element.
type decoder8 struct {
	options
	fn func([]byte) (uint8, int)
}

// Decode implements Decoder.
func (d *decoder8) Decode(input []byte) interface{} {
	out := make([]uint8, len(input))
	used := decode(input, func(chunk []byte, i int) (consumed int) {
		out[i], consumed = d.fn(chunk)
		return
	})
	return resize(out[:used], d.resizePrecent)
}
// decoder32 decodes a byte slice into an []int32 using fn for each element.
type decoder32 struct {
	options
	fn func([]byte) (int32, int)
}

// Decode implements Decoder.
func (d *decoder32) Decode(input []byte) interface{} {
	out := make([]int32, len(input))
	used := decode(input, func(chunk []byte, i int) (consumed int) {
		out[i], consumed = d.fn(chunk)
		return
	})
	return resize(out[:used], d.resizePrecent)
}
// decoder64 decodes a byte slice into an []int64 using fn for each element.
type decoder64 struct {
	options
	fn func([]byte) (int64, int)
}

// Decode implements Decoder.
func (d *decoder64) Decode(input []byte) interface{} {
	out := make([]int64, len(input))
	used := decode(input, func(chunk []byte, i int) (consumed int) {
		out[i], consumed = d.fn(chunk)
		return
	})
	return resize(out[:used], d.resizePrecent)
}
// decoderString decodes a byte slice into a []string using fn for each
// element.
type decoderString struct {
	options
	fn func([]byte) (string, int)
}

// Decode implements Decoder.
func (d *decoderString) Decode(input []byte) interface{} {
	out := make([]string, len(input))
	used := decode(input, func(chunk []byte, i int) (consumed int) {
		out[i], consumed = d.fn(chunk)
		return
	})
	return resize(out[:used], d.resizePrecent)
}
// decode repeatedly invokes fn with the unconsumed tail of input and the
// index of the element being produced. fn reports how many bytes it
// consumed; a return of 0 stops decoding. decode returns the number of
// elements produced.
func decode(input []byte, fn func([]byte, int) int) int {
	count := 0
	for cursor := 0; cursor < len(input); {
		n := fn(input[cursor:], count)
		if n == 0 {
			break
		}
		count++
		cursor += n
	}
	return count
}
// resizedNeeded reports whether the ratio of wasted to used capacity,
// expressed as a percentage, strictly exceeds percent. A used count of
// zero is treated as one to avoid dividing by zero.
func resizedNeeded(used, available int, percent int) bool {
	divisor := used
	if divisor == 0 {
		divisor = 1
	}
	return (available-used)*100/divisor > percent
}
// resize will allocate new underlying storage and copy the contents of
// slice to it if the ratio of wasted to used, ie:
// (cap(slice) - len(slice)) / len(slice))
// exceeds the specified percentage.
func resize(slice interface{}, percent int) interface{} {
switch v := slice.(type) {
case []uint8:
if resizedNeeded(len(v), cap(v), percent) {
r := make([]uint8, len(v))
copy(r, v)
return r
}
case []int32:
if resizedNeeded(len(v), cap(v), percent) {
r := make([]int32, len(v))
copy(r, v)
return r
}
case []int64:
if resizedNeeded(len(v), cap(v), percent) {
r := make([]int64, len(v))
copy(r, v)
return r
}
case []string:
if resizedNeeded(len(v), cap(v), percent) {
r := make([]string, len(v))
copy(r, v)
return r
}
default:
panic(fmt.Sprintf("unsupported type %T", slice))
}
return slice
} | algo/codec/codec.go | 0.746693 | 0.418043 | codec.go | starcoder |
package ckks
import (
//"fmt"
"math"
"math/cmplx"
)
func chebyshevNodesU(n int, a, b complex128) (u []complex128) {
u = make([]complex128, n)
var x, y complex128
for k := 1; k < n+1; k++ {
x = 0.5 * (a + b)
y = 0.5 * (b - a)
u[n-k] = x + y*complex(math.Cos((float64(k)-0.5)*(3.141592653589793/float64(n))), 0)
}
return
}
// chebyshevNodesX maps nodes u (given on [-1, 1]) affinely onto the
// interval [a, b].
func chebyshevNodesX(u []complex128, a, b complex128) (x []complex128) {
	x = make([]complex128, len(u))
	for i, ui := range u {
		x[i] = 0.5*(b-a)*ui + 0.5*(a+b)
	}
	return
}
// average returns the arithmetic mean of y. An empty slice divides by
// zero, yielding NaN components (matching complex division semantics).
func average(y []complex128) complex128 {
	var sum complex128
	for _, v := range y {
		sum += v
	}
	return sum / complex(float64(len(y)), 0)
}
// evaluate_cheby evaluates the Chebyshev series with the given
// coefficients at x, where the series is defined over the interval [a, b].
// It uses the three-term recurrence T(k+1) = 2u*T(k) - T(k-1) with
// u = (2x - a - b)/(b - a).
func evaluate_cheby(coeffs []complex128, x complex128, a, b complex128) (y complex128) {
	u := (2*x - a - b) / (b - a)
	Tprev := complex(1, 0)
	T := u
	y = coeffs[0]
	for _, c := range coeffs[1:] {
		y += T * c
		Tprev, T = T, 2*u*T-Tprev
	}
	return
}
// evaluate returns the Chebyshev polynomial T_degree evaluated at x, with
// the polynomial defined over the interval [a, b] (x is first mapped to
// u = (2x - a - b)/(b - a)). T_0 is the constant 1.
func evaluate(degree int, x, a, b complex128) (T complex128) {
	if degree == 0 {
		return 1
	}
	u := (2*x - a - b) / (b - a)
	Tprev := complex(1, 0)
	T = u
	for i := 1; i < degree; i++ {
		Tprev, T = T, 2*u*T-Tprev
	}
	return
}
// chebyCoeffs computes the Chebyshev series coefficients from the node
// values u (on [-1, 1]) and the sampled function values y, using the
// discrete orthogonality of the Chebyshev polynomials: each coefficient is
// the average of y weighted by T_i at the nodes, doubled for i > 0.
// The a and b parameters are currently unused.
func chebyCoeffs(u, y []complex128, a, b complex128) (coeffs []complex128) {
	n := len(y)
	coeffs = make([]complex128, n)
	for i := 0; i < n; i++ {
		tmp := make([]complex128, n)
		for j := 0; j < n; j++ {
			tmp[j] = y[j] * evaluate(i, u[j], -1, 1)
		}
		avg := average(tmp)
		if i == 0 {
			coeffs[i] = avg
		} else {
			coeffs[i] = 2 * avg
		}
	}
	return
}
// chebyshevinterpolation holds the Chebyshev series coefficients of a
// function approximated over the interval [a, b]; see Approximate and
// EvaluateCheby.
type chebyshevinterpolation struct {
	coeffs []complex128
	a complex128
	b complex128
}
// Approximate computes a Chebyshev approximation of the input function
// over the range [a, b] using `degree` interpolation nodes. To be used in
// conjunction with the function EvaluateCheby.
// (A previous version pre-allocated cheby.coeffs with degree+1 entries;
// that slice was dead — it was unconditionally replaced by the
// degree-length result of chebyCoeffs — so the allocation is removed.)
func Approximate(function func(complex128) complex128, a, b complex128, degree int) (cheby *chebyshevinterpolation) {
	cheby = new(chebyshevinterpolation)
	cheby.a = a
	cheby.b = b
	// Sample the function at the Chebyshev nodes mapped onto [a, b].
	u := chebyshevNodesU(degree, -1, 1)
	x := chebyshevNodesX(u, a, b)
	y := make([]complex128, len(x))
	for i := range x {
		y[i] = function(x[i])
	}
	cheby.coeffs = chebyCoeffs(u, y, a, b)
	return
}
// Given a hash table with the first evaluations of the Chebyshev basis at x
// in the interval a, b:
// C0 = 1 (actually not stored in the hash table)
// C1 = (2*x - a - b)/(b-a)
// C2 = 2*C1*C1 - C0
// evaluateCheby evaluates the nth degree Chebyshev basis element
// recursively, storing intermediate results in the hashtable.
// Consumes at most ceil(sqrt(n)) levels for an evaluation at Cn.
// Uses the following property: for a given Chebyshev element
// Cn = 2*Ca*Cb - Cc, with n = a+b and c = abs(a-b).
func evaluateCheby(n uint64, C map[uint64]*Ciphertext, evaluator *Evaluator, evakey *EvaluationKey) (err error) {
	if C[n] == nil {
		// Computes the indexes required to compute the asked evaluation.
		a := uint64(math.Ceil(float64(n) / 2))
		b := n >> 1
		c := uint64(math.Abs(float64(a) - float64(b)))
		// Recurses on the given indexes.
		if err = evaluateCheby(a, C, evaluator, evakey); err != nil {
			return err
		}
		if err = evaluateCheby(b, C, evaluator, evakey); err != nil {
			return err
		}
		// Since C[0] is not stored (but rather seen as the constant 1), only
		// recurses on c if c != 0. (This recursion previously discarded its
		// error; it is now propagated like the others.)
		if c != 0 {
			if err = evaluateCheby(c, C, evaluator, evakey); err != nil {
				return err
			}
		}
		// Computes C[n] = C[a]*C[b]
		if C[n], err = evaluator.MulRelinNew(C[a].Element(), C[b].Element(), evakey); err != nil {
			return err
		}
		if err = evaluator.Rescale(C[n], C[n]); err != nil {
			return err
		}
		// Computes C[n] = 2*C[a]*C[b]
		evaluator.Add(C[n], C[n], C[n])
		// Computes C[n] = 2*C[a]*C[b] - C[c]
		if c == 0 {
			evaluator.AddConst(C[n], -1, C[n])
		} else {
			if err = evaluator.Sub(C[n], C[c], C[n]); err != nil {
				return err
			}
		}
	}
	return nil
}
// EvaluateCheby evaluates a chebyshev approximation in log(n) + 1 (+1 if
// 2/(b-a) is not a gaussian integer) levels.
func (evaluator *Evaluator) EvaluateCheby(ct *Ciphertext, cheby *chebyshevinterpolation, evakey *EvaluationKey) (res *Ciphertext, err error) {
	a := cheby.a
	b := cheby.b
	ChebyCoeffs := cheby.coeffs
	C := make(map[uint64]*Ciphertext)
	// C0 = 1, so we treat it as a constant.
	// Computes C1 and C2, which seed the recursive computation of the
	// Chebyshev basis: C1 = (2*ct - a - b)/(b - a).
	C[1] = ct.CopyNew().Ciphertext()
	evaluator.MultConst(C[1], 2/(b-a), C[1])
	evaluator.AddConst(C[1], (-a-b)/(b-a), C[1])
	if C[1].Scale() > ct.Scale() {
		evaluator.Rescale(C[1], C[1])
	}
	// C2 = 2*C1*C1 - 1. (The error from MulRelinNew was previously
	// discarded; it is now checked like the other evaluator calls.)
	if C[2], err = evaluator.MulRelinNew(C[1].Element(), C[1].Element(), evakey); err != nil {
		return nil, err
	}
	if err = evaluator.Rescale(C[2], C[2]); err != nil {
		return nil, err
	}
	evaluator.Add(C[2], C[2], C[2])
	evaluator.AddConst(C[2], -1, C[2])
	res = C[1].CopyNew().Ciphertext() // res = C[1]
	if err = evaluator.MultConst(res, ChebyCoeffs[1], res); err != nil { // res = A[1]*C[1]
		return nil, err
	}
	if err = evaluator.AddConst(res, ChebyCoeffs[0], res); err != nil { // res = A[0] + A[1]*C[1]
		return nil, err
	}
	for i := uint64(2); i < uint64(len(ChebyCoeffs)); i++ {
		// Evaluates the C[i] Chebyshev basis element.
		if err = evaluateCheby(i, C, evaluator, evakey); err != nil {
			return nil, err
		}
		if err = evaluator.MultByConstAndAdd(C[i], ChebyCoeffs[i], res); err != nil { // res = A[0] + A[1]*C[1] + ... + A[i]*C[i]
			return nil, err
		}
	}
	// We only rescale at the end to save computation.
	if err = evaluator.Rescale(res, res); err != nil {
		return nil, err
	}
	return res, nil
}
// exp2pi returns e^(2*pi*i*x).
func exp2pi(x complex128) complex128 {
	const twoPi = 2 * 3.141592653589793
	return cmplx.Exp(complex(0, twoPi) * x)
}
// sin2pi2pi returns sin(2*pi*x)/(2*pi).
func sin2pi2pi(x complex128) complex128 {
	const twoPi = 2 * 3.141592653589793
	return cmplx.Sin(twoPi*x) / twoPi
}
package forGraphBLASGo
import (
"github.com/intel/forGoParallel/pipeline"
"sort"
)
// sparseVector has no duplicate cols
type sparseVector[T any] struct {
nsize int
cols []int
values []T
}
func newSparseVector[T any](size int, cols []int, values []T) sparseVector[T] {
return sparseVector[T]{
nsize: size,
cols: cols,
values: values,
}
}
func (vector sparseVector[T]) resize(ref *vectorReference[T], newSize int) *vectorReference[T] {
switch {
case newSize == vector.nsize:
return ref
case newSize > vector.nsize:
return newVectorReference[T](newSparseVector[T](newSize, vector.cols, vector.values), int64(len(vector.values)))
default:
index := sort.SearchInts(vector.cols, newSize)
if index == 0 {
return newVectorReference[T](newSparseVector[T](newSize, nil, nil), 0)
}
return newVectorReference[T](newSparseVector[T](
newSize,
vector.cols[:index],
vector.values[:index],
),
int64(index),
)
}
}
func (vector sparseVector[T]) size() int {
return vector.nsize
}
func (vector sparseVector[T]) nvals() int {
return len(vector.cols)
}
func (vector sparseVector[T]) setElement(ref *vectorReference[T], value T, index int) *vectorReference[T] {
return setVectorElement[T](vector, ref, len(vector.values), value, index)
}
func (vector sparseVector[T]) removeElement(ref *vectorReference[T], index int) *vectorReference[T] {
return removeVectorElement[T](vector, ref, len(vector.values), index, true)
}
func (vector sparseVector[T]) extractElement(index int) (result T, ok bool) {
i := sort.SearchInts(vector.cols, index)
if i < len(vector.cols) && vector.cols[i] == index {
return vector.values[i], true
}
return
}
func (vector sparseVector[T]) getPipeline() *pipeline.Pipeline[any] {
index := 0
var p pipeline.Pipeline[any]
p.Source(pipeline.NewFunc[any](len(vector.values), func(size int) (data any, fetched int, err error) {
var result vectorSlice[T]
if index >= len(vector.values) {
return result, 0, nil
}
if index+size > len(vector.values) {
size = len(vector.values) - index
}
result = vectorSlice[T]{
cow: cow0 | cowv,
indices: vector.cols[index : index+size],
values: vector.values[index : index+size],
}
index += size
return result, size, nil
}))
return &p
}
func (_ sparseVector[T]) optimized() bool {
return true
}
func (vector sparseVector[T]) optimize() functionalVector[T] {
return vector
} | functional_VectorSparse.go | 0.635675 | 0.689789 | functional_VectorSparse.go | starcoder |
package saml
import (
"crypto/x509"
"fmt"
"time"
)
// A Checker is a predicate against a signed element. The element can be a response or an assertion,
// but bear in mind that all not data might be signed. Checkers in this package will mention when
// they operate on the assertion only (in which case they require signed values).
type Checker func(Principal) error
// InResponseTo rejects unsolicited responses
func InResponseTo(id string) Checker {
return func(p Principal) error {
if id == p.InResponseTo {
return nil
}
return fmt.Errorf("invalid response to: %s", p.InResponseTo)
}
}
// ValidTimestamp accepts only assertion still currently valid. Leeway parameter allows to accept
// SAML providers which are known to be too slow, and where a strict validation would result in
// rejecting too many legitimate login attempts.
func ValidTimestamp(leeway time.Duration) Checker {
return func(p Principal) error {
nb, err := time.Parse("2006-01-02T15:04:05.999Z", p.Conditions["NotBefore"])
if err != nil || nb.Add(-leeway).Before(time.Now()) {
return fmt.Errorf("Assertion only valid after %s", nb)
}
na, err := time.Parse("2006-01-02T15:04:05.999Z", p.Conditions["NotOnOrAfter"])
if err != nil || na.Add(leeway).After(time.Now()) {
return fmt.Errorf("Assertion only valid before %s", na)
}
return nil
}
}
// AcceptableCertificate checks that the certificate used to sign the assertion is valid for a given
// issuer. The pool is used as a root of trust.
func AcceptableCertificate(jar interface {
Find(issuer string) *x509.CertPool
}) Checker {
return func(p Principal) error {
pool := jar.Find(p.Issuer)
if pool == nil {
return fmt.Errorf("unknown issuer")
}
cs, err := p.Cert.Verify(x509.VerifyOptions{Roots: pool, CurrentTime: acceptTime})
if err != nil {
return err
}
if len(cs) == 0 {
return fmt.Errorf("no chain of verification could be created")
}
return nil
}
}
type JarFunc func(string) *x509.CertPool
func (jf JarFunc) Find(issuer string) *x509.CertPool { return jf(issuer) }
var acceptTime time.Time // can be set to accept older certificates. Reset to zero after use (in test only)
const StrictTime = 0
// Validate assertion time
// AuthN statement
// check destination
// check audiences
// check issuers
// check session expiration
// validate subject confirmation
// check signed response
// check signed assertion | checkers.go | 0.61451 | 0.458349 | checkers.go | starcoder |
package model
import (
"fmt"
"io"
"strconv"
)
// File is the GraphQL model for an uploaded file.
type File struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// Ingredient is the GraphQL model for a stored ingredient; optional
// fields are pointers so absent values serialize as null.
type Ingredient struct {
	ID          string  `json:"id"`
	Title       *string `json:"title"`
	Description *string `json:"description"`
	Image       *string `json:"image"`
}

// IngredientInput carries the fields accepted when creating or updating
// an ingredient.
type IngredientInput struct {
	Description *string `json:"description"`
	Title       *string `json:"title"`
	Image       *string `json:"image"`
}

// Recipe is the GraphQL model for a recipe and its ingredient list.
type Recipe struct {
	ID          string              `json:"id"`
	Title       *string             `json:"title"`
	Description *string             `json:"description"`
	Image       *string             `json:"image"`
	Ingredients []*RecipeIngredient `json:"ingredients"`
}

// RecipeIngredient is an ingredient as used within a recipe, including
// its measurement.
type RecipeIngredient struct {
	ID              string           `json:"id"`
	Title           *string          `json:"title"`
	Description     *string          `json:"description"`
	Image           *string          `json:"image"`
	MeasurementType *MeasurementType `json:"measurementType"`
	MeasurementValue *float64        `json:"measurementValue"`
}

// RecipeIngredientInput references an ingredient by ID when submitting a
// recipe.
type RecipeIngredientInput struct {
	ID string `json:"id"`
}

// RecipeInput carries the fields accepted when creating or updating a
// recipe.
type RecipeInput struct {
	Title       *string `json:"title"`
	Description *string `json:"description"`
}
// MeasurementType enumerates the units a recipe ingredient can be
// measured in; values are the GraphQL enum names.
type MeasurementType string

const (
	// Volume units.
	MeasurementTypeTeaspoon    MeasurementType = "TEASPOON"
	MeasurementTypeTablespoon  MeasurementType = "TABLESPOON"
	MeasurementTypeFluidounce  MeasurementType = "FLUIDOUNCE"
	MeasurementTypeGill        MeasurementType = "GILL"
	MeasurementTypeCup         MeasurementType = "CUP"
	MeasurementTypePint        MeasurementType = "PINT"
	MeasurementTypeQuart       MeasurementType = "QUART"
	MeasurementTypeMilliliter  MeasurementType = "MILLILITER"
	// NOTE(review): "CENTERLITER" is presumably a misspelling of
	// "CENTILITER"; the value is part of the GraphQL schema, so it is
	// left untouched here.
	MeasurementTypeCenterliter MeasurementType = "CENTERLITER"
	MeasurementTypeDeciliter   MeasurementType = "DECILITER"
	MeasurementTypeLiter       MeasurementType = "LITER"
	// Weight units.
	MeasurementTypePound       MeasurementType = "POUND"
	MeasurementTypeOunce       MeasurementType = "OUNCE"
	MeasurementTypeMilligram   MeasurementType = "MILLIGRAM"
	MeasurementTypeGram        MeasurementType = "GRAM"
	MeasurementTypeKilogram    MeasurementType = "KILOGRAM"
)

// AllMeasurementType lists every valid MeasurementType value.
var AllMeasurementType = []MeasurementType{
	MeasurementTypeTeaspoon,
	MeasurementTypeTablespoon,
	MeasurementTypeFluidounce,
	MeasurementTypeGill,
	MeasurementTypeCup,
	MeasurementTypePint,
	MeasurementTypeQuart,
	MeasurementTypeMilliliter,
	MeasurementTypeCenterliter,
	MeasurementTypeDeciliter,
	MeasurementTypeLiter,
	MeasurementTypePound,
	MeasurementTypeOunce,
	MeasurementTypeMilligram,
	MeasurementTypeGram,
	MeasurementTypeKilogram,
}
// IsValid reports whether e is one of the declared MeasurementType values.
func (e MeasurementType) IsValid() bool {
	switch e {
	case MeasurementTypeTeaspoon, MeasurementTypeTablespoon, MeasurementTypeFluidounce,
		MeasurementTypeGill, MeasurementTypeCup, MeasurementTypePint, MeasurementTypeQuart,
		MeasurementTypeMilliliter, MeasurementTypeCenterliter, MeasurementTypeDeciliter,
		MeasurementTypeLiter, MeasurementTypePound, MeasurementTypeOunce,
		MeasurementTypeMilligram, MeasurementTypeGram, MeasurementTypeKilogram:
		return true
	default:
		return false
	}
}
// String returns the GraphQL enum name of e.
func (e MeasurementType) String() string {
	return string(e)
}
// UnmarshalGQL decodes a GraphQL input value into e. The input must be a
// string holding one of the declared enum names. Note that e is assigned
// before validation, matching gqlgen's generated behavior.
func (e *MeasurementType) UnmarshalGQL(v interface{}) error {
	s, ok := v.(string)
	if !ok {
		return fmt.Errorf("enums must be strings")
	}
	*e = MeasurementType(s)
	if e.IsValid() {
		return nil
	}
	return fmt.Errorf("%s is not a valid MeasurementType", s)
}
func (e MeasurementType) MarshalGQL(w io.Writer) {
fmt.Fprint(w, strconv.Quote(e.String()))
} | graph/model/models_gen.go | 0.638046 | 0.416263 | models_gen.go | starcoder |
// This package implements a basic LISP interpretor for embedding in a go program for scripting.
// This file contains the binary primitive functions.
package golisp
import (
"fmt"
)
// RegisterBinaryPrimitives registers the bitwise primitives (and, or,
// not, and the two shifts) with the interpreter, each with its expected
// argument count.
func RegisterBinaryPrimitives() {
	MakePrimitiveFunction("binary-and", "2", BinaryAndImpl)
	MakePrimitiveFunction("binary-or", "2", BinaryOrImpl)
	MakePrimitiveFunction("binary-not", "1", BinaryNotImpl)
	MakePrimitiveFunction("left-shift", "2", LeftShiftImpl)
	MakePrimitiveFunction("right-shift", "2", RightShiftImpl)
}
// BinaryAndImpl implements the "binary-and" primitive: the bitwise AND of
// its two integer arguments. A type error is raised for non-integers.
func BinaryAndImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	first := First(args)
	if !IntegerP(first) {
		err = ProcessError(fmt.Sprintf("Integer expected, received %s %s", TypeName(TypeOf(first)), String(first)), env)
		return
	}
	x := uint64(IntegerValue(first))
	second := Second(args)
	if !IntegerP(second) {
		err = ProcessError(fmt.Sprintf("Integer expected, received %s %s", TypeName(TypeOf(second)), String(second)), env)
		return
	}
	y := uint64(IntegerValue(second))
	return IntegerWithValue(int64(x & y)), nil
}
// BinaryOrImpl implements the "binary-or" primitive: the bitwise OR of
// its two integer arguments. A type error is raised for non-integers.
func BinaryOrImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	first := First(args)
	if !IntegerP(first) {
		err = ProcessError(fmt.Sprintf("Integer expected, received %s %s", TypeName(TypeOf(first)), String(first)), env)
		return
	}
	x := uint64(IntegerValue(first))
	second := Second(args)
	if !IntegerP(second) {
		err = ProcessError(fmt.Sprintf("Integer expected, received %s %s", TypeName(TypeOf(second)), String(second)), env)
		return
	}
	y := uint64(IntegerValue(second))
	return IntegerWithValue(int64(x | y)), nil
}
// BinaryNotImpl implements the "binary-not" primitive: the bitwise
// complement of its single integer argument.
// NOTE(review): the mask is 0xFFFFFFFF, so only the low 32 bits are
// inverted even though the value is widened to uint64; the other binary
// primitives operate on the full 64-bit value. Confirm that 32-bit
// complement semantics are intended here.
func BinaryNotImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	arg1 := First(args)
	if !IntegerP(arg1) {
		err = ProcessError(fmt.Sprintf("Integer expected, received %s %s", TypeName(TypeOf(arg1)), String(arg1)), env)
		return
	}
	b1 := uint64(IntegerValue(arg1))
	return IntegerWithValue(int64(b1 ^ uint64(0xFFFFFFFF))), nil
}
// LeftShiftImpl implements the "left-shift" primitive: the first integer
// argument shifted left by the second. A type error is raised for
// non-integers.
func LeftShiftImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
	first := First(args)
	if !IntegerP(first) {
		err = ProcessError(fmt.Sprintf("Integer expected, received %s %s", TypeName(TypeOf(first)), String(first)), env)
		return
	}
	x := uint64(IntegerValue(first))
	second := Second(args)
	if !IntegerP(second) {
		err = ProcessError(fmt.Sprintf("Integer expected, received %s %s", TypeName(TypeOf(second)), String(second)), env)
		return
	}
	shift := uint64(IntegerValue(second))
	return IntegerWithValue(int64(x << shift)), nil
}
func RightShiftImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {
arg1 := First(args)
if !IntegerP(arg1) {
err = ProcessError(fmt.Sprintf("Integer expected, received %s %s", TypeName(TypeOf(arg1)), String(arg1)), env)
return
}
b1 := uint64(IntegerValue(arg1))
arg2 := Second(args)
if !IntegerP(arg2) {
err = ProcessError(fmt.Sprintf("Integer expected, received %s %s", TypeName(TypeOf(arg2)), String(arg2)), env)
return
}
b2 := uint64(IntegerValue(arg2))
return IntegerWithValue(int64(b1 >> b2)), nil
} | prim_binary.go | 0.735452 | 0.501343 | prim_binary.go | starcoder |
package neural
import (
"fmt"
)
// Sample is a single supervised training example: the network inputs and
// the corresponding expected outputs.
type Sample struct {
	Inputs []float64
	Outputs []float64
}
// BP implements backpropagation training for an MLP.
type BP struct {
	// p is the network being trained.
	p *MLP
	// ldeltas holds one delta per neuron per layer, reused across samples.
	ldeltas [][]float64
	learningRate float64 // (0 <= learningRate <= 1)
	// outputs is a scratch buffer sized to the output layer.
	outputs []float64
	costFunc CostFunc
}
// NewBP creates a backpropagation trainer for p. A nil cost function
// defaults to mean squared error; the learning rate defaults to 1.
func NewBP(p *MLP, cf CostFunc) *BP {
	if cf == nil {
		cf = costMeanSquared{}
	}
	deltas := make([][]float64, len(p.layers))
	for i, layer := range p.layers {
		deltas[i] = make([]float64, len(layer.neurons))
	}
	outLen := len(p.getOutputLayer().neurons)
	return &BP{
		p:            p,
		ldeltas:      deltas,
		learningRate: 1,
		outputs:      make([]float64, outLen),
		costFunc:     cf,
	}
}
// SetLearningRate sets the step size used by weight updates, clamped to
// the valid range [0, 1].
func (bp *BP) SetLearningRate(learningRate float64) {
	bp.learningRate = clampFloat64(learningRate, 0, 1)
}
// LearnSample runs one forward pass and one backpropagation pass for a
// single training sample, updating the network's weights and biases in
// place.
func (bp *BP) LearnSample(sample Sample) {
	p := bp.p
	// Forward pass.
	p.SetInputs(sample.Inputs)
	p.Calculate()
	var (
		lastIndex = len(p.layers) - 1
		lastLayer = p.layers[lastIndex]
		lastDeltas = bp.ldeltas[lastIndex]
	)
	// Output-layer deltas: activation derivative times cost derivative.
	for j, n := range lastLayer.neurons {
		var (
			afD = lastLayer.actFunc.Derivative(n.out)
			cfD = bp.costFunc.Derivative(sample.Outputs[j], n.out)
		)
		lastDeltas[j] = afD * cfD
	}
	// Hidden-layer deltas, propagated backwards through the next layer's
	// weights; layer 0 is the input layer and gets no deltas.
	for k := lastIndex - 1; k > 0; k-- {
		var (
			currLayer = p.layers[k]
			currDeltas = bp.ldeltas[k]
			nextLayer = p.layers[k+1]
			nextDeltas = bp.ldeltas[k+1]
		)
		for j, currNeuron := range currLayer.neurons {
			var sum float64
			for i, nextNeuron := range nextLayer.neurons {
				sum += nextDeltas[i] * nextNeuron.weights[j]
			}
			afD := currLayer.actFunc.Derivative(currNeuron.out)
			currDeltas[j] = afD * sum
		}
	}
	// Gradient-descent update of every weight and bias using the deltas.
	for k := 1; k < len(p.layers); k++ {
		var (
			prevLayer = p.layers[k-1]
			currLayer = p.layers[k]
			currDeltas = bp.ldeltas[k]
		)
		for j, currNeuron := range currLayer.neurons {
			for i, prevNeuron := range prevLayer.neurons {
				currNeuron.weights[i] -= bp.learningRate * currDeltas[j] * prevNeuron.out
			}
			// Bias behaves like a weight with constant input 1.
			currNeuron.bias -= bp.learningRate * currDeltas[j] * 1
		}
	}
}
// SampleCost runs a forward pass for sample and returns the cost of the
// network's outputs against the sample's expected outputs.
func (bp *BP) SampleCost(sample Sample) float64 {
	bp.p.SetInputs(sample.Inputs)
	bp.p.Calculate()
	bp.p.GetOutputs(bp.outputs)
	return bp.costFunc.Func(sample.Outputs, bp.outputs)
}
// LearnSamples trains on each sample in turn and returns the average cost
// over the set. An empty sample set returns 0 (previously this divided by
// zero and returned NaN).
func (bp *BP) LearnSamples(samples []Sample) (averageCost float64) {
	if len(samples) == 0 {
		return 0
	}
	var sum float64
	for _, sample := range samples {
		bp.LearnSample(sample)
		sum += bp.SampleCost(sample)
	}
	return sum / float64(len(samples))
}
// Learn validates the samples against the network topology, then trains
// for up to epochMax epochs with the given learning rate. After each
// epoch f is called with the epoch number and average cost; returning
// false from f stops training early.
func Learn(p *MLP, samples []Sample, learnRate float64, epochMax int,
	f func(epoch int, averageCost float64) bool) error {
	if err := checkSamplesTopology(p, samples); err != nil {
		return err
	}
	bp := NewBP(p, CFMeanSquared)
	bp.SetLearningRate(learnRate)
	for epoch := 0; epoch < epochMax; epoch++ {
		if !f(epoch, bp.LearnSamples(samples)) {
			break
		}
	}
	return nil
}
func checkSamplesTopology(p *MLP, samples []Sample) error {
format := "invalid sample (%d): wrong %s length (%d), must be (%d)"
var (
inLen = len(p.getInputLayer().neurons)
outLen = len(p.getOutputLayer().neurons)
)
for i, sample := range samples {
if len(sample.Inputs) != inLen {
return fmt.Errorf(format, i, "inputs", len(sample.Inputs), inLen)
}
if len(sample.Outputs) != outLen {
return fmt.Errorf(format, i, "outputs", len(sample.Outputs), outLen)
}
}
return nil
} | backpropagation.go | 0.622574 | 0.454775 | backpropagation.go | starcoder |
package line
import (
"strings"
"github.com/adamcolton/geom/calc/cmpr"
"github.com/adamcolton/geom/d2"
"github.com/adamcolton/geom/geomerr"
)
// Line in 2D space invoked parametrically: the point at parameter t is
// T0 + t*D.
type Line struct {
	// T0 is the base point of the line (the point at t == 0).
	T0 d2.Pt
	// D is the direction vector; it sets orientation and parametric scale.
	D d2.V
}
// Pt1 returns the point on the line at parametric position t.
func (l Line) Pt1(t float64) d2.Pt {
	offset := l.D.Multiply(t)
	return l.T0.Add(offset)
}
// V1 always returns l.D, the slope of the line; the parameter t is
// ignored because a line's direction is constant.
func (l Line) V1(t float64) d2.V {
	return l.D
}
// AtX returns the value of t at x. May return Inf (or NaN) when l.D.X is
// zero.
func (l Line) AtX(x float64) float64 {
	dx := x - l.T0.X
	return dx / l.D.X
}
// AtY returns the value of t at y. May return Inf (or NaN) when l.D.Y is
// zero.
func (l Line) AtY(y float64) float64 {
	dy := y - l.T0.Y
	return dy / l.D.Y
}
// B from the form y = mx + b (the y-intercept). If l.D.X is zero the
// result is Inf or NaN — Go float64 division by zero does not panic.
func (l Line) B() float64 {
	t := l.AtX(0)
	return l.Pt1(t).Y
}
// M from the form y = mx + b (the slope). If l.D.X is zero the result is
// +/-Inf or NaN — Go float64 division by zero does not panic.
func (l Line) M() float64 {
	return l.D.Y / l.D.X
}
// LineIntersections fulfills the Intersections interface. For parallel
// lines it returns buf truncated to length zero; otherwise it returns buf
// holding the single parametric value along l2 at which the intersection
// occurs.
func (l Line) LineIntersections(l2 Line, buf []float64) []float64 {
	buf = buf[:0]
	if t, _, ok := l.Intersection(l2); ok {
		buf = append(buf, t)
	}
	return buf
}
// Intersection returns the parametric positions of the intersection and a
// bool indicating if there was an intersection. The first value is the
// parameter along the argument line l2 and the second along the receiver
// l (this matches how Closest consumes the first value). Parallel lines —
// zero cross product of the directions — yield (0, 0, false).
func (l Line) Intersection(l2 Line) (float64, float64, bool) {
	d := l.D.Cross(l2.D)
	if d == 0 {
		// lines are parallel do not intersect or overlap
		return 0, 0, false
	}
	v := l.T0.Subtract(l2.T0)
	t0 := (l.D.X*v.Y - l.D.Y*v.X) / d
	t1 := (l2.D.Y*(v.X) + l2.D.X*(-v.Y)) / -d
	return t0, t1, true
}
// Closest returns the point on the line closest to pt, found by
// intersecting l with the line through pt perpendicular to l.
func (l Line) Closest(pt d2.Pt) d2.Pt {
	perp := Line{
		T0: pt,
		D:  d2.V{-l.D.Y, l.D.X},
	}
	t, _, ok := perp.Intersection(l)
	if !ok {
		// Only possible when l.D is the zero vector; fall back to the base
		// point.
		return l.T0
	}
	return l.Pt1(t)
}
// String fulfills Stringer.
func (l Line) String() string {
	parts := []string{
		"Line( ",
		l.D.String(),
		"t + ",
		l.T0.String(),
		" )",
	}
	return strings.Join(parts, "")
}
// New returns the line from start to end such that l.Pt1(0) == start and
// l.Pt1(1) == end.
func New(start, end d2.Pt) Line {
	return Line{
		T0: start,
		D:  end.Subtract(start),
	}
}
// Bisect returns a line that bisects points a and b. All points on the line
// are equidistant from both a and b. At t=0 the mid-point is returned; at
// t=1 the point is the same distance from t=0 as the two defining points.
func Bisect(a, b d2.Point) Line {
	m, n := a.Pt(), b.Pt()
	return Line{
		T0: d2.Pt{(m.X + n.X) / 2.0, (m.Y + n.Y) / 2.0}, // midpoint of a and b
		D:  d2.V{(m.Y - n.Y) / 2.0, (n.X - m.X) / 2.0},  // perpendicular to b-a
	}
}
// TangentLine takes a curve c (anything providing Pt1 and V1) and a
// parametric position t0 and returns the line tangent to the curve at that
// point.
func TangentLine(c d2.Pt1V1, t0 float64) Line {
	return Line{
		T0: c.Pt1(t0),
		D:  c.V1(t0),
	}
}
// L fulfills d2.Limiter. Only the first derivative at order (1, 1) is
// unbounded; every other combination is undefined.
// NOTE(review): the exact semantics of t and c come from d2.Limiter —
// confirm against that package's documentation.
func (Line) L(t, c int) d2.Limit {
	if t == 1 && c == 1 {
		return d2.LimitUnbounded
	}
	return d2.LimitUndefined
}
// VL fulfills d2.VLimiter, mirroring L: only order (1, 1) is unbounded, all
// other combinations are undefined.
func (Line) VL(t, c int) d2.Limit {
	if t == 1 && c == 1 {
		return d2.LimitUnbounded
	}
	return d2.LimitUndefined
}
// T applies a transform to the line, returning a new line: T0 is
// transformed as a point, D as a vector.
func (l Line) T(t *d2.T) Line {
	return Line{
		T0: t.Pt(l.T0),
		D:  t.V(l.D),
	}
}
// Centroid returns the point on the line at t = 0.5, the midpoint of the
// unit parameterization.
func (l Line) Centroid() d2.Pt {
	return l.Pt1(0.5)
}
// Cross returns the cross product of the line's direction with the vector
// from T0 to pt; the sign indicates which side of the line pt lies on.
func (l Line) Cross(pt d2.Pt) float64 {
	return l.D.Cross(pt.Subtract(l.T0))
}
// AssertEqual fulfils geomtest.AssertEqualizer
func (l Line) AssertEqual(actual interface{}, t cmpr.Tolerance) error {
l2, ok := actual.(Line)
if !ok {
return geomerr.TypeMismatch(l, actual)
}
if l.T0.AssertEqual(l2.T0, t) != nil || l.D.AssertEqual(l2.D, t) != nil {
return geomerr.NotEqual(l, l2)
}
return nil
} | d2/curve/line/line.go | 0.854733 | 0.647422 | line.go | starcoder |
package main
import (
"bufio"
"fmt"
"math"
"os"
"strconv"
"strings"
)
// permutations collects every ordering of the target digits; it is filled
// in by main via perm.
var permutations [][]byte

// perm invokes f once for every permutation of a, permuting in place by
// recursively swapping each later element into position i. Call with i = 0.
// f receives the shared backing array, so callers must copy the slice if
// they retain it. (The `i > len(a)` guard bottoms out one recursion level
// deeper than strictly needed, but still yields exactly one f call per
// permutation.)
func perm(a []byte, f func([]byte), i int) {
	if i > len(a) {
		f(a)
		return
	}
	perm(a, f, i+1)
	for j := i + 1; j < len(a); j++ {
		a[i], a[j] = a[j], a[i]
		perm(a, f, i+1)
		a[i], a[j] = a[j], a[i]
	}
}
// position pairs a grid coordinate ([x, y] into layout) with the number of
// steps taken to reach it during a search.
type position struct {
	coordinates []int
	steps       int
}

// visitedPositions maps "x,y" coordinate strings to the step count at which
// the cell was first reached; it is reset by every breadthFirstSearch call.
var visitedPositions = map[string]int{}
// adjacentPositions returns the orthogonally adjacent, in-bounds, non-wall
// ('#') cells of pos, based on the package-level layout grid.
func adjacentPositions(pos position) []position {
	var positions []position
	// Same neighbor order as the original: x-1, x+1, y-1, y+1.
	deltas := [][2]int{{-1, 0}, {1, 0}, {0, -1}, {0, 1}}
	for _, d := range deltas {
		x, y := pos.coordinates[0]+d[0], pos.coordinates[1]+d[1]
		// Bounds-check both axes before indexing; the original only guarded
		// against negative coordinates and could panic on a map without a
		// surrounding wall.
		if y < 0 || y >= len(layout) || x < 0 || x >= len(layout[y]) {
			continue
		}
		if layout[y][x] != '#' {
			positions = append(positions, position{coordinates: []int{x, y}})
		}
	}
	return positions
}
// getCoordinatesString renders an [x, y] coordinate pair as the map key
// "x,y".
func getCoordinatesString(coordinates []int) string {
	x := strconv.Itoa(coordinates[0])
	y := strconv.Itoa(coordinates[1])
	return x + "," + y
}
// breadthFirstSearch runs a BFS over layout from root until it dequeues a
// cell containing target, returning that position with its accumulated step
// count. Step counts continue from root.steps, so successive searches can
// be chained to total a multi-leg path. The package-level visitedPositions
// map is reset on every call.
// NOTE(review): if target is unreachable, the last dequeued position is
// returned with no error indication — confirm callers never hit this case.
func breadthFirstSearch(root position, target byte) position {
	queue := []position{root}
	visitedPositions = map[string]int{}
	visitedPositions[getCoordinatesString(root.coordinates)] = 0
	var pos position
	for len(queue) > 0 {
		// pop the oldest entry (FIFO)
		pos = queue[0]
		queue = queue[1:]
		if layout[pos.coordinates[1]][pos.coordinates[0]] == target {
			break
		}
		adjacent := adjacentPositions(pos)
		for _, next := range adjacent {
			if _, ok := visitedPositions[getCoordinatesString(next.coordinates)]; !ok {
				next.steps = pos.steps + 1
				queue = append(queue, next)
				visitedPositions[getCoordinatesString(next.coordinates)] = next.steps
			}
		}
	}
	return pos
}
// getPathSteps walks the targets in path in order, starting from
// initialPosition, and returns the total number of steps. Each BFS leg is
// seeded with the previous leg's result, so step counts accumulate.
func getPathSteps(path []byte, initialPosition []int) int {
	pos := breadthFirstSearch(position{coordinates: initialPosition}, path[0])
	for i := 1; i < len(path); i++ {
		pos = breadthFirstSearch(pos, path[i])
	}
	return pos.steps
}
// getShortestPathLength returns the minimum step count over every
// precomputed target ordering in permutations, starting from
// initialPosition.
func getShortestPathLength(initialPosition []int) int {
	// Track the minimum as an int. The original funneled counts through
	// float64 and converted math.MaxFloat64 to int when permutations was
	// empty, which is undefined behavior for out-of-range conversions.
	best := math.MaxInt32
	for _, path := range permutations {
		if steps := getPathSteps(path, initialPosition); steps < best {
			best = steps
		}
	}
	return best
}
var layout []string
func main() {
file, _ := os.Open("input.txt")
defer file.Close()
scanner := bufio.NewScanner(file)
var initialPosition []int
for scanner.Scan() {
layout = append(layout, scanner.Text())
if strings.Contains(scanner.Text(), "0") {
initialPosition = []int{strings.Index(scanner.Text(), "0"), len(layout) - 1}
}
}
perm([]byte{'1', '2', '3', '4', '5', '6', '7'}, func(a []byte) {
x := make([]byte, len(a))
copy(x, a)
permutations = append(permutations, x)
}, 0)
fmt.Printf("Shortest path passing through all numbers (1): %d\n",
getShortestPathLength(initialPosition))
for i := range permutations {
permutations[i] = append(permutations[i], '0')
}
fmt.Printf("Shortest path passing through all numbers and returning to 0 (2): %d\n",
getShortestPathLength(initialPosition))
} | day_24/main.go | 0.554953 | 0.453201 | main.go | starcoder |
package calculator
import (
"errors"
"fmt"
"math"
"strconv"
"strings"
)
// Add returns the sum of two or more numbers.
func Add(a, b float64, nums ...float64) float64 {
	sum := a + b
	for _, v := range nums {
		sum += v
	}
	return sum
}
// Subtract returns a minus b, then minus each of the remaining numbers in
// turn.
func Subtract(a, b float64, nums ...float64) float64 {
	diff := a - b
	for _, v := range nums {
		diff -= v
	}
	return diff
}
// Multiply returns the product of two or more numbers.
func Multiply(a, b float64, nums ...float64) float64 {
	product := a * b
	for _, v := range nums {
		product *= v
	}
	return product
}
// Divide successively divides a by b and then by each of nums in turn. It
// returns an error as soon as any divisor is zero.
func Divide(a, b float64, nums ...float64) (float64, error) {
	divisors := append([]float64{b}, nums...)
	result := a
	for _, d := range divisors {
		if d == 0 {
			return 0, errors.New("Divide by Zero")
		}
		result /= d
	}
	return result, nil
}
// SquareRoot returns the square root of a, or an error when a is negative
// (the real square root is undefined there).
func SquareRoot(a float64) (float64, error) {
	switch {
	case a < 0:
		return 0, fmt.Errorf("bad input %f; square root is undefined for negative values", a)
	default:
		return math.Sqrt(a), nil
	}
}
// Evaluate parses expr as "operand1 operator operand2" and returns the
// result. Supported operators are +, -, * and /; whitespace around the
// operands and operator is ignored. A sign on the first operand (e.g.
// "-5 * 3") is treated as part of the number, not as the operator.
func Evaluate(expr string) (float64, error) {
	trimmed := strings.TrimSpace(expr)
	if trimmed == "" {
		return 0, fmt.Errorf("Supported operator (+,-,*,/) not found")
	}
	// Search from index 1 so a leading minus is not mistaken for the
	// operator; the original IndexAny over the whole string split "-5 * 3"
	// into "" and "5 * 3" and then failed to parse the first operand.
	opIndex := strings.IndexAny(trimmed[1:], "+-*/")
	if opIndex == -1 {
		return 0, fmt.Errorf("Supported operator (+,-,*,/) not found")
	}
	opIndex++ // offset for the skipped first byte
	operand1 := strings.TrimSpace(trimmed[:opIndex])
	operand2 := strings.TrimSpace(trimmed[opIndex+1:])
	num1, err := strconv.ParseFloat(operand1, 64)
	if err != nil {
		return 0, errors.New("error parsing the first number")
	}
	num2, err := strconv.ParseFloat(operand2, 64)
	if err != nil {
		return 0, errors.New("error parsing the second number")
	}
	switch trimmed[opIndex] {
	case '+':
		return num1 + num2, nil
	case '-':
		return num1 - num2, nil
	case '*':
		return num1 * num2, nil
	case '/':
		// Note: float division by zero yields ±Inf, not an error.
		return num1 / num2, nil
	default:
		return 0, fmt.Errorf("unsupported operator %v in expression %s ", trimmed[opIndex], expr)
	}
}
package slices
// Filter returns a new slice holding only the elements of slice for which
// predicate reports true. The result is always non-nil.
func Filter[TSlice ~[]T, T any](slice TSlice, predicate func(T) bool) TSlice {
	kept := make(TSlice, 0)
	for _, elem := range slice {
		if !predicate(elem) {
			continue
		}
		kept = append(kept, elem)
	}
	return kept
}
// FirstMatch returns the first element that passes the predicate function,
// or the zero value of T when nothing matches.
func FirstMatch[TSlice ~[]T, T any](slice TSlice, predicate func(T) bool) T {
	for i := range slice {
		if predicate(slice[i]) {
			return slice[i]
		}
	}
	var zero T
	return zero
}
// LastMatch returns the last element that passes the predicate function,
// scanning from the end of the slice, or the zero value of T when nothing
// matches.
func LastMatch[TSlice ~[]T, T any](slice TSlice, predicate func(T) bool) T {
	for i := len(slice); i > 0; i-- {
		if predicate(slice[i-1]) {
			return slice[i-1]
		}
	}
	var zero T
	return zero
}
// AnyMatch reports whether at least one element passes the predicate.
func AnyMatch[TSlice ~[]T, T any](slice TSlice, predicate func(T) bool) bool {
	for i := range slice {
		if predicate(slice[i]) {
			return true
		}
	}
	return false
}
// AllMatch reports whether every element passes the predicate; it is
// vacuously true for an empty slice.
func AllMatch[TSlice ~[]T, T any](slice TSlice, predicate func(T) bool) bool {
	for i := range slice {
		if !predicate(slice[i]) {
			return false
		}
	}
	return true
}
// Map returns a new slice where element i is fn applied to slice[i].
func Map[TSlice ~[]T, T any, U any](slice TSlice, fn func(T) U) []U {
	// The parameter is typed TSlice (not []T) so that TSlice is inferable
	// from the argument: with the original []T parameter the TSlice type
	// parameter could never be deduced, forcing explicit instantiation at
	// every call site.
	result := make([]U, len(slice))
	for i, t := range slice {
		result[i] = fn(t)
	}
	return result
}
// Contains reports whether find appears in slice.
func Contains[TSlice ~[]T, T comparable](slice TSlice, find T) bool {
	for i := range slice {
		if slice[i] == find {
			return true
		}
	}
	return false
}
// IndexOf returns the index of the first occurrence of find in slice, or -1
// when find is absent.
func IndexOf[TSlice ~[]T, T comparable](slice TSlice, find T) int {
	for i := 0; i < len(slice); i++ {
		if slice[i] == find {
			return i
		}
	}
	return -1
}
// GroupBy buckets the elements of slice into a map keyed by keySelector;
// each bucket holds the valSelector projections in input order.
func GroupBy[TSlice ~[]T, T any, K comparable, V any](slice TSlice, keySelector func(T) K, valSelector func(T) V) map[K][]V {
	buckets := make(map[K][]V)
	for _, elem := range slice {
		k, v := keySelector(elem), valSelector(elem)
		buckets[k] = append(buckets[k], v)
	}
	return buckets
}
// ToSet builds a set (a map to the empty struct) of the keys produced by
// keySelector over slice; duplicates collapse to a single entry.
func ToSet[TSlice ~[]T, T any, K comparable](slice TSlice, keySelector func(T) K) map[K]struct{} {
	members := make(map[K]struct{}, len(slice))
	for _, elem := range slice {
		members[keySelector(elem)] = struct{}{}
	}
	return members
}
// ToMap builds a map keyed by keySelector with the value of valSelector for
// each element in slice. When multiple elements produce the same key, the
// element that appears latest in slice wins.
func ToMap[TSlice ~[]T, T any, K comparable, V any](slice TSlice, keySelector func(T) K, valSelector func(T) V) map[K]V {
	// Pre-sized to the upper bound on distinct keys.
	m := make(map[K]V, len(slice))
	for _, t := range slice {
		m[keySelector(t)] = valSelector(t)
	}
	return m
}
package core
import (
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"math/rand"
"os"
"os/exec"
"path"
"sort"
"strconv"
"strings"
"time"
)
// ModelerType enumerates the available Modeler implementations.
type ModelerType uint8

const (
	// ScriptBasedModelerType selects ScriptBasedModeler (external ML script).
	ScriptBasedModelerType ModelerType = iota
	// KNNModelerType selects KNNModeler.
	// NOTE(review): iota is 1 on this line, so KNNModelerType == 2, leaving
	// a gap at value 1 — confirm the gap is intentional.
	KNNModelerType ModelerType = iota + 1
)

// NewModelerType maps a string to a ModelerType: "script"
// (case-insensitive) selects ScriptBasedModelerType; any other value falls
// back to KNNModelerType.
func NewModelerType(t string) ModelerType {
	if strings.ToLower(t) == "script" {
		return ScriptBasedModelerType
	} else {
		return KNNModelerType
	}
}
// NewModeler is the factory method for the modeler object: it constructs a
// Modeler of the requested type wired with the datasets, sampling rate and
// evaluator. An unknown type yields nil.
func NewModeler(
	modelerType ModelerType,
	datasets []*Dataset,
	sr float64,
	evaluator DatasetEvaluator) Modeler {
	switch modelerType {
	case ScriptBasedModelerType:
		m := new(ScriptBasedModeler)
		m.datasets = datasets
		m.samplingRate = sr
		m.evaluator = evaluator
		return m
	case KNNModelerType:
		m := new(KNNModeler)
		m.datasets = datasets
		m.samplingRate = sr
		m.evaluator = evaluator
		return m
	}
	return nil
}
// Modeler is the interface for the objects that model the dataset space.
// It is implemented by ScriptBasedModeler and KNNModeler; construct one via
// NewModeler.
type Modeler interface {
	// Configure is responsible to provide the necessary configuration
	// options to the Modeler struct. Call it before Run.
	Configure(map[string]string) error
	// Run initiates the modeling process.
	Run() error
	// Datasets returns the datasets slice
	Datasets() []*Dataset
	// Samples returns the indices of the chosen datasets.
	Samples() map[int]float64
	// AppxValues returns a slice of the approximated values
	AppxValues() []float64
	// ErrorMetrics returns a list of error metrics for the specified modeler
	ErrorMetrics() map[string]float64
	// ExecTime returns the total execution time of the Modeler, in seconds.
	ExecTime() float64
	// EvalTime returns the evaluation time of the Modeler, in seconds.
	EvalTime() float64
}
// AbstractModeler implements the common state and accessor methods shared
// by the concrete Modeler structs, which embed it.
type AbstractModeler struct {
	datasets  []*Dataset       // the datasets the modeler refers to
	evaluator DatasetEvaluator // the evaluator struct that gets the values
	samplingRate float64 // the portion (0..1) of the datasets to examine
	samples   map[int]float64 // dataset index -> evaluated score for the chosen samples
	appxValues []float64 // the appx values of ALL the datasets
	execTime  float64 // the total time in seconds
	evalTime  float64 // the time needed to evaluate the datasets in seconds
}
// Datasets returns the datasets slice.
func (a *AbstractModeler) Datasets() []*Dataset {
	return a.datasets
}

// Samples returns the sampled dataset indices mapped to their evaluated
// scores.
func (a *AbstractModeler) Samples() map[int]float64 {
	return a.samples
}

// AppxValues returns the approximated values for all the datasets.
func (a *AbstractModeler) AppxValues() []float64 {
	return a.appxValues
}
// ErrorMetrics evaluates every dataset with the configured evaluator and
// compares the results to appxValues, returning metrics (RMSE, MAPE, R^2,
// residual percentiles, ...) over three views: all datasets ("all"), the
// non-sampled datasets ("unknown"), and random subsamples of the unknown
// set at several sizes ("05%".."40%"), each averaged over 10 shuffles.
// Datasets that fail to evaluate contribute NaN actuals, which the metric
// helpers skip. Returns nil when no approximations exist.
func (a *AbstractModeler) ErrorMetrics() map[string]float64 {
	if a.appxValues == nil || len(a.appxValues) == 0 {
		return nil
	}
	// local `errors` map shadows the errors package (not used here)
	errors := make(map[string]float64)
	var actual []float64
	for _, d := range a.datasets {
		val, err := a.evaluator.Evaluate(d.Path())
		if err != nil {
			log.Println(err)
			actual = append(actual, math.NaN())
		} else {
			actual = append(actual, val)
		}
	}
	// evaluation for the entire dataset
	allIndices := make([]int, len(actual))
	for i := range actual {
		allIndices[i] = i
	}
	for k, v := range a.getMetrics(allIndices, actual, "all") {
		errors[k] = v
	}
	// evaluation for the unknown (non-sampled) datasets
	var unknownIndices []int
	for i := range actual {
		if _, ok := a.samples[i]; !ok {
			unknownIndices = append(unknownIndices, i)
		}
	}
	for k, v := range a.getMetrics(unknownIndices, actual, "unknown") {
		errors[k] = v
	}
	// metrics per subsample size, accumulated over 10 random shuffles and
	// averaged below
	tempErrors := make(map[string][]float64)
	for r := 0; r < 10; r++ {
		perm := rand.Perm(len(unknownIndices))
		var idcs []int
		for _, i := range perm {
			idcs = append(idcs, unknownIndices[i])
		}
		for _, s := range []float64{0.05, 0.10, 0.20, 0.30, 0.40} {
			newlength := int(float64(len(actual)) * s)
			if newlength < len(idcs) && newlength > 0 {
				// NOTE(review): the slice starts at 1, silently dropping the
				// first shuffled index — confirm this is intentional.
				for k, v := range a.getMetrics(idcs[1:newlength], actual, fmt.Sprintf("%02.0f%%", 100*s)) {
					//errors[k] = v
					if _, ok := tempErrors[k]; !ok {
						tempErrors[k] = make([]float64, 0)
					}
					tempErrors[k] = append(tempErrors[k], v)
				}
			}
		}
	}
	for k, v := range tempErrors {
		errors[k] = Mean(v)
	}
	return errors
}
// getMetrics computes the full suite of error metrics over the subset of
// dataset indices in testIdx, suffixing each metric name with label.
// Residual percentiles (Res000..Res100) are taken over absolute errors;
// NRMSE normalizes RMSE by the maximum actual value in the subset.
func (a *AbstractModeler) getMetrics(testIdx []int, actual []float64, label string) map[string]float64 {
	actualUnknown, appxUnknown, residualsUnknown :=
		make([]float64, len(testIdx)),
		make([]float64, len(testIdx)),
		make([]float64, len(testIdx))
	maxValue := math.NaN()
	for i, v := range testIdx {
		// track the subset maximum for NRMSE normalization
		if math.IsNaN(maxValue) || maxValue < actual[v] {
			maxValue = actual[v]
		}
		actualUnknown[i] = actual[v]
		appxUnknown[i] = a.appxValues[v]
		residualsUnknown[i] = math.Abs(actualUnknown[i] - appxUnknown[i])
	}
	errors := make(map[string]float64)
	errors["RMSE-"+label] = RootMeanSquaredError(actualUnknown, appxUnknown)
	errors["NRMSE-"+label] = RootMeanSquaredError(actualUnknown, appxUnknown) / maxValue
	errors["RMSLE-"+label] = RootMeanSquaredLogError(actualUnknown, appxUnknown)
	errors["MAPE-"+label] = MeanAbsolutePercentageError(actualUnknown, appxUnknown)
	errors["MdAPE-"+label] = MedianAbsolutePercentageError(actualUnknown, appxUnknown)
	errors["MxAPE-"+label] = MaxAbsolutePercentageError(actualUnknown, appxUnknown)
	errors["MxAPE10-"+label] = MaxAbsoluteCountPercentageError(actualUnknown, appxUnknown, 10)
	errors["MxAPE20-"+label] = MaxAbsoluteCountPercentageError(actualUnknown, appxUnknown, 20)
	errors["MAE-"+label] = MeanAbsoluteError(actualUnknown, appxUnknown)
	errors["R^2-"+label] = RSquared(actualUnknown, appxUnknown)
	errors["Res000-"+label] = Percentile(residualsUnknown, 0)
	errors["Res025-"+label] = Percentile(residualsUnknown, 25)
	errors["Res050-"+label] = Percentile(residualsUnknown, 50)
	errors["Res075-"+label] = Percentile(residualsUnknown, 75)
	errors["Res100-"+label] = Percentile(residualsUnknown, 100)
	errors["Kendall-"+label] = Kendall(actualUnknown, appxUnknown)
	return errors
}
// ExecTime returns the total execution time of the Modeler, in seconds.
func (a *AbstractModeler) ExecTime() float64 {
	return a.execTime
}

// EvalTime returns the dataset evaluation time of the Modeler, in seconds.
func (a *AbstractModeler) EvalTime() float64 {
	return a.evalTime
}
// deploySamples randomly selects up to samplingRate*len(datasets) datasets,
// evaluates each, and records the scores in a.samples. Evaluation failures
// are logged and skipped (later permutation entries are tried instead), so
// fewer samples than the target may be collected. Evaluation wall time is
// accumulated in a.evalTime.
func (a *AbstractModeler) deploySamples() {
	// Receiver renamed from m to a for consistency with the other
	// AbstractModeler methods.
	s := int(math.Floor(a.samplingRate * float64(len(a.datasets))))
	// sample the datasets in random order
	permutation := rand.Perm(len(a.datasets))
	a.samples = make(map[int]float64)
	for i := 0; i < len(permutation) && (len(a.samples) < s); i++ {
		idx := permutation[i]
		start2 := time.Now()
		val, err := a.evaluator.Evaluate(a.datasets[idx].Path())
		a.evalTime += time.Since(start2).Seconds()
		if err != nil {
			log.Printf("%s: %s\n", a.datasets[idx].Path(), err.Error())
		} else {
			a.samples[idx] = val
		}
	}
}
// ScriptBasedModeler utilizes an external script to train an ML model and
// obtain its values. The script is invoked as: script <trainFile> <testFile>
// and must print one value per dataset, newline-separated.
type ScriptBasedModeler struct {
	AbstractModeler
	script      string               // path of the executable to use for modeling
	coordinates []DatasetCoordinates // per-dataset feature coordinates, parallel to datasets
}
// Configure expects the necessary conf options for the specified struct:
//   - "script": the path of the modeling script to execute (required)
//   - "coordinates": path of a serialized DatasetCoordinates file (required)
func (m *ScriptBasedModeler) Configure(conf map[string]string) error {
	val, ok := conf["script"]
	if !ok {
		log.Println("script parameter is missing")
		return errors.New("script parameter is missing")
	}
	m.script = val
	val, ok = conf["coordinates"]
	if !ok {
		log.Println("coordinates parameter is missing")
		return errors.New("coordinates parameter is missing")
	}
	buf, err := ioutil.ReadFile(val)
	if err != nil {
		log.Println(err)
		// Previously the error was only logged and deserialization ran on a
		// nil buffer; fail fast instead.
		return err
	}
	m.coordinates = DeserializeCoordinates(buf)
	return nil
}
// Run executes the modeling process: it samples and evaluates datasets,
// writes the training and test CSV files, invokes the ML script, and
// populates appxValues with the script's output. The temporary CSV files
// are removed on every exit path.
func (m *ScriptBasedModeler) Run() error {
	start := time.Now()
	m.deploySamples()
	var trainingSet, testSet [][]float64
	for idx, val := range m.samples {
		// NOTE(review): appending to m.coordinates[idx] can share and
		// mutate its backing array if it has spare capacity — confirm
		// DeserializeCoordinates yields exactly-sized slices.
		trainingSet = append(trainingSet, append(m.coordinates[idx], val))
	}
	trainFile := createCSVFile(trainingSet, true)
	// Deferred removal ensures cleanup even when the script fails; the
	// original leaked both files on the error path.
	defer os.Remove(trainFile)
	for _, v := range m.coordinates {
		testSet = append(testSet, v)
	}
	testFile := createCSVFile(testSet, false)
	defer os.Remove(testFile)
	appx, err := m.executeMLScript(trainFile, testFile)
	if err != nil {
		return err
	}
	m.appxValues = appx
	m.execTime = time.Since(start).Seconds()
	return nil
}
// executeMLScript runs the configured ML script with the training and test
// CSV paths and parses its stdout as one float per dataset, one per line.
// Unparsable lines are logged and left at 0. An error is returned when the
// script fails or prints fewer lines than there are datasets.
func (m *ScriptBasedModeler) executeMLScript(trainFile, testFile string) ([]float64, error) {
	cmd := exec.Command(m.script, trainFile, testFile)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return nil, errors.New(err.Error() + string(out))
	}
	array := strings.Split(string(out), "\n")
	// Guard against short output; the original indexed past the end of
	// array and panicked when the script printed too few lines.
	if len(array) < len(m.datasets) {
		return nil, fmt.Errorf("script produced %d lines, expected %d", len(array), len(m.datasets))
	}
	result := make([]float64, len(m.datasets))
	for i := 0; i < len(m.datasets); i++ {
		val, err := strconv.ParseFloat(array[i], 64)
		if err != nil {
			log.Println(err)
		} else {
			result[i] = val
		}
	}
	return result, nil
}
// KNNModeler approximates unsampled dataset values from their k most
// similar sampled datasets, using a precomputed similarity matrix.
type KNNModeler struct {
	AbstractModeler
	k          int                      // the number of neighbors to check
	sm         *DatasetSimilarityMatrix // the similarity matrix
	regression bool                     // true: similarity-weighted average; false: majority vote
}
// Configure reads the modeler's parameters from conf:
//   - "k": number of neighbors to consider (required)
//   - "smatrix": path of a serialized DatasetSimilarityMatrix (required)
//   - "regression": "true" for weighted-average regression, anything else
//     for majority-vote classification (optional; defaults to regression)
func (m *KNNModeler) Configure(conf map[string]string) error {
	if val, ok := conf["k"]; ok {
		intVal, err := strconv.ParseInt(val, 10, 32)
		if err != nil {
			// Previously a malformed k was only logged, silently leaving
			// k == 0 and disabling the neighbor search.
			log.Println(err)
			return err
		}
		m.k = int(intVal)
	} else {
		log.Println("No k parameter provided")
		return errors.New("No k parameter provided")
	}
	if val, ok := conf["smatrix"]; ok {
		buf, err := ioutil.ReadFile(val)
		if err != nil {
			log.Println(err)
			return err
		}
		m.sm = new(DatasetSimilarityMatrix)
		m.sm.Deserialize(buf)
	} else {
		log.Println("No smatrix parameter provided")
		return errors.New("No smatrix parameter provided")
	}
	if val, ok := conf["regression"]; ok {
		m.regression = (val == "true")
	} else {
		log.Println("No argument for regression provided - assuming regression")
		m.regression = true
	}
	return nil
}
// Run evaluates the sampled datasets directly and approximates every other
// dataset via k-nearest-neighbor interpolation over the similarity matrix.
// The receiver is named m for consistency with Configure (and to avoid
// clashing with the field k).
func (m *KNNModeler) Run() error {
	start := time.Now()
	m.deploySamples()
	m.appxValues = make([]float64, len(m.datasets))
	for i := range m.datasets {
		// single map lookup instead of the original's membership test
		// followed by a second read
		if val, ok := m.samples[i]; ok {
			m.appxValues[i] = val
		} else {
			m.appxValues[i] = m.approximateValue(i)
		}
	}
	m.execTime = time.Since(start).Seconds()
	return nil
}
// approximateValue estimates the value of dataset id from its k most
// similar sampled datasets. In regression mode it returns the
// similarity-weighted average (NaN if all similarities are 0); otherwise it
// returns the majority-vote value among the neighbors.
// NOTE(review): classification ties are broken by map iteration order,
// which is nondeterministic — confirm that is acceptable.
func (m *KNNModeler) approximateValue(id int) float64 {
	// Receiver renamed from k to m: the original shadowed the receiver with
	// a range variable also named k below.
	var neighbors pairList
	for j := range m.samples {
		neighbors = append(neighbors, pair{j, m.sm.Get(id, j)})
	}
	sort.Sort(sort.Reverse(neighbors)) // most similar first
	if m.regression {
		weights, values := 0.0, 0.0
		for i := 0; i < len(neighbors) && i < m.k; i++ {
			p := neighbors[i]
			values += p.Similarity * m.samples[p.Id]
			weights += p.Similarity
		}
		return values / weights
	}
	votes := make(map[float64]int)
	for i := 0; i < len(neighbors) && i < m.k; i++ {
		// map zero value makes the original explicit initialization redundant
		votes[m.samples[neighbors[i].Id]]++
	}
	maxVal, maxOcc := 0.0, 0
	for val, occ := range votes {
		if occ > maxOcc {
			maxVal, maxOcc = val, occ
		}
	}
	return maxVal
}
// pair associates a sampled dataset id with its similarity to the dataset
// being approximated.
type pair struct {
	Id         int
	Similarity float64
}

// pairList implements sort.Interface, ordering pairs by ascending
// Similarity (wrap in sort.Reverse for most-similar-first).
type pairList []pair

func (p pairList) Len() int { return len(p) }
func (p pairList) Less(i, j int) bool { return p[i].Similarity < p[j].Similarity }
func (p pairList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// createCSVFile serializes a double float slice to a CSV file and returns
// the filename
func createCSVFile(matrix [][]float64, output bool) string {
f, err := ioutil.TempFile("/tmp", "csv")
if err != nil {
log.Println(err)
}
cols := 0
if len(matrix) > 0 {
cols = len(matrix[0])
}
if output {
cols--
}
for i := 1; i < cols+1; i++ {
fmt.Fprintf(f, "x%d", i)
if i < cols {
fmt.Fprintf(f, ",")
}
}
if output {
fmt.Fprintf(f, ",class")
}
fmt.Fprintf(f, "\n")
for i := range matrix {
for j := range matrix[i] {
fmt.Fprintf(f, "%.5f", matrix[i][j])
if j < len(matrix[i])-1 {
fmt.Fprintf(f, ",")
}
}
fmt.Fprintf(f, "\n")
}
f.Close()
return f.Name()
}
// AppxScores repackages modeler.AppxValues() as a DatasetScores struct,
// keyed by each dataset's file base name.
func AppxScores(modeler Modeler) *DatasetScores {
	appxScores := NewDatasetScores()
	datasets := modeler.Datasets()
	appxValues := modeler.AppxValues()
	for i := range appxValues {
		appxScores.Scores[path.Base(datasets[i].Path())] = appxValues[i]
	}
	return appxScores
}
// RootMeanSquaredError returns the RMSE of the actual vs the predicted values
func RootMeanSquaredError(actual, predicted []float64) float64 {
if len(actual) != len(predicted) || len(actual) == 0 {
log.Printf("actual (%d) and predicted (%d) are of different size", len(actual), len(predicted))
return math.NaN()
}
sum := 0.0
count := 0.0
for i := range actual {
if !math.IsNaN(actual[i]) {
diff := actual[i] - predicted[i]
sum += diff * diff
count += 1
}
}
if count > 0 {
return math.Sqrt(sum / count)
}
return math.NaN()
}
// RootMeanSquaredLogError returns the RMSLE of the actual vs the predicted values
func RootMeanSquaredLogError(actual, predicted []float64) float64 {
if len(actual) != len(predicted) || len(actual) == 0 {
log.Println("actual and predicted values are of different size!!")
return math.NaN()
}
sum := 0.0
count := 0.0
for i := range actual {
if !math.IsNaN(actual[i]) && actual[i] > -1 && predicted[i] > -1 {
diff := math.Log(predicted[i]+1) - math.Log(actual[i]+1)
sum += diff * diff
count += 1
}
}
if count > 0 {
return math.Sqrt(sum / count)
}
return math.NaN()
}
// MeanAbsoluteError returns the MAE of the actual vs the predicted values
func MeanAbsoluteError(actual, predicted []float64) float64 {
if len(actual) != len(predicted) || len(actual) == 0 {
log.Println("actual and predicted values are of different size!!")
return math.NaN()
}
sum := 0.0
count := 0.0
for i := range actual {
if actual[i] != 0.0 && !math.IsNaN(actual[i]) {
count += 1.0
sum += math.Abs((actual[i] - predicted[i]))
}
}
if count > 0 {
return sum / count
}
return math.NaN()
}
// MedianAbsolutePercentageError returns the MdAPE of the actual vs the
// predicted values: the 50th Percentile (see that function's convention) of
// the absolute percentage errors. Entries whose actual value is 0 or NaN
// are skipped. Returns NaN for mismatched sizes, empty input, or no usable
// entries.
func MedianAbsolutePercentageError(actual, predicted []float64) float64 {
	if len(actual) != len(predicted) || len(actual) == 0 {
		log.Println("actual and predicted values are of different size!!")
		return math.NaN()
	}
	apes := make([]float64, 0)
	for i := range actual {
		if actual[i] != 0.0 && !math.IsNaN(actual[i]) {
			val := math.Abs((actual[i] - predicted[i]) / actual[i])
			apes = append(apes, val)
		}
	}
	if len(apes) > 0 {
		return Percentile(apes, 50)
	}
	return math.NaN()
}
// MeanAbsolutePercentageError returns the MAPE of the actual vs the predicted values
func MeanAbsolutePercentageError(actual, predicted []float64) float64 {
if len(actual) != len(predicted) || len(actual) == 0 {
log.Println("actual and predicted values are of different size!!")
return math.NaN()
}
sum := 0.0
count := 0.0
for i := range actual {
if actual[i] != 0.0 && !math.IsNaN(actual[i]) {
count += 1.0
val := math.Abs((actual[i] - predicted[i]) / actual[i])
sum += val
}
}
if count > 0 {
return sum / count
}
return math.NaN()
}
// MaxAbsolutePercentageError returns the Max error of the actual vs the
// predicted values as a percentage.
func MaxAbsolutePercentageError(actual, predicted []float64) float64 {
if len(actual) != len(predicted) || len(actual) == 0 {
log.Println("actual and predicted values are of different size!!")
return math.NaN()
}
err := 0.0
for i := range actual {
val := 0.0
if actual[i] == 0.0 || math.IsNaN(actual[i]) {
val = 1.0
} else {
val = math.Abs((actual[i] - predicted[i]) / actual[i])
}
if val > err {
err = val
}
}
return err
}
// MaxAbsoluteCountPercentageError returns the fraction of entries whose
// absolute percentage error is strictly greater than percentile/100. An
// entry whose actual value is 0 or NaN counts as a full 100% miss. Returns
// NaN for mismatched sizes or empty input.
func MaxAbsoluteCountPercentageError(actual, predicted []float64, percentile int) float64 {
	if len(actual) != len(predicted) || len(actual) == 0 {
		log.Println("actual and predicted values are of different size!!")
		return math.NaN()
	}
	threshold := float64(percentile) / 100.0
	exceeding := 0.0
	for i, a := range actual {
		ape := 1.0
		if a != 0.0 && !math.IsNaN(a) {
			ape = math.Abs((a - predicted[i]) / a)
		}
		if ape > threshold {
			exceeding++
		}
	}
	return exceeding / float64(len(actual))
}
// RSquared returns the coefficient of determination (1 - SSres/SStot) of
// the actual vs the predicted values. Entries whose actual value is NaN are
// skipped (though a NaN predicted value still poisons SSres). Relies on the
// package-level Mean helper; returns NaN for mismatched sizes, empty input,
// or zero total variance.
func RSquared(actual, predicted []float64) float64 {
	if len(predicted) != len(actual) || len(predicted) == 0 {
		log.Println("actual and predicted values are of different size!!")
		return math.NaN()
	}
	mean := Mean(actual)
	ssRes, ssTot := 0.0, 0.0
	for i := range actual {
		if !math.IsNaN(actual[i]) {
			ssTot += (actual[i] - mean) * (actual[i] - mean)
			ssRes += (actual[i] - predicted[i]) * (actual[i] - predicted[i])
		}
	}
	if ssTot > 0 {
		return 1.0 - (ssRes / ssTot)
	}
	return math.NaN()
}
// Percentile returns the i-th percentile of an array of values
func Percentile(values []float64, percentile int) float64 {
valuesCopy := make([]float64, len(values))
copy(valuesCopy, values)
if !sort.Float64sAreSorted(valuesCopy) {
sort.Float64s(valuesCopy)
}
idx := int(math.Ceil((float64(percentile) / 100.0) * float64(len(valuesCopy))))
if idx < len(valuesCopy) {
return valuesCopy[idx]
}
if len(valuesCopy) > 0 {
return valuesCopy[len(valuesCopy)-1]
}
return math.NaN()
} | core/modeling.go | 0.595022 | 0.470068 | modeling.go | starcoder |
package gobulk
// TrackerNextContainersOpt represents optional parameters which could be used to modify the tracker
// behaviour in the NextContainers method.
type TrackerNextContainersOpt int

const (
	// TrackerNextContainersOptNoLock prevents containers from being locked.
	TrackerNextContainersOptNoLock TrackerNextContainersOpt = iota
	// TrackerNextContainersOptOnlyNew enhances the query to get only containers which got to the
	// tracker after the last processed one.
	TrackerNextContainersOptOnlyNew
)
// Tracker represents a storage that acts as a registry for import iterations. It tracks iteration
// state and details and state of containers, issues and operations. The Tracker interface is used
// to track the import progress.
type Tracker interface {
	Storage
	// CurrentIteration retrieves the current iteration state of the passed format. A completely
	// populated iteration is expected as the result with all the fields set.
	CurrentIteration(format Format) (*Iteration, error)
	// NewIteration creates a new iteration based on the format definitions and saves it in the tracker.
	// A completely populated iteration is expected as the result with all the fields set.
	NewIteration(format Format, number uint) (*Iteration, error)
	// GetUnfinishedContainers returns a list of containers which have already been tracked but haven't
	// yet been finished.
	GetUnfinishedContainers() ([]*Container, error)
	// TrackContainers tracks the containers in the slice and updates corresponding Iteration last
	// tracked container with the last one in the slice.
	TrackContainers(containers []*Container) (*TrackContainersResponse, error)
	// NextContainers by default, searches for and returns new processable containers and locks them
	// (marks as started). However, it's possible to modify the method behaviour by the opts parameter.
	NextContainers(readStrategy Strategy, number int, opts ...TrackerNextContainersOpt) ([]*Container, error)
	// TrackContainerOperations persists the containers operations and their error/success status.
	// NOTE(review): the parameter is named `container` but receives a slice — consider renaming.
	TrackContainerOperations(container []*Container) (*ProcessContainersResult, error)
	// FinishContainers sets the containers state as successfully and completely imported.
	FinishContainers(container []*Container) (*ProcessContainersResult, error)
	// TrackIssue tracks the issue.
	TrackIssue(issue *Issue) error
}
// TrackContainersResponse represents a successful result of a TrackContainers call.
type TrackContainersResponse struct {
// Tracked is a subslice of containers to track which are new and have been tracked.
Tracked []*Container
// Conflicted is a subslice of containers to track which already exist in the tracker.
Conflicted []*Container
} | tracker.go | 0.545286 | 0.418429 | tracker.go | starcoder |
package zcalendar
import (
"bytes"
"errors"
"fmt"
"sort"
"strconv"
"strings"
)
// A component is a single unit of an event expression: either a plain value
// or an inclusive range, optionally with a repetition interval. The time
// unit is implied by where the component appears in the expression.
type component struct {
	From   int // single value, or lower bound of a range
	To     int // upper bound of a range; 0 means "not a range"
	Repeat int // repetition interval; 0 means "no repetition"
}

// parseValue creates a component from a string holding a single
// non-negative value with an optional "/repeat" suffix, e.g. "05" or "5/3".
func parseValue(raw string) (c component, err error) {
	value, repeat := raw, ""
	if slash := strings.Index(raw, "/"); slash != -1 {
		value, repeat = raw[:slash], raw[slash+1:]
	} else {
		repeat = ""
	}
	v, err := strconv.ParseInt(value, 10, 64)
	if err != nil {
		return c, fmt.Errorf(`invalid value: %w`, err)
	}
	if v < 0 {
		return c, errors.New("invalid negative value")
	}
	c.From = int(v)
	if repeat != "" || strings.Contains(raw, "/") {
		if strings.Contains(raw, "/") {
			v, err = strconv.ParseInt(repeat, 10, 64)
			if err != nil {
				return c, fmt.Errorf(`invalid repeat: %w`, err)
			}
			if v < 0 {
				return c, errors.New("invalid negative repeat")
			}
			c.Repeat = int(v)
		}
	}
	return c, nil
}
// parseRange creates a component from a string of the form "low..high" with
// an optional "/repeat" suffix, e.g. "2..8/3". Bounds must be non-negative
// and low must be strictly less than high.
func parseRange(raw string) (c component, err error) {
	value, repeat := raw, ""
	slash := strings.Index(raw, "/")
	if slash != -1 {
		value, repeat = raw[:slash], raw[slash+1:]
	}
	bounds := strings.Split(value, "..")
	if len(bounds) != 2 {
		return c, errors.New("invalid range")
	}
	from, err := strconv.ParseInt(bounds[0], 10, 64)
	if err != nil {
		return c, fmt.Errorf(`invalid value: %w`, err)
	}
	if from < 0 {
		return c, errors.New("invalid negative lower bound")
	}
	c.From = int(from)
	to, err := strconv.ParseInt(bounds[1], 10, 64)
	if err != nil {
		return c, fmt.Errorf(`invalid value: %w`, err)
	}
	if to < 0 {
		return c, errors.New("invalid negative upper bound")
	}
	c.To = int(to)
	if c.From >= c.To {
		return c, errors.New("invalid bounds")
	}
	if slash != -1 {
		rep, err := strconv.ParseInt(repeat, 10, 64)
		if err != nil {
			return c, fmt.Errorf(`invalid repeat: %w`, err)
		}
		if rep < 0 {
			return c, errors.New("invalid negative repeat")
		}
		c.Repeat = int(rep)
	}
	return c, nil
}
// MarshalText implements the encoding.TextMarshaler interface.
// Zero values of To and Repeat act as "unset" sentinels: the "..to" and
// "/repeat" parts are emitted only when non-zero.
func (c component) MarshalText() (text []byte, err error) {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "%02d", c.From)
	if c.To != 0 {
		fmt.Fprintf(&buf, "..%02d", c.To)
	}
	if c.Repeat != 0 {
		fmt.Fprintf(&buf, "/%d", c.Repeat)
	}
	return buf.Bytes(), nil
}
// components is an ordered list of parsed sub-expressions.
type components []component

// parseComponents create a slice of components from a string representing a
// comma-separated list of values and ranges.
// Each chunk is parsed as a range when it contains "..", as a single value
// otherwise; errors are wrapped with the chunk's position in the list.
func parseComponents(raw string) (cs components, err error) {
	for index, chunk := range strings.Split(raw, ",") {
		if strings.Contains(chunk, "..") {
			c, err := parseRange(chunk)
			if err != nil {
				return cs, fmt.Errorf(`parsing range %d: %w`, index, err)
			}
			cs = append(cs, c)
			continue
		}
		c, err := parseValue(chunk)
		if err != nil {
			return cs, fmt.Errorf(`parsing value %d: %w`, index, err)
		}
		cs = append(cs, c)
	}
	return cs, err
}
// MarshalText implements the encoding.MarshalText interface for a component
// slice. Components are joined with commas; individual component marshaling
// never fails, so its error is ignored.
func (cs components) MarshalText() (text []byte, err error) {
	var parts [][]byte
	for _, c := range cs {
		b, _ := c.MarshalText()
		parts = append(parts, b)
	}
	return bytes.Join(parts, []byte(",")), nil
}
// String implements the fmt.Stringer interface.
// It renders the same comma-separated form as MarshalText.
func (cs components) String() string {
	b, _ := cs.MarshalText()
	return string(b)
}
// Values return the list of actual values from the various sub-components.
// Expansion is capped at max, deduplicated across components, and returned
// in ascending order.
func (cs components) Values(max int) (values []int) {
	var seen = make(map[int]struct{})
	for _, c := range cs {
		for {
			if c.To == 0 {
				// Single value (To == 0 means no upper bound was set).
				// NOTE(review): an initial From greater than max is still
				// recorded here; callers appear to rely on external range
				// validation — confirm.
				seen[c.From] = struct{}{}
			} else {
				for v := c.From; v <= c.To && v <= max; v++ {
					seen[v] = struct{}{}
				}
			}
			if c.Repeat == 0 {
				break
			}
			// Shift the value/range forward by the repetition step and stop
			// once the lower end passes max.
			c.From += c.Repeat
			if c.To != 0 {
				c.To += c.Repeat
			}
			if c.From > max {
				break
			}
		}
	}
	values = make([]int, 0, len(seen))
	for k := range seen {
		values = append(values, k)
	}
	sort.Ints(values)
	return
}
// Next returns the next valid value for the components, based on the current
// value. The next value can be equal to the current value if it is valid. The
// returned value can be smaller than the current value as the values are
// considered modulo the maximum value.
func (cs components) Next(current, max int) (next int, diff int, ok bool) {
values := cs.Values(max)
if len(values) == 0 {
return
}
// Get the first value that is greater or equal to the current value.
var i int
for i = 0; i < len(values) && values[i] < current; i++ {
}
var val = values[i%len(values)]
return val, val - current, true
} | component.go | 0.776877 | 0.405743 | component.go | starcoder |
package p502
/**
Suppose LeetCode will start its IPO soon. In order to sell a good price of its shares to Venture Capital, LeetCode would like to work on some projects to increase its capital before the IPO. Since it has limited resources, it can only finish at most k distinct projects before the IPO. Help LeetCode design the best way to maximize its total capital after finishing at most k distinct projects.
You are given several projects. For each project i, it has a pure profit Pi and a minimum capital of Ci is needed to start the corresponding project. Initially, you have W capital. When you finish a project, you will obtain its pure profit and the profit will be added to your total capital.
To sum up, pick a list of at most k distinct projects from given projects to maximize your final capital, and output your final maximized capital.
Example 1:
Input: k=2, W=0, Profits=[1,2,3], Capital=[0,1,1].
Output: 4
Explanation: Since your initial capital is 0, you can only start the project indexed 0.
After finishing it you will obtain profit 1 and your capital becomes 1.
With capital 1, you can either start the project indexed 1 or the project indexed 2.
Since you can choose at most 2 projects, you need to finish the project indexed 2 to get the maximum capital.
Therefore, output the final maximized capital, which is 0 + 1 + 3 = 4.
Note:
You may assume all numbers in the input are non-negative integers.
The length of Profits array and Capital array will not exceed 50,000.
The answer is guaranteed to fit in a 32-bit signed integer.
*/
// pAndC pairs a project's pure profit (p) with its minimum required capital (c).
type pAndC struct {
	p int
	c int
}

// greater reports whether a should be ordered before b in the priority queue.
type greater func(a, b pAndC) bool

// MaxPq is a binary-heap priority queue over pAndC values, ordered by cmp.
type MaxPq struct {
	cmp greater
	pq []pAndC
}
// swim restores the heap invariant by moving the element at index k up
// toward the root while it compares before its parent at (k-1)/2.
func (p *MaxPq) swim(k int) {
	for k > 0 && p.cmp(p.pq[k], p.pq[(k-1)>>1]) {
		p.pq[(k-1)>>1], p.pq[k] = p.pq[k], p.pq[(k-1)>>1]
		k = (k - 1) >> 1
	}
}
// sink restores the heap invariant by moving the element at index k down,
// repeatedly swapping it with its higher-priority child.
func (p *MaxPq) sink(k int) {
	n := len(p.pq)
	for (k<<1)+1 < n {
		j := (k << 1) + 1
		// Pick the higher-priority of the two children.
		if j < n-1 && !p.cmp(p.pq[j], p.pq[j+1]) {
			j++
		}
		if p.cmp(p.pq[j], p.pq[k]) {
			p.pq[j], p.pq[k] = p.pq[k], p.pq[j]
		} else {
			break
		}
		k = j
	}
}
// Insert appends v and sifts it up to its heap position.
func (p *MaxPq) Insert(v pAndC) {
	n := len(p.pq)
	p.pq = append(p.pq, v)
	p.swim(n)
}

// Max returns (without removing) the highest-priority element.
// It panics on an empty queue.
func (p *MaxPq) Max() pAndC {
	return p.pq[0]
}

// DelMax removes the highest-priority element by swapping it with the last
// element, truncating, and sinking the new root.
func (p *MaxPq) DelMax() {
	n := len(p.pq)
	p.pq[0], p.pq[n-1] = p.pq[n-1], p.pq[0]
	p.pq = p.pq[:n-1]
	p.sink(0)
}

// Empty reports whether the queue has no elements.
func (p *MaxPq) Empty() bool {
	return len(p.pq) == 0
}
func findMaximizedCapital(k int, W int, Profits []int, Capital []int) int {
minC := MaxPq{pq: make([]pAndC, 0), cmp: func(a, b pAndC) bool {
return a.c < b.c
}}
maxP := MaxPq{pq: make([]pAndC, 0), cmp: func(a, b pAndC) bool {
return a.p > b.p
}}
for i := 0; i < len(Profits); i++ {
minC.Insert(pAndC{p: Profits[i], c: Capital[i]})
}
for i := 0; i < k; i++ {
for !minC.Empty() && minC.Max().c <= W {
maxP.Insert(minC.Max())
minC.DelMax()
}
if maxP.Empty() {
break
} else {
W += maxP.Max().p
maxP.DelMax()
}
}
return W
} | algorithms/p502/502.go | 0.70304 | 0.635392 | 502.go | starcoder |
package generation
import (
"image"
"image/color"
"image/png"
"io"
"math"
"github.com/breiting/g3next/noise"
"github.com/g3n/engine/math32"
)
// NoiseMap holds a 2D grid of normalized noise samples.
type NoiseMap struct {
	// data is addressed data[x][y]; see Get.
	data [][]float32
	// Width is the number of columns (x axis).
	Width int
	// Height is the number of rows (y axis).
	Height int
}
// NewNoiseMap generates a width x height map of noise values normalized to
// [0, 1] by accumulating several octaves of Perlin noise.
//
//	seed        - seed for the underlying Perlin generator
//	width       - number of columns (x axis)
//	height      - number of rows (y axis)
//	ofs         - horizontal sampling offset
//	scale       - zoom factor; larger values sample the noise more densely
//	octaves     - number of noise layers to accumulate
//	persistance - per-octave amplitude multiplier
//	lacunarity  - per-octave frequency increment
func NewNoiseMap(seed int64, width, height int, ofs, scale float64, octaves int, persistance, lacunarity float64) NoiseMap {
	min := math.MaxFloat64
	max := -math.MaxFloat64
	// seed := int64(1587046793530293277)
	// offset := [2]float64{1, 1}
	offset := [2]float64{ofs, 0}
	// NewPerlin creates new Perlin noise generator
	// In what follows "alpha" is the weight when the sum is formed.
	// Typically it is 2, As this approaches 1 the function is noisier.
	// "beta" is the harmonic scaling/spacing, typically 2, n is the
	// number of iterations and seed is the math.rand seed value to use
	perlinEngine := noise.NewPerlin(2, 2, 3, seed)
	noiseMap := NoiseMap{
		Width:  width,
		Height: height,
	}
	// The grid is indexed data[x][y] throughout (see Get), so the outer slice
	// needs `width` entries and each column `height` entries. The previous
	// allocation was transposed (height outer, width inner), which panicked
	// with an index out of range for any non-square map.
	noiseMap.data = make([][]float32, width)
	for i := range noiseMap.data {
		noiseMap.data[i] = make([]float32, height)
	}
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			amplitude := float64(1)
			frequency := float64(1)
			noiseHeight := float64(0)
			for i := 0; i < octaves; i++ {
				perlinValue := perlinEngine.Noise2D(
					float64(x)/scale*frequency+offset[0],
					float64(y)/scale*frequency+offset[1])
				noiseHeight += perlinValue * amplitude
				amplitude *= persistance
				// NOTE(review): classic octave noise multiplies the frequency
				// by lacunarity (frequency *= lacunarity); additive growth
				// here changes the octave spacing — confirm it is intentional.
				frequency += lacunarity
			}
			// Track the global extremes for normalization below.
			if noiseHeight < min {
				min = noiseHeight
			}
			if noiseHeight > max {
				max = noiseHeight
			}
			noiseMap.data[x][y] = float32(noiseHeight)
		}
	}
	// Rescale every sample into [0, 1].
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			noiseMap.data[x][y] = normalize(float32(min), float32(max), noiseMap.data[x][y])
		}
	}
	return noiseMap
}
// Get returns the noise sample at column x, row y.
func (n *NoiseMap) Get(x, y int) float32 {
	// TODO check boundaries
	return n.data[x][y]
}
// GetColor returns the color mapped from the sample at (x, y); see getColor.
func (n *NoiseMap) GetColor(x, y int) color.RGBA {
	return getColor(n.data[x][y])
}
// WriteImage renders the noise map as a colored PNG (one pixel per sample,
// colored via getColor) and writes it to w. It returns any error reported by
// the PNG encoder; previously that error was silently discarded.
func (n *NoiseMap) WriteImage(w io.Writer) error {
	upLeft := image.Point{0, 0}
	lowRight := image.Point{n.Width, n.Height}
	img := image.NewRGBA(image.Rectangle{upLeft, lowRight})
	for y := 0; y < n.Height; y++ {
		for x := 0; x < n.Width; x++ {
			img.Set(x, y, getColor(n.data[x][y]))
		}
	}
	return png.Encode(w, img)
}
// interpolateColor linearly interpolates between the RGB colors (r1,g1,b1)
// and (r2,g2,b2) with interpolation factor v, returning the result as a
// color.RGBA with full alpha. Channel inputs are on a 0-255 scale.
func interpolateColor(v float32, r1, g1, b1, r2, g2, b2 float32) color.RGBA {
	sample := math32.Color{
		R: r1 / 255.0,
		G: g1 / 255.0,
		B: b1 / 255.0,
	}
	sample.Lerp(&math32.Color{
		R: r2 / 255.0,
		G: g2 / 255.0,
		B: b2 / 255.0, // was r2: the target color's blue channel was wrong
	}, v)
	return color.RGBA{
		R: uint8(sample.R * 255.0),
		G: uint8(sample.G * 255.0),
		B: uint8(sample.B * 255.0),
		A: 0xff,
	}
}
// getColor maps a normalized noise value v in [0, 1] to a color band; each
// band interpolates between two RGB endpoints. Thresholds: <0.4, <0.45
// (sand), <0.6, <0.9, and snow above.
func getColor(v float32) color.RGBA {
	if v < 0.4 {
		return interpolateColor(v, 14, 0, 100, 0, 51, 100)
	}
	if v < 0.45 {
		// sand
		return interpolateColor(v, 100, 80, 0, 77, 100, 0)
	}
	if v < 0.6 {
		return interpolateColor(v, 0, 48, 23, 0, 95, 44)
	}
	if v < 0.9 {
		return interpolateColor(v, 50, 26, 20, 26, 21, 20)
	}
	// snow
	return interpolateColor(v, 72, 72, 72, 255, 255, 255)
}
// normalize linearly maps val from the range [min..max] onto [0..1].
// Note: min == max yields a division by zero (Inf/NaN), matching the
// original behavior.
func normalize(min, max, val float32) float32 {
	span := max - min
	return (val - min) / span
}
package yamlpath
import (
"errors"
"strings"
"github.com/dprotaso/go-yit"
"gopkg.in/yaml.v3"
)
// Path is a compiled YAML path expression.
type Path struct {
	// f maps a node (and the document root, used by filter expressions) to
	// an iterator over the matching subnodes.
	f func(node, root *yaml.Node) yit.Iterator
}

// Find applies the Path to a YAML node and returns the addresses of the subnodes which match the Path.
func (p *Path) Find(node *yaml.Node) []*yaml.Node {
	return p.find(node, node)
}

// find evaluates the path against node, keeping root available for filters
// that re-anchor at the document root.
func (p *Path) find(node, root *yaml.Node) []*yaml.Node {
	return p.f(node, root).ToArray()
}
// NewPath constructs a Path from a string expression.
// The expression is lexed lazily; syntax errors surface from newPath.
func NewPath(path string) (*Path, error) {
	return newPath(lex("Path lexer", path))
}
// newPath consumes lexemes from l and recursively builds the matcher chain
// for the remainder of the path expression.
func newPath(l *lexer) (*Path, error) {
	lx := l.nextLexeme()
	switch lx.typ {
	case lexemeError:
		return nil, errors.New(lx.val)
	case lexemeIdentity, lexemeEOF:
		// End of expression: match the node itself.
		return new(identity), nil
	case lexemeRoot:
		subPath, err := newPath(l)
		if err != nil {
			return new(empty), err
		}
		return new(func(node, root *yaml.Node) yit.Iterator {
			// "$" starts matching at the document's content node.
			if node.Kind == yaml.DocumentNode {
				node = node.Content[0]
			}
			return compose(yit.FromNode(node), subPath, root)
		}), nil
	case lexemeRecursiveDescent:
		subPath, err := newPath(l)
		if err != nil {
			return new(empty), err
		}
		childName := strings.TrimPrefix(lx.val, "..")
		if childName == "*" { // includes all nodes, not just mapping nodes
			return new(func(node, root *yaml.Node) yit.Iterator {
				return compose(yit.FromNode(node).RecurseNodes(), subPath, root)
			}), nil
		}
		// "..name": apply the child match at every recursively reached node.
		return new(func(node, root *yaml.Node) yit.Iterator {
			return compose(yit.FromNode(node).RecurseNodes(), childThen(childName, subPath), root)
		}), nil
	case lexemeDotChild:
		subPath, err := newPath(l)
		if err != nil {
			return new(empty), err
		}
		childName := strings.TrimPrefix(lx.val, ".")
		return childThen(childName, subPath), nil
	case lexemeUndottedChild:
		subPath, err := newPath(l)
		if err != nil {
			return new(empty), err
		}
		return childThen(lx.val, subPath), nil
	case lexemeBracketChild:
		subPath, err := newPath(l)
		if err != nil {
			return new(empty), err
		}
		childNames := strings.TrimSuffix(strings.TrimPrefix(lx.val, "['"), "']")
		return childrenThen(childNames, subPath), nil
	case lexemeArraySubscript:
		subPath, err := newPath(l)
		if err != nil {
			return new(empty), err
		}
		subscript := strings.TrimSuffix(strings.TrimPrefix(lx.val, "["), "]")
		return arraySubscriptThen(subscript, subPath), nil
	case lexemeFilterBegin:
		// Collect the filter's lexemes up to its matching end, tracking
		// nesting so inner filters are captured verbatim.
		filterLexemes := []lexeme{}
		filterNestingLevel := 1
	f:
		for {
			lx := l.nextLexeme()
			switch lx.typ {
			case lexemeFilterBegin:
				filterNestingLevel++
			case lexemeFilterEnd:
				filterNestingLevel--
				if filterNestingLevel == 0 {
					break f
				}
			case lexemeEOF:
				// should never happen as lexer should have detected an error
				return new(empty), errors.New("missing end of filter")
			}
			filterLexemes = append(filterLexemes, lx)
		}
		subPath, err := newPath(l)
		if err != nil {
			return new(empty), err
		}
		return filterThen(filterLexemes, subPath), nil
	}
	return new(empty), errors.New("invalid path syntax")
}
// identity yields the node itself, or nothing for the zero (unset) node.
func identity(node, root *yaml.Node) yit.Iterator {
	if node.Kind == 0 {
		return yit.FromNodes()
	}
	return yit.FromNode(node)
}
// empty matches nothing; it is the result of any failed match step.
func empty(node, root *yaml.Node) yit.Iterator {
	return yit.FromNodes()
}
// compose applies path p to every node produced by i and concatenates the
// resulting iterators.
func compose(i yit.Iterator, p *Path, root *yaml.Node) yit.Iterator {
	its := []yit.Iterator{}
	for a, ok := i(); ok; a, ok = i() {
		its = append(its, p.f(a, root))
	}
	return yit.FromIterators(its...)
}
// new wraps a matcher function in a Path.
// NOTE: this shadows the builtin new within this package.
func new(f func(node, root *yaml.Node) yit.Iterator) *Path {
	return &Path{f: f}
}
// childrenThen handles bracket-child names of the form "a.b.c" by chaining a
// childThen step for each dot-separated segment, terminating with path p.
func childrenThen(childNames string, p *Path) *Path {
	c := strings.SplitN(childNames, ".", 2)
	if len(c) == 2 {
		return childThen(c[0], childrenThen(c[1], p))
	}
	return childThen(c[0], p)
}
// childThen matches the mapping entry named childName (or every entry for
// "*") and then applies path p to its value. Non-mapping nodes match nothing.
func childThen(childName string, p *Path) *Path {
	if childName == "*" {
		return allChildrenThen(p)
	}
	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind != yaml.MappingNode {
			return empty(node, root)
		}
		// Mapping content alternates key, value; even indices are keys.
		for i, n := range node.Content {
			if i%2 == 0 && n.Value == childName {
				return compose(yit.FromNode(node.Content[i+1]), p, root)
			}
		}
		return empty(node, root)
	})
}
// allChildrenThen applies path p to the value of every entry of a mapping
// node ("*" wildcard); non-mapping nodes match nothing.
func allChildrenThen(p *Path) *Path {
	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind != yaml.MappingNode {
			return empty(node, root)
		}
		its := []yit.Iterator{}
		for i, n := range node.Content {
			if i%2 == 0 {
				continue // skip child names
			}
			its = append(its, compose(yit.FromNode(n), p, root))
		}
		return yit.FromIterators(its...)
	})
}
// arraySubscriptThen selects the sequence elements addressed by subscript
// (resolved to indices by slice) and applies path p to each.
// Non-sequence nodes match nothing.
func arraySubscriptThen(subscript string, p *Path) *Path {
	return new(func(node, root *yaml.Node) yit.Iterator {
		if node.Kind != yaml.SequenceNode {
			return empty(node, root)
		}
		slice, err := slice(subscript, len(node.Content))
		if err != nil {
			panic(err) // should not happen, lexer should have detected errors
		}
		its := []yit.Iterator{}
		for _, s := range slice {
			its = append(its, compose(yit.FromNode(node.Content[s]), p, root))
		}
		return yit.FromIterators(its...)
	})
}
func filterThen(filterLexemes []lexeme, p *Path) *Path {
filter := newFilter(newFilterNode(filterLexemes))
return new(func(node, root *yaml.Node) yit.Iterator {
if node.Kind != yaml.SequenceNode {
panic("not implemented")
}
its := []yit.Iterator{}
for _, c := range node.Content {
if filter(c, root) {
its = append(its, compose(yit.FromNode(c), p, root))
}
}
return yit.FromIterators(its...)
})
} | pkg/yamlpath/path.go | 0.682045 | 0.480479 | path.go | starcoder |
package hash
import (
"math"
"sort"
"sync"
"github.com/sachaservan/vec"
)
/*
This implements a hash function by finding the closest point of the leech lattice
See Appendix B of http://web.mit.edu/andoni/www/papers/cSquared.pdf
The lattice provides the densest sphere packing of 24 dimensional space.
Sloane proved that the lattice points are at most sqrt(2) away from any point
Which bounds the error accordingly
*/
// once guards the one-time table precomputation (see Precompute).
var once = sync.Once{}

// LatticeHash hashes vectors by decoding them to the nearest Leech lattice
// point after a random rotation/translation (H) and scaling (Scale).
type LatticeHash struct {
	H *HashCommon
	Scale float64
}
/*
This constructs a new leech lattice hash where the lattice points are the centers of spheres of radius sqrt(8)
A random rotation and translation are applied and the space is scaled for the desired "R" - LSH hash width
The JL-transform step is performed in the same matrix as rotation
*/
// dim is the input dimensionality, width the LSH hash width "R"; max is
// passed through to NewHashCommon (its semantics are defined there).
func NewLatticeHash(dim int, width, max float64) *LatticeHash {
	// alternatively this could be read from a file
	once.Do(Precompute)
	// lattice is scaled by sqrt(8)
	baseScale := 2 * math.Sqrt2
	// HashCommon projection with an orthogonal matrix is implicitly a JL transform
	// However, it needs to be normalized by column rather than row
	jlScale := math.Sqrt(float64(dim) / 24.0)
	// width scales the space down to fit within the lattice
	H := &LatticeHash{H: NewHashCommon(dim, 24, max, true), Scale: baseScale * jlScale / width}
	return H
}
// HashWithDist computes the hash point for v and the squared distance to the
// closest lattice vector (measured in the rotated/scaled space).
func (l *LatticeHash) HashWithDist(v *vec.Vec) (*vec.Vec, float64) {
	// apply rotation and translation
	v = l.H.Project(v)
	// apply scaling
	v = v.Scale(l.Scale)
	// this always returns an integer coordinate of the leech lattice
	v, dist := LeechLatticeClosestVector(v)
	// lattice points are always the same set of keys
	// To distinguish between hashes add a random value to the vector
	// We could also unrotate to return the true closest point
	v, _ = v.Add(l.H.Offsets)
	return v, dist
}
// MultiProbeHashWithDist returns the `probes` closest hash points and their
// squared distances, ordered nearest first.
func (l *LatticeHash) MultiProbeHashWithDist(v *vec.Vec, probes int) ([]*vec.Vec, []float64) {
	// apply rotation and translation
	v = l.H.Project(v)
	// apply scaling
	v = v.Scale(l.Scale)
	vs, dists := LeechLatticeClosestVectors(v, probes)
	for i := range vs {
		// offset each candidate the same way as HashWithDist
		vs[i], _ = vs[i].Add(l.H.Offsets)
	}
	return vs, dists
}
// Hash returns the bucket for v: the closest lattice point reduced through
// the universal hash.
func (l *LatticeHash) Hash(v *vec.Vec) uint64 {
	H, _ := l.HashWithDist(v)
	return l.H.UHash.Hash(H)
}

// MultiHash returns buckets for the `probes` closest lattice points.
func (l *LatticeHash) MultiHash(v *vec.Vec, probes int) []uint64 {
	H, _ := l.MultiProbeHashWithDist(v, probes)
	hashes := make([]uint64, probes)
	for i := range H {
		hashes[i] = l.H.UHash.Hash(H[i])
	}
	return hashes
}
// LeechLatticeClosestVector wraps LeechLatticeClosestPoint for vec.Vec,
// snapping each coordinate to an exact integer.
func LeechLatticeClosestVector(v *vec.Vec) (*vec.Vec, float64) {
	p, dist := LeechLatticeClosestPoint(v.Coords)
	for i := range p {
		// ensure no floating point shenanigans
		p[i] = math.Round(p[i])
	}
	return vec.NewVec(p), dist
}
// LeechLatticeClosestVectors wraps LeechLatticeClosestPoints for vec.Vec,
// returning the numPoints closest lattice vectors and squared distances.
func LeechLatticeClosestVectors(v *vec.Vec, numPoints int) ([]*vec.Vec, []float64) {
	points, distances := LeechLatticeClosestPoints(v.Coords, numPoints)
	out := make([]*vec.Vec, numPoints)
	dists := make([]float64, numPoints)
	for i := range out {
		for j := range points[i] {
			// snap to exact integers to avoid floating point noise
			points[i][j] = math.Round(points[i][j])
		}
		out[i] = vec.NewVec(points[i])
		dists[i] = distances[i]
	}
	return out, dists
}
/*
This algorithm is based on https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1057135
According to https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=243466 this takes 56000 flops
While the more optimal algorithm takes only 3595, so this could be improved significantly
But it is much more understandable.
We build the Leech lattice out of 3 copies of the E_8 lattice, which are built out of 2 copies of the D_8 lattice.
*/
/*
The D_8 lattice consists of integer points in 8 dimensional space where the sum of the coordinates is even
The simplest algorithm for decoding (finding the closest point) is given in neilsloane.com/doc/Me83.pdf
First we find the closest integer point and check if the sum of the coordinates is even.
If it is, then we are done.
Otherwise, we find the second closest point, and note that this can be found by taking the coordinate that
was rounded the most, and then rounding it the other direction. This changes the sum by 1 and makes it even.
*/
// D8Decode returns the closest D8 lattice point to f: the nearest integer
// point whose coordinate sum is even. If plain rounding gives an odd sum,
// the coordinate that was rounded the furthest is flipped to its other
// rounding direction, changing the sum's parity at minimal extra cost.
func D8Decode(f []float64) [8]float64 {
	var v [8]float64
	sum := 0
	worstIdx, worstDist := 0, -1.0
	alt := 0.0
	for i, x := range f {
		r := math.Round(x)
		v[i] = r
		sum += int(r)
		delta := x - r
		if d := math.Abs(delta); d > worstDist {
			worstDist = d
			worstIdx = i
			// The second-best rounding goes the other way.
			if delta > 0 {
				alt = r + 1
			} else {
				alt = r - 1
			}
		}
	}
	if sum%2 != 0 {
		v[worstIdx] = alt
	}
	return v
}
/*
The E_8 lattice consists of two copies of D_8, offset by the vector (1/2,1/2,1/2,1/2,1/2,1/2,1/2,1/2)
See https://en.wikipedia.org/wiki/E8_lattice#Lattice_points for details
We follow the algorithm in neilsloane.com/doc/Me83.pdf
We first find the closest point in each of the two copies of D_8
Then we return the closer of the two
*/
// E8Decode returns the closest E8 lattice point to f along with its squared
// distance. E8 is the union of D8 and D8 shifted by (1/2, ..., 1/2); decode
// against both cosets and keep the nearer result.
func E8Decode(f []float64) ([8]float64, float64) {
	y0 := D8Decode(f)
	// Shift into the half-integer coset, decode, then shift back.
	t := [8]float64{}
	for k := range f {
		t[k] = f[k] - 0.5
	}
	y1 := D8Decode(t[:])
	for k := range y1 {
		y1[k] += 0.5
	}
	d0 := DistSquared(f, y0[:])
	d1 := DistSquared(f, y1[:])
	if d0 < d1 {
		return y0, d0
	} else {
		return y1, d1
	}
}
/*
Now we construct the Leech lattice based on the Turyn code as in https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1057135
There are 4096 possible ways for the three copies of E_8 to be arranged, and then these are all stuck together
We simply iterate over each possible construction.
The E_8 lattices are reused, so we first find the closest point in each possible E_8 lattice in the following function
*/
/*
Out lattice is scaled by sqrt8, so we are actually using lambda_8 = E8 * 4 - where all coordinates are multiplied by 4
Thus all lattice points are integers instead of half integers (in fact they are only even integers)
This function iterates through the possible arrangements as given in table 6 and stores the closest points to each in p
It also stores the (squared) distances to those points in d
*/
// LeechLatticeClosest decodes each of the three 8-coordinate sections of f
// against all 256 offsets of TableVi, returning the per-section closest
// scaled-E8 points p and their squared distances d (distances are left
// scaled; see the inline note).
func LeechLatticeClosest(f []float64) ([256][3][8]float64, [256][3]float64) {
	p := [256][3][8]float64{}
	d := [256][3]float64{}
	for j := range TableVi {
		t := [24]float64{}
		pj := [8]float64{}
		for k := range pj {
			pj[k] = float64(TableVi[j][k])
		}
		for k := range t {
			// The 8-coordinate offset is applied to each section in turn.
			t[k] = f[k] + pj[k%8]
			// Scale for E8
			t[k] = t[k] / 4
		}
		p[j][0], d[j][0] = E8Decode(t[0:8])
		p[j][1], d[j][1] = E8Decode(t[8:16])
		p[j][2], d[j][2] = E8Decode(t[16:24])
		for k := 0; k < 8; k++ {
			// Unscale
			// These should all be integers after this multiplication
			p[j][0][k] *= 4
			p[j][1][k] *= 4
			p[j][2][k] *= 4
			// technically each d (square of distance) should also be unscaled by 16
			// but it doesn't effect which one is the minimum
		}
	}
	return p, d
}
/*
This function iterates through each of the possible arrangements as given in Table 7
And finds the distance given by the arrangement
It then returns the closest point and its corresponding distance
*/
// LeechLatticeClosestPoint picks, over all arrangements in TableVii, the
// combination of section decodings with the smallest total squared distance
// and assembles the corresponding 24-coordinate lattice point.
func LeechLatticeClosestPoint(f []float64) ([]float64, float64) {
	p, d := LeechLatticeClosest(f)
	best := math.MaxFloat64
	bestIndex := 0
	for j := range TableVii {
		dist := d[TableVii[j][0]][0] + d[TableVii[j][1]][1] + d[TableVii[j][2]][2]
		if dist < best {
			best = dist
			bestIndex = j
		}
	}
	// Stitch the three winning 8-coordinate sections back together.
	bestPoint := make([]float64, 24)
	copy(bestPoint[0:8], p[TableVii[bestIndex][0]][0][:])
	copy(bestPoint[8:16], p[TableVii[bestIndex][1]][1][:])
	copy(bestPoint[16:24], p[TableVii[bestIndex][2]][2][:])
	// here we unscale d in case someone wants the accurate distance information
	return bestPoint, best * 16
}
/*
This function returns the k closest points and distances instead
*/
// LeechLatticeClosestPoints returns the numPoints closest arrangements,
// sorted by ascending squared distance. The returned distances are unscaled
// by 16 so they are consistent with LeechLatticeClosestPoint (the E8 decode
// ran on coordinates divided by 4); previously they were returned in the
// scaled space, disagreeing with the single-point variant.
func LeechLatticeClosestPoints(f []float64, numPoints int) ([][]float64, []float64) {
	p, d := LeechLatticeClosest(f)
	c := Candidates{make([]uint64, len(TableVii)), make([]float64, len(TableVii))}
	for j := range TableVii {
		c.Distances[j] = d[TableVii[j][0]][0] + d[TableVii[j][1]][1] + d[TableVii[j][2]][2]
		c.Indexes[j] = uint64(j)
	}
	// priority queue or heap would be faster
	sort.Sort(&c)
	bestPoints := make([][]float64, numPoints)
	for i := 0; i < numPoints; i++ {
		bestPoints[i] = make([]float64, 24)
		copy(bestPoints[i][0:8], p[TableVii[c.Indexes[i]][0]][0][:])
		copy(bestPoints[i][8:16], p[TableVii[c.Indexes[i]][1]][1][:])
		copy(bestPoints[i][16:24], p[TableVii[c.Indexes[i]][2]][2][:])
	}
	dists := c.Distances[:numPoints]
	for i := range dists {
		dists[i] *= 16
	}
	return bestPoints, dists
}
/*
We use table IV instead of table V, it is slower but more understandable
*/
// TableIVa holds the 16 "a" vectors of Table IV(a) from the referenced
// decoding paper; summed with TableIVt entries they generate TableVi.
var TableIVa = [16][8]int8{
	{0, 0, 0, 0, 0, 0, 0, 0},
	{4, 0, 0, 0, 0, 0, 0, 0},
	{2, 2, 2, 2, 0, 0, 0, 0},
	{-2, 2, 2, 2, 0, 0, 0, 0},
	{2, 2, 0, 0, 2, 2, 0, 0},
	{-2, 2, 0, 0, 2, 2, 0, 0},
	{2, 2, 0, 0, 0, 0, 2, 2},
	{-2, 2, 0, 0, 0, 0, 2, 2},
	{2, 0, 2, 0, 2, 0, 2, 0},
	{-2, 0, 2, 0, 2, 0, 2, 0},
	{2, 0, 2, 0, 0, 2, 0, 2},
	{-2, 0, 2, 0, 0, 2, 0, 2},
	{2, 0, 0, 2, 2, 0, 0, 2},
	{-2, 0, 0, 2, 2, 0, 0, 2},
	{2, 0, 0, 2, 0, 2, 2, 0},
	{-2, 0, 0, 2, 0, 2, 2, 0},
}
// TableIVt holds the "t" vectors of Table IV(t) from the referenced paper.
// NOTE(review): the array is declared with length 18 but only 16 rows are
// initialized and only indices 0-15 are read by Precompute — the length is
// likely meant to be 16; confirm before changing.
var TableIVt = [18][8]int8{
	{0, 0, 0, 0, 0, 0, 0, 0},
	{2, 2, 2, 0, 0, 2, 0, 0},
	{2, 2, 0, 2, 0, 0, 0, 2},
	{2, 0, 2, 2, 0, 0, 2, 0},
	{0, 2, 2, 2, 2, 0, 0, 0},
	{2, 2, 0, 0, 2, 0, 2, 0},
	{2, 0, 2, 0, 2, 0, 0, 2},
	{2, 0, 0, 2, 2, 2, 0, 0},
	{-3, 1, 1, 1, 1, 1, 1, 1},
	{3, -1, -1, 1, 1, -1, 1, 1},
	{3, -1, 1, -1, 1, 1, 1, -1},
	{3, 1, -1, -1, 1, 1, -1, 1},
	{3, 1, 1, 1, 1, -1, -1, -1},
	{3, -1, 1, 1, -1, 1, -1, 1},
	{3, 1, -1, 1, -1, 1, 1, -1},
	{3, 1, 1, -1, -1, -1, 1, 1},
}
// Table 5 requires a less understandable E_8 decoder as it rotates the lattice
// If speed is a concern, there are much better decoders
/*
var TableVa = [16][8]int8{
{0, 0, 0, 0, 0, 0, 0, 0},
{2, 2, 0, 0, 0, 0, 0, 0},
{2, 0, 2, 0, 0, 0, 0, 0},
{2, 0, 0, 2, 0, 0, 0, 0},
{2, 0, 0, 0, 2, 0, 0, 0},
{2, 0, 0, 0, 0, 2, 0, 0},
{2, 0, 0, 0, 0, 0, 2, 0},
{2, 0, 0, 0, 0, 0, 0, 2},
{1, 1, 1, 1, 1, 1, 1, 1},
{-1, -1, 1, 1, 1, 1, 1, 1},
{-1, 1, -1, 1, 1, 1, 1, 1},
{-1, 1, 1, -1, 1, 1, 1, 1},
{-1, 1, 1, 1, -1, 1, 1, 1},
{-1, 1, 1, 1, 1, -1, 1, 1},
{-1, 1, 1, 1, 1, 1, -1, 1},
{-1, 1, 1, 1, 1, 1, 1, -1},
}
var TableVt = [16][8]int8{
{0, 0, 0, 0, 0, 0, 0, 0},
{1, 1, 1, 1, -1, 1, 1, 1},
{-1, 1, 1, 1, 2, 0, 0, 0},
{2, 0, 0, 0, 1, 1, 1, 1},
{1, 1, 0, 2, 1, 1, 0, 0},
{2, 0, 1, 1, 0, 0, 1, -1},
{1, 1, 0, 0, 2, 0, -1, 1},
{2, 0, -1, 1, -1, 1, 0, 0},
{1, 2, 1, 0, 1, 0, 1, 0},
{2, 1, 0, 1, 0, -1, 0, 1},
{1, 0, 1, 0, 2, 1, 0, -1},
{2, 1, 0, -1, -1, 0, 1, 0},
{1, 0, 2, 1, 1, 0, 0, 1},
{2, 1, 1, 0, 0, 1, -1, 0},
{1, 0, 0, 1, 2, -1, 1, 0},
{2, -1, 1, 0, -1, 0, 0, 1},
}
*/
// TableVi holds the 256 sums a_i + t_j; filled by Precompute.
var TableVi = [256][8]int8{}

// TableVii holds, for each of the 4096 valid (a, b, c) triples, the TableVi
// indices of a+t, b+t and c+t; filled by Precompute.
var TableVii = [4096][3]uint8{}
/*
This function implements the precomputation steps as described in https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=1057135
*/
// Precompute fills TableVi with the 256 sums a_i + t_j, and TableVii with the
// TableVi indices of (a+t, b+t, c+t) for every (a, b, c) triple whose sum
// lies in the scaled E8 lattice (4096 triples total).
func Precompute() {
	index := 0
	for i := 0; i < 16; i++ {
		for j := 0; j < 16; j++ {
			for k := 0; k < 8; k++ {
				TableVi[index][k] = TableIVa[i][k] + TableIVt[j][k]
			}
			index++
		}
	}
	if index != 256 {
		panic("expected 256 elements")
	}
	index = 0
	for ti := 0; ti < 16; ti++ {
		t := TableIVt[ti]
		for ai := 0; ai < 16; ai++ {
			a := TableIVa[ai]
			at := [8]int8{}
			for k := 0; k < 8; k++ {
				at[k] = a[k] + t[k]
			}
			atIndex := FindTableIndex(at)
			for bi := 0; bi < 16; bi++ {
				b := TableIVa[bi]
				bt := [8]int8{}
				sum := [8]int8{}
				for k := 0; k < 8; k++ {
					bt[k] = b[k] + t[k]
					sum[k] = a[k] + b[k]
				}
				btIndex := FindTableIndex(bt)
				// Take the first c that makes a+b+c a point of 4*E8.
				for ci := 0; ci < 16; ci++ {
					c := TableIVa[ci]
					s := sum
					for k := 0; k < 8; k++ {
						s[k] += c[k]
					}
					if !Is4E8Point(s) {
						continue
					}
					ct := [8]int8{}
					for k := 0; k < 8; k++ {
						ct[k] = c[k] + t[k]
					}
					ctIndex := FindTableIndex(ct)
					TableVii[index][0] = atIndex
					TableVii[index][1] = btIndex
					TableVii[index][2] = ctIndex
					index++
					break
				}
			}
		}
	}
	if index != 4096 {
		panic("Expected 4096 elements")
	}
}
// FindTableIndex returns the index of v within TableVi, panicking when the
// vector is absent (which indicates a construction bug).
func FindTableIndex(v [8]int8) uint8 {
	for i, row := range TableVi {
		if row == v {
			return uint8(i)
		}
	}
	panic("No matching vector found")
}
// Is4E8Point determines whether v is a point of the lambda_8 = E_8 * 4
// lattice: the coordinate sum must be a multiple of 8, and the coordinates
// must all be congruent to 0 (mod 4) or all congruent to ±2 (mod 4).
func Is4E8Point(v [8]int8) bool {
	total := 0
	for _, x := range v {
		total += int(x)
	}
	// Sums in E8 are even, so multiples of 8 after the *4 scaling.
	if total%8 != 0 {
		return false
	}
	allMult4 := true
	allHalf := true
	for _, x := range v {
		switch m := x % 4; {
		case m == 0:
			allHalf = false
		case m == 2 || m == -2:
			// Go's % keeps the sign of the dividend, hence the -2 case.
			allMult4 = false
		default:
			return false
		}
	}
	return allMult4 || allHalf
}
// DistSquared returns the squared Euclidean distance between f1 and f2;
// f2 must be at least as long as f1.
func DistSquared(f1 []float64, f2 []float64) float64 {
	var total float64
	for i, a := range f1 {
		delta := a - f2[i]
		total += delta * delta
	}
	return total
}
package cronschedule
import (
"errors"
"fmt"
"strconv"
"strings"
)
// Range definitions for the Time fields of a cron entry
const (
	// Minutes: 0-59.
	MinuteMinimum int = 0
	MinuteMaximum int = 59
	// Hours: 0-23.
	HourMinimum int = 0
	HourMaximum int = 23
	// Days of month: 1-31.
	DayOfMonthMinimum int = 1
	DayOfMonthMaximum int = 31
	// Months: 1-12.
	MonthMinimum int = 1
	MonthMaximum int = 12
	// Days of week: 0-6.
	DayOfWeekMinimum int = 0
	DayOfWeekMaximum int = 6
)
// Don't mind the EBNF. This is in-place to define the allowable Cron codes which Dune will implement
/*
Minute Timecode can be of the format:
MinuteLiteral: ["0" ... "5"], "0"..."9"
MinuteRange: Literal, "-", Literal
MinuteStepRange: Range, "/", Literal
MinuteTimeCode: ( Literal | Range | Step | StepRange ) { "," ( Literal | Range | Step | StepRange ) }
*/
/*
Hour Timecode can be of the format:
HourLiteral: ( ["0" ... "1"], "0"..."9" ) | ( "2", "0"..."3" )
HourRange: Literal, "-", Literal
HourStepRange: Range, "/", Literal
HourTimeCode: ( Literal | Range | Step | StepRange ) { "," ( Literal | Range | Step | StepRange ) }
*/
/*
DayOfMonth Timecode can be of the format:
DayOfMonthLiteral: ( "0", "1"..."9" ) | ( ["1" ... "2"], "0"..."9" ) | ( "3", "0"..."1" )
DayOfMonthRange: Literal, "-", Literal
DayOfMonthStepRange: Range, "/", Literal
DayOfMonthTimeCode: ( Literal | Range | Step | StepRange ) { "," ( Literal | Range | Step | StepRange ) }
*/
/*
Month Timecode can be of the format:
MonthLiteral: ( "0", "1"..."9" ) | ( "1", "0"..."2" )
MonthRange: Literal, "-", Literal
MonthStepRange: Range, "/", Literal
MonthTimeCode: ( Literal | Range | Step | StepRange ) { "," ( Literal | Range | Step | StepRange ) }
*/
/*
DayOfWeek Timecode can be of the format:
DayOfWeekLiteral: "0"..."6"
DayOfWeekRange: Literal, "-", Literal
DayOfWeekStepRange: Range, "/", Literal
DayOfWeekTimeCode: ( Literal | Range | Step | StepRange ) { "," ( Literal | Range | Step | StepRange ) }
*/
/*
Full Cron Timecode is fully defined as:
CronTimeCode: MinuteTimeCode, " ", HourTimeCode, " ", DayOfMonthTimeCode, " ", MonthTimeCode, " ", DayOfWeekTimeCode
*/
// ParseSchedule will convert a single Cron Timecode string into a set of integer arrays corresponding to:
// a) Minutes
// b) Hours
// c) Days Of Month
// d) Months
// e) Days of Week
// The input must contain exactly 5 space-separated fields, in that order.
func ParseSchedule(Code string) (Schedule [5][]int, err error) {
	Fields := strings.Split(Code, " ")
	if len(Fields) != 5 {
		return [5][]int{}, errors.New("cron-schedule error - invalid timecode - Must be 5 whitespace-delimited fields")
	}
	// Each field is expanded independently against its legal range.
	if Schedule[0], err = ParseTimeCode(Fields[0], MinuteMinimum, MinuteMaximum); err != nil {
		return [5][]int{}, err
	}
	if Schedule[1], err = ParseTimeCode(Fields[1], HourMinimum, HourMaximum); err != nil {
		return [5][]int{}, err
	}
	if Schedule[2], err = ParseTimeCode(Fields[2], DayOfMonthMinimum, DayOfMonthMaximum); err != nil {
		return [5][]int{}, err
	}
	if Schedule[3], err = ParseTimeCode(Fields[3], MonthMinimum, MonthMaximum); err != nil {
		return [5][]int{}, err
	}
	if Schedule[4], err = ParseTimeCode(Fields[4], DayOfWeekMinimum, DayOfWeekMaximum); err != nil {
		return [5][]int{}, err
	}
	return Schedule, nil
}
// ParseTimeCode is the full parser for a single element of the timecode. This will parse a single timecode into an array of corresponding matching times, as well as indicating if this is a valid timecode.
// The wildcard "*" is first rewritten to the range "0-60"; the expanded
// values are then filtered to [Min, Max], so out-of-range values are
// silently dropped rather than rejected.
func ParseTimeCode(Code string, Min, Max int) (values []int, err error) {
	Code = strings.Replace(Code, "*", "0-60", -1)
	var tempValues []int
	tempValues, err = parseTimeCode(Code)
	if err != nil {
		return
	}
	for _, val := range tempValues {
		if val >= Min && val <= Max {
			values = append(values, val)
		}
	}
	// A code that expands to nothing inside the legal range is an error.
	if len(values) == 0 {
		return []int{}, fmt.Errorf("cron-schedule error - timecode parse error - Code %s corresponds to no matching times between %d and %d", Code, Min, Max)
	}
	return values, nil
}
// parseTimeCode expands one comma-separated timecode field into the full list
// of values it denotes. Each chunk is tried as a literal, then a range, then
// a stepped range; anything else is rejected.
func parseTimeCode(Code string) ([]int, error) {
	Values := []int{}
	SubFields := strings.Split(Code, ",")
	for _, field := range SubFields {
		if vals, valid := parseLiteral(field); valid {
			Values = append(Values, vals)
			continue
		}
		if vals, valid := parseRange(field); valid {
			Values = append(Values, vals...)
			continue
		}
		if vals, valid := parseStepRange(field); valid {
			Values = append(Values, vals...)
			continue
		}
		return nil, fmt.Errorf("cron-schedule error - timecode parse error - Unexpected token %s", field)
	}
	if len(Values) == 0 {
		return []int{}, fmt.Errorf("cron-schedule error - timecode parse error - Code %s corresponds to no matching times", Code)
	}
	return Values, nil
}
// parseLiteral parses Code as a base-10 integer, returning (-1, false) when
// it is not a valid integer literal.
func parseLiteral(Code string) (int, bool) {
	if v, err := strconv.Atoi(Code); err == nil {
		return v, true
	}
	return -1, false
}
// parseRange parses a "lo-hi" token into the explicit list of integers it
// covers (inclusive). A reversed range such as "10-5" is normalised rather
// than rejected. The boolean reports whether the token had range shape.
func parseRange(Code string) ([]int, bool) {
	bounds := strings.Split(Code, "-")
	if len(bounds) != 2 {
		return nil, false
	}
	lo, ok := parseLiteral(bounds[0])
	if !ok {
		return nil, false
	}
	hi, ok := parseLiteral(bounds[1])
	if !ok {
		return nil, false
	}
	if hi < lo {
		lo, hi = hi, lo
	}
	values := []int{}
	for i := lo; i <= hi; i++ {
		values = append(values, i)
	}
	return values, true
}
func parseStepRange(Code string) ([]int, bool) {
r := strings.Split(Code, "-")
if len(r) != 2 {
return nil, false
}
Start, valid := parseLiteral(r[0])
if !valid {
return nil, valid
}
step := strings.Split(r[1], "/")
if len(step) != 2 {
return nil, false
}
End, valid := parseLiteral(step[0])
if !valid {
return nil, valid
}
Step, valid := parseLiteral(step[1])
if !valid {
return nil, valid
}
Values := []int{}
for i := Start; i <= End; i += Step {
Values = append(Values, i)
}
return Values, true
} | cron-schedule.go | 0.696371 | 0.529811 | cron-schedule.go | starcoder |
package schema
import (
"fmt"
"path"
"github.com/ipld/go-ipld-prime"
)
// FUTURE: we also want something *almost* identical to this Validate method,
// but returning a `typed.Node` in the case of no error.
// (Such a method would go in the same package as `typed.Node`, presumably.)
// How do we avoid writing this method twice?
// Maybe both a Validate and Reify method belong in `typed` package,
// and Validate just returns less?
// No... Reify should probably short-circuit sooner?
// Unclear. Guess first step is that we need to decide the intended UX!
// Validate checks node against type t (from type system ts) and returns a
// list of mismatch errors; a nil/empty result means the node conforms.
// Traversal of nested values starts at path "/".
func Validate(ts TypeSystem, t Type, node ipld.Node) []error {
	return validate(ts, t, node, "/")
}
// review: 'ts' param might not actually be necessary; everything relevant can be reached from t so far.
func validate(ts TypeSystem, t Type, node ipld.Node, pth string) []error {
switch t2 := t.(type) {
case TypeBool:
if node.ReprKind() != ipld.ReprKind_Bool {
return []error{fmt.Errorf("Schema match failed: expected type %q (which is kind %v) at path %q, but found kind %v", t2.Name(), t.ReprKind(), pth, node.ReprKind())}
}
return nil
case TypeString:
if node.ReprKind() != ipld.ReprKind_String {
return []error{fmt.Errorf("Schema match failed: expected type %q (which is kind %v) at path %q, but found kind %v", t2.Name(), t.ReprKind(), pth, node.ReprKind())}
}
return nil
case TypeBytes:
if node.ReprKind() != ipld.ReprKind_Bytes {
return []error{fmt.Errorf("Schema match failed: expected type %q (which is kind %v) at path %q, but found kind %v", t2.Name(), t.ReprKind(), pth, node.ReprKind())}
}
return nil
case TypeInt:
if node.ReprKind() != ipld.ReprKind_Int {
return []error{fmt.Errorf("Schema match failed: expected type %q (which is kind %v) at path %q, but found kind %v", t2.Name(), t.ReprKind(), pth, node.ReprKind())}
}
return nil
case TypeFloat:
if node.ReprKind() != ipld.ReprKind_Float {
return []error{fmt.Errorf("Schema match failed: expected type %q (which is kind %v) at path %q, but found kind %v", t2.Name(), t.ReprKind(), pth, node.ReprKind())}
}
return nil
case TypeMap:
if node.ReprKind() != ipld.ReprKind_Map {
return []error{fmt.Errorf("Schema match failed: expected type %q (which is kind %v) at path %q, but found kind %v", t2.Name(), t.ReprKind(), pth, node.ReprKind())}
}
errs := []error(nil)
for itr := node.MapIterator(); !itr.Done(); {
k, v, err := itr.Next()
if err != nil {
return []error{err}
}
// FUTURE: if KeyType is an enum rather than string, do membership check.
ks, _ := k.AsString()
if v.IsNull() {
if !t2.ValueIsNullable() {
errs = append(errs, fmt.Errorf("Schema match failed: map at path %q contains unpermitted null in key %q", pth, ks))
}
} else {
errs = append(errs, validate(ts, t2.ValueType(), v, path.Join(pth, ks))...)
}
}
return errs
case TypeList:
case TypeLink:
// TODO interesting case: would need resolver to keep checking.
case TypeUnion:
// TODO *several* interesting errors
case TypeStruct:
switch t2.tupleStyle {
case false: // as map!
if node.ReprKind() != ipld.ReprKind_Map {
return []error{fmt.Errorf("Schema match failed: expected type %q (which is kind %v) at path %q, but found kind %v", t2.Name(), t.ReprKind(), pth, node.ReprKind())}
}
// TODO loop over em
// TODO REVIEW order strictness questions?
case true: // as array!
}
case TypeEnum:
// TODO another interesting error
}
return nil
} | schema/validate.go | 0.5144 | 0.456168 | validate.go | starcoder |
package output
import (
"github.com/Jeffail/benthos/lib/log"
"github.com/Jeffail/benthos/lib/metrics"
"github.com/Jeffail/benthos/lib/output/writer"
"github.com/Jeffail/benthos/lib/types"
)
//------------------------------------------------------------------------------
// init registers the "dynamodb" output plugin in the package-level
// Constructors table; the description string below is the user-facing
// documentation surfaced by the docs generator.
func init() {
	Constructors[TypeDynamoDB] = TypeSpec{
		constructor: NewDynamoDB,
		description: `
Inserts items into a DynamoDB table.

The field ` + "`string_columns`" + ` is a map of column names to string values,
where the values are
[function interpolated](../config_interpolation.md#functions) per message of a
batch. This allows you to populate string columns of an item by extracting
fields within the document payload or metadata like follows:

` + "``` yaml" + `
string_columns:
  id: ${!json_field:id}
  title: ${!json_field:body.title}
  topic: ${!metadata:kafka_topic}
  full_content: ${!content}
` + "```" + `

The field ` + "`json_map_columns`" + ` is a map of column names to json paths,
where the path is extracted from each document and converted into a map value.
Both an empty path and the path ` + "`.`" + ` are interpreted as the root of the
document. This allows you to populate map columns of an item like follows:

` + "``` yaml" + `
json_map_columns:
  user: path.to.user
  whole_document: .
` + "```" + `

A column name can be empty:

` + "``` yaml" + `
json_map_columns:
  "": .
` + "```" + `

In which case the top level document fields will be written at the root of the
item, potentially overwriting previously defined column values. If a path is not
found within a document the column will not be populated.

### Credentials

By default Benthos will use a shared credentials file when connecting to AWS
services. It's also possible to set them explicitly at the component level,
allowing you to transfer data across accounts. You can find out more
[in this document](../aws.md).`,
	}
}
//------------------------------------------------------------------------------
// NewDynamoDB creates a new DynamoDB output type.
// It builds the writer-level DynamoDB client from the config and wraps it
// in a generic Writer output named "dynamodb".
// NOTE(review): mgr is accepted but unused here — presumably required by
// the common constructor signature; confirm.
func NewDynamoDB(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
	dyn, err := writer.NewDynamoDB(conf.DynamoDB, log, stats)
	if err != nil {
		return nil, err
	}
	return NewWriter(
		"dynamodb", dyn, log, stats,
	)
}
//------------------------------------------------------------------------------ | lib/output/dynamodb.go | 0.794664 | 0.759961 | dynamodb.go | starcoder |
package geom
// Ported to Go from the C++ implementation made by <NAME> (https://github.com/juj/RectangleBinPack/)
import (
"github.com/maxfish/go-libs/pkg/imath"
"math"
)
// RectNode is a single rectangle tracked by the packer: an input rect to
// place, a placed rect, or a free region of the bin.
type RectNode struct {
	Rect // Width and Height include padding
	Index int // This index is used to keep track of which of the input rects this is
	Rotated bool // True if the rect has been rotated 90 degrees
}
// NewRectNode builds an input rect of the given size, tagged with index so
// the caller can map packed results back to its own data.
func NewRectNode(index, width, height int) RectNode {
	var node RectNode
	node.Index = index
	node.W = width
	node.H = height
	return node
}
// NewRectNodeFrom copies b's index and geometry into a fresh node; the
// Rotated flag is deliberately reset to false.
func NewRectNodeFrom(b RectNode) RectNode {
	return RectNode{Index: b.Index, Rect: b.Rect}
}
// MaxRectsBinResult describes the outcome of a Pack call.
// NOTE(review): the Method field is not populated by Pack in this file —
// confirm whether it is set elsewhere or should record the heuristic used.
type MaxRectsBinResult struct {
	PlacedRects []RectNode // Rects that have been placed
	NotPlacedRects []RectNode // Rects that didn't fit in the provided area
	Width int // Total Width used by the rects
	Height int // Total Height used by the rects
	Method FreeRectChoiceHeuristic // Method used to pack the rects
}
// FreeRectChoiceHeuristic selects the scoring rule Pack uses when choosing
// which free rectangle receives each rect.
type FreeRectChoiceHeuristic int
const (
	// BSSF: Positions the rectangle against the short side of a free rectangle into which it fits the best.
	RectBestShortSideFit FreeRectChoiceHeuristic = iota
	// BAF: Positions the rectangle into the smallest free rect into which it fits.
	RectBestAreaFit
	// BL: Does the Tetris placement.
	RectBottomLeftRule
	// BLSF: Positions the rectangle against the long side of a free rectangle into which it fits the best.
	// RectBestLongSideFit
	// CP: Chooses the placement where the rectangle touches other rects as much as possible.
	// RectContactPointRule
)
// MaxRectsBinPacker packs rectangles into a fixed-size bin using the
// MAXRECTS algorithm: free space is tracked as a list of maximal
// (possibly overlapping) free rectangles.
type MaxRectsBinPacker struct {
	binWidth, binHeight int // bin dimensions
	paddingX, paddingY int // padding added to each rect's width/height on placement
	allowRotation bool // whether rects may be rotated 90 degrees to fit
	usedRectangles []RectNode // rects placed so far
	freeRectangles []RectNode // current maximal free rectangles
}
// NewMaxRectsBinPacker creates a packer for a width x height bin. paddingX
// and paddingY are added around every rect; allowRotation permits 90-degree
// rotation when it improves the fit. Initially the whole bin is one free
// rectangle.
func NewMaxRectsBinPacker(width, height int, paddingX, paddingY int, allowRotation bool) *MaxRectsBinPacker {
	return &MaxRectsBinPacker{
		binWidth:       width,
		binHeight:      height,
		paddingX:       paddingX,
		paddingY:       paddingY,
		allowRotation:  allowRotation,
		usedRectangles: []RectNode{},
		freeRectangles: []RectNode{NewRectNode(-1, width, height)},
	}
}
// Packs the passed rects with the chosen heuristic.
// The dimensions of the returned rects will include the padding.
//
// Strategy: greedy global-best — each iteration scores every remaining rect
// against all free rectangles and commits the single best placement.
// Note that the inputRects slice is consumed destructively (re-sliced via
// append) while packing.
func (mr *MaxRectsBinPacker) Pack(inputRects []RectNode, method FreeRectChoiceHeuristic) *MaxRectsBinResult {
	rects := inputRects
	for len(rects) > 0 {
		bestRectIndex := -1
		var bestNode RectNode
		bestScore1 := math.MaxInt32
		bestScore2 := math.MaxInt32
		// Lower scores are better; score2 breaks ties on score1.
		for i := 0; i < len(rects); i++ {
			var score1, score2 int
			newNode := mr.scoreRect(rects[i], method, &score1, &score2)
			if score1 < bestScore1 || (score1 == bestScore1 && score2 < bestScore2) {
				bestScore1 = score1
				bestScore2 = score2
				bestNode = newNode
				bestNode.Index = rects[i].Index
				bestRectIndex = i
			}
		}
		// No remaining rect fits anywhere: stop; leftovers are reported below.
		if bestRectIndex == -1 {
			break
		}
		mr.placeRect(bestNode)
		rects = append(rects[:bestRectIndex], rects[bestRectIndex+1:]...)
	}
	// Put the result together
	result := &MaxRectsBinResult{
		PlacedRects: mr.usedRectangles,
		NotPlacedRects: rects,
	}
	// Compute the total area used by the rects
	for i := 0; i < len(mr.usedRectangles); i++ {
		rect := mr.usedRectangles[i]
		result.Width = imath.Max(result.Width, rect.Right())
		result.Height = imath.Max(result.Height, rect.Bottom())
	}
	return result
}
// placeRect commits a chosen node: every free rectangle it overlaps is
// split into its remaining free pieces, redundant free rects are pruned,
// and the node is recorded as used.
func (mr *MaxRectsBinPacker) placeRect(node RectNode) {
	numRectanglesToProcess := len(mr.freeRectangles)
	for i := 0; i < numRectanglesToProcess; i++ {
		// splitFreeNode reports true when the free rect overlapped the placed
		// node; its split remainders were appended at the tail (and are not
		// re-visited here because the loop bound is decremented), so remove
		// the original and re-check the element that slid into position i.
		if mr.splitFreeNode(mr.freeRectangles[i], node) {
			mr.freeRectangles = append(mr.freeRectangles[:i], mr.freeRectangles[i+1:]...)
			i--
			numRectanglesToProcess--
		}
	}
	mr.pruneFreeList()
	mr.usedRectangles = append(mr.usedRectangles, node)
}
// scoreRect scores how well rect (with padding added) can be placed under
// the given heuristic. Lower is better; score1 is the primary criterion and
// score2 the tie-breaker. When the rect cannot be placed at all, both
// scores are left at math.MaxInt32 and the returned node has zero height.
func (mr *MaxRectsBinPacker) scoreRect(rect RectNode, method FreeRectChoiceHeuristic, score1, score2 *int) RectNode {
	width := rect.W + mr.paddingX
	height := rect.H + mr.paddingY
	rotatedWidth := rect.H + mr.paddingX
	rotatedHeight := rect.W + mr.paddingY
	*score1 = math.MaxInt32
	*score2 = math.MaxInt32
	var newNode RectNode
	// Go switch cases do not fall through, so the explicit breaks that were
	// carried over from the C++ original are unnecessary and removed.
	switch method {
	case RectBestShortSideFit:
		newNode = mr.findPositionForNewNodeBestShortSideFit(width, height, rotatedWidth, rotatedHeight, mr.allowRotation, score1, score2)
	case RectBottomLeftRule:
		newNode = mr.findPositionForNewNodeBottomLeft(width, height, rotatedWidth, rotatedHeight, mr.allowRotation, score1, score2)
	case RectBestAreaFit:
		newNode = mr.findPositionForNewNodeBestAreaFit(width, height, rotatedWidth, rotatedHeight, mr.allowRotation, score1, score2)
	default:
		panic("Unknown free-rect choice heuristic")
	}
	// Cannot fit the current rectangle.
	if newNode.H == 0 {
		*score1 = math.MaxInt32
		*score2 = math.MaxInt32
	}
	return newNode
}
// Occupancy returns the fraction of the bin's surface area covered by the
// rects placed so far (0.0 to 1.0, padding included).
func (mr *MaxRectsBinPacker) Occupancy() float32 {
	used := 0
	for _, rect := range mr.usedRectangles {
		used += rect.W * rect.H
	}
	return float32(used) / float32(mr.binWidth*mr.binHeight)
}
// findPositionForNewNodeBestAreaFit places the rect into the free rectangle
// that leaves the smallest leftover area, breaking ties by the smallest
// short-side leftover. All dimensions already include padding. Returns the
// zero RectNode (H == 0) when nothing fits.
func (mr *MaxRectsBinPacker) findPositionForNewNodeBestAreaFit(width, height, rotatedWidth, rotatedHeight int, rotate bool, bestAreaFit, bestShortSideFit *int) RectNode {
	bestNode := RectNode{}
	*bestAreaFit = math.MaxInt32
	*bestShortSideFit = math.MaxInt32
	for i := 0; i < len(mr.freeRectangles); i++ {
		// NOTE(review): areaFit is computed once from the unrotated
		// width/height and reused for the rotated attempt below; when
		// paddingX != paddingY the rotated footprint differs, so rotated
		// placements may be mis-scored slightly — confirm intended.
		areaFit := mr.freeRectangles[i].W*mr.freeRectangles[i].H - width*height
		// Try to place the rectangle in upright (non-rotated) orientation
		if mr.freeRectangles[i].W >= width && mr.freeRectangles[i].H >= height {
			leftoverH := imath.Abs(mr.freeRectangles[i].W - width)
			leftoverV := imath.Abs(mr.freeRectangles[i].H - height)
			shortSideFit := imath.Min(leftoverH, leftoverV)
			if areaFit < *bestAreaFit || (areaFit == *bestAreaFit && shortSideFit < *bestShortSideFit) {
				bestNode.X = mr.freeRectangles[i].X
				bestNode.Y = mr.freeRectangles[i].Y
				bestNode.W = width
				bestNode.H = height
				*bestShortSideFit = shortSideFit
				*bestAreaFit = areaFit
				bestNode.Rotated = false
			}
		}
		// Rotated attempt, if rotation is allowed and the rotated rect fits.
		if rotate && mr.freeRectangles[i].W >= rotatedWidth && mr.freeRectangles[i].H >= rotatedHeight {
			leftoverH := imath.Abs(mr.freeRectangles[i].W - rotatedWidth)
			leftoverV := imath.Abs(mr.freeRectangles[i].H - rotatedHeight)
			shortSideFit := imath.Min(leftoverH, leftoverV)
			if areaFit < *bestAreaFit || (areaFit == *bestAreaFit && shortSideFit < *bestShortSideFit) {
				bestNode.X = mr.freeRectangles[i].X
				bestNode.Y = mr.freeRectangles[i].Y
				bestNode.W = rotatedWidth
				bestNode.H = rotatedHeight
				*bestShortSideFit = shortSideFit
				*bestAreaFit = areaFit
				bestNode.Rotated = true
			}
		}
	}
	return bestNode
}
// findPositionForNewNodeBestShortSideFit places the rect into the free
// rectangle with the smallest leftover on its shorter side, breaking ties
// by the smallest long-side leftover. All dimensions include padding.
// Returns the zero RectNode (H == 0) when nothing fits.
func (mr *MaxRectsBinPacker) findPositionForNewNodeBestShortSideFit(width, height, rotatedWidth, rotatedHeight int, rotate bool, bestShortSideFit, bestLongSideFit *int) RectNode {
	bestNode := RectNode{}
	*bestShortSideFit = math.MaxInt32
	*bestLongSideFit = math.MaxInt32
	for i := 0; i < len(mr.freeRectangles); i++ {
		// Try to place the rectangle in upright (non-rotated) orientation
		if mr.freeRectangles[i].W >= width && mr.freeRectangles[i].H >= height {
			leftoverH := imath.Abs(mr.freeRectangles[i].W - width)
			leftoverV := imath.Abs(mr.freeRectangles[i].H - height)
			shortSideFit := imath.Min(leftoverH, leftoverV)
			longSideFit := imath.Max(leftoverH, leftoverV)
			if shortSideFit < *bestShortSideFit || (shortSideFit == *bestShortSideFit && longSideFit < *bestLongSideFit) {
				bestNode.X = mr.freeRectangles[i].X
				bestNode.Y = mr.freeRectangles[i].Y
				bestNode.W = width
				bestNode.H = height
				*bestShortSideFit = shortSideFit
				*bestLongSideFit = longSideFit
				bestNode.Rotated = false
			}
		}
		// Rotated attempt, if rotation is allowed and the rotated rect fits.
		if rotate && mr.freeRectangles[i].W >= rotatedWidth && mr.freeRectangles[i].H >= rotatedHeight {
			flippedLeftoverHoriz := imath.Abs(mr.freeRectangles[i].W - rotatedWidth)
			flippedLeftoverVert := imath.Abs(mr.freeRectangles[i].H - rotatedHeight)
			flippedShortSideFit := imath.Min(flippedLeftoverHoriz, flippedLeftoverVert)
			flippedLongSideFit := imath.Max(flippedLeftoverHoriz, flippedLeftoverVert)
			if flippedShortSideFit < *bestShortSideFit || (flippedShortSideFit == *bestShortSideFit && flippedLongSideFit < *bestLongSideFit) {
				bestNode.X = mr.freeRectangles[i].X
				bestNode.Y = mr.freeRectangles[i].Y
				bestNode.W = rotatedWidth
				bestNode.H = rotatedHeight
				*bestShortSideFit = flippedShortSideFit
				*bestLongSideFit = flippedLongSideFit
				bestNode.Rotated = true
			}
		}
	}
	return bestNode
}
// findPositionForNewNodeBottomLeft does the "Tetris" placement: it prefers
// the position whose top edge (Y + height) is lowest, breaking ties by the
// leftmost X. All dimensions include padding. Returns the zero RectNode
// (H == 0) when nothing fits.
func (mr *MaxRectsBinPacker) findPositionForNewNodeBottomLeft(width, height, rotatedWidth, rotatedHeight int, rotate bool, bestY, bestX *int) RectNode {
	bestNode := RectNode{}
	*bestX = math.MaxInt32
	*bestY = math.MaxInt32
	for i := 0; i < len(mr.freeRectangles); i++ {
		// Try to place the rectangle in upright (non-rotated) orientation
		if mr.freeRectangles[i].W >= width && mr.freeRectangles[i].H >= height {
			topSideY := mr.freeRectangles[i].Y + height
			if topSideY < *bestY || (topSideY == *bestY && mr.freeRectangles[i].X < *bestX) {
				bestNode.X = mr.freeRectangles[i].X
				bestNode.Y = mr.freeRectangles[i].Y
				bestNode.W = width
				bestNode.H = height
				*bestY = topSideY
				*bestX = mr.freeRectangles[i].X
				bestNode.Rotated = false
			}
		}
		// Rotated attempt, if rotation is allowed and the rotated rect fits.
		if rotate && mr.freeRectangles[i].W >= rotatedWidth && mr.freeRectangles[i].H >= rotatedHeight {
			topSideY := mr.freeRectangles[i].Y + rotatedHeight
			if topSideY < *bestY || (topSideY == *bestY && mr.freeRectangles[i].X < *bestX) {
				bestNode.X = mr.freeRectangles[i].X
				bestNode.Y = mr.freeRectangles[i].Y
				bestNode.W = rotatedWidth
				bestNode.H = rotatedHeight
				*bestY = topSideY
				*bestX = mr.freeRectangles[i].X
				bestNode.Rotated = true
			}
		}
	}
	return bestNode
}
// splitFreeNode splits freeNode around usedNode. It returns false when the
// two do not intersect; otherwise up to four new free rectangles (the
// strips above, below, left and right of usedNode) are appended to the
// free list and the caller is expected to remove freeNode itself.
func (mr *MaxRectsBinPacker) splitFreeNode(freeNode, usedNode RectNode) bool {
	if !usedNode.Rect.Intersect(freeNode.Rect) {
		return false
	}
	// Horizontal overlap: carve the strips above and below usedNode.
	if usedNode.X < freeNode.X+freeNode.W && usedNode.X+usedNode.W > freeNode.X {
		// New node at the top side of the used node
		if usedNode.Y > freeNode.Y && usedNode.Y < freeNode.Y+freeNode.H {
			newNode := NewRectNodeFrom(freeNode)
			newNode.H = usedNode.Y - newNode.Y
			mr.freeRectangles = append(mr.freeRectangles, newNode)
		}
		// New node at the bottom side of the used node
		if usedNode.Y+usedNode.H < freeNode.Y+freeNode.H {
			newNode := NewRectNodeFrom(freeNode)
			newNode.Y = usedNode.Y + usedNode.H
			newNode.H = freeNode.Y + freeNode.H - (usedNode.Y + usedNode.H)
			mr.freeRectangles = append(mr.freeRectangles, newNode)
		}
	}
	// Vertical overlap: carve the strips left and right of usedNode.
	if usedNode.Y < freeNode.Y+freeNode.H && usedNode.Y+usedNode.H > freeNode.Y {
		// New node at the left side of the used node.
		if usedNode.X > freeNode.X && usedNode.X < freeNode.X+freeNode.W {
			newNode := NewRectNodeFrom(freeNode)
			newNode.W = usedNode.X - newNode.X
			mr.freeRectangles = append(mr.freeRectangles, newNode)
		}
		// New node at the right side of the used node
		if usedNode.X+usedNode.W < freeNode.X+freeNode.W {
			newNode := NewRectNodeFrom(freeNode)
			newNode.X = usedNode.X + usedNode.W
			newNode.W = freeNode.X + freeNode.W - (usedNode.X + usedNode.W)
			mr.freeRectangles = append(mr.freeRectangles, newNode)
		}
	}
	return true
}
// pruneFreeList removes redundant entries from the free list: any free
// rectangle fully contained inside another free rectangle is dropped,
// since the larger one already represents that space.
// (The previous comment, "Find the next rectangle that packs best",
// described a different routine.)
func (mr *MaxRectsBinPacker) pruneFreeList() {
	n := len(mr.freeRectangles)
	for i := 0; i < n; i++ {
		for j := i + 1; j < n; j++ {
			rect1 := mr.freeRectangles[i]
			rect2 := mr.freeRectangles[j]
			// rect1 is redundant: remove it and restart the inner scan for
			// the element that slid into position i.
			if rect1.IsContainedIn(rect2.Rect) {
				mr.freeRectangles = append(mr.freeRectangles[:i], mr.freeRectangles[i+1:]...)
				i--
				n--
				break
			}
			// rect2 is redundant: remove it and re-check index j.
			if rect2.IsContainedIn(rect1.Rect) {
				mr.freeRectangles = append(mr.freeRectangles[:j], mr.freeRectangles[j+1:]...)
				j--
				n--
			}
		}
	}
}
package gofuzzheaders
import (
"errors"
"fmt"
"reflect"
)
// ConsumeFuzzer consumes a fuzz-provided byte slice, handing out typed
// values (ints, strings, bools, byte slices) one at a time via a cursor.
type ConsumeFuzzer struct {
	data []byte // the raw fuzz input
	CommandPart []byte // one byte per call, selecting the command (set by Split)
	RestOfArray []byte // remaining bytes used as per-call parameters (set by Split)
	NumberOfCalls int // number of command calls encoded in the input (set by Split)
	position int // read cursor into data
}
// IsDivisibleBy reports whether n is evenly divisible by divisibleby.
// Note: divisibleby must be non-zero or the modulo panics.
func IsDivisibleBy(n int, divisibleby int) bool {
	return n%divisibleby == 0
}
// NewConsumer wraps fuzzData in a ConsumeFuzzer positioned at the first byte.
func NewConsumer(fuzzData []byte) *ConsumeFuzzer {
	return &ConsumeFuzzer{data: fuzzData, position: 0}
}
/*
SplitToSeveral splits the input into 3 chunks:
1: the first byte - Is converted to an int, and
that int determines the number of command-line
calls the fuzzer will make.
2: The next n bytes where n is equal to the int from
the first byte. These n bytes are converted to
a corresponding command and represent which
commands will be called.
3: The rest of the data array should have a length
that is divisible by the number of calls.
This part is split up into equally large chunks,
and each chunk is used as parameters for the
corresponding command.
*/
// Split implements the 3-chunk partition described above. minCalls and
// maxCalls bound the acceptable call count read from the first byte.
// On success CommandPart, RestOfArray and NumberOfCalls are populated.
func (f *ConsumeFuzzer) Split(minCalls, maxCalls int) error {
	if len(f.data) == 0 {
		return errors.New("Could not split")
	}
	numberOfCalls := int(f.data[0])
	// numberOfCalls must be positive even if minCalls is 0, otherwise the
	// divisibility check below would divide by zero.
	if numberOfCalls < 1 || numberOfCalls < minCalls || numberOfCalls > maxCalls {
		return errors.New("Bad number of calls")
	}
	// Need the count byte + numberOfCalls command bytes + at least
	// numberOfCalls parameter bytes.
	if len(f.data) < numberOfCalls+numberOfCalls+1 {
		return errors.New("Length of data does not match required parameters")
	}
	// Define part 2 and 3 of the data array
	commandPart := f.data[1 : numberOfCalls+1]
	restOfArray := f.data[numberOfCalls+1:]
	// Just a small check. It is necessary
	if len(commandPart) != numberOfCalls {
		return errors.New("Length of commandPart does not match number of calls")
	}
	// Check if restOfArray is divisible by numberOfCalls
	// (the previous message here wrongly mentioned commandPart).
	if !IsDivisibleBy(len(restOfArray), numberOfCalls) {
		return errors.New("Length of restOfArray is not divisible by number of calls")
	}
	f.CommandPart = commandPart
	f.RestOfArray = restOfArray
	f.NumberOfCalls = numberOfCalls
	return nil
}
// GenerateStruct populates the settable fields of targetStruct (which must
// be a pointer to a struct) with values consumed from the fuzzer's data.
// Supported field types are string, bool, int and []byte; fields of any
// other type (including []string) are skipped. An error is returned when
// the data runs out or a nested struct field is encountered.
func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error {
	if f.position >= len(f.data) {
		return errors.New("Not enough bytes to proceed")
	}
	e := reflect.ValueOf(targetStruct).Elem()
	for i := 0; i < e.NumField(); i++ {
		// Nested struct fields are not supported; previously this was a
		// debug panic, which is inappropriate for library code.
		if e.Field(i).Kind() == reflect.Struct {
			return fmt.Errorf("nested struct fields are not supported")
		}
		fieldtype := e.Type().Field(i).Type.String()
		switch fieldtype {
		case "string":
			chunk, err := f.GetString()
			if err != nil {
				return err
			}
			e.Field(i).SetString(chunk)
		case "bool":
			newBool, err := f.GetBool()
			if err != nil {
				return err
			}
			e.Field(i).SetBool(newBool)
		case "int":
			newInt, err := f.GetInt()
			if err != nil {
				return err
			}
			e.Field(i).SetInt(int64(newInt))
		case "[]byte":
			newBytes, err := f.GetBytes()
			if err != nil {
				return err
			}
			e.Field(i).SetBytes(newBytes)
		default:
			// Unsupported field types are left at their zero value.
			continue
		}
	}
	return nil
}
// GetInt consumes a single byte and returns it as an int (0-255).
func (f *ConsumeFuzzer) GetInt() (int, error) {
	if f.position >= len(f.data) {
		return 0, errors.New("Not enough bytes to create int")
	}
	value := int(f.data[f.position])
	f.position++
	return value, nil
}
// GetBytes consumes a one-byte length prefix followed by that many bytes
// and returns the payload.
// Fixes two off-by-ones in the previous version: the returned slice
// included the length byte itself, and the bound check rejected payloads
// that ended exactly at the end of the data.
func (f *ConsumeFuzzer) GetBytes() ([]byte, error) {
	if f.position >= len(f.data) {
		return nil, errors.New("Not enough bytes to create byte array")
	}
	length := int(f.data[f.position])
	// The payload starts after the length byte.
	start := f.position + 1
	if start+length > len(f.data) {
		return nil, errors.New("Not enough bytes to create byte array")
	}
	b := f.data[start : start+length]
	f.position = start + length
	return b, nil
}
// GetString consumes a one-byte length prefix followed by that many bytes
// and returns them as a string.
// Fixes the same off-by-ones as GetBytes: the previous version included
// the length byte in the returned string and could never consume the
// final byte of the data.
// NOTE(review): the literal "nil" string is returned alongside errors for
// backwards compatibility with the original code — confirm callers ignore it.
func (f *ConsumeFuzzer) GetString() (string, error) {
	if f.position >= len(f.data) {
		return "nil", errors.New("Not enough bytes to create string")
	}
	length := int(f.data[f.position])
	// The payload starts after the length byte.
	start := f.position + 1
	if start+length > len(f.data) {
		return "nil", errors.New("Not enough bytes to create string")
	}
	str := string(f.data[start : start+length])
	f.position = start + length
	return str, nil
}
func (f *ConsumeFuzzer) GetBool() (bool, error) {
if f.position>=len(f.data) {
return false, errors.New("Not enough bytes to create bool")
}
if IsDivisibleBy(int(f.data[f.position]), 2) {
f.position++
return true, nil
}else{
f.position++
return false, nil
}
} | vendor/github.com/AdamKorcz/go-fuzz-headers/consumer.go | 0.600305 | 0.408395 | consumer.go | starcoder |
package base
import (
"github.com/corbym/gogiven/testdata"
"sync"
)
// Some holds the test context and has a reference to the test's testing.T.
// All fields are guarded by the embedded RWMutex for concurrent access.
type Some struct {
	sync.RWMutex
	// globalTestingT is the real testing.T; failures/skips recorded in
	// testMetaData are replayed onto it.
	globalTestingT TestingT
	// testMetaData captures pass/fail/skip state without acting on it.
	testMetaData *TestMetaData
	// testTitle is the name of the test.
	testTitle string
	// interestingGivens holds key/value pairs recorded by Given functions.
	interestingGivens testdata.InterestingGivens
	// capturedIO holds values captured during When actions.
	capturedIO testdata.CapturedIO
	// parsedTestContent is the test source parsed to natural language.
	parsedTestContent ParsedTestContent
}
// NewSome creates a new Some context. This is an internal function that was
// exported for testing. Each supplied GivenData function is applied in
// order to the freshly created interestingGivens map.
func NewSome(
	globalTestingT TestingT,
	testTitle string,
	testingT *TestMetaData,
	givenWhenThen ParsedTestContent,
	givenFunc ...GivenData) *Some {
	some := &Some{
		globalTestingT:    globalTestingT,
		testTitle:         testTitle,
		testMetaData:      testingT,
		parsedTestContent: givenWhenThen,
		interestingGivens: make(testdata.InterestingGivens),
		capturedIO:        make(testdata.CapturedIO),
	}
	// Ranging over an empty variadic slice is a no-op, so the previous
	// len(givenFunc) > 0 guard was redundant and has been removed.
	for _, someGivenFunc := range givenFunc {
		someGivenFunc(some.interestingGivens)
	}
	return some
}
// TestTitle is the name of the test.
// The read lock makes the getter safe under concurrent access.
func (some *Some) TestTitle() string {
	some.RLock()
	defer some.RUnlock()
	return some.testTitle
}
// ParsedTestContent holds a parsed test as an array string.
// All lines of the test func will be listed, from the first call to the first Given
// up to the end of the test func and converted to (as close as possible) natural language.
// The read lock makes the getter safe under concurrent access.
func (some *Some) ParsedTestContent() ParsedTestContent {
	some.RLock()
	defer some.RUnlock()
	return some.parsedTestContent
}
// TestMetaData is an interface that mimics testingT but stores the test state rather than act on it.
// Gogivens will act on the meta data's behalf via globalTestingT (the "real" testing.T for the test).
// The read lock makes the getter safe under concurrent access.
func (some *Some) TestMetaData() *TestMetaData {
	some.RLock()
	defer some.RUnlock()
	return some.testMetaData
}
// CapturedIO is a convenience method for retrieving the CapturedIO map.
// Note the map itself is returned, not a copy: callers share its contents.
func (some *Some) CapturedIO() map[interface{}]interface{} {
	some.RLock()
	defer some.RUnlock()
	return some.capturedIO
}
// InterestingGivens is a convenience method for retrieving the InterestingGivens map.
// Note the map itself is returned, not a copy: callers share its contents.
func (some *Some) InterestingGivens() map[interface{}]interface{} {
	some.RLock()
	defer some.RUnlock()
	return some.interestingGivens
}
// When - call When when you want to perform Some action, call a function,
// or perform a test operation. Every supplied action is invoked in order,
// each receiving the shared captured-IO and interesting-givens maps.
// (Previously only the first action was executed — resolving the old TODO —
// and passing no actions caused an index-out-of-range panic.)
func (some *Some) When(action ...CapturedIOGivenData) *Some {
	some.Lock()
	defer some.Unlock()
	for _, act := range action {
		act(some.capturedIO, some.interestingGivens)
	}
	return some
}
// Then is a function that executes the given function and asserts whether the test has failed.
// It can be called in a table test (for loop). Provide a function in which assertions will be made.
// Use the testMetaData typed var in place of testing.T.
// The test state is recorded in testMetaData type and goGiven fails the test if the error methods (ErrorF etc)
// were called after the function exits.
func (some *Some) Then(assertions TestingWithGiven) *Some {
	some.Lock()
	defer some.Unlock()
	if !some.testMetaData.Skipped() {
		assertions(some.testMetaData, some.capturedIO, some.interestingGivens)
		if some.testMetaData.Failed() {
			globalTestingT := some.globalTestingT
			globalTestingT.Helper()
			// Use an explicit %s verb: the captured output is data, and
			// passing it as the format string would make any '%' in it be
			// misinterpreted as a formatting directive (and fails go vet).
			globalTestingT.Errorf("%s", some.testMetaData.TestOutput())
		}
	}
	return some
}
// SkippingThisOne still records we have a skipped test in our test output
// generator, then skips the real test via the global testing.T.
// NOTE(review): with the real *testing.T, Skipf stops the test goroutine,
// so the trailing return may not be reached — confirm for other TestingT
// implementations.
func (some *Some) SkippingThisOne(reason string, args ...interface{}) *Some {
	some.testMetaData.Skipf(reason, args...)
	some.globalTestingT.Helper()
	some.globalTestingT.Skipf(reason, args...) // skip so we don't worry about it
	return some
}
//SkippingThisOneIf skips if the condition is true, and still records we have a skipped tests in our test output generator.
// This will be best used in a table test (range) when running sub-tests, since in a main test the entire test will be skipped
// and the condition pointless.
func (some *Some) SkippingThisOneIf(why func(someData ...interface{}) bool, reason string, args ...interface{}) *Some {
if why() {
some.testMetaData.Skipf(reason, args...)
some.globalTestingT.Helper()
some.globalTestingT.Skipf(reason, args...) // skip so we don't worry about it
}
return some
} | base/some.go | 0.525856 | 0.458288 | some.go | starcoder |
package v1alpha1
// The *Expansion interfaces below follow the standard Kubernetes
// code-generator pattern: each empty interface is embedded into the
// corresponding generated lister so that custom methods can be added
// without modifying generated code.
// NOTE(review): this file appears to be machine-generated (lister-gen
// style); prefer regenerating over editing by hand.

// NamespaceListerExpansion allows custom methods to be added to
// NamespaceLister.
type NamespaceListerExpansion interface{}
// NamespaceNamespaceListerExpansion allows custom methods to be added to
// NamespaceNamespaceLister.
type NamespaceNamespaceListerExpansion interface{}
// NamespaceAuthorizationRuleListerExpansion allows custom methods to be added to
// NamespaceAuthorizationRuleLister.
type NamespaceAuthorizationRuleListerExpansion interface{}
// NamespaceAuthorizationRuleNamespaceListerExpansion allows custom methods to be added to
// NamespaceAuthorizationRuleNamespaceLister.
type NamespaceAuthorizationRuleNamespaceListerExpansion interface{}
// NamespaceDisasterRecoveryConfigListerExpansion allows custom methods to be added to
// NamespaceDisasterRecoveryConfigLister.
type NamespaceDisasterRecoveryConfigListerExpansion interface{}
// NamespaceDisasterRecoveryConfigNamespaceListerExpansion allows custom methods to be added to
// NamespaceDisasterRecoveryConfigNamespaceLister.
type NamespaceDisasterRecoveryConfigNamespaceListerExpansion interface{}
// NamespaceNetworkRuleSetListerExpansion allows custom methods to be added to
// NamespaceNetworkRuleSetLister.
type NamespaceNetworkRuleSetListerExpansion interface{}
// NamespaceNetworkRuleSetNamespaceListerExpansion allows custom methods to be added to
// NamespaceNetworkRuleSetNamespaceLister.
type NamespaceNetworkRuleSetNamespaceListerExpansion interface{}
// QueueListerExpansion allows custom methods to be added to
// QueueLister.
type QueueListerExpansion interface{}
// QueueNamespaceListerExpansion allows custom methods to be added to
// QueueNamespaceLister.
type QueueNamespaceListerExpansion interface{}
// QueueAuthorizationRuleListerExpansion allows custom methods to be added to
// QueueAuthorizationRuleLister.
type QueueAuthorizationRuleListerExpansion interface{}
// QueueAuthorizationRuleNamespaceListerExpansion allows custom methods to be added to
// QueueAuthorizationRuleNamespaceLister.
type QueueAuthorizationRuleNamespaceListerExpansion interface{}
// SubscriptionListerExpansion allows custom methods to be added to
// SubscriptionLister.
type SubscriptionListerExpansion interface{}
// SubscriptionNamespaceListerExpansion allows custom methods to be added to
// SubscriptionNamespaceLister.
type SubscriptionNamespaceListerExpansion interface{}
// SubscriptionRuleListerExpansion allows custom methods to be added to
// SubscriptionRuleLister.
type SubscriptionRuleListerExpansion interface{}
// SubscriptionRuleNamespaceListerExpansion allows custom methods to be added to
// SubscriptionRuleNamespaceLister.
type SubscriptionRuleNamespaceListerExpansion interface{}
// TopicListerExpansion allows custom methods to be added to
// TopicLister.
type TopicListerExpansion interface{}
// TopicNamespaceListerExpansion allows custom methods to be added to
// TopicNamespaceLister.
type TopicNamespaceListerExpansion interface{}
// TopicAuthorizationRuleListerExpansion allows custom methods to be added to
// TopicAuthorizationRuleLister.
type TopicAuthorizationRuleListerExpansion interface{}
// TopicAuthorizationRuleNamespaceListerExpansion allows custom methods to be added to
// TopicAuthorizationRuleNamespaceLister.
type TopicAuthorizationRuleNamespaceListerExpansion interface{}
package uniqrode
import (
"fmt"
"bytes"
"errors"
)
// UniQRode renders a QR-code module matrix as Unicode block characters.
type UniQRode struct {
	// should we draw QR-code in negative
	Invert bool
	// mode holds the patch size and symbol table chosen at construction.
	mode asciiMapping
	// data is the QR-code module matrix; presumably true marks a filled
	// module — confirm against the caller.
	data *[][]bool
}
// asciiMapping stores a symbol map table and the "resolution" it encodes.
type asciiMapping struct {
	// "resolution" of the resulting picture:
	// how many modules (stepx wide by stepy tall) collapse into one char;
	// depends on the mode.
	stepx int
	stepy int
	// exact mapping used for drawing (bit-mask value -> character)
	// NOTE(review): maps are already reference types; the *map pointer is
	// unusual — confirm it is needed.
	mapTable *map[int]string
}
// Several modes to draw the QR code: mode 1 uses one symbol per module,
// mode 2 packs two vertically-stacked modules per symbol (half blocks),
// mode 3 packs a 2x2 patch per symbol (quadrant blocks).
var modes = map[int]asciiMapping {
	1: {stepx: 1, stepy: 1, mapTable: &u_symbol_mappings_1x1},
	2: {stepx: 1, stepy: 2, mapTable: &u_symbol_mappings_1x2},
	3: {stepx: 2, stepy: 2, mapTable: &u_symbol_mappings_2x2},
}
// Map for chars. Main part will construct integer from bit mask, i.e.
// {{0, 0}, {1, 1}} will become 3, and {{0, 1}, {1, 1}} - 7.
// Each table maps that bit-mask integer to the Unicode block-drawing
// character whose filled quadrants match the set bits.
var u_symbol_mappings_1x1 = map[int]string {
	0: "  ",             // Empty block
	1: "\u2588\u2588", // Full block
}
var u_symbol_mappings_1x2 = map[int]string {
	0: " ",      // | | Empty block
	1: "\u2584", // |▄| Lower half block
	2: "\u2580", // |▀| Upper half block
	3: "\u2588", // |█| Full block
}
var u_symbol_mappings_2x2 = map[int]string {
	0: " ",       // | | Empty block
	1: "\u2597", // |▗| Quadrant lower right
	2: "\u2596", // |▖| Quadrant lower left
	3: "\u2584", // |▄| Lower half block
	4: "\u259D", // |▝| Quadrant upper right
	5: "\u2590", // |▐| Right half block
	6: "\u259E", // |▞| Quadrant upper right and lower left
	7: "\u259F", // |▟| Quadrant upper right and lower left and lower right
	8: "\u2598", // |▘| Quadrant upper left
	9: "\u259A", // |▚| Quadrant upper left and lower right
	10: "\u258C", // |▌| Left half block
	11: "\u2599", // |▙| Quadrant upper left and lower left and lower right
	12: "\u2580", // |▀| Upper half block
	13: "\u259C", // |▜| Quadrant upper left and upper right and lower right
	14: "\u259B", // |▛| Quadrant upper left and upper right and lower left
	15: "\u2588", // |█| Full block
}
// btou converts a bool to an int: true yields 1, false yields 0.
func btou(b bool) int {
	if !b {
		return 0
	}
	return 1
}
// New is the constructor for UniQRode. mode selects the character mapping
// (see the modes map: 1 = 1x1, 2 = 1x2, 3 = 2x2 modules per character),
// invert_colors draws the code in negative, and data is the QR-code module
// matrix (true = dark). An error is returned for an unknown mode.
func New(mode int, invert_colors bool, data *[][]bool) (*UniQRode, error) {
	// Worth checking first that the requested drawing mode exists.
	// (Fix: dropped a stray trailing semicolon that gofmt rejects.)
	m, present := modes[mode]
	if !present {
		msg := fmt.Sprintf("No such mode - %d", mode)
		return nil, errors.New(msg)
	}
	u := &UniQRode{
		Invert: invert_colors,
		mode:   m,
		data:   data,
	}
	return u, nil
}
// Draw is the only public method of UniQRode. It walks the module matrix in
// patches of stepy rows by stepx columns, converts every patch to a character
// via the mapping table chosen in the constructor, and returns the assembled
// picture with a newline after each row of patches.
func (u *UniQRode) Draw() string {
	var out bytes.Buffer
	rows := len(*u.data)
	for y := 0; y+u.mode.stepy <= rows; y += u.mode.stepy {
		cols := len((*u.data)[y])
		for x := 0; x+u.mode.stepx <= cols; x += u.mode.stepx {
			patch := u.getPatch(x, y)
			out.WriteString(u.convertChar(&patch))
		}
		out.WriteString("\n")
	}
	return out.String()
}
// getPatch copies the stepx * stepy sub-matrix whose top-left corner sits at
// (startX, startY) out of the QR data.
func (u *UniQRode) getPatch(startX int, startY int) [][]bool {
	patch := make([][]bool, u.mode.stepy)
	for dy := range patch {
		row := make([]bool, u.mode.stepx)
		for dx := range row {
			row[dx] = (*u.data)[startY+dy][startX+dx]
		}
		patch[dy] = row
	}
	return patch
}
// Convert patch to single int.
// Shifting bits, starting from bottom-right corner.
func (u *UniQRode) convertChar(pixels *[][]bool) string {
symbol_key := 0;
// flatten array and convert the result to int
for _, row := range (*pixels) {
for _, val := range row {
// "bool != bool" is XOR
symbol_key = (symbol_key << 1) + btou(u.Invert != val)
}
}
return (*u.mode.mapTable)[symbol_key]
} | ascii_mapper/ascii_mapper.go | 0.707 | 0.530541 | ascii_mapper.go | starcoder |
package pflag
import "strconv"
// -- float32 Value
type float32Value float32

// newFloat32Value seeds *p with val and returns p viewed as a *float32Value,
// so the flag machinery can write through to the caller's variable.
func newFloat32Value(val float32, p *float32) *float32Value {
	*p = val
	return (*float32Value)(p)
}

// Set parses s as a 32-bit float and stores the result. Note that the parsed
// value (zero on failure) is stored even when parsing fails; the parse error
// is returned to the caller.
func (f *float32Value) Set(s string) error {
	parsed, err := strconv.ParseFloat(s, 32)
	*f = float32Value(parsed)
	return err
}

// Type reports the flag's type name for help output.
func (f *float32Value) Type() string { return "float32" }

// String renders the value in the shortest form that round-trips at 32 bits.
func (f *float32Value) String() string {
	return strconv.FormatFloat(float64(*f), 'g', -1, 32)
}
// float32Conv parses sval as a 32-bit float and returns it boxed as a
// float32. On parse failure it returns 0 and the parse error.
func float32Conv(sval string) (interface{}, error) {
	parsed, err := strconv.ParseFloat(sval, 32)
	if err == nil {
		return float32(parsed), nil
	}
	return 0, err
}
// GetFloat32 return the float32 value of a flag with the given name.
// It fails if the flag does not exist or is not of type float32.
func (f *FlagSet) GetFloat32(name string) (float32, error) {
	val, err := f.getFlagType(name, "float32", float32Conv)
	if err != nil {
		return 0, err
	}
	return val.(float32), nil
}
// Float32Var defines a float32 flag with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag.
func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
	f.VarP(newFloat32Value(value, p), name, "", usage)
}
// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
	f.VarP(newFloat32Value(value, p), name, shorthand, usage)
}
// Float32Var defines a float32 flag with specified name, default value, and usage string.
// The argument p points to a float32 variable in which to store the value of the flag.
// This package-level variant registers the flag on CommandLine, the default
// global FlagSet.
func Float32Var(p *float32, name string, value float32, usage string) {
	CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
}
// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
	CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
}
// Float32 defines a float32 flag with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag.
func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
	p := new(float32)
	f.Float32VarP(p, name, "", value, usage)
	return p
}
// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
	p := new(float32)
	f.Float32VarP(p, name, shorthand, value, usage)
	return p
}
// Float32 defines a float32 flag with specified name, default value, and usage string.
// The return value is the address of a float32 variable that stores the value of the flag.
// This package-level variant registers on CommandLine, the default global FlagSet.
func Float32(name string, value float32, usage string) *float32 {
	return CommandLine.Float32P(name, "", value, usage)
}
// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
func Float32P(name, shorthand string, value float32, usage string) *float32 {
	return CommandLine.Float32P(name, shorthand, value, usage)
}
package iso20022
// Chain of parties involved in the settlement of a transaction, including receipts and deliveries, book transfers, treasury deals, or other activities, resulting in the movement of a security or amount of money from one account to another.
type SettlementParties11 struct {
// First party in the settlement chain. In a plain vanilla settlement, it is the Central Securities Depository where the counterparty requests to receive the financial instrument or from where the counterparty delivers the financial instruments.
Depository *PartyIdentification48 `xml:"Dpstry,omitempty"`
// Party that, in a settlement chain interacts with the depository.
Party1 *PartyIdentificationAndAccount42 `xml:"Pty1,omitempty"`
// Party that, in a settlement chain interacts with the party 1.
Party2 *PartyIdentificationAndAccount42 `xml:"Pty2,omitempty"`
// Party that, in a settlement chain interacts with the party 2.
Party3 *PartyIdentificationAndAccount42 `xml:"Pty3,omitempty"`
// Party that, in a settlement chain interacts with the party 3.
Party4 *PartyIdentificationAndAccount42 `xml:"Pty4,omitempty"`
// Party that, in a settlement chain interacts with the party 4.
Party5 *PartyIdentificationAndAccount42 `xml:"Pty5,omitempty"`
}
// AddDepository allocates the Depository component and returns it so the
// caller can populate it.
func (s *SettlementParties11) AddDepository() *PartyIdentification48 {
	s.Depository = new(PartyIdentification48)
	return s.Depository
}
// AddParty1 allocates Party1 and returns it for population.
func (s *SettlementParties11) AddParty1() *PartyIdentificationAndAccount42 {
	s.Party1 = new(PartyIdentificationAndAccount42)
	return s.Party1
}
// AddParty2 allocates Party2 and returns it for population.
func (s *SettlementParties11) AddParty2() *PartyIdentificationAndAccount42 {
	s.Party2 = new(PartyIdentificationAndAccount42)
	return s.Party2
}
// AddParty3 allocates Party3 and returns it for population.
func (s *SettlementParties11) AddParty3() *PartyIdentificationAndAccount42 {
	s.Party3 = new(PartyIdentificationAndAccount42)
	return s.Party3
}
// AddParty4 allocates Party4 and returns it for population.
func (s *SettlementParties11) AddParty4() *PartyIdentificationAndAccount42 {
	s.Party4 = new(PartyIdentificationAndAccount42)
	return s.Party4
}
// AddParty5 allocates Party5 and returns it for population.
func (s *SettlementParties11) AddParty5() *PartyIdentificationAndAccount42 {
	s.Party5 = new(PartyIdentificationAndAccount42)
	return s.Party5
}
package transaction
/*
Author - <NAME>
Date - 11th October 2020
RFC3261 - SIP: Session Initiation Protocol
https://tools.ietf.org/html/rfc3261#section-17.1
Client Transaction
The client transaction provides its functionality through the
maintenance of a state machine.
The TU communicates with the client transaction through a simple
interface. When the TU wishes to initiate a new transaction, it
creates a client transaction and passes it the SIP request to send
and an IP address, port, and transport to which to send it. The
client transaction begins execution of its state machine. Valid
responses are passed up to the TU from the client transaction.
There are two types of client transaction state machines, depending
on the method of the request passed by the TU. One handles client
transactions for INVITE requests. This type of machine is referred
to as an INVITE client transaction. Another type handles client
transactions for all requests except INVITE and ACK. This is
referred to as a non-INVITE client transaction. There is no client
transaction for ACK. If the TU wishes to send an ACK, it passes one
directly to the transport layer for transmission.
The INVITE transaction is different from those of other methods
because of its extended duration. Normally, human input is required
in order to respond to an INVITE. The long delays expected for
sending a response argue for a three-way handshake. On the other
hand, requests of other methods are expected to complete rapidly.
Because of the non-INVITE transaction's reliance on a two-way
handshake, TUs SHOULD respond immediately to non-INVITE requests. */
import (
"github.com/KalbiProject/Kalbi/interfaces"
"github.com/KalbiProject/Kalbi/log"
"github.com/KalbiProject/Kalbi/sip/message"
"github.com/KalbiProject/Kalbi/sip/method"
"github.com/looplab/fsm"
"time"
)
// Event names used by the client transaction finite state machines.
const (
	clientInputRequest = "client_input_request"
	clientInput1xx = "client_input_1xx"
	clientInput2xx = "client_input_2xx"
	clientInput300Plus = "client_input_300_plus"
	clientInputTimerA = "client_input_timer_a"
	clientInputTimerB = "client_input_timer_b"
	clientInputTimerD = "client_input_timer_d"
	clientInputTransportErr = "client_input_transport_err"
	// NOTE(review): clientInputDelete carries the same string as
	// clientInputTransportErr; this looks like a copy-paste slip (a distinct
	// "client_input_delete" event was presumably intended). Confirm before
	// relying on this event name — see actTransErr and actDelete.
	clientInputDelete = "client_input_transport_err"
)
// ClientTransaction represents a client transaction as referenced in RFC 3261
// section 17.1. It drives an FSM over the request/response exchange and owns
// the retransmission and timeout timers.
type ClientTransaction struct {
	ID string
	// BranchID is the Via branch parameter that identifies the transaction.
	BranchID string
	// ServerTxID links this client transaction to a related server transaction.
	ServerTxID string
	TransManager *TransactionManager
	// Origin is the request that initiated this transaction.
	Origin *message.SipMsg
	FSM *fsm.FSM
	// msgHistory appears unused within this file — confirm before removing.
	msgHistory []*message.SipMsg
	ListeningPoint interfaces.ListeningPoint
	// Host and Port identify the remote destination used by Send/Resend.
	Host string
	Port string
	// LastMessage is the most recent message handed to Receive.
	LastMessage *message.SipMsg
	// timerATime is the current retransmission interval; it is doubled on
	// every timer A firing (see actResend).
	timerATime time.Duration
	timerA *time.Timer
	// timerB is the overall transaction timeout (armed at 64*T1 in Send).
	timerB *time.Timer
	timerDTime time.Duration
	timerD *time.Timer
}
// InitFSM initializes the finite state machine within the client transaction.
// INVITE requests get the INVITE client transaction machine with callbacks
// wired for provisional/final responses and timers; every other method gets
// the same transition names but with NO callbacks registered, so timers and
// cleanup are not driven for non-INVITE transactions here.
// NOTE(review): neither machine has a transition for a 2xx arriving straight
// from "Calling" (i.e. with no provisional response first) — confirm intent.
func (ct *ClientTransaction) InitFSM(msg *message.SipMsg) {
	switch string(msg.Req.Method) {
	case method.INVITE:
		ct.FSM = fsm.NewFSM("", fsm.Events{
			{Name: clientInputRequest, Src: []string{""}, Dst: "Calling"},
			{Name: clientInput1xx, Src: []string{"Calling"}, Dst: "Proceeding"},
			{Name: clientInput300Plus, Src: []string{"Proceeding"}, Dst: "Completed"},
			{Name: clientInput2xx, Src: []string{"Proceeding"}, Dst: "Terminated"},
			{Name: clientInputTransportErr, Src: []string{"Calling", "Proceeding", "Completed"}, Dst: "Terminated"},
		}, fsm.Callbacks{
			clientInput1xx:     ct.act100,
			clientInput2xx:     ct.actDelete,
			clientInput300Plus: ct.act300,
			clientInputTimerA:  ct.actResend,
			clientInputTimerB:  ct.actTransErr,
		})
	default:
		ct.FSM = fsm.NewFSM("", fsm.Events{
			{Name: clientInputRequest, Src: []string{""}, Dst: "Calling"},
			{Name: clientInput1xx, Src: []string{"Calling"}, Dst: "Proceeding"},
			{Name: clientInput300Plus, Src: []string{"Proceeding"}, Dst: "Completed"},
			{Name: clientInput2xx, Src: []string{"Proceeding"}, Dst: "Terminated"},
		}, fsm.Callbacks{})
	}
}
// SetListeningPoint sets the transport listening point used to send messages
// for this client transaction.
func (ct *ClientTransaction) SetListeningPoint(lp interfaces.ListeningPoint) {
	ct.ListeningPoint = lp
}
// GetListeningPoint returns the current listening point.
func (ct *ClientTransaction) GetListeningPoint() interfaces.ListeningPoint {
	return ct.ListeningPoint
}
// GetBranchID returns the branch ID, which is the identifier of a transaction.
func (ct *ClientTransaction) GetBranchID() string {
	return ct.BranchID
}
// GetOrigin returns the SIP message that initiated this transaction.
func (ct *ClientTransaction) GetOrigin() *message.SipMsg {
	return ct.Origin
}
// Receive takes in a SIP response from the transport layer, records it as
// the last message, and advances the transaction FSM according to the status
// class: 1xx provisional, 2xx success, anything else 300-plus. Event errors
// (e.g. a transition not valid in the current state) are logged.
func (ct *ClientTransaction) Receive(msg *message.SipMsg) {
	ct.LastMessage = msg
	var event string
	switch code := msg.GetStatusCode(); {
	case code < 200:
		event = clientInput1xx
	case code < 300:
		event = clientInput2xx
	default:
		event = clientInput300Plus
	}
	if err := ct.FSM.Event(event); err != nil {
		log.Log.Error(err)
	}
}
// act100 fires on a provisional (1xx) response: the retransmission timer A
// is stopped, since the request has demonstrably reached the far side.
func (ct *ClientTransaction) act100(event *fsm.Event) {
	ct.timerA.Stop()
}
// SetServerTransaction records the ID of a related server transaction.
func (ct *ClientTransaction) SetServerTransaction(txID string) {
	ct.ServerTxID = txID
}
// GetServerTransactionID returns the ID previously stored with
// SetServerTransaction.
func (ct *ClientTransaction) GetServerTransactionID() string {
	return ct.ServerTxID
}
// GetLastMessage returns the last SIP message received by this transaction.
func (ct *ClientTransaction) GetLastMessage() *message.SipMsg {
	return ct.LastMessage
}
// SetLastMessage overrides the last received message.
func (ct *ClientTransaction) SetLastMessage(msg *message.SipMsg) {
	ct.LastMessage = msg
}
// actSend transmits the original request to the stored destination; on a
// transport failure it raises the transport-error event on the FSM.
// The fsm event argument is unused.
func (ct *ClientTransaction) actSend(event *fsm.Event) {
	err := ct.ListeningPoint.Send(ct.Host, ct.Port, ct.Origin.String())
	if err != nil {
		err2 := ct.FSM.Event(clientInputTransportErr)
		if err2 != nil {
			log.Log.Error("Event error in error handling for transactionID: " + ct.BranchID)
		}
	}
}
// act300 fires on a 3xx-or-higher final response: timer D is armed, and its
// expiry raises clientInputTimerD on the FSM.
// NOTE(review): log.Log.Info is given a printf-style format string plus an
// argument; unless this logger formats its arguments, "%p" is logged
// literally — confirm whether Infof was intended.
// NOTE(review): timerDTime is never assigned in this file; if it is zero the
// timer fires immediately — verify it is initialized elsewhere.
func (ct *ClientTransaction) act300(event *fsm.Event) {
	log.Log.Info("Client transaction %p, act_300", ct)
	ct.timerD = time.AfterFunc(ct.timerDTime, func() {
		err := ct.FSM.Event(clientInputTimerD)
		if err != nil {
			log.Log.Error("Event error for transactionID: " + ct.BranchID)
		}
	})
}
// actTransErr logs a transport failure and then fires clientInputDelete.
// NOTE(review): clientInputDelete currently aliases the transport-error
// string (see the const block), so this re-raises
// "client_input_transport_err" rather than a delete event — confirm intent.
func (ct *ClientTransaction) actTransErr(event *fsm.Event) {
	log.Log.Error("Transport error for transactionID: " + ct.BranchID)
	err := ct.FSM.Event(clientInputDelete)
	if err != nil {
		log.Log.Error("Event error for transactionID: " + ct.BranchID)
	}
}
// actDelete removes this client transaction from the transaction manager,
// keyed by the branch parameter of the request's first Via header.
func (ct *ClientTransaction) actDelete(event *fsm.Event) {
	ct.TransManager.DeleteClientTransaction(string(ct.Origin.Via[0].Branch))
}
// actResend fires on timer A expiry: the retransmission interval is doubled
// (exponential backoff), the timer re-armed, and the request retransmitted.
// NOTE(review): as with act300, the format verb in Info is likely meant for
// Infof.
func (ct *ClientTransaction) actResend(event *fsm.Event) {
	log.Log.Info("Client transaction %p, act_resend", ct)
	ct.timerATime *= 2
	ct.timerA.Reset(ct.timerATime)
	ct.Resend()
}
// Resend retransmits the original request to the stored destination; a
// transport failure raises the transport-error event on the FSM.
func (ct *ClientTransaction) Resend() {
	err := ct.ListeningPoint.Send(ct.Host, ct.Port, ct.Origin.String())
	if err != nil {
		err2 := ct.FSM.Event(clientInputTransportErr)
		if err2 != nil {
			log.Log.Error("Event error in error handling for transactionID: " + ct.BranchID)
		}
	}
}
// StatelessSend sends a SIP message without acting on the FSM. The message
// and destination supplied by the caller are used; a transport error is
// logged but drives no state transition.
// (Fix: the previous implementation ignored all three parameters and re-sent
// ct.Origin to ct.Host/ct.Port.)
func (ct *ClientTransaction) StatelessSend(msg *message.SipMsg, host string, port string) {
	err := ct.ListeningPoint.Send(host, port, msg.String())
	if err != nil {
		log.Log.Error("Transport error for transactionID : " + ct.BranchID)
	}
}
//Send is used to send a SIP message
func (ct *ClientTransaction) Send(msg *message.SipMsg, host string, port string) {
ct.Origin = msg
ct.Host = host
ct.Port = port
ct.timerATime = T1
//Retransmition timer
ct.timerA = time.AfterFunc(ct.timerATime, func() {
err := ct.FSM.Event(clientInputTimerA)
if err != nil {
log.Log.Error(err)
}
})
//timeout timer
ct.timerB = time.AfterFunc(64*T1, func() {
err := ct.FSM.Event(clientInputTimerB)
if err != nil {
log.Log.Error(err)
}
})
err := ct.ListeningPoint.Send(ct.Host, ct.Port, ct.Origin.String())
if err != nil {
err2 := ct.FSM.Event(serverInputTransportErr)
if err2 != nil {
log.Log.Error(err)
}
}
ct.FSM.Event(clientInputRequest)
} | sip/transaction/client.go | 0.652131 | 0.433862 | client.go | starcoder |
package gendemographics
// GenBusinesses returns a map of business name to the number of such
// businesses supported by the given population size. A business type appears
// only when the population strictly exceeds its "serves" threshold; the count
// is the integer quotient population / serves.
func GenBusinesses(population int) map[string]int {
	counts := make(map[string]int)
	for _, business := range BusinessTypes {
		if population <= business.Serves {
			continue
		}
		counts[business.Name] = population / business.Serves
	}
	return counts
}
// Business represents a single business of a given BusinessType.
type Business struct {
	*BusinessType
}
// BusinessType represents a type of business.
type BusinessType struct {
	// Name is the human-readable trade name.
	Name string
	// Serves is the population size required to support one such business.
	Serves int
}
// New returns a new business of the given business type, sharing the type's
// metadata by embedding a pointer to it.
func (bt *BusinessType) New() *Business {
	return &Business{
		BusinessType: bt,
	}
}
// BusinessTypes lists the known business types together with the population
// size each one requires to be supported. Four entries previously read
// "<NAME>" (anonymization damage); they are restored here from the reference
// table in the comment at the end of this file, matched by order and
// "serves" value.
var BusinessTypes = []*BusinessType{
	{"Shoemaker", 150},
	{"Furrier", 250},
	{"Maidservant", 250},
	{"Tailor", 250},
	{"Barber", 350},
	{"Jeweler", 400},
	{"Taverns/Restaurant", 400},
	{"Old-clothes", 400},
	{"Pastrycook", 500},
	{"Mason", 500},
	{"Carpenter", 550},
	{"Weaver", 600},
	{"Chandler", 700},
	{"Mercer", 700},
	{"Cooper", 700},
	{"Baker", 800},
	{"Watercarrier", 850},
	{"Scabbardmaker", 850},
	{"Wine-seller", 900},
	{"Hatmaker", 950},
	{"Saddler", 1000},
	{"Chicken Butcher", 1000}, // restored from reference list
	{"Pursemaker", 1100},
	{"Woodseller", 2400},
	{"Magic-shop", 2800},
	{"Bookbinder", 3000},
	{"Butcher", 1200},
	{"Fishmonger", 1200},
	{"Beer-seller", 1400},
	{"Buckle Maker", 1400}, // restored from reference list
	{"Plasterer", 1400},
	{"Spice Merchant", 1400}, // restored from reference list
	{"Blacksmith", 1500},
	{"Painter", 1500},
	{"Doctor", 1700},
	{"Roofer", 1800},
	{"Locksmith", 1900},
	{"Bather", 1900},
	{"Ropemaker", 1900},
	{"Inn", 2000},
	{"Tanner", 2000},
	{"Copyist", 2000},
	{"sculptor", 2000},
	{"Rugmaker", 2000},
	{"Harness-Maker", 2000},
	{"Bleacher", 2100},
	{"Hay Merchant", 2300}, // restored from reference list
	{"Cutler", 2300},
	{"Glovemaker", 2400},
	{"Woodcarver", 2400},
	{"Bookseller", 6300},
	{"Illuminator", 3900},
	{"Place Of Worship", 400},
	// OTHER JOBS / NOT BUSINESS
	{"Law Enforcement", 150},
	{"Noble", 200},
	{"Administrator", 650},
	{"Clergy", 40},
	{"Priest", 1000},
}
/* ORIGINAL LIST
* Shoemakers 150
* Furriers 250
* Maidservants 250
* Tailors 250
* Barbers 350
* Jewelers 400
* Taverns/Restaurants 400
* Old-clothes 400 ?
* Pastrycooks 500
* Masons 500
* Carpenters 550
* Weavers 600
* chandlers 700
* Mercers 700
* Coopers 700
* Bakers 800
* Watercarriers 850 This is a job
* Scabbardmakers 850
* wine-sellers 900
* Hatmakers 950
* Saddlers 1,000
* Chicken Butchers 1,000
* Pursemakers 1,100
* Woodsellers 2,400
* Magic-shops 2,800
* Bookbinders 3,000
* Butchers 1,200
* Fishmongers 1,200
* Beer-sellers 1,400
* Buckle Makers 1,400
* Plasterers 1,400
* Spice Merchants 1,400
* Blacksmiths 1,500
* Painters 1,500
* Doctors* 1,700
* Roofers 1,800
* Locksmiths 1,900
* Bathers 1,900
* Ropemakers 1,900
* Inns 2,000
* Tanners 2,000
* Copyists 2,000
* sculptors 2,000
* Rugmakers 2,000
* Harness-Makers 2,000
* Bleachers 2,100
* Hay Merchants 2,300
* Cutlers 2,300
* Glovemakers 2,400
* Woodcarvers 2,400
* Booksellers 6,300
* Illuminators 3,900
* Place Of Worship 400
*
* OTHER JOBS / NOT BUSINESS
*
* Law Enforcement 150
* Noble 200
* Administrator 650
* Clergy 40
* Priest 1000
*
*/ | gendemographics/business.go | 0.62223 | 0.565539 | business.go | starcoder |
package geometry
import (
"github.com/schidstorm/engine/gls"
"github.com/schidstorm/engine/math32"
"math"
)
// NewCone creates a cone geometry with the specified base radius, height,
// number of radial segments, number of height segments, and presence of a bottom cap.
// It delegates to NewConeSector with a full 2*Pi sector.
func NewCone(radius, height float64, radialSegments, heightSegments int, bottom bool) *Geometry {
	return NewConeSector(radius, height, radialSegments, heightSegments, 0, 2*math.Pi, bottom)
}
// NewConeSector creates a cone sector geometry with the specified base radius, height, number of radial segments,
// number of height segments, sector start angle in radians, sector size angle in radians, and presence of a bottom cap.
// A cone is modeled as a truncated cone whose top radius is zero (so no top cap).
func NewConeSector(radius, height float64, radialSegments, heightSegments int, thetaStart, thetaLength float64, bottom bool) *Geometry {
	return NewTruncatedConeSector(0, radius, height, radialSegments, heightSegments, thetaStart, thetaLength, false, bottom)
}
// NewCylinder creates a cylinder geometry with the specified radius, height,
// number of radial segments, number of height segments, and presence of a top and/or bottom cap.
func NewCylinder(radius, height float64, radialSegments, heightSegments int, top, bottom bool) *Geometry {
	return NewCylinderSector(radius, height, radialSegments, heightSegments, 0, 2*math.Pi, top, bottom)
}
// NewCylinderSector creates a cylinder sector geometry with the specified radius, height, number of radial segments,
// number of height segments, sector start angle in radians, sector size angle in radians, and presence of a top and/or bottom cap.
// A cylinder is modeled as a truncated cone with equal top and bottom radii.
func NewCylinderSector(radius, height float64, radialSegments, heightSegments int, thetaStart, thetaLength float64, top, bottom bool) *Geometry {
	return NewTruncatedConeSector(radius, radius, height, radialSegments, heightSegments, thetaStart, thetaLength, top, bottom)
}
// NewTruncatedCone creates a truncated cone geometry with the specified top and bottom radii,
// height, number of radial segments, number of height segments, and presence of a top and/or bottom cap.
func NewTruncatedCone(radiusTop, radiusBottom, height float64, radialSegments, heightSegments int, top, bottom bool) *Geometry {
	return NewTruncatedConeSector(radiusTop, radiusBottom, height, radialSegments, heightSegments, 0, 2*math.Pi, top, bottom)
}
// NewTruncatedConeSector creates a truncated cone sector geometry with the specified top and bottom radii, height, number of radial segments,
// number of height segments, sector start angle in radians, sector size angle in radians, and presence of a top and/or bottom cap.
// The geometry is emitted in up to three material groups: group 0 is the
// lateral surface, group 1 the optional top cap, group 2 the optional
// bottom cap.
func NewTruncatedConeSector(radiusTop, radiusBottom, height float64, radialSegments, heightSegments int, thetaStart, thetaLength float64, top, bottom bool) *Geometry {
	c := NewGeometry()
	heightHalf := height / 2
	// vertices[y][x] holds the position-buffer index of ring y, segment x;
	// uvsOrig mirrors it with the texture coordinates of each vertex.
	vertices := [][]int{}
	uvsOrig := [][]math32.Vector2{}
	// Create buffer for vertex positions
	positions := math32.NewArrayF32(0, 0)
	// Build heightSegments+1 rings of radialSegments+1 vertices, from the
	// top (y=0, radiusTop) down to the bottom (y=heightSegments, radiusBottom).
	for y := 0; y <= heightSegments; y++ {
		var verticesRow = []int{}
		var uvsRow = []math32.Vector2{}
		v := float64(y) / float64(heightSegments)
		// Linear interpolation of the radius between top and bottom.
		radius := v*(radiusBottom-radiusTop) + radiusTop
		for x := 0; x <= radialSegments; x++ {
			u := float64(x) / float64(radialSegments)
			var vertex math32.Vector3
			vertex.X = float32(radius * math.Sin(u*thetaLength+thetaStart))
			vertex.Y = float32(-v*height + heightHalf)
			vertex.Z = float32(radius * math.Cos(u*thetaLength+thetaStart))
			positions.AppendVector3(&vertex)
			verticesRow = append(verticesRow, positions.Size()/3-1)
			uvsRow = append(uvsRow, math32.Vector2{float32(u), 1.0 - float32(v)})
		}
		vertices = append(vertices, verticesRow)
		uvsOrig = append(uvsOrig, uvsRow)
	}
	// tanTheta is the slope of the lateral surface, used to tilt the normals.
	tanTheta := (radiusBottom - radiusTop) / height
	var na, nb math32.Vector3
	// Create preallocated buffers for normals and uvs and buffer for indices
	npos := positions.Size()
	normals := math32.NewArrayF32(npos, npos)
	uvs := math32.NewArrayF32(2*npos/3, 2*npos/3)
	indices := math32.NewArrayU32(0, 0)
	// Emit two triangles per quad of the lateral surface; the normals at the
	// left (na) and right (nb) edge of each column are shared by all rows.
	for x := 0; x < radialSegments; x++ {
		if radiusTop != 0 {
			positions.GetVector3(3*vertices[0][x], &na)
			positions.GetVector3(3*vertices[0][x+1], &nb)
		} else {
			// Apex case: the ring at y=0 degenerates to a point, so take the
			// direction from the first non-degenerate ring.
			positions.GetVector3(3*vertices[1][x], &na)
			positions.GetVector3(3*vertices[1][x+1], &nb)
		}
		na.SetY(float32(math.Sqrt(float64(na.X*na.X+na.Z*na.Z)) * tanTheta)).Normalize()
		nb.SetY(float32(math.Sqrt(float64(nb.X*nb.X+nb.Z*nb.Z)) * tanTheta)).Normalize()
		for y := 0; y < heightSegments; y++ {
			v1 := vertices[y][x]
			v2 := vertices[y+1][x]
			v3 := vertices[y+1][x+1]
			v4 := vertices[y][x+1]
			n1 := na
			n2 := na
			n3 := nb
			n4 := nb
			uv1 := uvsOrig[y][x]
			uv2 := uvsOrig[y+1][x]
			uv3 := uvsOrig[y+1][x+1]
			uv4 := uvsOrig[y][x+1]
			indices.Append(uint32(v1), uint32(v2), uint32(v4))
			normals.SetVector3(3*v1, &n1)
			normals.SetVector3(3*v2, &n2)
			normals.SetVector3(3*v4, &n4)
			indices.Append(uint32(v2), uint32(v3), uint32(v4))
			normals.SetVector3(3*v2, &n2)
			normals.SetVector3(3*v3, &n3)
			normals.SetVector3(3*v4, &n4)
			uvs.SetVector2(2*v1, &uv1)
			uvs.SetVector2(2*v2, &uv2)
			uvs.SetVector2(2*v3, &uv3)
			uvs.SetVector2(2*v4, &uv4)
		}
	}
	// First group is the body of the cylinder
	// without the caps
	c.AddGroup(0, indices.Size(), 0)
	nextGroup := indices.Size()
	// Top cap: a triangle fan of (center, rim, next-rim) triples, each with
	// an upward (0,1,0) normal and its own UV.
	if top && radiusTop > 0 {
		// Array of vertex indicesOrig to build used to build the faces.
		indicesOrig := []uint32{}
		nextidx := positions.Size() / 3
		// Appends top segments vertices and builds array of its indicesOrig
		var uv1, uv2, uv3 math32.Vector2
		for x := 0; x < radialSegments; x++ {
			uv1 = uvsOrig[0][x]
			uv2 = uvsOrig[0][x+1]
			uv3 = math32.Vector2{uv2.X, 0}
			// Appends CENTER with its own UV.
			positions.Append(0, float32(heightHalf), 0)
			normals.Append(0, 1, 0)
			uvs.AppendVector2(&uv3)
			indicesOrig = append(indicesOrig, uint32(nextidx))
			nextidx++
			// Appends vertex
			v := math32.Vector3{}
			vi := vertices[0][x]
			positions.GetVector3(3*vi, &v)
			positions.AppendVector3(&v)
			normals.Append(0, 1, 0)
			uvs.AppendVector2(&uv1)
			indicesOrig = append(indicesOrig, uint32(nextidx))
			nextidx++
		}
		// Appends copy of first vertex (center)
		var vertex, normal math32.Vector3
		var uv math32.Vector2
		positions.GetVector3(3*int(indicesOrig[0]), &vertex)
		normals.GetVector3(3*int(indicesOrig[0]), &normal)
		uvs.GetVector2(2*int(indicesOrig[0]), &uv)
		positions.AppendVector3(&vertex)
		normals.AppendVector3(&normal)
		uvs.AppendVector2(&uv)
		indicesOrig = append(indicesOrig, uint32(nextidx))
		nextidx++
		// Appends copy of second vertex (v1) USING LAST UV2
		positions.GetVector3(3*int(indicesOrig[1]), &vertex)
		normals.GetVector3(3*int(indicesOrig[1]), &normal)
		positions.AppendVector3(&vertex)
		normals.AppendVector3(&normal)
		uvs.AppendVector2(&uv2)
		indicesOrig = append(indicesOrig, uint32(nextidx))
		nextidx++
		// Append faces indicesOrig
		for x := 0; x < radialSegments; x++ {
			pos := 2 * x
			i1 := indicesOrig[pos]
			i2 := indicesOrig[pos+1]
			i3 := indicesOrig[pos+3]
			indices.Append(uint32(i1), uint32(i2), uint32(i3))
		}
		// Second group is optional top cap of the cylinder
		c.AddGroup(nextGroup, indices.Size()-nextGroup, 1)
		nextGroup = indices.Size()
	}
	// Bottom cap: same fan construction with a downward (0,-1,0) normal and
	// reversed winding so the faces point outward.
	if bottom && radiusBottom > 0 {
		// Array of vertex indicesOrig to build used to build the faces.
		indicesOrig := []uint32{}
		nextidx := positions.Size() / 3
		// Appends top segments vertices and builds array of its indicesOrig
		var uv1, uv2, uv3 math32.Vector2
		for x := 0; x < radialSegments; x++ {
			uv1 = uvsOrig[heightSegments][x]
			uv2 = uvsOrig[heightSegments][x+1]
			uv3 = math32.Vector2{uv2.X, 1}
			// Appends CENTER with its own UV.
			positions.Append(0, float32(-heightHalf), 0)
			normals.Append(0, -1, 0)
			uvs.AppendVector2(&uv3)
			indicesOrig = append(indicesOrig, uint32(nextidx))
			nextidx++
			// Appends vertex
			v := math32.Vector3{}
			vi := vertices[heightSegments][x]
			positions.GetVector3(3*vi, &v)
			positions.AppendVector3(&v)
			normals.Append(0, -1, 0)
			uvs.AppendVector2(&uv1)
			indicesOrig = append(indicesOrig, uint32(nextidx))
			nextidx++
		}
		// Appends copy of first vertex (center)
		var vertex, normal math32.Vector3
		var uv math32.Vector2
		positions.GetVector3(3*int(indicesOrig[0]), &vertex)
		normals.GetVector3(3*int(indicesOrig[0]), &normal)
		uvs.GetVector2(2*int(indicesOrig[0]), &uv)
		positions.AppendVector3(&vertex)
		normals.AppendVector3(&normal)
		uvs.AppendVector2(&uv)
		indicesOrig = append(indicesOrig, uint32(nextidx))
		nextidx++
		// Appends copy of second vertex (v1) USING LAST UV2
		positions.GetVector3(3*int(indicesOrig[1]), &vertex)
		normals.GetVector3(3*int(indicesOrig[1]), &normal)
		positions.AppendVector3(&vertex)
		normals.AppendVector3(&normal)
		uvs.AppendVector2(&uv2)
		indicesOrig = append(indicesOrig, uint32(nextidx))
		nextidx++
		// Appends faces indicesOrig
		for x := 0; x < radialSegments; x++ {
			pos := 2 * x
			i1 := indicesOrig[pos]
			i2 := indicesOrig[pos+3]
			i3 := indicesOrig[pos+1]
			indices.Append(uint32(i1), uint32(i2), uint32(i3))
		}
		// Third group is optional bottom cap of the cylinder
		c.AddGroup(nextGroup, indices.Size()-nextGroup, 2)
	}
	c.SetIndices(indices)
	c.AddVBO(gls.NewVBO(positions).AddAttrib(gls.VertexPosition))
	c.AddVBO(gls.NewVBO(normals).AddAttrib(gls.VertexNormal))
	c.AddVBO(gls.NewVBO(uvs).AddAttrib(gls.VertexTexcoord))
	return c
}
package model3d
import (
"fmt"
"math"
"github.com/heustis/tsp-solver-go/model"
)
// Edge3D represents the line segment between two points.
type Edge3D struct {
	Start *Vertex3D `json:"start"`
	End   *Vertex3D `json:"end"`
	// vector caches the normalized start-to-end direction; computed lazily
	// by GetVector.
	vector *Vertex3D
	// length is the Euclidean distance between Start and End, computed once
	// in NewEdge3D.
	length float64
}
// DistanceIncrease returns the difference in length between the edge
// and the two edges formed by inserting the vertex between the edge's start
// and end. For example, if start->end has a length of 5, start->vertex has a
// length of 3, and vertex->end has a length of 6, this returns 4 (6 + 3 - 5).
func (e *Edge3D) DistanceIncrease(vertex model.CircuitVertex) float64 {
	detourLength := e.Start.DistanceTo(vertex) + e.End.DistanceTo(vertex)
	return detourLength - e.length
}
// Equals reports whether other is an Edge3D with the same start and end
// vertices. The explicit comparisons against (*Edge3D)(nil) handle the
// typed-nil-in-interface case: a nil *Edge3D boxed in an interface is not
// equal to the untyped nil, so both forms are checked to avoid a nil
// dereference and to make nil edges compare equal to each other.
func (e *Edge3D) Equals(other interface{}) bool {
	// Compare pointers first, for performance, but then check start and end points, in case the same edge is created multiple times.
	if e == other {
		return true
	} else if other == (*Edge3D)(nil) || other == nil {
		// Only a nil receiver equals a nil (or typed-nil) argument.
		return e == (*Edge3D)(nil)
	} else if otherVertex, okay := other.(*Edge3D); okay && e != (*Edge3D)(nil) {
		return e.Start.Equals(otherVertex.Start) && e.End.Equals(otherVertex.End)
	} else {
		return false
	}
}
// GetStart returns the start vertex of the edge.
func (e *Edge3D) GetStart() model.CircuitVertex {
	return e.Start
}
// GetEnd returns the end vertex of the edge.
func (e *Edge3D) GetEnd() model.CircuitVertex {
	return e.End
}
// GetLength returns the precomputed length of the edge.
func (e *Edge3D) GetLength() float64 {
	return e.length
}
// GetVector returns the normalized (length=1.0) vector from the edge's start
// to the edge's end. The result is computed on first call and cached;
// the cache write is not synchronized, so concurrent first calls on the same
// edge may race — confirm single-goroutine use before sharing edges.
func (e *Edge3D) GetVector() *Vertex3D {
	if e.vector == nil {
		e.vector = NewVertex3D((e.End.X-e.Start.X)/e.length, (e.End.Y-e.Start.Y)/e.length, (e.End.Z-e.Start.Z)/e.length)
	}
	return e.vector
}
// Intersects checks if the two edges go through at least one identical point,
// using the closest-point-between-two-lines construction from
// http://paulbourke.net/geometry/pointlineplane/ restricted to the segments.
func (e *Edge3D) Intersects(other model.CircuitEdge) bool {
	otherEdge3D := other.(*Edge3D)
	// See http://paulbourke.net/geometry/pointlineplane/
	// Note: due to point deduplication, we do not need to check for zero length edges.
	vec21 := e.End.Subtract(e.Start)
	vec43 := otherEdge3D.End.Subtract(otherEdge3D.Start)
	vec13 := e.Start.Subtract(otherEdge3D.Start)
	dot4321 := vec43.DotProduct(vec21)
	dot4343 := vec43.DotProduct(vec43)
	dot2121 := vec21.DotProduct(vec21)
	dot1321 := vec13.DotProduct(vec21)
	// The denominator vanishes when the direction vectors are parallel.
	denominator := (dot2121 * dot4343) - (dot4321 * dot4321)
	if math.Abs(denominator) < model.Threshold {
		// Edges are parallel, check if they are colinear, then return true if they overlap.
		// For this we can do similar math to the denominator, using vec13 (the start-to-start vector) as the "other" edge for this check.
		dot1313 := vec13.DotProduct(vec13)
		startToStartDenominator := (dot2121 * dot1313) - (dot1321 * dot1321)
		return math.Abs(startToStartDenominator) < model.Threshold && (model.IsBetween(e.Start.X, otherEdge3D.Start.X, otherEdge3D.End.X) ||
			model.IsBetween(e.End.X, otherEdge3D.Start.X, otherEdge3D.End.X) ||
			model.IsBetween(otherEdge3D.Start.X, e.Start.X, e.End.X) ||
			model.IsBetween(otherEdge3D.End.X, e.Start.X, e.End.X))
	}
	dot1343 := vec13.DotProduct(vec43)
	numerator := (dot1343 * dot4321) - (dot1321 * dot4343)
	// percentE parameterizes the closest point along this edge (0=start, 1=end).
	percentE := numerator / denominator
	// If the closest point is not within the the start and end points, then the line segments do not intersect, even if the infinite lines do.
	if percentE < -model.Threshold || percentE > 1.0+model.Threshold {
		return false
	}
	percentOther := (dot1343 + (dot4321 * percentE)) / dot4343
	// If the closest point is not within the the start and end points, then the line segments do not intersect, even if the infinite lines do.
	if percentOther < -model.Threshold || percentOther > 1.0+model.Threshold {
		return false
	}
	// The segments intersect only if the two closest points coincide
	// (within the model's equality threshold).
	pointE := NewVertex3D(e.Start.X+(percentE*vec21.X), e.Start.Y+(percentE*vec21.Y), e.Start.Z+(percentE*vec21.Z))
	pointOther := NewVertex3D(otherEdge3D.Start.X+(percentOther*vec43.X), otherEdge3D.Start.Y+(percentOther*vec43.Y), otherEdge3D.Start.Z+(percentOther*vec43.Z))
	return pointE.Equals(pointOther)
}
// Merge combines this edge with the supplied edge, producing a fresh edge
// that runs from this edge's start vertex to the other edge's end vertex.
func (e *Edge3D) Merge(other model.CircuitEdge) model.CircuitEdge {
	end := other.GetEnd().(*Vertex3D)
	return NewEdge3D(e.Start, end)
}
// Split breaks this edge in two at the supplied vertex, returning the
// "start-to-vertex" edge followed by the "vertex-to-end" edge.
func (e *Edge3D) Split(vertex model.CircuitVertex) (model.CircuitEdge, model.CircuitEdge) {
	mid := vertex.(*Vertex3D)
	first := NewEdge3D(e.Start, mid)
	second := NewEdge3D(mid, e.End)
	return first, second
}
// String renders the edge as a JSON-style string of its start and end
// vertices, e.g. {"start":...,"end":...}.
func (e *Edge3D) String() string {
	// %s invokes each vertex's own String method.
	return fmt.Sprintf(`{"start":%s,"end":%s}`, e.Start, e.End)
}
// NewEdge3D creates a edge from the starting Vertex3D to the ending Vertex3D
func NewEdge3D(start *Vertex3D, end *Vertex3D) *Edge3D {
length := start.DistanceTo(end)
return &Edge3D{
Start: start,
End: end,
vector: nil,
length: length,
}
}
var _ model.CircuitEdge = (*Edge3D)(nil) | model3d/edge3d.go | 0.897874 | 0.674288 | edge3d.go | starcoder |
package osc
import (
"encoding/json"
)
// Phase2Options Information about Phase 2 of the Internet Key Exchange (IKE) negotiation.
// All fields are pointers so that an unset field can be distinguished from
// its zero value; unset fields are omitted from the JSON encoding.
type Phase2Options struct {
	// The Diffie-Hellman (DH) group numbers allowed for the VPN tunnel for phase 2.
	Phase2DhGroupNumbers *[]int32 `json:"Phase2DhGroupNumbers,omitempty"`
	// The encryption algorithms allowed for the VPN tunnel for phase 2.
	Phase2EncryptionAlgorithms *[]string `json:"Phase2EncryptionAlgorithms,omitempty"`
	// The integrity algorithms allowed for the VPN tunnel for phase 2.
	Phase2IntegrityAlgorithms *[]string `json:"Phase2IntegrityAlgorithms,omitempty"`
	// The lifetime for phase 2 of the Internet Key Exchange (IKE) negotiation process, in seconds.
	Phase2LifetimeSeconds *int32 `json:"Phase2LifetimeSeconds,omitempty"`
	// The pre-shared key to establish the initial authentication between the client gateway and the virtual gateway. This key can contain any character except line breaks and double quotes (").
	PreSharedKey *string `json:"PreSharedKey,omitempty"`
}
// NewPhase2Options instantiates a new Phase2Options object.
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed.
func NewPhase2Options() *Phase2Options {
	return &Phase2Options{}
}
// NewPhase2OptionsWithDefaults instantiates a new Phase2Options object.
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set.
func NewPhase2OptionsWithDefaults() *Phase2Options {
	return &Phase2Options{}
}
// GetPhase2DhGroupNumbers returns the Phase2DhGroupNumbers field value if
// set, and the zero value (a nil slice) otherwise.
func (o *Phase2Options) GetPhase2DhGroupNumbers() []int32 {
	if o == nil || o.Phase2DhGroupNumbers == nil {
		return nil
	}
	return *o.Phase2DhGroupNumbers
}

// GetPhase2DhGroupNumbersOk returns the Phase2DhGroupNumbers field value if
// set (nil otherwise) together with a boolean indicating whether it was set.
func (o *Phase2Options) GetPhase2DhGroupNumbersOk() (*[]int32, bool) {
	if o != nil && o.Phase2DhGroupNumbers != nil {
		return o.Phase2DhGroupNumbers, true
	}
	return nil, false
}

// HasPhase2DhGroupNumbers reports whether the Phase2DhGroupNumbers field has
// been set.
func (o *Phase2Options) HasPhase2DhGroupNumbers() bool {
	return o != nil && o.Phase2DhGroupNumbers != nil
}

// SetPhase2DhGroupNumbers stores a reference to the given []int32 in the
// Phase2DhGroupNumbers field.
func (o *Phase2Options) SetPhase2DhGroupNumbers(v []int32) {
	o.Phase2DhGroupNumbers = &v
}
// GetPhase2EncryptionAlgorithms returns the Phase2EncryptionAlgorithms field
// value if set, and the zero value (a nil slice) otherwise.
func (o *Phase2Options) GetPhase2EncryptionAlgorithms() []string {
	if o == nil || o.Phase2EncryptionAlgorithms == nil {
		return nil
	}
	return *o.Phase2EncryptionAlgorithms
}

// GetPhase2EncryptionAlgorithmsOk returns the Phase2EncryptionAlgorithms
// field value if set (nil otherwise) together with a boolean indicating
// whether it was set.
func (o *Phase2Options) GetPhase2EncryptionAlgorithmsOk() (*[]string, bool) {
	if o != nil && o.Phase2EncryptionAlgorithms != nil {
		return o.Phase2EncryptionAlgorithms, true
	}
	return nil, false
}

// HasPhase2EncryptionAlgorithms reports whether the
// Phase2EncryptionAlgorithms field has been set.
func (o *Phase2Options) HasPhase2EncryptionAlgorithms() bool {
	return o != nil && o.Phase2EncryptionAlgorithms != nil
}

// SetPhase2EncryptionAlgorithms stores a reference to the given []string in
// the Phase2EncryptionAlgorithms field.
func (o *Phase2Options) SetPhase2EncryptionAlgorithms(v []string) {
	o.Phase2EncryptionAlgorithms = &v
}
// GetPhase2IntegrityAlgorithms returns the Phase2IntegrityAlgorithms field
// value if set, and the zero value (a nil slice) otherwise.
func (o *Phase2Options) GetPhase2IntegrityAlgorithms() []string {
	if o == nil || o.Phase2IntegrityAlgorithms == nil {
		return nil
	}
	return *o.Phase2IntegrityAlgorithms
}

// GetPhase2IntegrityAlgorithmsOk returns the Phase2IntegrityAlgorithms field
// value if set (nil otherwise) together with a boolean indicating whether it
// was set.
func (o *Phase2Options) GetPhase2IntegrityAlgorithmsOk() (*[]string, bool) {
	if o != nil && o.Phase2IntegrityAlgorithms != nil {
		return o.Phase2IntegrityAlgorithms, true
	}
	return nil, false
}

// HasPhase2IntegrityAlgorithms reports whether the Phase2IntegrityAlgorithms
// field has been set.
func (o *Phase2Options) HasPhase2IntegrityAlgorithms() bool {
	return o != nil && o.Phase2IntegrityAlgorithms != nil
}

// SetPhase2IntegrityAlgorithms stores a reference to the given []string in
// the Phase2IntegrityAlgorithms field.
func (o *Phase2Options) SetPhase2IntegrityAlgorithms(v []string) {
	o.Phase2IntegrityAlgorithms = &v
}
// GetPhase2LifetimeSeconds returns the Phase2LifetimeSeconds field value if
// set, and the zero value (0) otherwise.
func (o *Phase2Options) GetPhase2LifetimeSeconds() int32 {
	if o == nil || o.Phase2LifetimeSeconds == nil {
		return 0
	}
	return *o.Phase2LifetimeSeconds
}

// GetPhase2LifetimeSecondsOk returns the Phase2LifetimeSeconds field value if
// set (nil otherwise) together with a boolean indicating whether it was set.
func (o *Phase2Options) GetPhase2LifetimeSecondsOk() (*int32, bool) {
	if o != nil && o.Phase2LifetimeSeconds != nil {
		return o.Phase2LifetimeSeconds, true
	}
	return nil, false
}

// HasPhase2LifetimeSeconds reports whether the Phase2LifetimeSeconds field
// has been set.
func (o *Phase2Options) HasPhase2LifetimeSeconds() bool {
	return o != nil && o.Phase2LifetimeSeconds != nil
}

// SetPhase2LifetimeSeconds stores a reference to the given int32 in the
// Phase2LifetimeSeconds field.
func (o *Phase2Options) SetPhase2LifetimeSeconds(v int32) {
	o.Phase2LifetimeSeconds = &v
}
// GetPreSharedKey returns the PreSharedKey field value if set, and the zero
// value ("") otherwise.
func (o *Phase2Options) GetPreSharedKey() string {
	if o == nil || o.PreSharedKey == nil {
		return ""
	}
	return *o.PreSharedKey
}

// GetPreSharedKeyOk returns the PreSharedKey field value if set (nil
// otherwise) together with a boolean indicating whether it was set.
func (o *Phase2Options) GetPreSharedKeyOk() (*string, bool) {
	if o != nil && o.PreSharedKey != nil {
		return o.PreSharedKey, true
	}
	return nil, false
}

// HasPreSharedKey reports whether the PreSharedKey field has been set.
func (o *Phase2Options) HasPreSharedKey() bool {
	return o != nil && o.PreSharedKey != nil
}

// SetPreSharedKey stores a reference to the given string in the PreSharedKey
// field.
func (o *Phase2Options) SetPreSharedKey(v string) {
	o.PreSharedKey = &v
}
// MarshalJSON serializes only the fields that have been explicitly set,
// mirroring the `omitempty` semantics of the struct tags.
func (o Phase2Options) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{}
	if o.Phase2DhGroupNumbers != nil {
		out["Phase2DhGroupNumbers"] = o.Phase2DhGroupNumbers
	}
	if o.Phase2EncryptionAlgorithms != nil {
		out["Phase2EncryptionAlgorithms"] = o.Phase2EncryptionAlgorithms
	}
	if o.Phase2IntegrityAlgorithms != nil {
		out["Phase2IntegrityAlgorithms"] = o.Phase2IntegrityAlgorithms
	}
	if o.Phase2LifetimeSeconds != nil {
		out["Phase2LifetimeSeconds"] = o.Phase2LifetimeSeconds
	}
	if o.PreSharedKey != nil {
		out["PreSharedKey"] = o.PreSharedKey
	}
	return json.Marshal(out)
}
type NullablePhase2Options struct {
value *Phase2Options
isSet bool
}
func (v NullablePhase2Options) Get() *Phase2Options {
return v.value
}
func (v *NullablePhase2Options) Set(val *Phase2Options) {
v.value = val
v.isSet = true
}
func (v NullablePhase2Options) IsSet() bool {
return v.isSet
}
func (v *NullablePhase2Options) Unset() {
v.value = nil
v.isSet = false
}
func NewNullablePhase2Options(val *Phase2Options) *NullablePhase2Options {
return &NullablePhase2Options{value: val, isSet: true}
}
func (v NullablePhase2Options) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullablePhase2Options) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | v2/model_phase2_options.go | 0.812644 | 0.464598 | model_phase2_options.go | starcoder |
package migratest
import (
"database/sql"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/driver005/oauth/consent"
"github.com/driver005/oauth/jwk"
"github.com/driver005/oauth/models"
"github.com/driver005/oauth/oauth2"
sqlPersister "github.com/driver005/oauth/persistence/sql"
"github.com/ory/x/sqlxx"
)
// assertEqualClients compares two clients after pinning CreatedAt and
// UpdatedAt on both sides to the same instant, so only the remaining fields
// influence the comparison.
func assertEqualClients(t *testing.T, expected, actual *models.Client) {
	ts := time.Now()
	expected.CreatedAt, expected.UpdatedAt = ts, ts
	actual.CreatedAt, actual.UpdatedAt = ts, ts
	assert.Equal(t, expected, actual)
}
// assertEqualJWKs compares two JWK rows after pinning CreatedAt on both
// sides to the same instant.
func assertEqualJWKs(t *testing.T, expected, actual *jwk.SQLData) {
	ts := time.Now()
	expected.CreatedAt = ts
	actual.CreatedAt = ts
	assert.Equal(t, expected, actual)
}
// assertEqualConsentRequests compares two consent requests after normalizing
// the time fields to a shared value. The actual request must carry a client
// association (non-zero ClientID, non-nil Client), which is then cleared
// before the final deep comparison.
func assertEqualConsentRequests(t *testing.T, expected, actual *consent.ConsentRequest) {
	ts := time.Now()
	expected.AuthenticatedAt = sqlxx.NullTime(ts)
	expected.RequestedAt = ts
	actual.AuthenticatedAt = sqlxx.NullTime(ts)
	actual.RequestedAt = ts
	assert.NotZero(t, actual.ClientID)
	assert.NotNil(t, actual.Client)
	actual.ClientID = ""
	actual.Client = nil
	assert.Equal(t, expected, actual)
}
// assertEqualLoginRequests compares two login requests after normalizing the
// time fields to a shared value. The actual request must carry a client
// association (non-zero ClientID, non-nil Client), which is then cleared
// before the final deep comparison.
func assertEqualLoginRequests(t *testing.T, expected, actual *consent.LoginRequest) {
	ts := time.Now()
	expected.AuthenticatedAt = sqlxx.NullTime(ts)
	expected.RequestedAt = ts
	actual.AuthenticatedAt = sqlxx.NullTime(ts)
	actual.RequestedAt = ts
	assert.NotZero(t, actual.ClientID)
	assert.NotNil(t, actual.Client)
	actual.ClientID = ""
	actual.Client = nil
	assert.Equal(t, expected, actual)
}
// assertEqualLoginSessions compares two login sessions after pinning both
// AuthenticatedAt values to the same instant.
func assertEqualLoginSessions(t *testing.T, expected, actual *consent.LoginSession) {
	ts := sqlxx.NullTime(time.Now())
	expected.AuthenticatedAt = ts
	actual.AuthenticatedAt = ts
	assert.Equal(t, expected, actual)
}
// assertEqualHandledConsentRequests compares two handled consent requests
// while ignoring time-based fields: AuthenticatedAt and RequestedAt are
// normalized to a shared value on both sides, and the actual HandledAt is
// cleared.
// NOTE(review): only actual.HandledAt is zeroed (expected is left untouched),
// so the comparison implicitly expects expected.HandledAt to be unset —
// confirm this matches the fixtures.
func assertEqualHandledConsentRequests(t *testing.T, expected, actual *consent.HandledConsentRequest) {
	now := time.Now()
	expected.AuthenticatedAt = sqlxx.NullTime(now)
	expected.RequestedAt = now
	actual.AuthenticatedAt = sqlxx.NullTime(now)
	actual.RequestedAt = now
	actual.HandledAt = sqlxx.NullTime{}
	assert.Equal(t, expected, actual)
}
// assertEqualHandledLoginRequests compares two handled login requests after
// normalizing AuthenticatedAt and RequestedAt on both sides.
func assertEqualHandledLoginRequests(t *testing.T, expected, actual *consent.HandledLoginRequest) {
	ts := time.Now()
	expected.AuthenticatedAt = sqlxx.NullTime(ts)
	expected.RequestedAt = ts
	actual.AuthenticatedAt = sqlxx.NullTime(ts)
	actual.RequestedAt = ts
	assert.Equal(t, expected, actual)
}
// assertEqualLogoutRequests compares two logout requests, first verifying
// that the actual request received a client association and then blanking it
// so the remaining fields can be compared directly.
func assertEqualLogoutRequests(t *testing.T, expected, actual *consent.LogoutRequest) {
	assert.NotZero(t, actual.ClientID)
	actual.ClientID = sql.NullString{}
	assert.Equal(t, expected, actual)
}
// assertEqualForcedObfucscatedLoginSessions compares two forced-obfuscated
// login sessions, checking that the actual session has a client ID before
// clearing it for the comparison.
// NOTE(review): the function name misspells "Obfuscated"; renaming would
// touch every caller in the package, so the typo is only flagged here.
func assertEqualForcedObfucscatedLoginSessions(t *testing.T, expected, actual *consent.ForcedObfuscatedLoginSession) {
	assert.NotNil(t, actual.ClientID)
	actual.ClientID = ""
	assert.Equal(t, expected, actual)
}
// assertEqualOauth2Data compares two OAuth2 request rows while ignoring
// fields that vary per run: RequestedAt is normalized on both sides, Client
// is only checked for presence, and ConsentChallenge is checked for presence
// (when expected declares one) and then blanked on both sides before the
// final comparison.
func assertEqualOauth2Data(t *testing.T, expected, actual *sqlPersister.OAuth2RequestSQL) {
	now := time.Now()
	expected.RequestedAt = now
	actual.RequestedAt = now
	assert.NotZero(t, actual.Client)
	actual.Client = ""
	// Only require a consent challenge on the actual row when the expected
	// row declares a valid one.
	if expected.ConsentChallenge.Valid {
		assert.NotZero(t, actual.ConsentChallenge, "%+v", actual)
	}
	expected.ConsentChallenge = sql.NullString{}
	actual.ConsentChallenge = sql.NullString{}
	assert.Equal(t, expected, actual)
}
func assertEqualOauth2BlacklistedJTIs(t *testing.T, expected, actual *oauth2.BlacklistedJTI) {
now := time.Now()
expected.Expiry = now
actual.Expiry = now
assert.Equal(t, expected, actual)
} | persistence/sql/migratest/assertion_helpers.go | 0.604632 | 0.625252 | assertion_helpers.go | starcoder |
package regexpmap
import (
"regexp"
"sort"
)
// RegexpList is a utility type that keeps a slice of strings sorted by
// ascending length.
type RegexpList []string

// NewRegexpList returns a new RegexpList containing initialValues sorted by
// ascending string length.
// NOTE(review): unlike Add, duplicates in initialValues are kept — confirm
// that is intended.
func NewRegexpList(initialValues ...string) *RegexpList {
	sort.Slice(initialValues, func(i, j int) bool {
		return len(initialValues[i]) < len(initialValues[j])
	})
	list := RegexpList(initialValues)
	return &list
}
// Add inserts val into the list (duplicates are ignored) and re-sorts the
// entries by ascending length.
func (r *RegexpList) Add(val string) {
	for _, existing := range *r {
		if existing == val {
			return
		}
	}
	updated := append(*r, val)
	sort.Slice(updated, func(i, j int) bool {
		return len(updated[i]) < len(updated[j])
	})
	*r = updated
}
// Remove deletes every occurrence of val from the list. The remaining
// entries keep their existing (length-sorted) order; a fresh backing slice
// is allocated so previously returned views are not mutated.
func (r *RegexpList) Remove(val string) {
	kept := make([]string, 0, len(*r))
	for _, item := range *r {
		if item != val {
			kept = append(kept, item)
		}
	}
	*r = kept
}
// Get returns the entries ordered by ascending string length. The returned
// slice is the list's backing storage, not a copy.
func (r *RegexpList) Get() []string {
	return *r
}
// Len returns the number of entries in the list.
func (r *RegexpList) Len() int {
	return len(*r)
}
// refCount maps a key to its reference count. Counts are managed via
// Increment and Decrement; a key is removed once its count drops to zero.
type refCount map[string]int
// Increment adds one to the count stored under key and returns the new count.
func (r refCount) Increment(key string) int {
	next := r[key] + 1
	r[key] = next
	return next
}
// Decrement subtracts one from the count stored under key and returns the
// remaining count. When the count reaches zero the key is deleted entirely.
func (r refCount) Decrement(key string) int {
	if r[key] <= 1 {
		delete(r, key)
		return 0
	}
	r[key]--
	return r[key]
}
// Keys returns the keys currently held in the map, in no particular order.
func (r refCount) Keys() []string {
	keys := make([]string, 0, len(r))
	for key := range r {
		keys = append(keys, key)
	}
	return keys
}
// // Len returns the len of the inner map
// func (r refCount) Len() int {
// return len(r)
// }
// RegexpMap is a map-like type that allows lookups to match regexp keys. These
// keys are managed internally as strings, and are uniqued by this
// representation (and not recompiled on repeat inserts).
// Stored values are strings and are returned as-is, with the exception that
// repeat inserts of the same value are de-duped.
// Note: RegexpMap is not thread-safe and managing concurrent access is the
// responsibility of the callers.
type RegexpMap struct {
	// lookupValues maps a lookupValue to the RegexpList of pattern strings
	// that ONLY affect that lookupValue.
	lookupValues map[string]*RegexpList
	// stringToRegExp maps a regular-expression string to its compiled form,
	// serving as a compile cache.
	stringToRegExp map[string]*regexp.Regexp
	// regexRefCount maps a regular-expression string to the lookupValues
	// that use it. Duplicate Adds are supported by keeping a reference count
	// per lookupValue, maintained by Add and Remove.
	regexRefCount map[string]refCount
}
// NewRegexpMap returns a RegexpMap with all three internal maps initialized
// and ready for use.
func NewRegexpMap() *RegexpMap {
	m := &RegexpMap{}
	m.lookupValues = make(map[string]*RegexpList)
	m.stringToRegExp = make(map[string]*regexp.Regexp)
	m.regexRefCount = make(map[string]refCount)
	return m
}
// Add associates a Regular expression to a lookupValue that will be used in
// the lookup functions. It returns an error, and no data is saved, if the
// regexp does not compile correctly.
//
// Three structures are kept in sync: the compiled-pattern cache
// (stringToRegExp), the per-value pattern list (lookupValues), and the
// per-pattern reference counts (regexRefCount). Repeat Adds of the same
// (reStr, lookupValue) pair only bump the reference count.
func (m *RegexpMap) Add(reStr string, lookupValue string) error {
	// Compile and cache the pattern on first use; compilation failure aborts
	// before any state is touched.
	_, exists := m.stringToRegExp[reStr]
	if !exists {
		rule, err := regexp.Compile(reStr)
		if err != nil {
			return err
		}
		m.stringToRegExp[reStr] = rule
	}
	// Record the pattern under the lookupValue (RegexpList de-dupes).
	val, exists := m.lookupValues[lookupValue]
	if !exists {
		val = NewRegexpList()
		m.lookupValues[lookupValue] = val
	}
	val.Add(reStr)
	// Track how many times this (pattern, value) pair has been added.
	lookupCount, exists := m.regexRefCount[reStr]
	if !exists {
		lookupCount = refCount{}
		m.regexRefCount[reStr] = lookupCount
	}
	lookupCount.Increment(lookupValue)
	return nil
}
// LookupValues returns every lookupValue, inserted via Add, whose associated
// regular expression matches lookupKey. The result is de-duplicated and
// sorted.
func (m *RegexpMap) LookupValues(lookupKey string) []string {
	var matches []string
	for reStr, re := range m.stringToRegExp {
		if !re.MatchString(lookupKey) {
			continue
		}
		if counts, ok := m.regexRefCount[reStr]; ok {
			matches = append(matches, counts.Keys()...)
		}
	}
	return keepUniqueStrings(matches)
}
// LookupContainsValue reports whether expectedValue was inserted (via Add)
// with at least one regular expression that matches lookupKey.
func (m *RegexpMap) LookupContainsValue(lookupKey, expectedValue string) bool {
	list, ok := m.lookupValues[expectedValue]
	if !ok {
		return false
	}
	for _, reStr := range list.Get() {
		if re := m.stringToRegExp[reStr]; re != nil && re.MatchString(lookupKey) {
			return true
		}
	}
	return false
}
// Remove dissociates lookupValue from Lookups that match reStr. When no
// lookupValues remain for reStr the internal regexp is deleted (later Adds
// will recompile it). It returns true only when the pattern was fully
// removed from the map.
func (m *RegexpMap) Remove(reStr, lookupValue string) (deleted bool) {
	lookupRelation, exists := m.regexRefCount[reStr]
	if !exists {
		return false
	}
	// Decrement deletes lookupValue's entry from the refcount map when its
	// count reaches zero.
	if lookupRelation.Decrement(lookupValue) > 0 {
		return false
	}
	// Making sure that no other lookupValues for the same reStr are in place.
	// NOTE(review): when other values remain, this returns without calling
	// val.Remove(reStr), so lookupValues[lookupValue] still lists the
	// pattern even though its refcount entry is gone — confirm whether
	// LookupContainsValue should still match in that state.
	if len(lookupRelation) > 0 {
		return false
	}
	val, exists := m.lookupValues[lookupValue]
	if exists {
		val.Remove(reStr)
	}
	delete(m.stringToRegExp, reStr)
	delete(m.regexRefCount, reStr)
	return true
}
// GetPrecompiledRegexp returns the compiled regexp for reStr, or nil when
// reStr is not in the map. This is a utility to avoid recompiling regexps
// repeatedly; the RegexpMap keeps the refcount for us.
func (m *RegexpMap) GetPrecompiledRegexp(reStr string) *regexp.Regexp {
	return m.stringToRegExp[reStr]
}
// keepUniqueStrings sorts s in place and returns a de-duplicated view of it.
// The output is sorted. The input slice's backing array is reused, so
// callers must not rely on the original contents of s after the call.
func keepUniqueStrings(s []string) []string {
	sort.Strings(s)
	out := s[:0] // reuse the backing array: len==0 but cap==cap(s)
	for _, str := range s {
		// Keep an element only when it differs from the last element kept;
		// writes always land at or before the read position, so this is safe.
		if len(out) == 0 || out[len(out)-1] != str {
			out = append(out, str)
		}
	}
	return out
}
package syntax
import (
"github.com/strict-lang/sdk/pkg/compiler/grammar/token"
"github.com/strict-lang/sdk/pkg/compiler/grammar/tree"
)
// parseConditionalStatement parses a conditional statement and its optional else-clause.
//
// Grammar shape: `if <condition> <end-of-statement> <block>` optionally
// followed by an else clause. beginStructure opens region tracking for the
// node; the matching completeStructure happens in parseElseClauseIfPresent,
// so statement order here is significant.
func (parsing *Parsing) parseConditionalStatement() *tree.ConditionalStatement {
	parsing.beginStructure(tree.ConditionalStatementNodeKind)
	parsing.skipKeyword(token.IfKeyword)
	condition := parsing.parseConditionalExpression()
	parsing.skipEndOfStatement()
	consequence := parsing.parseStatementBlock()
	return parsing.parseElseClauseIfPresent(condition, consequence)
}
// parseElseClauseIfPresent completes a conditional statement: when the next
// token is the `else` keyword it delegates to parse the alternative branch,
// otherwise it builds the statement from the already-parsed condition and
// consequence. In both paths completeStructure closes the structure opened
// by parseConditionalStatement.
func (parsing *Parsing) parseElseClauseIfPresent(
	condition tree.Expression,
	consequence *tree.StatementBlock) *tree.ConditionalStatement {
	if token.HasKeywordValue(parsing.token(), token.ElseKeyword) {
		return parsing.parseConditionalStatementWithAlternative(
			condition, consequence)
	}
	return &tree.ConditionalStatement{
		Condition:   condition,
		Consequence: consequence,
		Region:      parsing.completeStructure(tree.ConditionalStatementNodeKind),
	}
}
// parseConditionalStatementWithAlternative parses the else branch of a
// conditional statement. The current token is the `else` keyword, which is
// consumed (advance) before parsing the alternative, and the region opened
// by parseConditionalStatement is completed here.
func (parsing *Parsing) parseConditionalStatementWithAlternative(
	condition tree.Expression,
	consequence *tree.StatementBlock) *tree.ConditionalStatement {
	parsing.advance()
	alternative := parsing.parseElseIfOrBlock()
	return &tree.ConditionalStatement{
		Condition:   condition,
		Consequence: consequence,
		Alternative: alternative,
		Region:      parsing.completeStructure(tree.ConditionalStatementNodeKind),
	}
}
// parseElseIfOrBlock parses what follows an `else` keyword: either a chained
// `else if`, whose conditional statement is wrapped in a single-statement
// block sharing that statement's region, or a plain statement block.
func (parsing *Parsing) parseElseIfOrBlock() *tree.StatementBlock {
	if token.HasKeywordValue(parsing.token(), token.IfKeyword) {
		statement := parsing.parseConditionalStatement()
		return &tree.StatementBlock{
			Children: []tree.Statement{statement},
			Region:   statement.Region,
		}
	}
	parsing.skipEndOfStatement()
	return parsing.parseStatementBlock()
}
// parseLoopStatement parses a for-each loop of the shape
// `for <field> in <sequence> <end-of-statement> <block>`.
func (parsing *Parsing) parseLoopStatement() tree.Node {
	parsing.beginStructure(tree.ForEachLoopStatementNodeKind)
	parsing.skipKeyword(token.ForKeyword)
	field := parsing.parseIdentifier()
	parsing.skipKeyword(token.InKeyword)
	value := parsing.parseExpression()
	parsing.skipEndOfStatement()
	body := parsing.parseStatementBlock()
	return &tree.ForEachLoopStatement{
		Field:    field,
		Sequence: value,
		Body:     body,
		Region:   parsing.completeStructure(tree.ForEachLoopStatementNodeKind),
	}
}
// parseYieldStatement parses a yield statement of the shape
// `yield <expression>`, terminated by an end of statement.
func (parsing *Parsing) parseYieldStatement() *tree.YieldStatement {
	parsing.beginStructure(tree.YieldStatementNodeKind)
	parsing.skipKeyword(token.YieldKeyword)
	rightHandSide := parsing.parseExpression()
	parsing.skipEndOfStatement()
	return &tree.YieldStatement{
		Value:  rightHandSide,
		Region: parsing.completeStructure(tree.YieldStatementNodeKind),
	}
}
// parseBreakStatement parses a `break` keyword terminated by an end of
// statement and records its source region.
func (parsing *Parsing) parseBreakStatement() *tree.BreakStatement {
	parsing.beginStructure(tree.BreakStatementNodeKind)
	parsing.skipKeyword(token.BreakKeyword)
	parsing.skipEndOfStatement()
	region := parsing.completeStructure(tree.BreakStatementNodeKind)
	return &tree.BreakStatement{Region: region}
}
// parseAssertStatement parses an assert statement of the shape
// `assert <expression>`, terminated by an end of statement.
func (parsing *Parsing) parseAssertStatement() tree.Node {
	parsing.beginStructure(tree.AssertStatementNodeKind)
	parsing.skipKeyword(token.AssertKeyword)
	expression := parsing.parseExpression()
	parsing.skipEndOfStatement()
	return &tree.AssertStatement{
		Region:     parsing.completeStructure(tree.AssertStatementNodeKind),
		Expression: expression,
	}
}
func (parsing *Parsing) parseReturnStatement() *tree.ReturnStatement {
parsing.beginStructure(tree.ReturnStatementNodeKind)
parsing.skipKeyword(token.ReturnKeyword)
defer parsing.skipEndOfStatement()
if token.IsEndOfStatementToken(parsing.token()) {
parsing.advance()
return &tree.ReturnStatement{
Region: parsing.completeStructure(tree.ReturnStatementNodeKind),
}
}
rightHandSide := parsing.parseExpression()
return &tree.ReturnStatement{
Value: rightHandSide,
Region: parsing.completeStructure(tree.ReturnStatementNodeKind),
}
} | pkg/compiler/grammar/syntax/statement_control.go | 0.660172 | 0.44089 | statement_control.go | starcoder |
package pattern
import (
"fmt"
"math"
)
// Tessellation generator pattern.
//
// Draws a 3.4.6.4 semi-regular tessellation: the tile geometry is derived
// from a single seed-chosen side length, the tile is emitted once as a
// reusable SVG <pattern> definition, and the full canvas is then covered by
// a rectangle filled with that pattern.
func (p Pattern) Tessellation() {
	// 3.4.6.4 semi-regular tessellation
	sideLen := p.reMap(p.seedToInt(0, 1), 0, 15, 10, 35) // was dMin: 5, dMax: 40
	// Derived tile geometry: hexagon bounding box, equilateral-triangle
	// height, and the overall repeating-tile dimensions.
	hexWidth := sideLen * 2
	hexHeight := sideLen * math.Sqrt(3)
	triangleHeight := sideLen / 2 * math.Sqrt(3)
	tileWidth := sideLen*3 + triangleHeight*2
	tileHeight := (hexHeight * 2) + (sideLen * 2)
	// create a tiling pattern
	p.Svg.Defs()
	p.Svg.Pattern("pattern", 0.0, 0.0, tileWidth, tileHeight, true)
	p.Svg.Group()
	p.buildTessellationPattern(sideLen, hexWidth, hexHeight, tileWidth, tileHeight, triangleHeight)
	p.Svg.GroupClose()
	p.Svg.PatternClose()
	p.Svg.DefsClose()
	p.Svg.Rect(0, 0, p.Width, p.Height, `fill="url(#pattern)"`)
}
// buildTessellationPattern emits the shapes making up one repeating tile of
// the 3.4.6.4 tessellation. Twenty seed digits (index 0..19) each choose the
// fill styling for one positional group of shapes — corner squares, edge
// triangles, rotated squares, etc. — while the geometry itself is fixed by
// the supplied tile dimensions.
func (p Pattern) buildTessellationPattern(sideLen, hexWidth, hexHeight, tileWidth, tileHeight, triangleHeight float64) {
	// build rotated triangle shape points
	triangle := [][2]float64{{0, 0}, {triangleHeight, sideLen / 2}, {0, sideLen}, {0, 0}}
	var transforms []string
	for index := 0; index <= 19; index++ {
		// One seed digit per group selects fill colour and opacity.
		val := p.seedToInt(index, 1)
		styles := p.tessellationStyles(val, "")
		switch index {
		case 0: // all 4 corners
			p.Svg.Rect(-sideLen/2, -sideLen/2, sideLen, sideLen, styles...)
			p.Svg.Rect(tileWidth-sideLen/2, -sideLen/2, sideLen, sideLen, styles...)
			p.Svg.Rect(-sideLen/2, tileHeight-sideLen/2, sideLen, sideLen, styles...)
			p.Svg.Rect(tileWidth-sideLen/2, tileHeight-sideLen/2, sideLen, sideLen, styles...)
		case 1: // center / top square
			p.Svg.Rect(hexWidth/2+triangleHeight, hexHeight/2, sideLen, sideLen, styles...)
		case 2: // side squares
			p.Svg.Rect(-sideLen/2, tileHeight/2-sideLen/2, sideLen, sideLen, styles...)
			p.Svg.Rect(tileWidth-sideLen/2, tileHeight/2-sideLen/2, sideLen, sideLen, styles...)
		case 3: // center / bottom square
			p.Svg.Rect(hexWidth/2+triangleHeight, hexHeight*1.5+sideLen, sideLen, sideLen, styles...)
		case 4: // left top / bottom triangle
			transforms = []string{
				p.Svg.Translate(sideLen/2, -sideLen/2),
				p.Svg.Rotate(0.0, sideLen/2, triangleHeight/2),
			}
			p.Svg.Polyline(triangle, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
			// NOTE(review): "tileHeight- -sideLen/2" evaluates to
			// tileHeight+sideLen/2; written oddly, but it mirrors case 5.
			transforms = []string{
				p.Svg.Translate(sideLen/2, tileHeight- -sideLen/2),
				p.Svg.Rotate(0.0, sideLen/2, triangleHeight/2),
				p.Svg.Scale(1, -1),
			}
			p.Svg.Polyline(triangle, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 5: // right top / bottom triangle
			transforms = []string{
				p.Svg.Translate(tileWidth-sideLen/2, -sideLen/2),
				p.Svg.Rotate(0.0, sideLen/2, triangleHeight/2),
				p.Svg.Scale(-1, 1),
			}
			p.Svg.Polyline(triangle, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
			transforms = []string{
				p.Svg.Translate(tileWidth-sideLen/2, tileHeight+sideLen/2),
				p.Svg.Rotate(0.0, sideLen/2, triangleHeight/2),
				p.Svg.Scale(-1, -1),
			}
			p.Svg.Polyline(triangle, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 6: // center / top / right triangle
			p.Svg.Polyline(triangle, p.tessellationStyles(val, p.Svg.Transform(p.Svg.Translate(tileWidth/2+sideLen/2, hexHeight/2)))...)
		case 7: // center / top / left triangle
			transforms = []string{
				p.Svg.Translate(tileWidth-tileWidth/2-sideLen/2, hexHeight/2),
				p.Svg.Scale(-1, 1),
			}
			p.Svg.Polyline(triangle, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 8: // center / bottom / right triangle
			transforms = []string{
				p.Svg.Translate(tileWidth/2+sideLen/2, tileHeight-hexHeight/2),
				p.Svg.Scale(1, -1),
			}
			p.Svg.Polyline(triangle, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 9: // center / bottom / left triangle
			transforms = []string{
				p.Svg.Translate(tileWidth-tileWidth/2-sideLen/2, tileHeight-hexHeight/2),
				p.Svg.Scale(-1, -1),
			}
			p.Svg.Polyline(triangle, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 10: // left / middle triangle
			p.Svg.Polyline(triangle, p.tessellationStyles(val, p.Svg.Transform(p.Svg.Translate(sideLen/2, tileHeight/2-sideLen/2)))...)
		case 11: // right / middle triangle
			transforms = []string{
				p.Svg.Translate(tileWidth-sideLen/2, tileHeight/2-sideLen/2),
				p.Svg.Scale(-1, 1),
			}
			p.Svg.Polyline(triangle, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 12: // left / top square
			transforms = []string{
				p.Svg.Translate(sideLen/2, sideLen/2),
				p.Svg.Rotate(-30.0, 0.0, 0.0),
			}
			p.Svg.Rect(0.0, 0.0, sideLen, sideLen, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 13: // right / top square
			transforms = []string{
				p.Svg.Scale(-1, 1),
				p.Svg.Translate(-tileWidth+sideLen/2, sideLen/2),
				p.Svg.Rotate(-30.0, 0.0, 0.0),
			}
			p.Svg.Rect(0.0, 0.0, sideLen, sideLen, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 14: // left / center-top square
			transforms = []string{
				p.Svg.Translate(sideLen/2, tileHeight/2-sideLen/2-sideLen),
				p.Svg.Rotate(30.0, 0.0, sideLen),
			}
			p.Svg.Rect(0.0, 0.0, sideLen, sideLen, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 15: // right / center-top square
			transforms = []string{
				p.Svg.Scale(-1, 1),
				p.Svg.Translate(-tileWidth+sideLen/2, tileHeight/2-sideLen/2-sideLen),
				p.Svg.Rotate(30.0, 0.0, sideLen),
			}
			p.Svg.Rect(0.0, 0.0, sideLen, sideLen, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 16: // left / center-top square
			transforms = []string{
				p.Svg.Scale(1, -1),
				p.Svg.Translate(sideLen/2, -tileHeight+tileHeight/2-sideLen/2-sideLen),
				p.Svg.Rotate(30.0, 0.0, sideLen),
			}
			p.Svg.Rect(0.0, 0.0, sideLen, sideLen, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 17: // right / center-bottom square
			transforms = []string{
				p.Svg.Scale(-1, -1),
				p.Svg.Translate(-tileWidth+sideLen/2, -tileHeight+tileHeight/2-sideLen/2-sideLen),
				p.Svg.Rotate(30.0, 0.0, sideLen),
			}
			p.Svg.Rect(0.0, 0.0, sideLen, sideLen, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 18: // left / bottom square
			transforms = []string{
				p.Svg.Scale(1, -1),
				p.Svg.Translate(sideLen/2, -tileHeight+sideLen/2),
				p.Svg.Rotate(-30.0, 0.0, 0.0),
			}
			p.Svg.Rect(0.0, 0.0, sideLen, sideLen, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		case 19: // right / bottom square
			transforms = []string{
				p.Svg.Scale(-1, -1),
				p.Svg.Translate(-tileWidth+sideLen/2, -tileHeight+sideLen/2),
				p.Svg.Rotate(-30.0, 0.0, 0.0),
			}
			p.Svg.Rect(0.0, 0.0, sideLen, sideLen, p.tessellationStyles(val, p.Svg.Transform(transforms...))...)
		}
	}
}
func (p Pattern) tessellationStyles(colourValue float64, style string) []string {
var styles []string
styles = append(styles, fmt.Sprintf(`stroke="%s"`, p.Styles.StrokeColour))
styles = append(styles, fmt.Sprintf(`stroke-opacity="%.2f"`, p.Styles.StrokeOpacity))
styles = append(styles, fmt.Sprintf(`fill="%s"`, p.fillColour(int(colourValue))))
styles = append(styles, fmt.Sprintf(`fill-opacity="%f"`, p.opacity(colourValue)))
styles = append(styles, fmt.Sprintf(`stroke-width="%d"`, 1))
if style != "" {
styles = append(styles, style)
}
return styles
} | geopattern/pattern/tessellation.go | 0.617167 | 0.425068 | tessellation.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.